Valgrind support requires a fork, which I've published at https://gitlab.winehq.org/rbernon/valgrind. The fork implements loading DWARF debug info from PE files, replacing the old and broken upstream PDB support. I tried to upstream these changes a long time ago but didn't receive any feedback.
I think we could consider keeping a fork, which I'm happy to maintain, as the changes aren't too large. We may also want to investigate adding 32-on-64 support, which would require a few more changes (to VEX specifically, because its amd64 guest doesn't support segment register manipulation).
Not all of the changes here are related to Valgrind; I'll create separate MRs for those that make sense independently of Valgrind / GDB.
Also included is a suppression file to silence some annoying false positives, many of which come from the cross-stack accesses during syscalls that confuse Valgrind's stack heuristics. One can try this out with something like:
`WINELOADERNOEXEC=1 valgrind --suppressions=tools/valgrind.supp wine64/loader/wine64 wine64/programs/winecfg/winecfg.exe`
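The file uses Valgrind's standard suppression format. As a rough sketch, an entry for one of these false positives could look like the following (the suppression name and frame patterns are made up for illustration, not copied from tools/valgrind.supp):

```
{
   wine-syscall-cross-stack-access
   Memcheck:Addr8
   fun:__wine_syscall_dispatcher
   obj:*ntdll.so
}
```

Frames match innermost first, and `...` may be used to match any number of intermediate frames.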
From: Rémi Bernon <rbernon@codeweavers.com>
Using a dedicated exit jmpbuf, removing the need for the per-architecture signal_exit_thread assembly routines.
Wine-Bug: https://bugs.winehq.org/show_bug.cgi?id=52213
---
 dlls/ntdll/unix/loader.c        |  1 +
 dlls/ntdll/unix/server.c        |  2 +-
 dlls/ntdll/unix/signal_arm.c    | 13 -------------
 dlls/ntdll/unix/signal_arm64.c  | 12 ------------
 dlls/ntdll/unix/signal_i386.c   | 25 ------------------------
 dlls/ntdll/unix/signal_x86_64.c | 21 --------------------
 dlls/ntdll/unix/thread.c        | 34 +++++++++++++++++++++++++--------
 dlls/ntdll/unix/unix_private.h  |  4 +++-
 8 files changed, 31 insertions(+), 81 deletions(-)
diff --git a/dlls/ntdll/unix/loader.c b/dlls/ntdll/unix/loader.c
index b78efddee1f..500c9884121 100644
--- a/dlls/ntdll/unix/loader.c
+++ b/dlls/ntdll/unix/loader.c
@@ -2509,4 +2509,5 @@ void __wine_main( int argc, char *argv[], char *envp[] )
     apple_main_thread();
 #endif
     start_main_thread();
+    exit( 0 );
 }
diff --git a/dlls/ntdll/unix/server.c b/dlls/ntdll/unix/server.c
index 77e8d5c7566..98afc42ad27 100644
--- a/dlls/ntdll/unix/server.c
+++ b/dlls/ntdll/unix/server.c
@@ -1581,7 +1581,7 @@ void server_init_process_done(void)
     SERVER_END_REQ;

     assert( !status );
-    signal_start_thread( entry, peb, suspend, NtCurrentTeb() );
+    thread_start( entry, peb, suspend, NtCurrentTeb() );
 }

diff --git a/dlls/ntdll/unix/signal_arm.c b/dlls/ntdll/unix/signal_arm.c
index 5d1478a1ff4..ab00a67d436 100644
--- a/dlls/ntdll/unix/signal_arm.c
+++ b/dlls/ntdll/unix/signal_arm.c
@@ -1183,19 +1183,6 @@ __ASM_GLOBAL_FUNC( signal_start_thread,
                    "bl " __ASM_NAME("call_init_thunk") )

-/***********************************************************************
- *           signal_exit_thread
- */
-__ASM_GLOBAL_FUNC( signal_exit_thread,
-                   "ldr r3, [r2, #0x1d4]\n\t"  /* arm_thread_data()->exit_frame */
-                   "mov ip, #0\n\t"
-                   "str ip, [r2, #0x1d4]\n\t"
-                   "cmp r3, ip\n\t"
-                   "it ne\n\t"
-                   "movne sp, r3\n\t"
-                   "blx r1" )
-
-
 /***********************************************************************
  *           __wine_syscall_dispatcher
  */
diff --git a/dlls/ntdll/unix/signal_arm64.c b/dlls/ntdll/unix/signal_arm64.c
index 4e552e0f10a..b24dbc2d8a4 100644
--- a/dlls/ntdll/unix/signal_arm64.c
+++ b/dlls/ntdll/unix/signal_arm64.c
@@ -1253,18 +1253,6 @@ __ASM_GLOBAL_FUNC( signal_start_thread,
                    "1:\tmov sp, x8\n\t"
                    "bl " __ASM_NAME("call_init_thunk") )

-/***********************************************************************
- *           signal_exit_thread
- */
-__ASM_GLOBAL_FUNC( signal_exit_thread,
-                   "stp x29, x30, [sp,#-16]!\n\t"
-                   "ldr x3, [x2, #0x2f0]\n\t"  /* arm64_thread_data()->exit_frame */
-                   "str xzr, [x2, #0x2f0]\n\t"
-                   "cbz x3, 1f\n\t"
-                   "mov sp, x3\n"
-                   "1:\tldp x29, x30, [sp], #16\n\t"
-                   "br x1" )
-
 /***********************************************************************
  *           __wine_syscall_dispatcher
diff --git a/dlls/ntdll/unix/signal_i386.c b/dlls/ntdll/unix/signal_i386.c
index 2dfce706394..0eecb5fb57e 100644
--- a/dlls/ntdll/unix/signal_i386.c
+++ b/dlls/ntdll/unix/signal_i386.c
@@ -2468,31 +2468,6 @@ __ASM_GLOBAL_FUNC( signal_start_thread,
                    "call " __ASM_NAME("call_init_thunk") )

-/***********************************************************************
- *           signal_exit_thread
- */
-__ASM_GLOBAL_FUNC( signal_exit_thread,
-                   "movl 8(%esp),%ecx\n\t"
-                   "movl 12(%esp),%esi\n\t"
-                   "xorl %edx,%edx\n\t"
-                   /* fetch exit frame */
-                   "xchgl %edx,0x1f4(%esi)\n\t"  /* x86_thread_data()->exit_frame */
-                   "testl %edx,%edx\n\t"
-                   "jnz 1f\n\t"
-                   "jmp *%ecx\n\t"
-                   /* switch to exit frame stack */
-                   "1:\tmovl 4(%esp),%eax\n\t"
-                   "movl %edx,%ebp\n\t"
-                   __ASM_CFI(".cfi_def_cfa %ebp,4\n\t")
-                   __ASM_CFI(".cfi_rel_offset %ebp,0\n\t")
-                   __ASM_CFI(".cfi_rel_offset %ebx,-4\n\t")
-                   __ASM_CFI(".cfi_rel_offset %esi,-8\n\t")
-                   __ASM_CFI(".cfi_rel_offset %edi,-12\n\t")
-                   "leal -20(%ebp),%esp\n\t"
-                   "pushl %eax\n\t"
-                   "call *%ecx" )
-
-
 /***********************************************************************
  *           __wine_syscall_dispatcher
  */
diff --git a/dlls/ntdll/unix/signal_x86_64.c b/dlls/ntdll/unix/signal_x86_64.c
index 88c517daecf..fc73925e819 100644
--- a/dlls/ntdll/unix/signal_x86_64.c
+++ b/dlls/ntdll/unix/signal_x86_64.c
@@ -3324,27 +3324,6 @@ __ASM_GLOBAL_FUNC( signal_start_thread,
                    "call " __ASM_NAME("call_init_thunk"))

-/***********************************************************************
- *           signal_exit_thread
- */
-__ASM_GLOBAL_FUNC( signal_exit_thread,
-                   /* fetch exit frame */
-                   "xorl %ecx,%ecx\n\t"
-                   "xchgq %rcx,0x320(%rdx)\n\t"  /* amd64_thread_data()->exit_frame */
-                   "testq %rcx,%rcx\n\t"
-                   "jnz 1f\n\t"
-                   "jmp *%rsi\n"
-                   /* switch to exit frame stack */
-                   "1:\tmovq %rcx,%rsp\n\t"
-                   __ASM_CFI(".cfi_adjust_cfa_offset 56\n\t")
-                   __ASM_CFI(".cfi_rel_offset %rbp,48\n\t")
-                   __ASM_CFI(".cfi_rel_offset %rbx,40\n\t")
-                   __ASM_CFI(".cfi_rel_offset %r12,32\n\t")
-                   __ASM_CFI(".cfi_rel_offset %r13,24\n\t")
-                   __ASM_CFI(".cfi_rel_offset %r14,16\n\t")
-                   __ASM_CFI(".cfi_rel_offset %r15,8\n\t")
-                   "call *%rsi" )
-
 /***********************************************************************
  *           __wine_syscall_dispatcher
  */
diff --git a/dlls/ntdll/unix/thread.c b/dlls/ntdll/unix/thread.c
index bb8c1c2936b..19647f7622b 100644
--- a/dlls/ntdll/unix/thread.c
+++ b/dlls/ntdll/unix/thread.c
@@ -1050,24 +1050,42 @@ static void pthread_exit_wrapper( int status )
     close( ntdll_get_thread_data()->wait_fd[1] );
     close( ntdll_get_thread_data()->reply_fd );
     close( ntdll_get_thread_data()->request_fd );
-    pthread_exit( UIntToPtr(status) );
+}
+
+void DECLSPEC_NORETURN thread_exit( int status, void (*func)(int), TEB *teb )
+{
+    struct ntdll_thread_data *thread_data = (struct ntdll_thread_data *)&teb->GdiTebBatch;
+
+    func(status);
+    __wine_longjmp( &thread_data->exit_buf, 1 );
 }

 /***********************************************************************
- *           start_thread
+ *           pthread_start_wrapper
  *
  * Startup routine for a newly created thread.
  */
-static void start_thread( TEB *teb )
+static void *pthread_start_wrapper( void *arg )
 {
+    TEB *teb = arg;
     struct ntdll_thread_data *thread_data = (struct ntdll_thread_data *)&teb->GdiTebBatch;
     BOOL suspend;

     thread_data->pthread_id = pthread_self();
     signal_init_thread( teb );
     server_init_thread( thread_data->start, &suspend );
-    signal_start_thread( thread_data->start, thread_data->param, suspend, teb );
+    thread_start( thread_data->start, thread_data->param, suspend, teb );
+    return NULL;
+}
+
+void thread_start( PRTL_THREAD_START_ROUTINE entry, void *arg,
+                   BOOL suspend, TEB *teb )
+{
+    struct ntdll_thread_data *thread_data = (struct ntdll_thread_data *)&teb->GdiTebBatch;
+
+    if (!__wine_setjmpex( &thread_data->exit_buf, NULL ))
+        signal_start_thread( entry, arg, suspend, teb );
 }

@@ -1345,7 +1363,7 @@ NTSTATUS WINAPI NtCreateThreadEx( HANDLE *handle, ACCESS_MASK access, OBJECT_ATT
     pthread_attr_setguardsize( &pthread_attr, 0 );
     pthread_attr_setscope( &pthread_attr, PTHREAD_SCOPE_SYSTEM ); /* force creating a kernel thread */
     InterlockedIncrement( &nb_threads );
-    if (pthread_create( &pthread_id, &pthread_attr, (void * (*)(void *))start_thread, teb ))
+    if (pthread_create( &pthread_id, &pthread_attr, pthread_start_wrapper, teb ))
     {
         InterlockedDecrement( &nb_threads );
         virtual_free_teb( teb );
@@ -1373,7 +1391,7 @@ void abort_thread( int status )
 {
     pthread_sigmask( SIG_BLOCK, &server_block_set, NULL );
     if (InterlockedDecrement( &nb_threads ) <= 0) abort_process( status );
-    signal_exit_thread( status, pthread_exit_wrapper, NtCurrentTeb() );
+    thread_exit( status, pthread_exit_wrapper, NtCurrentTeb() );
 }

@@ -1406,7 +1424,7 @@ static DECLSPEC_NORETURN void exit_thread( int status )
             virtual_free_teb( teb );
         }
     }
-    signal_exit_thread( status, pthread_exit_wrapper, NtCurrentTeb() );
+    thread_exit( status, pthread_exit_wrapper, NtCurrentTeb() );
 }

@@ -1416,7 +1434,7 @@ static DECLSPEC_NORETURN void exit_thread( int status )
 void exit_process( int status )
 {
     pthread_sigmask( SIG_BLOCK, &server_block_set, NULL );
-    signal_exit_thread( get_unix_exit_code( status ), process_exit_wrapper, NtCurrentTeb() );
+    thread_exit( get_unix_exit_code( status ), process_exit_wrapper, NtCurrentTeb() );
 }

diff --git a/dlls/ntdll/unix/unix_private.h b/dlls/ntdll/unix/unix_private.h
index 47f0f9c56a9..ad349207889 100644
--- a/dlls/ntdll/unix/unix_private.h
+++ b/dlls/ntdll/unix/unix_private.h
@@ -61,6 +61,7 @@ struct ntdll_thread_data
     PRTL_THREAD_START_ROUTINE start;   /* thread entry point */
     void                     *param;   /* thread entry point parameter */
     void                     *jmp_buf; /* setjmp buffer for exception handling */
+    __wine_jmp_buf            exit_buf; /* setjmp buffer for thread exit */
 };

 C_ASSERT( sizeof(struct ntdll_thread_data) <= sizeof(((TEB *)0)->GdiTebBatch) );
@@ -237,7 +238,8 @@ extern void signal_init_thread( TEB *teb ) DECLSPEC_HIDDEN;
 extern void signal_init_process(void) DECLSPEC_HIDDEN;
 extern void DECLSPEC_NORETURN signal_start_thread( PRTL_THREAD_START_ROUTINE entry, void *arg,
                                                    BOOL suspend, TEB *teb ) DECLSPEC_HIDDEN;
-extern void DECLSPEC_NORETURN signal_exit_thread( int status, void (*func)(int), TEB *teb ) DECLSPEC_HIDDEN;
+extern void thread_start( PRTL_THREAD_START_ROUTINE entry, void *arg,
+                          BOOL suspend, TEB *teb ) DECLSPEC_HIDDEN;
 extern SYSTEM_SERVICE_TABLE KeServiceDescriptorTable[4] DECLSPEC_HIDDEN;
 extern void __wine_syscall_dispatcher(void) DECLSPEC_HIDDEN;
 extern void WINAPI DECLSPEC_NORETURN __wine_syscall_dispatcher_return( void *frame, ULONG_PTR retval ) DECLSPEC_HIDDEN;
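As an aside, the control flow this patch introduces can be sketched with a small standalone C program. This is only an illustration of the pattern: the real code uses Wine's __wine_setjmpex/__wine_longjmp and keeps the buffer in the per-thread ntdll_thread_data, and the function names below are hypothetical stand-ins for the ones in the patch.

```c
#include <setjmp.h>
#include <stdio.h>

/* In the real code this buffer is per-thread (ntdll_thread_data.exit_buf). */
static jmp_buf exit_buf;

/* Stand-in for signal_start_thread() plus the thread body. */
static void run_thread(void)
{
    printf("thread body runs\n");
    /* Stand-in for thread_exit(): unwind back to the entry point
     * instead of switching stacks in hand-written assembly. */
    longjmp(exit_buf, 1);
}

/* Stand-in for pthread_start_wrapper()/thread_start(). */
static void thread_entry(void)
{
    if (!setjmp(exit_buf))
        run_thread();
    /* Control resumes here on thread exit; the pthread can now
     * return normally and let the runtime clean up. */
    printf("thread exits through the jmpbuf\n");
}

int main(void)
{
    thread_entry();
    return 0;
}
```

Exiting through an ordinary setjmp/longjmp pair is something unwinders and tools can follow, whereas the removed signal_exit_thread routines switched stacks behind their backs.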
From: Rémi Bernon <rbernon@codeweavers.com>
---
 dlls/ntdll/unix/signal_arm.c    | 14 +++++---------
 dlls/ntdll/unix/signal_arm64.c  | 15 +++++----------
 dlls/ntdll/unix/signal_i386.c   | 13 +++++--------
 dlls/ntdll/unix/signal_x86_64.c | 20 ++++++++------------
 4 files changed, 23 insertions(+), 39 deletions(-)
diff --git a/dlls/ntdll/unix/signal_arm.c b/dlls/ntdll/unix/signal_arm.c index ab00a67d436..d5905622d1c 100644 --- a/dlls/ntdll/unix/signal_arm.c +++ b/dlls/ntdll/unix/signal_arm.c @@ -202,13 +202,11 @@ C_ASSERT( sizeof( struct syscall_frame ) == 0x160);
struct arm_thread_data { - void *exit_frame; /* 1d4 exit frame pointer */ - struct syscall_frame *syscall_frame; /* 1d8 frame pointer on syscall entry */ + struct syscall_frame *syscall_frame; /* 1d4 frame pointer on syscall entry */ };
C_ASSERT( sizeof(struct arm_thread_data) <= sizeof(((struct ntdll_thread_data *)0)->cpu_data) ); -C_ASSERT( offsetof( TEB, GdiTebBatch ) + offsetof( struct arm_thread_data, exit_frame ) == 0x1d4 ); -C_ASSERT( offsetof( TEB, GdiTebBatch ) + offsetof( struct arm_thread_data, syscall_frame ) == 0x1d8 ); +C_ASSERT( offsetof( TEB, GdiTebBatch ) + offsetof( struct arm_thread_data, syscall_frame ) == 0x1d4 );
static inline struct arm_thread_data *arm_thread_data(void) { @@ -1172,13 +1170,11 @@ void DECLSPEC_HIDDEN call_init_thunk( LPTHREAD_START_ROUTINE entry, void *arg, B */ __ASM_GLOBAL_FUNC( signal_start_thread, "push {r4-r12,lr}\n\t" - /* store exit frame */ - "str sp, [r3, #0x1d4]\n\t" /* arm_thread_data()->exit_frame */ /* set syscall frame */ - "ldr r6, [r3, #0x1d8]\n\t" /* arm_thread_data()->syscall_frame */ + "ldr r6, [r3, #0x1d4]\n\t" /* arm_thread_data()->syscall_frame */ "cbnz r6, 1f\n\t" "sub r6, sp, #0x160\n\t" /* sizeof(struct syscall_frame) */ - "str r6, [r3, #0x1d8]\n\t" /* arm_thread_data()->syscall_frame */ + "str r6, [r3, #0x1d4]\n\t" /* arm_thread_data()->syscall_frame */ "1:\tmov sp, r6\n\t" "bl " __ASM_NAME("call_init_thunk") )
@@ -1188,7 +1184,7 @@ __ASM_GLOBAL_FUNC( signal_start_thread, */ __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher, "mrc p15, 0, r1, c13, c0, 2\n\t" /* NtCurrentTeb() */ - "ldr r1, [r1, #0x1d8]\n\t" /* arm_thread_data()->syscall_frame */ + "ldr r1, [r1, #0x1d4]\n\t" /* arm_thread_data()->syscall_frame */ "add r0, r1, #0x10\n\t" "stm r0, {r4-r12,lr}\n\t" "add r2, sp, #0x10\n\t" diff --git a/dlls/ntdll/unix/signal_arm64.c b/dlls/ntdll/unix/signal_arm64.c index b24dbc2d8a4..a9c3a1b3844 100644 --- a/dlls/ntdll/unix/signal_arm64.c +++ b/dlls/ntdll/unix/signal_arm64.c @@ -150,13 +150,11 @@ C_ASSERT( sizeof( struct syscall_frame ) == 0x330 );
struct arm64_thread_data { - void *exit_frame; /* 02f0 exit frame pointer */ - struct syscall_frame *syscall_frame; /* 02f8 frame pointer on syscall entry */ + struct syscall_frame *syscall_frame; /* 02f0 frame pointer on syscall entry */ };
C_ASSERT( sizeof(struct arm64_thread_data) <= sizeof(((struct ntdll_thread_data *)0)->cpu_data) ); -C_ASSERT( offsetof( TEB, GdiTebBatch ) + offsetof( struct arm64_thread_data, exit_frame ) == 0x2f0 ); -C_ASSERT( offsetof( TEB, GdiTebBatch ) + offsetof( struct arm64_thread_data, syscall_frame ) == 0x2f8 ); +C_ASSERT( offsetof( TEB, GdiTebBatch ) + offsetof( struct arm64_thread_data, syscall_frame ) == 0x2f0 );
static inline struct arm64_thread_data *arm64_thread_data(void) { @@ -1242,14 +1240,11 @@ void DECLSPEC_HIDDEN call_init_thunk( LPTHREAD_START_ROUTINE entry, void *arg, B */ __ASM_GLOBAL_FUNC( signal_start_thread, "stp x29, x30, [sp,#-16]!\n\t" - /* store exit frame */ - "mov x29, sp\n\t" - "str x29, [x3, #0x2f0]\n\t" /* arm64_thread_data()->exit_frame */ /* set syscall frame */ - "ldr x8, [x3, #0x2f8]\n\t" /* arm64_thread_data()->syscall_frame */ + "ldr x8, [x3, #0x2f0]\n\t" /* arm64_thread_data()->syscall_frame */ "cbnz x8, 1f\n\t" "sub x8, sp, #0x330\n\t" /* sizeof(struct syscall_frame) */ - "str x8, [x3, #0x2f8]\n\t" /* arm64_thread_data()->syscall_frame */ + "str x8, [x3, #0x2f0]\n\t" /* arm64_thread_data()->syscall_frame */ "1:\tmov sp, x8\n\t" "bl " __ASM_NAME("call_init_thunk") )
@@ -1274,7 +1269,7 @@ __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher, "ldr x30, [sp, #80]\n\t" "ldp x0, x1, [sp], #96\n\t"
- "ldr x10, [x18, #0x2f8]\n\t" /* arm64_thread_data()->syscall_frame */ + "ldr x10, [x18, #0x2f0]\n\t" /* arm64_thread_data()->syscall_frame */ "stp x18, x19, [x10, #0x90]\n\t" "stp x20, x21, [x10, #0xa0]\n\t" "stp x22, x23, [x10, #0xb0]\n\t" diff --git a/dlls/ntdll/unix/signal_i386.c b/dlls/ntdll/unix/signal_i386.c index 0eecb5fb57e..e07d754c612 100644 --- a/dlls/ntdll/unix/signal_i386.c +++ b/dlls/ntdll/unix/signal_i386.c @@ -477,14 +477,12 @@ struct x86_thread_data DWORD dr3; /* 1e8 */ DWORD dr6; /* 1ec */ DWORD dr7; /* 1f0 */ - void *exit_frame; /* 1f4 exit frame pointer */ - struct syscall_frame *syscall_frame; /* 1f8 frame pointer on syscall entry */ + struct syscall_frame *syscall_frame; /* 1f4 frame pointer on syscall entry */ };
C_ASSERT( sizeof(struct x86_thread_data) <= sizeof(((struct ntdll_thread_data *)0)->cpu_data) ); C_ASSERT( offsetof( TEB, GdiTebBatch ) + offsetof( struct x86_thread_data, gs ) == 0x1d8 ); -C_ASSERT( offsetof( TEB, GdiTebBatch ) + offsetof( struct x86_thread_data, exit_frame ) == 0x1f4 ); -C_ASSERT( offsetof( TEB, GdiTebBatch ) + offsetof( struct x86_thread_data, syscall_frame ) == 0x1f8 ); +C_ASSERT( offsetof( TEB, GdiTebBatch ) + offsetof( struct x86_thread_data, syscall_frame ) == 0x1f4 );
/* flags to control the behavior of the syscall dispatcher */ #define SYSCALL_HAVE_XSAVE 1 @@ -2452,14 +2450,13 @@ __ASM_GLOBAL_FUNC( signal_start_thread, __ASM_CFI(".cfi_rel_offset %edi,-12\n\t") /* store exit frame */ "movl 20(%ebp),%ecx\n\t" /* teb */ - "movl %ebp,0x1f4(%ecx)\n\t" /* x86_thread_data()->exit_frame */ /* set syscall frame */ - "movl 0x1f8(%ecx),%eax\n\t" /* x86_thread_data()->syscall_frame */ + "movl 0x1f4(%ecx),%eax\n\t" /* x86_thread_data()->syscall_frame */ "orl %eax,%eax\n\t" "jnz 1f\n\t" "leal -0x380(%esp),%eax\n\t" /* sizeof(struct syscall_frame) */ "andl $~63,%eax\n\t" - "movl %eax,0x1f8(%ecx)\n" /* x86_thread_data()->syscall_frame */ + "movl %eax,0x1f4(%ecx)\n" /* x86_thread_data()->syscall_frame */ "1:\tmovl %eax,%esp\n\t" "pushl %ecx\n\t" /* teb */ "pushl 16(%ebp)\n\t" /* suspend */ @@ -2472,7 +2469,7 @@ __ASM_GLOBAL_FUNC( signal_start_thread, * __wine_syscall_dispatcher */ __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher, - "movl %fs:0x1f8,%ecx\n\t" /* x86_thread_data()->syscall_frame */ + "movl %fs:0x1f4,%ecx\n\t" /* x86_thread_data()->syscall_frame */ "movw $0,0x02(%ecx)\n\t" /* frame->restore_flags */ "popl 0x08(%ecx)\n\t" /* frame->eip */ "pushfl\n\t" diff --git a/dlls/ntdll/unix/signal_x86_64.c b/dlls/ntdll/unix/signal_x86_64.c index fc73925e819..48091d45a8f 100644 --- a/dlls/ntdll/unix/signal_x86_64.c +++ b/dlls/ntdll/unix/signal_x86_64.c @@ -418,16 +418,14 @@ struct amd64_thread_data DWORD_PTR dr3; /* 0308 */ DWORD_PTR dr6; /* 0310 */ DWORD_PTR dr7; /* 0318 */ - void *exit_frame; /* 0320 exit frame pointer */ - struct syscall_frame *syscall_frame; /* 0328 syscall frame pointer */ - void *pthread_teb; /* 0330 thread data for pthread */ - DWORD fs; /* 0338 WOW TEB selector */ + struct syscall_frame *syscall_frame; /* 0320 syscall frame pointer */ + void *pthread_teb; /* 0328 thread data for pthread */ + DWORD fs; /* 0330 WOW TEB selector */ };
C_ASSERT( sizeof(struct amd64_thread_data) <= sizeof(((struct ntdll_thread_data *)0)->cpu_data) ); -C_ASSERT( offsetof( TEB, GdiTebBatch ) + offsetof( struct amd64_thread_data, exit_frame ) == 0x320 ); -C_ASSERT( offsetof( TEB, GdiTebBatch ) + offsetof( struct amd64_thread_data, syscall_frame ) == 0x328 ); -C_ASSERT( offsetof( TEB, GdiTebBatch ) + offsetof( struct amd64_thread_data, pthread_teb ) == 0x330 ); +C_ASSERT( offsetof( TEB, GdiTebBatch ) + offsetof( struct amd64_thread_data, syscall_frame ) == 0x320 ); +C_ASSERT( offsetof( TEB, GdiTebBatch ) + offsetof( struct amd64_thread_data, pthread_teb ) == 0x328 );
static inline struct amd64_thread_data *amd64_thread_data(void) { @@ -3311,15 +3309,13 @@ __ASM_GLOBAL_FUNC( signal_start_thread, __ASM_CFI(".cfi_rel_offset %r14,16\n\t") "movq %r15,8(%rsp)\n\t" __ASM_CFI(".cfi_rel_offset %r15,8\n\t") - /* store exit frame */ - "movq %rsp,0x320(%rcx)\n\t" /* amd64_thread_data()->exit_frame */ /* set syscall frame */ - "movq 0x328(%rcx),%rax\n\t" /* amd64_thread_data()->syscall_frame */ + "movq 0x320(%rcx),%rax\n\t" /* amd64_thread_data()->syscall_frame */ "orq %rax,%rax\n\t" "jnz 1f\n\t" "leaq -0x400(%rsp),%rax\n\t" /* sizeof(struct syscall_frame) */ "andq $~63,%rax\n\t" - "movq %rax,0x328(%rcx)\n" /* amd64_thread_data()->syscall_frame */ + "movq %rax,0x320(%rcx)\n" /* amd64_thread_data()->syscall_frame */ "1:\tmovq %rax,%rsp\n\t" "call " __ASM_NAME("call_init_thunk"))
@@ -3329,7 +3325,7 @@ __ASM_GLOBAL_FUNC( signal_start_thread, */ __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher, "movq %gs:0x30,%rcx\n\t" - "movq 0x328(%rcx),%rcx\n\t" /* amd64_thread_data()->syscall_frame */ + "movq 0x320(%rcx),%rcx\n\t" /* amd64_thread_data()->syscall_frame */ "popq 0x70(%rcx)\n\t" /* frame->rip */ "pushfq\n\t" "popq 0x80(%rcx)\n\t"
From: Rémi Bernon <rbernon@codeweavers.com>
---
 dlls/ntdll/unix/signal_arm.c    |  1 -
 dlls/ntdll/unix/signal_arm64.c  |  1 -
 dlls/ntdll/unix/signal_i386.c   |  6 ------
 dlls/ntdll/unix/signal_x86_64.c | 16 ----------------
 4 files changed, 24 deletions(-)
diff --git a/dlls/ntdll/unix/signal_arm.c b/dlls/ntdll/unix/signal_arm.c index d5905622d1c..91b0445cc63 100644 --- a/dlls/ntdll/unix/signal_arm.c +++ b/dlls/ntdll/unix/signal_arm.c @@ -1169,7 +1169,6 @@ void DECLSPEC_HIDDEN call_init_thunk( LPTHREAD_START_ROUTINE entry, void *arg, B * signal_start_thread */ __ASM_GLOBAL_FUNC( signal_start_thread, - "push {r4-r12,lr}\n\t" /* set syscall frame */ "ldr r6, [r3, #0x1d4]\n\t" /* arm_thread_data()->syscall_frame */ "cbnz r6, 1f\n\t" diff --git a/dlls/ntdll/unix/signal_arm64.c b/dlls/ntdll/unix/signal_arm64.c index a9c3a1b3844..ac53586d621 100644 --- a/dlls/ntdll/unix/signal_arm64.c +++ b/dlls/ntdll/unix/signal_arm64.c @@ -1239,7 +1239,6 @@ void DECLSPEC_HIDDEN call_init_thunk( LPTHREAD_START_ROUTINE entry, void *arg, B * signal_start_thread */ __ASM_GLOBAL_FUNC( signal_start_thread, - "stp x29, x30, [sp,#-16]!\n\t" /* set syscall frame */ "ldr x8, [x3, #0x2f0]\n\t" /* arm64_thread_data()->syscall_frame */ "cbnz x8, 1f\n\t" diff --git a/dlls/ntdll/unix/signal_i386.c b/dlls/ntdll/unix/signal_i386.c index e07d754c612..9bcdf18a9a4 100644 --- a/dlls/ntdll/unix/signal_i386.c +++ b/dlls/ntdll/unix/signal_i386.c @@ -2442,12 +2442,6 @@ __ASM_GLOBAL_FUNC( signal_start_thread, __ASM_CFI(".cfi_rel_offset %ebp,0\n\t") "movl %esp,%ebp\n\t" __ASM_CFI(".cfi_def_cfa_register %ebp\n\t") - "pushl %ebx\n\t" - __ASM_CFI(".cfi_rel_offset %ebx,-4\n\t") - "pushl %esi\n\t" - __ASM_CFI(".cfi_rel_offset %esi,-8\n\t") - "pushl %edi\n\t" - __ASM_CFI(".cfi_rel_offset %edi,-12\n\t") /* store exit frame */ "movl 20(%ebp),%ecx\n\t" /* teb */ /* set syscall frame */ diff --git a/dlls/ntdll/unix/signal_x86_64.c b/dlls/ntdll/unix/signal_x86_64.c index 48091d45a8f..f14b4f96886 100644 --- a/dlls/ntdll/unix/signal_x86_64.c +++ b/dlls/ntdll/unix/signal_x86_64.c @@ -3293,22 +3293,6 @@ void DECLSPEC_HIDDEN call_init_thunk( LPTHREAD_START_ROUTINE entry, void *arg, B * signal_start_thread */ __ASM_GLOBAL_FUNC( signal_start_thread, - "subq $56,%rsp\n\t" - __ASM_SEH(".seh_stackalloc 56\n\t") - __ASM_SEH(".seh_endprologue\n\t") - __ASM_CFI(".cfi_adjust_cfa_offset 56\n\t") - "movq %rbp,48(%rsp)\n\t" - __ASM_CFI(".cfi_rel_offset %rbp,48\n\t") - "movq %rbx,40(%rsp)\n\t" - __ASM_CFI(".cfi_rel_offset %rbx,40\n\t") - "movq %r12,32(%rsp)\n\t" - __ASM_CFI(".cfi_rel_offset %r12,32\n\t") - "movq %r13,24(%rsp)\n\t" - __ASM_CFI(".cfi_rel_offset %r13,24\n\t") - "movq %r14,16(%rsp)\n\t" - __ASM_CFI(".cfi_rel_offset %r14,16\n\t") - "movq %r15,8(%rsp)\n\t" - __ASM_CFI(".cfi_rel_offset %r15,8\n\t") /* set syscall frame */ "movq 0x320(%rcx),%rax\n\t" /* amd64_thread_data()->syscall_frame */ "orq %rax,%rax\n\t"
From: Rémi Bernon <rbernon@codeweavers.com>
So that we don't unnecessarily overwrite the return address.
---
 dlls/ntdll/unix/signal_i386.c   | 4 ++--
 dlls/ntdll/unix/signal_x86_64.c | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/dlls/ntdll/unix/signal_i386.c b/dlls/ntdll/unix/signal_i386.c
index 9bcdf18a9a4..a7619c2d1d3 100644
--- a/dlls/ntdll/unix/signal_i386.c
+++ b/dlls/ntdll/unix/signal_i386.c
@@ -2465,9 +2465,9 @@ __ASM_GLOBAL_FUNC( signal_start_thread,
 __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher,
                    "movl %fs:0x1f4,%ecx\n\t"  /* x86_thread_data()->syscall_frame */
                    "movw $0,0x02(%ecx)\n\t"   /* frame->restore_flags */
-                   "popl 0x08(%ecx)\n\t"      /* frame->eip */
                    "pushfl\n\t"
-                   "popl 0x04(%ecx)\n\t"      /* frame->eflags */
+                   "popl 0x04(%ecx)\n"        /* frame->eflags */
+                   "popl 0x08(%ecx)\n\t"      /* frame->eip */
                    ".globl " __ASM_NAME("__wine_syscall_dispatcher_prolog_end") "\n"
                    __ASM_NAME("__wine_syscall_dispatcher_prolog_end") ":\n\t"
                    "movl %esp,0x0c(%ecx)\n\t" /* frame->esp */
diff --git a/dlls/ntdll/unix/signal_x86_64.c b/dlls/ntdll/unix/signal_x86_64.c
index f14b4f96886..adec25b711f 100644
--- a/dlls/ntdll/unix/signal_x86_64.c
+++ b/dlls/ntdll/unix/signal_x86_64.c
@@ -3310,9 +3310,9 @@ __ASM_GLOBAL_FUNC( signal_start_thread,
 __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher,
                    "movq %gs:0x30,%rcx\n\t"
                    "movq 0x320(%rcx),%rcx\n\t" /* amd64_thread_data()->syscall_frame */
-                   "popq 0x70(%rcx)\n\t"       /* frame->rip */
                    "pushfq\n\t"
                    "popq 0x80(%rcx)\n\t"
+                   "popq 0x70(%rcx)\n\t"       /* frame->rip */
                    "movl $0,0x94(%rcx)\n\t"    /* frame->restore_flags */
                    ".globl " __ASM_NAME("__wine_syscall_dispatcher_prolog_end") "\n"
                    __ASM_NAME("__wine_syscall_dispatcher_prolog_end") ":\n\t"
From: Rémi Bernon <rbernon@codeweavers.com>
When available. Otherwise, the output_cfi calls in winebuild (in the syscall thunks, for instance) are just no-ops.
---
 configure.ac | 1 +
 1 file changed, 1 insertion(+)
diff --git a/configure.ac b/configure.ac
index e11f3cfdb63..e738af8a248 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1975,6 +1975,7 @@ then
     DLLFLAGS="$DLLFLAGS -fasynchronous-unwind-tables"
     LDDLLFLAGS="$LDDLLFLAGS -fasynchronous-unwind-tables"
     UNIXDLLFLAGS="$UNIXDLLFLAGS -fasynchronous-unwind-tables"
+    CROSSLDFLAGS="$CROSSLDFLAGS -fasynchronous-unwind-tables"
 else
     if test "x$enable_win64" = "xyes"
     then
From: Rémi Bernon <rbernon@codeweavers.com>
To make sure gdb unwinds through it, and ignores the fact that the syscall frame is nested within its caller's frame on the thread stack.
---
 dlls/ntdll/unix/signal_arm.c    | 1 +
 dlls/ntdll/unix/signal_arm64.c  | 1 +
 dlls/ntdll/unix/signal_i386.c   | 1 +
 dlls/ntdll/unix/signal_x86_64.c | 1 +
 4 files changed, 4 insertions(+)
diff --git a/dlls/ntdll/unix/signal_arm.c b/dlls/ntdll/unix/signal_arm.c
index 91b0445cc63..61ab29d111e 100644
--- a/dlls/ntdll/unix/signal_arm.c
+++ b/dlls/ntdll/unix/signal_arm.c
@@ -1182,6 +1182,7 @@ __ASM_GLOBAL_FUNC( signal_start_thread,
  *           __wine_syscall_dispatcher
  */
 __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher,
+                   __ASM_CFI(".cfi_signal_frame\n\t")
                    "mrc p15, 0, r1, c13, c0, 2\n\t" /* NtCurrentTeb() */
                    "ldr r1, [r1, #0x1d4]\n\t"       /* arm_thread_data()->syscall_frame */
                    "add r0, r1, #0x10\n\t"
diff --git a/dlls/ntdll/unix/signal_arm64.c b/dlls/ntdll/unix/signal_arm64.c
index ac53586d621..68d63fefeb3 100644
--- a/dlls/ntdll/unix/signal_arm64.c
+++ b/dlls/ntdll/unix/signal_arm64.c
@@ -1252,6 +1252,7 @@ __ASM_GLOBAL_FUNC( signal_start_thread,
  *           __wine_syscall_dispatcher
  */
 __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher,
+                   __ASM_CFI(".cfi_signal_frame\n\t")
                    /* FIXME: use x18 directly instead */
                    "stp x0, x1, [sp, #-96]!\n\t"
                    "stp x2, x3, [sp, #16]\n\t"
diff --git a/dlls/ntdll/unix/signal_i386.c b/dlls/ntdll/unix/signal_i386.c
index a7619c2d1d3..0bc471c410a 100644
--- a/dlls/ntdll/unix/signal_i386.c
+++ b/dlls/ntdll/unix/signal_i386.c
@@ -2463,6 +2463,7 @@ __ASM_GLOBAL_FUNC( signal_start_thread,
  *           __wine_syscall_dispatcher
  */
 __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher,
+                   __ASM_CFI(".cfi_signal_frame\n\t")
                    "movl %fs:0x1f4,%ecx\n\t" /* x86_thread_data()->syscall_frame */
                    "movw $0,0x02(%ecx)\n\t"  /* frame->restore_flags */
                    "pushfl\n\t"
diff --git a/dlls/ntdll/unix/signal_x86_64.c b/dlls/ntdll/unix/signal_x86_64.c
index adec25b711f..2d910bba90a 100644
--- a/dlls/ntdll/unix/signal_x86_64.c
+++ b/dlls/ntdll/unix/signal_x86_64.c
@@ -3308,6 +3308,7 @@ __ASM_GLOBAL_FUNC( signal_start_thread,
  *           __wine_syscall_dispatcher
  */
 __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher,
+                   __ASM_CFI(".cfi_signal_frame\n\t")
                    "movq %gs:0x30,%rcx\n\t"
                    "movq 0x320(%rcx),%rcx\n\t" /* amd64_thread_data()->syscall_frame */
                    "pushfq\n\t"
From: Rémi Bernon <rbernon@codeweavers.com>
---
 dlls/ntdll/unix/signal_i386.c   | 15 ++++++++++
 dlls/ntdll/unix/signal_x86_64.c | 51 +++++++++++++++++++++++++++++++++
 2 files changed, 66 insertions(+)
diff --git a/dlls/ntdll/unix/signal_i386.c b/dlls/ntdll/unix/signal_i386.c index 0bc471c410a..d5dd77b43ff 100644 --- a/dlls/ntdll/unix/signal_i386.c +++ b/dlls/ntdll/unix/signal_i386.c @@ -2467,8 +2467,11 @@ __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher, "movl %fs:0x1f4,%ecx\n\t" /* x86_thread_data()->syscall_frame */ "movw $0,0x02(%ecx)\n\t" /* frame->restore_flags */ "pushfl\n\t" + __ASM_CFI(".cfi_adjust_cfa_offset 4\n\t") "popl 0x04(%ecx)\n" /* frame->eflags */ + __ASM_CFI(".cfi_adjust_cfa_offset -4\n\t") "popl 0x08(%ecx)\n\t" /* frame->eip */ + __ASM_CFI(".cfi_adjust_cfa_offset -4\n\t") ".globl " __ASM_NAME("__wine_syscall_dispatcher_prolog_end") "\n" __ASM_NAME("__wine_syscall_dispatcher_prolog_end") ":\n\t" "movl %esp,0x0c(%ecx)\n\t" /* frame->esp */ @@ -2484,6 +2487,18 @@ __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher, "movl %esi,0x30(%ecx)\n\t" "movl %ebp,0x34(%ecx)\n\t" "leal 0x34(%ecx),%ebp\n\t" + __ASM_CFI(".cfi_endproc\n\t") + __ASM_CFI(".cfi_startproc\n\t") + __ASM_CFI(".cfi_def_cfa %ebp,0\n\t") + __ASM_CFI(".cfi_rel_offset %eip,-0x2c\n\t") + __ASM_CFI(".cfi_rel_offset %esp,-0x28\n\t") + __ASM_CFI(".cfi_rel_offset %eax,-0x18\n\t") + __ASM_CFI(".cfi_rel_offset %ebx,-0x14\n\t") + __ASM_CFI(".cfi_rel_offset %ecx,-0x10\n\t") + __ASM_CFI(".cfi_rel_offset %edx,-0x0c\n\t") + __ASM_CFI(".cfi_rel_offset %edi,-0x08\n\t") + __ASM_CFI(".cfi_rel_offset %esi,-0x04\n\t") + __ASM_CFI(".cfi_rel_offset %ebp,-0x00\n\t") "leal 4(%esp),%esi\n\t" /* first argument */ "movl %eax,%ebx\n\t" "shrl $8,%ebx\n\t" diff --git a/dlls/ntdll/unix/signal_x86_64.c b/dlls/ntdll/unix/signal_x86_64.c index 2d910bba90a..e2b7fa73a36 100644 --- a/dlls/ntdll/unix/signal_x86_64.c +++ b/dlls/ntdll/unix/signal_x86_64.c @@ -3312,8 +3312,11 @@ __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher, "movq %gs:0x30,%rcx\n\t" "movq 0x320(%rcx),%rcx\n\t" /* amd64_thread_data()->syscall_frame */ "pushfq\n\t" + __ASM_CFI(".cfi_adjust_cfa_offset 8\n\t") "popq 0x80(%rcx)\n\t" + __ASM_CFI(".cfi_adjust_cfa_offset -8\n\t") "popq 0x70(%rcx)\n\t" /* frame->rip */ + __ASM_CFI(".cfi_adjust_cfa_offset -8\n\t") "movl $0,0x94(%rcx)\n\t" /* frame->restore_flags */ ".globl " __ASM_NAME("__wine_syscall_dispatcher_prolog_end") "\n" __ASM_NAME("__wine_syscall_dispatcher_prolog_end") ":\n\t" @@ -3334,6 +3337,22 @@ __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher, "movw %ss,0x90(%rcx)\n\t" "movw %gs,0x92(%rcx)\n\t" "movq %rbp,0x98(%rcx)\n\t" + __ASM_CFI(".cfi_endproc\n\t") + __ASM_CFI(".cfi_startproc\n\t") + __ASM_CFI(".cfi_def_cfa %rcx,0\n\t") + __ASM_CFI(".cfi_rel_offset %rax,0x00\n\t") + __ASM_CFI(".cfi_rel_offset %rbx,0x08\n\t") + __ASM_CFI(".cfi_rel_offset %rcx,0x10\n\t") + __ASM_CFI(".cfi_rel_offset %rdx,0x18\n\t") + __ASM_CFI(".cfi_rel_offset %rsi,0x20\n\t") + __ASM_CFI(".cfi_rel_offset %rdi,0x28\n\t") + __ASM_CFI(".cfi_rel_offset %r12,0x50\n\t") + __ASM_CFI(".cfi_rel_offset %r13,0x58\n\t") + __ASM_CFI(".cfi_rel_offset %r14,0x60\n\t") + __ASM_CFI(".cfi_rel_offset %r15,0x68\n\t") + __ASM_CFI(".cfi_rel_offset %rip,0x70\n\t") + __ASM_CFI(".cfi_rel_offset %rsp,0x88\n\t") + __ASM_CFI(".cfi_rel_offset %rbp,0x98\n\t") /* Legends of Runeterra hooks the first system call return instruction, and * depends on us returning to it. Adjust the return address accordingly. 
*/ "subq $0xb,0x70(%rcx)\n\t" @@ -3372,6 +3391,22 @@ __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher, "leaq -0x98(%rbp),%rcx\n" "2:\n\t" #endif + __ASM_CFI(".cfi_endproc\n\t") + __ASM_CFI(".cfi_startproc\n\t") + __ASM_CFI(".cfi_def_cfa %rbp,0\n\t") + __ASM_CFI(".cfi_rel_offset %rax,-0x98\n\t") + __ASM_CFI(".cfi_rel_offset %rbx,-0x90\n\t") + __ASM_CFI(".cfi_rel_offset %rcx,-0x88\n\t") + __ASM_CFI(".cfi_rel_offset %rdx,-0x80\n\t") + __ASM_CFI(".cfi_rel_offset %rsi,-0x78\n\t") + __ASM_CFI(".cfi_rel_offset %rdi,-0x70\n\t") + __ASM_CFI(".cfi_rel_offset %r12,-0x48\n\t") + __ASM_CFI(".cfi_rel_offset %r13,-0x40\n\t") + __ASM_CFI(".cfi_rel_offset %r14,-0x38\n\t") + __ASM_CFI(".cfi_rel_offset %r15,-0x30\n\t") + __ASM_CFI(".cfi_rel_offset %rip,-0x28\n\t") + __ASM_CFI(".cfi_rel_offset %rsp,-0x10\n\t") + __ASM_CFI(".cfi_rel_offset %rbp,-0x00\n\t") "leaq 0x28(%rsp),%rsi\n\t" /* first argument */ "movq %rcx,%rsp\n\t" "movq 0x00(%rcx),%rax\n\t" @@ -3399,6 +3434,22 @@ __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher, "movq (%rbx),%r10\n\t" /* table->ServiceTable */ "callq *(%r10,%rax,8)\n\t" "leaq -0x98(%rbp),%rcx\n" + __ASM_CFI(".cfi_endproc\n\t") + __ASM_CFI(".cfi_startproc\n\t") + __ASM_CFI(".cfi_def_cfa %rcx,0\n\t") + __ASM_CFI(".cfi_rel_offset %rax,0x00\n\t") + __ASM_CFI(".cfi_rel_offset %rbx,0x08\n\t") + __ASM_CFI(".cfi_rel_offset %rcx,0x10\n\t") + __ASM_CFI(".cfi_rel_offset %rdx,0x18\n\t") + __ASM_CFI(".cfi_rel_offset %rsi,0x20\n\t") + __ASM_CFI(".cfi_rel_offset %rdi,0x28\n\t") + __ASM_CFI(".cfi_rel_offset %r12,0x50\n\t") + __ASM_CFI(".cfi_rel_offset %r13,0x58\n\t") + __ASM_CFI(".cfi_rel_offset %r14,0x60\n\t") + __ASM_CFI(".cfi_rel_offset %r15,0x68\n\t") + __ASM_CFI(".cfi_rel_offset %rip,0x70\n\t") + __ASM_CFI(".cfi_rel_offset %rsp,0x88\n\t") + __ASM_CFI(".cfi_rel_offset %rbp,0x98\n\t") "2:\tmovl 0x94(%rcx),%edx\n\t" /* frame->restore_flags */ #ifdef __linux__ "testl $12,%r14d\n\t" /* SYSCALL_HAVE_PTHREAD_TEB | SYSCALL_HAVE_WRFSGSBASE */
From: Rémi Bernon <rbernon@codeweavers.com>
---
 dlls/ntdll/unix/thread.c       | 22 +++++++++++++---------
 dlls/ntdll/unix/unix_private.h |  2 +-
 dlls/ntdll/unix/virtual.c      | 23 +++--------------------
 3 files changed, 17 insertions(+), 30 deletions(-)
diff --git a/dlls/ntdll/unix/thread.c b/dlls/ntdll/unix/thread.c index 19647f7622b..d7562ea89ba 100644 --- a/dlls/ntdll/unix/thread.c +++ b/dlls/ntdll/unix/thread.c @@ -1172,25 +1172,28 @@ NTSTATUS init_thread_stack( TEB *teb, ULONG_PTR zero_bits, SIZE_T reserve_size, #ifdef _WIN64 /* 32-bit stack */ if ((status = virtual_alloc_thread_stack( &stack, zero_bits ? zero_bits : 0x7fffffff, - reserve_size, commit_size, 0 ))) + reserve_size, commit_size ))) return status; wow_teb->Tib.StackBase = PtrToUlong( stack.StackBase ); wow_teb->Tib.StackLimit = PtrToUlong( stack.StackLimit ); wow_teb->DeallocationStack = PtrToUlong( stack.DeallocationStack );
/* 64-bit stack */ - if ((status = virtual_alloc_thread_stack( &stack, 0, 0x40000, 0x40000, kernel_stack_size ))) + if ((status = virtual_alloc_thread_stack( &stack, 0, 0x40000, 0x40000 ))) return status; cpu = (WOW64_CPURESERVED *)(((ULONG_PTR)stack.StackBase - cpusize) & ~15); cpu->Machine = main_image_info.Machine; teb->Tib.StackBase = teb->TlsSlots[WOW64_TLS_CPURESERVED] = cpu; teb->Tib.StackLimit = stack.StackLimit; teb->DeallocationStack = stack.DeallocationStack; - thread_data->kernel_stack = stack.StackBase; + + if ((status = virtual_alloc_thread_stack( &stack, 0, kernel_stack_size, kernel_stack_size ))) + return status; + thread_data->kernel_stack = stack.DeallocationStack; return STATUS_SUCCESS; #else /* 64-bit stack */ - if ((status = virtual_alloc_thread_stack( &stack, 0, 0x40000, 0x40000, 0 ))) return status; + if ((status = virtual_alloc_thread_stack( &stack, 0, 0x40000, 0x40000 ))) return status;
cpu = (WOW64_CPURESERVED *)(((ULONG_PTR)stack.StackBase - cpusize) & ~15); cpu->Machine = main_image_info.Machine; @@ -1201,13 +1204,15 @@ NTSTATUS init_thread_stack( TEB *teb, ULONG_PTR zero_bits, SIZE_T reserve_size, }
/* native stack */ - if ((status = virtual_alloc_thread_stack( &stack, zero_bits, reserve_size, - commit_size, kernel_stack_size ))) + if ((status = virtual_alloc_thread_stack( &stack, zero_bits, reserve_size, commit_size ))) return status; teb->Tib.StackBase = stack.StackBase; teb->Tib.StackLimit = stack.StackLimit; teb->DeallocationStack = stack.DeallocationStack; - thread_data->kernel_stack = stack.StackBase; + + if ((status = virtual_alloc_thread_stack( &stack, 0, kernel_stack_size, kernel_stack_size ))) + return status; + thread_data->kernel_stack = stack.DeallocationStack; return STATUS_SUCCESS; }
@@ -1358,8 +1363,7 @@ NTSTATUS WINAPI NtCreateThreadEx( HANDLE *handle, ACCESS_MASK access, OBJECT_ATT thread_data->param = param;
pthread_attr_init( &pthread_attr ); - pthread_attr_setstack( &pthread_attr, teb->DeallocationStack, - (char *)thread_data->kernel_stack + kernel_stack_size - (char *)teb->DeallocationStack ); + pthread_attr_setstack( &pthread_attr, thread_data->kernel_stack, kernel_stack_size ); pthread_attr_setguardsize( &pthread_attr, 0 ); pthread_attr_setscope( &pthread_attr, PTHREAD_SCOPE_SYSTEM ); /* force creating a kernel thread */ InterlockedIncrement( &nb_threads ); diff --git a/dlls/ntdll/unix/unix_private.h b/dlls/ntdll/unix/unix_private.h index ad349207889..7165dd694a9 100644 --- a/dlls/ntdll/unix/unix_private.h +++ b/dlls/ntdll/unix/unix_private.h @@ -205,7 +205,7 @@ extern NTSTATUS virtual_alloc_teb( TEB **ret_teb ) DECLSPEC_HIDDEN; extern void virtual_free_teb( TEB *teb ) DECLSPEC_HIDDEN; extern NTSTATUS virtual_clear_tls_index( ULONG index ) DECLSPEC_HIDDEN; extern NTSTATUS virtual_alloc_thread_stack( INITIAL_TEB *stack, ULONG_PTR zero_bits, SIZE_T reserve_size, - SIZE_T commit_size, SIZE_T extra_size ) DECLSPEC_HIDDEN; + SIZE_T commit_size ) DECLSPEC_HIDDEN; extern void virtual_map_user_shared_data(void) DECLSPEC_HIDDEN; extern NTSTATUS virtual_handle_fault( void *addr, DWORD err, void *stack ) DECLSPEC_HIDDEN; extern unsigned int virtual_locked_server_call( void *req_ptr ) DECLSPEC_HIDDEN; diff --git a/dlls/ntdll/unix/virtual.c b/dlls/ntdll/unix/virtual.c index d8b888aa49e..5611c6d1350 100644 --- a/dlls/ntdll/unix/virtual.c +++ b/dlls/ntdll/unix/virtual.c @@ -3097,8 +3097,9 @@ NTSTATUS virtual_clear_tls_index( ULONG index ) * virtual_alloc_thread_stack */ NTSTATUS virtual_alloc_thread_stack( INITIAL_TEB *stack, ULONG_PTR zero_bits, SIZE_T reserve_size, - SIZE_T commit_size, SIZE_T extra_size ) + SIZE_T commit_size ) { + unsigned int vprot = VPROT_READ | VPROT_WRITE | VPROT_COMMITTED; struct file_view *view; NTSTATUS status; sigset_t sigset; @@ -3113,8 +3114,7 @@ NTSTATUS virtual_alloc_thread_stack( INITIAL_TEB *stack, ULONG_PTR zero_bits, SI
server_enter_uninterrupted_section( &virtual_mutex, &sigset );
- if ((status = map_view( &view, NULL, size + extra_size, FALSE, - VPROT_READ | VPROT_WRITE | VPROT_COMMITTED, zero_bits )) != STATUS_SUCCESS) + if ((status = map_view( &view, NULL, size, FALSE, vprot, zero_bits )) != STATUS_SUCCESS) goto done;
#ifdef VALGRIND_STACK_REGISTER @@ -3128,23 +3128,6 @@ NTSTATUS virtual_alloc_thread_stack( INITIAL_TEB *stack, ULONG_PTR zero_bits, SI mprotect_range( view->base, 2 * page_size, 0, 0 ); VIRTUAL_DEBUG_DUMP_VIEW( view );
- if (extra_size) - { - struct file_view *extra_view; - - /* shrink the first view and create a second one for the extra size */ - /* this allows the app to free the stack without freeing the thread start portion */ - view->size -= extra_size; - status = create_view( &extra_view, (char *)view->base + view->size, extra_size, - VPROT_READ | VPROT_WRITE | VPROT_COMMITTED ); - if (status != STATUS_SUCCESS) - { - view->size += extra_size; - delete_view( view ); - goto done; - } - } - /* note: limit is lower than base since the stack grows down */ stack->OldStackBase = 0; stack->OldStackLimit = 0;
Regarding "[PATCH 08/13] ntdll: Allocate a truly separate stack for the kernel stack.":
Why do we need to do this? Specifically, do we know why this is broken in Valgrind?
I ask because I would guess a priori that this should be fixed on the valgrind side. Valgrind should have all of the information regarding our stacks, so I wouldn't expect it to need to resort to heuristics.
From: Rémi Bernon <rbernon@codeweavers.com>
---
 dlls/ntdll/heap.c              |    3 +-
 dlls/ntdll/unix/file.c         |    1 +
 dlls/ntdll/unix/process.c      |    1 +
 dlls/ntdll/valgrind/memcheck.h |  302 ++
 dlls/ntdll/valgrind/valgrind.h | 7165 ++++++++++++++++++++++++++++++++
 5 files changed, 7471 insertions(+), 1 deletion(-)
 create mode 100644 dlls/ntdll/valgrind/memcheck.h
 create mode 100644 dlls/ntdll/valgrind/valgrind.h
diff --git a/dlls/ntdll/heap.c b/dlls/ntdll/heap.c index 8530bdf223f..df3fde974d2 100644 --- a/dlls/ntdll/heap.c +++ b/dlls/ntdll/heap.c @@ -25,7 +25,8 @@ #include <stdio.h> #include <string.h>
-#define RUNNING_ON_VALGRIND 0 /* FIXME */ +#include "valgrind/valgrind.h" +#include "valgrind/memcheck.h"
#include "ntstatus.h" #define WIN32_NO_STATUS diff --git a/dlls/ntdll/unix/file.c b/dlls/ntdll/unix/file.c index 604ca866890..b8eb6941abd 100644 --- a/dlls/ntdll/unix/file.c +++ b/dlls/ntdll/unix/file.c @@ -36,6 +36,7 @@ #include <stdlib.h> #include <stdint.h> #include <stdio.h> +#include <stdint.h> #include <limits.h> #include <unistd.h> #ifdef HAVE_MNTENT_H diff --git a/dlls/ntdll/unix/process.c b/dlls/ntdll/unix/process.c index 83888717a5c..ba3c9691dfa 100644 --- a/dlls/ntdll/unix/process.c +++ b/dlls/ntdll/unix/process.c @@ -29,6 +29,7 @@ #include <fcntl.h> #include <signal.h> #include <stdarg.h> +#include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> diff --git a/dlls/ntdll/valgrind/memcheck.h b/dlls/ntdll/valgrind/memcheck.h new file mode 100644 index 00000000000..8450f0ba969 --- /dev/null +++ b/dlls/ntdll/valgrind/memcheck.h @@ -0,0 +1,302 @@ + +/* + ---------------------------------------------------------------- + + Notice that the following BSD-style license applies to this one + file (memcheck.h) only. The rest of Valgrind is licensed under the + terms of the GNU General Public License, version 2, unless + otherwise indicated. See the COPYING file in the source + distribution for details. + + ---------------------------------------------------------------- + + This file is part of MemCheck, a heavyweight Valgrind tool for + detecting memory errors. + + Copyright (C) 2000-2017 Julian Seward. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. The origin of this software must not be misrepresented; you must + not claim that you wrote the original software. If you use this + software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + + 3. Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + + 4. The name of the author may not be used to endorse or promote + products derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ---------------------------------------------------------------- + + Notice that the above BSD-style license applies to this one file + (memcheck.h) only. The entire rest of Valgrind is licensed under + the terms of the GNU General Public License, version 2. See the + COPYING file in the source distribution for details. + + ---------------------------------------------------------------- +*/ + + +#ifndef __MEMCHECK_H +#define __MEMCHECK_H + + +/* This file is for inclusion into client (your!) code. 
+ + You can use these macros to manipulate and query memory permissions + inside your own programs. + + See comment near the top of valgrind.h on how to use them. +*/ + +#include "valgrind.h" + +/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! + This enum comprises an ABI exported by Valgrind to programs + which use client requests. DO NOT CHANGE THE ORDER OF THESE + ENTRIES, NOR DELETE ANY -- add new ones at the end. */ +typedef + enum { + VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'), + VG_USERREQ__MAKE_MEM_UNDEFINED, + VG_USERREQ__MAKE_MEM_DEFINED, + VG_USERREQ__DISCARD, + VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, + VG_USERREQ__CHECK_MEM_IS_DEFINED, + VG_USERREQ__DO_LEAK_CHECK, + VG_USERREQ__COUNT_LEAKS, + + VG_USERREQ__GET_VBITS, + VG_USERREQ__SET_VBITS, + + VG_USERREQ__CREATE_BLOCK, + + VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, + + /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */ + VG_USERREQ__COUNT_LEAK_BLOCKS, + + VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE, + VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE, + + /* This is just for memcheck's internal use - don't use it */ + _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR + = VG_USERREQ_TOOL_BASE('M','C') + 256 + } Vg_MemCheckClientRequest; + + + +/* Client-code macros to manipulate the state of memory. */ + +/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */ +#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__MAKE_MEM_NOACCESS, \ + (_qzz_addr), (_qzz_len), 0, 0, 0) + +/* Similarly, mark memory at _qzz_addr as addressable but undefined + for _qzz_len bytes. */ +#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__MAKE_MEM_UNDEFINED, \ + (_qzz_addr), (_qzz_len), 0, 0, 0) + +/* Similarly, mark memory at _qzz_addr as addressable and defined + for _qzz_len bytes. */ +#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__MAKE_MEM_DEFINED, \ + (_qzz_addr), (_qzz_len), 0, 0, 0) + +/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is + not altered: bytes which are addressable are marked as defined, + but those which are not addressable are left unchanged. */ +#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \ + (_qzz_addr), (_qzz_len), 0, 0, 0) + +/* Create a block-description handle. The description is an ascii + string which is included in any messages pertaining to addresses + within the specified memory range. Has no other effect on the + properties of the memory range. */ +#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CREATE_BLOCK, \ + (_qzz_addr), (_qzz_len), (_qzz_desc), \ + 0, 0) + +/* Discard a block-description-handle. Returns 1 for an + invalid handle, 0 for a valid handle. */ +#define VALGRIND_DISCARD(_qzz_blkindex) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__DISCARD, \ + 0, (_qzz_blkindex), 0, 0, 0) + + +/* Client-code macros to check the state of memory. */ + +/* Check that memory at _qzz_addr is addressable for _qzz_len bytes. + If suitable addressibility is not established, Valgrind prints an + error message and returns the address of the first offending byte. 
+ Otherwise it returns zero. */ +#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, \ + (_qzz_addr), (_qzz_len), 0, 0, 0) + +/* Check that memory at _qzz_addr is addressable and defined for + _qzz_len bytes. If suitable addressibility and definedness are not + established, Valgrind prints an error message and returns the + address of the first offending byte. Otherwise it returns zero. */ +#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__CHECK_MEM_IS_DEFINED, \ + (_qzz_addr), (_qzz_len), 0, 0, 0) + +/* Use this macro to force the definedness and addressibility of an + lvalue to be checked. If suitable addressibility and definedness + are not established, Valgrind prints an error message and returns + the address of the first offending byte. Otherwise it returns + zero. */ +#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue) \ + VALGRIND_CHECK_MEM_IS_DEFINED( \ + (volatile unsigned char *)&(__lvalue), \ + (unsigned long)(sizeof (__lvalue))) + + +/* Do a full memory leak check (like --leak-check=full) mid-execution. */ +#define VALGRIND_DO_LEAK_CHECK \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \ + 0, 0, 0, 0, 0) + +/* Same as VALGRIND_DO_LEAK_CHECK but only showing the entries for + which there was an increase in leaked bytes or leaked nr of blocks + since the previous leak search. */ +#define VALGRIND_DO_ADDED_LEAK_CHECK \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \ + 0, 1, 0, 0, 0) + +/* Same as VALGRIND_DO_ADDED_LEAK_CHECK but showing entries with + increased or decreased leaked bytes/blocks since previous leak + search. */ +#define VALGRIND_DO_CHANGED_LEAK_CHECK \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \ + 0, 2, 0, 0, 0) + +/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */ +#define VALGRIND_DO_QUICK_LEAK_CHECK \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \ + 1, 0, 0, 0, 0) + +/* Return number of leaked, dubious, reachable and suppressed bytes found by + all previous leak checks. They must be lvalues. */ +#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed) \ + /* For safety on 64-bit platforms we assign the results to private + unsigned long variables, then assign these to the lvalues the user + specified, which works no matter what type 'leaked', 'dubious', etc + are. We also initialise '_qzz_leaked', etc because + VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as + defined. */ \ + { \ + unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \ + unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \ + VALGRIND_DO_CLIENT_REQUEST_STMT( \ + VG_USERREQ__COUNT_LEAKS, \ + &_qzz_leaked, &_qzz_dubious, \ + &_qzz_reachable, &_qzz_suppressed, 0); \ + leaked = _qzz_leaked; \ + dubious = _qzz_dubious; \ + reachable = _qzz_reachable; \ + suppressed = _qzz_suppressed; \ + } + +/* Return number of leaked, dubious, reachable and suppressed bytes found by + all previous leak checks. They must be lvalues. */ +#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \ + /* For safety on 64-bit platforms we assign the results to private + unsigned long variables, then assign these to the lvalues the user + specified, which works no matter what type 'leaked', 'dubious', etc + are. We also initialise '_qzz_leaked', etc because + VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as + defined. 
*/ \ + { \ + unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \ + unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \ + VALGRIND_DO_CLIENT_REQUEST_STMT( \ + VG_USERREQ__COUNT_LEAK_BLOCKS, \ + &_qzz_leaked, &_qzz_dubious, \ + &_qzz_reachable, &_qzz_suppressed, 0); \ + leaked = _qzz_leaked; \ + dubious = _qzz_dubious; \ + reachable = _qzz_reachable; \ + suppressed = _qzz_suppressed; \ + } + + +/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it + into the provided zzvbits array. Return values: + 0 if not running on valgrind + 1 success + 2 [previously indicated unaligned arrays; these are now allowed] + 3 if any parts of zzsrc/zzvbits are not addressable. + The metadata is not copied in cases 0, 2 or 3 so it should be + impossible to segfault your system by using this call. +*/ +#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes) \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__GET_VBITS, \ + (const char*)(zza), \ + (char*)(zzvbits), \ + (zznbytes), 0, 0) + +/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it + from the provided zzvbits array. Return values: + 0 if not running on valgrind + 1 success + 2 [previously indicated unaligned arrays; these are now allowed] + 3 if any parts of zza/zzvbits are not addressable. + The metadata is not copied in cases 0, 2 or 3 so it should be + impossible to segfault your system by using this call. +*/ +#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes) \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__SET_VBITS, \ + (const char*)(zza), \ + (const char*)(zzvbits), \ + (zznbytes), 0, 0 ) + +/* Disable and re-enable reporting of addressing errors in the + specified address range. */ +#define VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE, \ + (_qzz_addr), (_qzz_len), 0, 0, 0) + +#define VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE, \ + (_qzz_addr), (_qzz_len), 0, 0, 0) + +#endif + diff --git a/dlls/ntdll/valgrind/valgrind.h b/dlls/ntdll/valgrind/valgrind.h new file mode 100644 index 00000000000..e3adbc66237 --- /dev/null +++ b/dlls/ntdll/valgrind/valgrind.h @@ -0,0 +1,7165 @@ +/* -*- c -*- + ---------------------------------------------------------------- + + Notice that the following BSD-style license applies to this one + file (valgrind.h) only. The rest of Valgrind is licensed under the + terms of the GNU General Public License, version 2, unless + otherwise indicated. See the COPYING file in the source + distribution for details. + + ---------------------------------------------------------------- + + This file is part of Valgrind, a dynamic binary instrumentation + framework. + + Copyright (C) 2000-2017 Julian Seward. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. The origin of this software must not be misrepresented; you must + not claim that you wrote the original software. If you use this + software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + + 3. 
Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + + 4. The name of the author may not be used to endorse or promote + products derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ---------------------------------------------------------------- + + Notice that the above BSD-style license applies to this one file + (valgrind.h) only. The entire rest of Valgrind is licensed under + the terms of the GNU General Public License, version 2. See the + COPYING file in the source distribution for details. + + ---------------------------------------------------------------- +*/ + + +/* This file is for inclusion into client (your!) code. + + You can use these macros to manipulate and query Valgrind's + execution inside your own programs. + + The resulting executables will still run without Valgrind, just a + little bit more slowly than they otherwise would, but otherwise + unchanged. When not running on valgrind, each client request + consumes very few (eg. 7) instructions, so the resulting performance + loss is negligible unless you plan to execute client requests + millions of times per second. Nevertheless, if that is still a + problem, you can compile with the NVALGRIND symbol defined (gcc + -DNVALGRIND) so that client requests are not even compiled in. */ + +#ifndef __VALGRIND_H +#define __VALGRIND_H + + +/* ------------------------------------------------------------------ */ +/* VERSION NUMBER OF VALGRIND */ +/* ------------------------------------------------------------------ */ + +/* Specify Valgrind's version number, so that user code can + conditionally compile based on our version number. Note that these + were introduced at version 3.6 and so do not exist in version 3.5 + or earlier. The recommended way to use them to check for "version + X.Y or later" is (eg) + +#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \ + && (__VALGRIND_MAJOR__ > 3 \ + || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6)) +*/ +#define __VALGRIND_MAJOR__ 3 +#define __VALGRIND_MINOR__ 18 + + +#include <stdarg.h> + +/* Nb: this file might be included in a file compiled with -ansi. So + we can't use C++ style "//" comments nor the "asm" keyword (instead + use "__asm__"). */ + +/* Derive some tags indicating what the target platform is. Note + that in this file we're using the compiler's CPP symbols for + identifying architectures, which are different to the ones we use + within the rest of Valgrind. Note, __powerpc__ is active for both + 32 and 64-bit PPC, whereas __powerpc64__ is only active for the + latter (on Linux, that is). 
+ + Misc note: how to find out what's predefined in gcc by default: + gcc -Wp,-dM somefile.c +*/ +#undef PLAT_x86_darwin +#undef PLAT_amd64_darwin +#undef PLAT_x86_freebsd +#undef PLAT_amd64_freebsd +#undef PLAT_x86_win32 +#undef PLAT_amd64_win64 +#undef PLAT_x86_linux +#undef PLAT_amd64_linux +#undef PLAT_ppc32_linux +#undef PLAT_ppc64be_linux +#undef PLAT_ppc64le_linux +#undef PLAT_arm_linux +#undef PLAT_arm64_linux +#undef PLAT_s390x_linux +#undef PLAT_mips32_linux +#undef PLAT_mips64_linux +#undef PLAT_nanomips_linux +#undef PLAT_x86_solaris +#undef PLAT_amd64_solaris + + +#if defined(__APPLE__) && defined(__i386__) +# define PLAT_x86_darwin 1 +#elif defined(__APPLE__) && defined(__x86_64__) +# define PLAT_amd64_darwin 1 +#elif defined(__FreeBSD__) && defined(__i386__) +# define PLAT_x86_freebsd 1 +#elif defined(__FreeBSD__) && defined(__amd64__) +# define PLAT_amd64_freebsd 1 +#elif (defined(__MINGW32__) && defined(__i386__)) \ + || defined(__CYGWIN32__) \ + || (defined(_WIN32) && defined(_M_IX86)) +# define PLAT_x86_win32 1 +#elif (defined(__MINGW32__) && defined(__x86_64__)) \ + || (defined(_WIN32) && defined(_M_X64)) +/* __MINGW32__ and _WIN32 are defined in 64 bit mode as well. */ +# define PLAT_amd64_win64 1 +#elif defined(__linux__) && defined(__i386__) +# define PLAT_x86_linux 1 +#elif defined(__linux__) && defined(__x86_64__) && !defined(__ILP32__) +# define PLAT_amd64_linux 1 +#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__) +# define PLAT_ppc32_linux 1 +#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF != 2 +/* Big Endian uses ELF version 1 */ +# define PLAT_ppc64be_linux 1 +#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF == 2 +/* Little Endian uses ELF version 2 */ +# define PLAT_ppc64le_linux 1 +#elif defined(__linux__) && defined(__arm__) && !defined(__aarch64__) +# define PLAT_arm_linux 1 +#elif defined(__linux__) && defined(__aarch64__) && !defined(__arm__) +# define PLAT_arm64_linux 1 +#elif defined(__linux__) && defined(__s390__) && defined(__s390x__) +# define PLAT_s390x_linux 1 +#elif defined(__linux__) && defined(__mips__) && (__mips==64) +# define PLAT_mips64_linux 1 +#elif defined(__linux__) && defined(__mips__) && (__mips==32) +# define PLAT_mips32_linux 1 +#elif defined(__linux__) && defined(__nanomips__) +# define PLAT_nanomips_linux 1 +#elif defined(__sun) && defined(__i386__) +# define PLAT_x86_solaris 1 +#elif defined(__sun) && defined(__x86_64__) +# define PLAT_amd64_solaris 1 +#else +/* If we're not compiling for our target platform, don't generate + any inline asms. */ +# if !defined(NVALGRIND) +# define NVALGRIND 1 +# endif +#endif + + +/* ------------------------------------------------------------------ */ +/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */ +/* in here of use to end-users -- skip to the next section. */ +/* ------------------------------------------------------------------ */ + +/* + * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client + * request. Accepts both pointers and integers as arguments. + * + * VALGRIND_DO_CLIENT_REQUEST_STMT(): a statement that invokes a Valgrind + * client request that does not return a value. + + * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind + * client request and whose value equals the client request result. Accepts + * both pointers and integers as arguments. 
Note that such calls are not
+ * necessarily pure functions -- they may have side effects.
+ */

+#define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \
+ _zzq_request, _zzq_arg1, _zzq_arg2, \
+ _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ do { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \
+ (_zzq_request), (_zzq_arg1), (_zzq_arg2), \
+ (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0)

+#define VALGRIND_DO_CLIENT_REQUEST_STMT(_zzq_request, _zzq_arg1, \
+ _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ do { (void) VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
+ (_zzq_request), (_zzq_arg1), (_zzq_arg2), \
+ (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0)

+#if defined(NVALGRIND)

+/* Define NVALGRIND to completely remove the Valgrind magic sequence
+ from the compiled code (analogous to NDEBUG's effects on
+ assert()) */
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ (_zzq_default)

+#else /* ! NVALGRIND */

+/* The following defines the magic code sequences which the JITter
+ spots and handles magically. Don't look too closely at them as
+ they will rot your brain.

+ The assembly code sequences for all architectures are in this one
+ file. This is because this file must be stand-alone, and we don't
+ want to have multiple files.

+ For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
+ value gets put in the return slot, so that everything works when
+ this is executed not under Valgrind. Args are passed in a memory
+ block, and so there's no intrinsic limit to the number that could
+ be passed, but it's currently five.

+ The macro args are:
+ _zzq_rlval result lvalue
+ _zzq_default default value (result returned when running on real CPU)
+ _zzq_request request code
+ _zzq_arg1..5 request params

+ The other two macros are used to support function wrapping, and are
+ a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
+ guest's NRADDR pseudo-register and whatever other information is
+ needed to safely call the original from the wrapper: on
+ ppc64-linux, the R2 value at the divert point is also needed. This
+ information is abstracted into a user-visible type, OrigFn.

+ VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
+ guest, but guarantees that the branch instruction will not be
+ redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
+ branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
+ complete inline asm, since it needs to be combined with more magic
+ inline asm stuff to be useful.
+*/

+/* ----------------- x86-{linux,darwin,solaris} ---------------- */

+#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
+ || (defined(PLAT_x86_win32) && defined(__GNUC__)) \
+ || defined(PLAT_x86_solaris) || defined(PLAT_x86_freebsd)

+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "roll $3, %%edi ; roll $13, %%edi\n\t" \ + "roll $29, %%edi ; roll $19, %%edi\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %EDX = client_request ( %EAX ) */ \ + "xchgl %%ebx,%%ebx" \ + : "=d" (_zzq_result) \ + : "a" (&_zzq_args[0]), "0" (_zzq_default) \ + : "cc", "memory" \ + ); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %EAX = guest_NRADDR */ \ + "xchgl %%ecx,%%ecx" \ + : "=a" (__addr) \ + : \ + : "cc", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_EAX \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir *%EAX */ \ + "xchgl %%edx,%%edx\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "xchgl %%edi,%%edi\n\t" \ + : : : "cc", "memory" \ + ); \ + } while (0) + +#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) + || PLAT_x86_solaris */ + +/* ------------------------- x86-Win32 ------------------------- */ + +#if defined(PLAT_x86_win32) && !defined(__GNUC__) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#if defined(_MSC_VER) + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + __asm rol edi, 3 __asm rol edi, 13 \ + __asm rol edi, 29 __asm rol edi, 19 + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \ + (uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \ + (uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \ + (uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5)) + +static __inline uintptr_t +valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request, + uintptr_t _zzq_arg1, uintptr_t _zzq_arg2, + uintptr_t _zzq_arg3, uintptr_t _zzq_arg4, + uintptr_t _zzq_arg5) +{ + volatile uintptr_t _zzq_args[6]; + volatile unsigned int _zzq_result; + _zzq_args[0] = (uintptr_t)(_zzq_request); + _zzq_args[1] = (uintptr_t)(_zzq_arg1); + _zzq_args[2] = (uintptr_t)(_zzq_arg2); + _zzq_args[3] = (uintptr_t)(_zzq_arg3); + _zzq_args[4] = (uintptr_t)(_zzq_arg4); + _zzq_args[5] = (uintptr_t)(_zzq_arg5); + __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default + __SPECIAL_INSTRUCTION_PREAMBLE + /* %EDX = client_request ( %EAX ) */ + __asm xchg ebx,ebx + __asm mov _zzq_result, edx + } + return _zzq_result; +} + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned int __addr; \ + __asm { __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %EAX = guest_NRADDR */ \ + __asm xchg ecx,ecx \ + __asm mov __addr, eax \ + } \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_EAX ERROR + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm { __SPECIAL_INSTRUCTION_PREAMBLE \ + __asm xchg edi,edi \ + } \ + } while (0) + +#else +#error Unsupported compiler. +#endif + +#endif /* PLAT_x86_win32 */ + +/* ----------------- amd64-{linux,darwin,solaris} --------------- */ + +#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \ + || defined(PLAT_amd64_solaris) \ + || defined(PLAT_amd64_freebsd) \ + || (defined(PLAT_amd64_win64) && defined(__GNUC__)) + +typedef + struct { + uintptr_t nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \ + "rolq $61, %%rdi ; rolq $51, %%rdi\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({ volatile uintptr_t _zzq_args[6]; \ + volatile uintptr_t _zzq_result; \ + _zzq_args[0] = (uintptr_t)(_zzq_request); \ + _zzq_args[1] = (uintptr_t)(_zzq_arg1); \ + _zzq_args[2] = (uintptr_t)(_zzq_arg2); \ + _zzq_args[3] = (uintptr_t)(_zzq_arg3); \ + _zzq_args[4] = (uintptr_t)(_zzq_arg4); \ + _zzq_args[5] = (uintptr_t)(_zzq_arg5); \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %RDX = client_request ( %RAX ) */ \ + "xchgq %%rbx,%%rbx" \ + : "=d" (_zzq_result) \ + : "a" (&_zzq_args[0]), "0" (_zzq_default) \ + : "cc", "memory" \ + ); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile uintptr_t __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %RAX = guest_NRADDR */ \ + "xchgq %%rcx,%%rcx" \ + : "=a" (__addr) \ + : \ + : "cc", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_RAX \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir *%RAX */ \ + "xchgq %%rdx,%%rdx\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "xchgq %%rdi,%%rdi\n\t" \ + : : : "cc", "memory" \ + ); \ + } while (0) + +#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */ + +/* ------------------------- amd64-Win64 ------------------------- */ + +#if defined(PLAT_amd64_win64) && !defined(__GNUC__) + +#error Unsupported compiler. + +#endif /* PLAT_amd64_win64 */ + +/* ------------------------ ppc32-linux ------------------------ */ + +#if defined(PLAT_ppc32_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rlwinm 0,0,3,0,31 ; rlwinm 0,0,13,0,31\n\t" \ + "rlwinm 0,0,29,0,31 ; rlwinm 0,0,19,0,31\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({ unsigned int _zzq_args[6]; \ + unsigned int _zzq_result; \ + unsigned int* _zzq_ptr; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + _zzq_ptr = _zzq_args; \ + __asm__ volatile("mr 3,%1\n\t" /*default*/ \ + "mr 4,%2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = client_request ( %R4 ) */ \ + "or 1,1,1\n\t" \ + "mr %0,3" /*result*/ \ + : "=b" (_zzq_result) \ + : "b" (_zzq_default), "b" (_zzq_ptr) \ + : "cc", "memory", "r3", "r4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR */ \ + "or 2,2,2\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R11 */ \ + "or 3,3,3\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or 5,5,5\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_ppc32_linux */ + +/* ------------------------ ppc64-linux ------------------------ */ + +#if defined(PLAT_ppc64be_linux) + +typedef + struct { + uintptr_t nraddr; /* where's the code? */ + uintptr_t r2; /* what tocptr do we need? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ + "rotldi 0,0,61 ; rotldi 0,0,51\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({ uintptr_t _zzq_args[6]; \ + uintptr_t _zzq_result; \ + uintptr_t* _zzq_ptr; \ + _zzq_args[0] = (uintptr_t)(_zzq_request); \ + _zzq_args[1] = (uintptr_t)(_zzq_arg1); \ + _zzq_args[2] = (uintptr_t)(_zzq_arg2); \ + _zzq_args[3] = (uintptr_t)(_zzq_arg3); \ + _zzq_args[4] = (uintptr_t)(_zzq_arg4); \ + _zzq_args[5] = (uintptr_t)(_zzq_arg5); \ + _zzq_ptr = _zzq_args; \ + __asm__ volatile("mr 3,%1\n\t" /*default*/ \ + "mr 4,%2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = client_request ( %R4 ) */ \ + "or 1,1,1\n\t" \ + "mr %0,3" /*result*/ \ + : "=b" (_zzq_result) \ + : "b" (_zzq_default), "b" (_zzq_ptr) \ + : "cc", "memory", "r3", "r4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + uintptr_t __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR */ \ + "or 2,2,2\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR_GPR2 */ \ + "or 4,4,4\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->r2 = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R11 */ \ + "or 3,3,3\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or 5,5,5\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_ppc64be_linux */ + +#if defined(PLAT_ppc64le_linux) + +typedef + struct { + uintptr_t nraddr; /* where's the code? */ + uintptr_t r2; /* what tocptr do we need? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ + "rotldi 0,0,61 ; rotldi 0,0,51\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({ uintptr_t _zzq_args[6]; \ + uintptr_t _zzq_result; \ + uintptr_t* _zzq_ptr; \ + _zzq_args[0] = (uintptr_t)(_zzq_request); \ + _zzq_args[1] = (uintptr_t)(_zzq_arg1); \ + _zzq_args[2] = (uintptr_t)(_zzq_arg2); \ + _zzq_args[3] = (uintptr_t)(_zzq_arg3); \ + _zzq_args[4] = (uintptr_t)(_zzq_arg4); \ + _zzq_args[5] = (uintptr_t)(_zzq_arg5); \ + _zzq_ptr = _zzq_args; \ + __asm__ volatile("mr 3,%1\n\t" /*default*/ \ + "mr 4,%2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = client_request ( %R4 ) */ \ + "or 1,1,1\n\t" \ + "mr %0,3" /*result*/ \ + : "=b" (_zzq_result) \ + : "b" (_zzq_default), "b" (_zzq_ptr) \ + : "cc", "memory", "r3", "r4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + uintptr_t __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR */ \ + "or 2,2,2\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR_GPR2 */ \ + "or 4,4,4\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->r2 = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R12 */ \ + "or 3,3,3\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or 5,5,5\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_ppc64le_linux */ + +/* ------------------------- arm-linux ------------------------- */ + +#if defined(PLAT_arm_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \ + "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile("mov r3, %1\n\t" /*default*/ \ + "mov r4, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* R3 = client_request ( R4 ) */ \ + "orr r10, r10, r10\n\t" \ + "mov %0, r3" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "cc","memory", "r3", "r4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* R3 = guest_NRADDR */ \ + "orr r11, r11, r11\n\t" \ + "mov %0, r3" \ + : "=r" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R4 */ \ + "orr r12, r12, r12\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "orr r9, r9, r9\n\t" \ + : : : "cc", "memory" \ + ); \ + } while (0) + +#endif /* PLAT_arm_linux */ + +/* ------------------------ arm64-linux ------------------------- */ + +#if defined(PLAT_arm64_linux) + +typedef + struct { + uintptr_t nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "ror x12, x12, #3 ; ror x12, x12, #13 \n\t" \ + "ror x12, x12, #51 ; ror x12, x12, #61 \n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({volatile uintptr_t _zzq_args[6]; \ + volatile uintptr_t _zzq_result; \ + _zzq_args[0] = (uintptr_t)(_zzq_request); \ + _zzq_args[1] = (uintptr_t)(_zzq_arg1); \ + _zzq_args[2] = (uintptr_t)(_zzq_arg2); \ + _zzq_args[3] = (uintptr_t)(_zzq_arg3); \ + _zzq_args[4] = (uintptr_t)(_zzq_arg4); \ + _zzq_args[5] = (uintptr_t)(_zzq_arg5); \ + __asm__ volatile("mov x3, %1\n\t" /*default*/ \ + "mov x4, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* X3 = client_request ( X4 ) */ \ + "orr x10, x10, x10\n\t" \ + "mov %0, x3" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" ((uintptr_t)(_zzq_default)), \ + "r" (&_zzq_args[0]) \ + : "cc","memory", "x3", "x4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + uintptr_t __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* X3 = guest_NRADDR */ \ + "orr x11, x11, x11\n\t" \ + "mov %0, x3" \ + : "=r" (__addr) \ + : \ + : "cc", "memory", "x3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir X8 */ \ + "orr x12, x12, x12\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "orr x9, x9, x9\n\t" \ + : : : "cc", "memory" \ + ); \ + } while (0) + +#endif /* PLAT_arm64_linux */ + +/* ------------------------ s390x-linux ------------------------ */ + +#if defined(PLAT_s390x_linux) + +typedef + struct { + uintptr_t nraddr; /* where's the code? */ + } + OrigFn; + +/* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific + * code. This detection is implemented in platform specific toIR.c + * (e.g. VEX/priv/guest_s390_decoder.c). 
+ */ +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "lr 15,15\n\t" \ + "lr 1,1\n\t" \ + "lr 2,2\n\t" \ + "lr 3,3\n\t" + +#define __CLIENT_REQUEST_CODE "lr 2,2\n\t" +#define __GET_NR_CONTEXT_CODE "lr 3,3\n\t" +#define __CALL_NO_REDIR_CODE "lr 4,4\n\t" +#define __VEX_INJECT_IR_CODE "lr 5,5\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({volatile uintptr_t _zzq_args[6]; \ + volatile uintptr_t _zzq_result; \ + _zzq_args[0] = (uintptr_t)(_zzq_request); \ + _zzq_args[1] = (uintptr_t)(_zzq_arg1); \ + _zzq_args[2] = (uintptr_t)(_zzq_arg2); \ + _zzq_args[3] = (uintptr_t)(_zzq_arg3); \ + _zzq_args[4] = (uintptr_t)(_zzq_arg4); \ + _zzq_args[5] = (uintptr_t)(_zzq_arg5); \ + __asm__ volatile(/* r2 = args */ \ + "lgr 2,%1\n\t" \ + /* r3 = default */ \ + "lgr 3,%2\n\t" \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + __CLIENT_REQUEST_CODE \ + /* results = r3 */ \ + "lgr %0, 3\n\t" \ + : "=d" (_zzq_result) \ + : "a" (&_zzq_args[0]), \ + "0" ((uintptr_t)_zzq_default) \ + : "cc", "2", "3", "memory" \ + ); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile uintptr_t __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + __GET_NR_CONTEXT_CODE \ + "lgr %0, 3\n\t" \ + : "=a" (__addr) \ + : \ + : "cc", "3", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_R1 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + __CALL_NO_REDIR_CODE + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + __VEX_INJECT_IR_CODE); \ + } while (0) + +#endif /* PLAT_s390x_linux */ + +/* ------------------------- mips32-linux ---------------- */ + +#if defined(PLAT_mips32_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +/* .word 0x342 + * .word 0x742 + * .word 0xC2 + * .word 0x4C2*/ +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "srl $0, $0, 13\n\t" \ + "srl $0, $0, 29\n\t" \ + "srl $0, $0, 3\n\t" \ + "srl $0, $0, 19\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({ volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile("move $11, %1\n\t" /*default*/ \ + "move $12, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* T3 = client_request ( T4 ) */ \ + "or $13, $13, $13\n\t" \ + "move %0, $11\n\t" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "$11", "$12", "memory"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %t9 = guest_NRADDR */ \ + "or $14, $14, $14\n\t" \ + "move %0, $11" /*result*/ \ + : "=r" (__addr) \ + : \ + : "$11" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_T9 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir *%t9 */ \ + "or $15, $15, $15\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or $11, $11, $11\n\t" \ + ); \ + } while (0) + + +#endif /* PLAT_mips32_linux */ + +/* ------------------------- mips64-linux ---------------- */ + +#if defined(PLAT_mips64_linux) + +typedef + struct { + uintptr_t nraddr; /* where's the code? 
*/ + } + OrigFn; + +/* dsll $0,$0, 3 + * dsll $0,$0, 13 + * dsll $0,$0, 29 + * dsll $0,$0, 19*/ +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "dsll $0,$0, 3 ; dsll $0,$0,13\n\t" \ + "dsll $0,$0,29 ; dsll $0,$0,19\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({ volatile uintptr_t _zzq_args[6]; \ + volatile uintptr_t _zzq_result; \ + _zzq_args[0] = (uintptr_t)(_zzq_request); \ + _zzq_args[1] = (uintptr_t)(_zzq_arg1); \ + _zzq_args[2] = (uintptr_t)(_zzq_arg2); \ + _zzq_args[3] = (uintptr_t)(_zzq_arg3); \ + _zzq_args[4] = (uintptr_t)(_zzq_arg4); \ + _zzq_args[5] = (uintptr_t)(_zzq_arg5); \ + __asm__ volatile("move $11, %1\n\t" /*default*/ \ + "move $12, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* $11 = client_request ( $12 ) */ \ + "or $13, $13, $13\n\t" \ + "move %0, $11\n\t" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "$11", "$12", "memory"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile uintptr_t __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* $11 = guest_NRADDR */ \ + "or $14, $14, $14\n\t" \ + "move %0, $11" /*result*/ \ + : "=r" (__addr) \ + : \ + : "$11"); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_T9 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir $25 */ \ + "or $15, $15, $15\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or $11, $11, $11\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_mips64_linux */ + +#if defined(PLAT_nanomips_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/
+ }
+ OrigFn;
+/*
+ 8000 c04d srl zero, zero, 13
+ 8000 c05d srl zero, zero, 29
+ 8000 c043 srl zero, zero, 3
+ 8000 c053 srl zero, zero, 19
+*/

+#define __SPECIAL_INSTRUCTION_PREAMBLE "srl[32] $zero, $zero, 13 \n\t" \
+ "srl[32] $zero, $zero, 29 \n\t" \
+ "srl[32] $zero, $zero, 3 \n\t" \
+ "srl[32] $zero, $zero, 19 \n\t"

+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ __extension__ \
+ ({ volatile unsigned int _zzq_args[6]; \
+ volatile unsigned int _zzq_result; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ __asm__ volatile("move $a7, %1\n\t" /* default */ \
+ "move $t0, %2\n\t" /* ptr */ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* $a7 = client_request( $t0 ) */ \
+ "or[32] $t0, $t0, $t0\n\t" \
+ "move %0, $a7\n\t" /* result */ \
+ : "=r" (_zzq_result) \
+ : "r" (_zzq_default), "r" (&_zzq_args[0]) \
+ : "$a7", "$t0", "memory"); \
+ _zzq_result; \
+ })

+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile uintptr_t __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* $a7 = guest_NRADDR */ \
+ "or[32] $t1, $t1, $t1\n\t" \
+ "move %0, $a7" /*result*/ \
+ : "=r" (__addr) \
+ : \
+ : "$a7"); \
+ _zzq_orig->nraddr = __addr; \
+ }

+#define VALGRIND_CALL_NOREDIR_T9 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* call-noredir $25 */ \
+ "or[32] $t2, $t2, $t2\n\t"

+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "or[32] $t3, $t3, $t3\n\t" \
+ ); \
+ } while (0)

+#endif
+/* Insert assembly code for other platforms here... */

+#endif /* NVALGRIND */


+/* ------------------------------------------------------------------ */
+/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */
+/* ugly. It's the least-worst tradeoff I can think of. */
+/* ------------------------------------------------------------------ */

+/* This section defines magic (a.k.a. appalling-hack) macros for doing
+ guaranteed-no-redirection macros, so as to get from function
+ wrappers to the functions they are wrapping. The whole point is to
+ construct standard call sequences, but to do the call itself with a
+ special no-redirect call pseudo-instruction that the JIT
+ understands and handles specially. This section is long and
+ repetitious, and I can't see a way to make it shorter.

+ The naming scheme is as follows:

+ CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}

+ 'W' stands for "word" and 'v' for "void". Hence there are
+ different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
+ and for each, the possibility of returning a word-typed result, or
+ no result.
+*/

+/* Use these to write the name of your wrapper. NOTE: duplicates
+ VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. NOTE also: inserts
+ the default behaviour equivalence class tag "0000" into the name.
+ See pub_tool_redir.h for details -- normally you don't need to
+ think about this, though. */

+/* Use an extra level of macroisation so as to ensure the soname/fnname
+ args are fully macro-expanded before pasting them together. 
*/ +#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd + +#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \ + VG_CONCAT4(_vgw00000ZU_,soname,_,fnname) + +#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \ + VG_CONCAT4(_vgw00000ZZ_,soname,_,fnname) + +/* Use this macro from within a wrapper function to collect the + context (address and possibly other info) of the original function. + Once you have that you can then use it in one of the CALL_FN_ + macros. The type of the argument _lval is OrigFn. */ +#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval) + +/* Also provide end-user facilities for function replacement, rather + than wrapping. A replacement function differs from a wrapper in + that it has no way to get hold of the original function being + called, and hence no way to call onwards to it. In a replacement + function, VALGRIND_GET_ORIG_FN always returns zero. */ + +#define I_REPLACE_SONAME_FNNAME_ZU(soname,fnname) \ + VG_CONCAT4(_vgr00000ZU_,soname,_,fnname) + +#define I_REPLACE_SONAME_FNNAME_ZZ(soname,fnname) \ + VG_CONCAT4(_vgr00000ZZ_,soname,_,fnname) + +/* Derivatives of the main macros below, for calling functions + returning void. */ + +#define CALL_FN_v_v(fnptr) \ + do { volatile uintptr_t _junk; \ + CALL_FN_W_v(_junk,fnptr); } while (0) + +#define CALL_FN_v_W(fnptr, arg1) \ + do { volatile uintptr_t _junk; \ + CALL_FN_W_W(_junk,fnptr,arg1); } while (0) + +#define CALL_FN_v_WW(fnptr, arg1,arg2) \ + do { volatile uintptr_t _junk; \ + CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0) + +#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \ + do { volatile uintptr_t _junk; \ + CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0) + +#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \ + do { volatile uintptr_t _junk; \ + CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0) + +#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \ + do { volatile uintptr_t _junk; \ + CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0) + +#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { volatile uintptr_t _junk; \ + CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0) + +#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \ + do { volatile uintptr_t _junk; \ + CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0) + +/* ----------------- x86-{linux,darwin,solaris} ---------------- */ + +#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \ + || defined(PLAT_x86_solaris) || defined(PLAT_x86_freebsd) + +/* These regs are trashed by the hidden call. No need to mention eax + as gcc can already see that, plus causes gcc to bomb. */ +#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx" + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +#define VALGRIND_ALIGN_STACK \ + "movl %%esp,%%edi\n\t" \ + "andl $0xfffffff0,%%esp\n\t" +#define VALGRIND_RESTORE_STACK \ + "movl %%edi,%%esp\n\t" + +/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned + long) == 4. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[1]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[2]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $12, %%esp\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[3]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $8, %%esp\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[4]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $4, %%esp\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[5]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[6]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = 
(uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $12, %%esp\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[7]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $8, %%esp\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[8]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $4, %%esp\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[9]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ 
"cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[10]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $12, %%esp\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[11]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + _argvec[10] = (uintptr_t)(arg10); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $8, %%esp\n\t" \ + "pushl 40(%%eax)\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[12]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + _argvec[10] = (uintptr_t)(arg10); \ + _argvec[11] = (uintptr_t)(arg11); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $4, %%esp\n\t" \ + "pushl 44(%%eax)\n\t" \ + "pushl 40(%%eax)\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + 
VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[13]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + _argvec[10] = (uintptr_t)(arg10); \ + _argvec[11] = (uintptr_t)(arg11); \ + _argvec[12] = (uintptr_t)(arg12); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "pushl 48(%%eax)\n\t" \ + "pushl 44(%%eax)\n\t" \ + "pushl 40(%%eax)\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_x86_linux || PLAT_x86_darwin || PLAT_x86_solaris */ + +/* ---------------- amd64-{linux,darwin,solaris} --------------- */ + +#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \ + || defined(PLAT_amd64_solaris) || defined(PLAT_amd64_freebsd) + +/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */ + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \ + "rdi", "r8", "r9", "r10", "r11" + +/* This is all pretty complex. It's so as to make stack unwinding + work reliably. See bug 243270. The basic problem is the sub and + add of 128 of %rsp in all of the following macros. If gcc believes + the CFA is in %rsp, then unwinding may fail, because what's at the + CFA is not what gcc "expected" when it constructs the CFIs for the + places where the macros are instantiated. + + But we can't just add a CFI annotation to increase the CFA offset + by 128, to match the sub of 128 from %rsp, because we don't know + whether gcc has chosen %rsp as the CFA at that point, or whether it + has chosen some other register (eg, %rbp). In the latter case, + adding a CFI annotation to change the CFA offset is simply wrong. + + So the solution is to get hold of the CFA using + __builtin_dwarf_cfa(), put it in a known register, and add a + CFI annotation to say what the register is. We choose %rbp for + this (perhaps perversely), because: + + (1) %rbp is already subject to unwinding. If a new register was + chosen then the unwinder would have to unwind it in all stack + traces, which is expensive, and + + (2) %rbp is already subject to precise exception updates in the + JIT. If a new register was chosen, we'd have to have precise + exceptions for it too, which reduces performance of the + generated code. + + However .. one extra complication. 
We can't just whack the result
+ of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
+ list of trashed registers at the end of the inline assembly
+ fragments; gcc won't allow %rbp to appear in that list. Hence
+ instead we need to stash %rbp in %r15 for the duration of the asm,
+ and say that %r15 is trashed instead. gcc seems happy to go with
+ that.

+ Oh .. and this all needs to be conditionalised so that it is
+ unchanged from before this commit, when compiled with older gccs
+ that don't support __builtin_dwarf_cfa. Furthermore, since
+ this header file is freestanding, it has to be independent of
+ config.h, and so the following conditionalisation cannot depend on
+ configure time checks.

+ Although it's not clear from
+ 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
+ this expression excludes Darwin.
+ .cfi directives in Darwin assembly appear to be completely
+ different and I haven't investigated how they work.

+ For even more entertainment value, note we have to use the
+ completely undocumented __builtin_dwarf_cfa(), which appears to
+ really compute the CFA, whereas __builtin_frame_address(0) claims
+ to but actually doesn't. See
+ https://bugs.kde.org/show_bug.cgi?id=243270#c47
+*/
+#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
+# define __FRAME_POINTER \
+ ,"r"(__builtin_dwarf_cfa())
+# define VALGRIND_CFI_PROLOGUE \
+ "movq %%rbp, %%r15\n\t" \
+ "movq %2, %%rbp\n\t" \
+ ".cfi_remember_state\n\t" \
+ ".cfi_def_cfa rbp, 0\n\t"
+# define VALGRIND_CFI_EPILOGUE \
+ "movq %%r15, %%rbp\n\t" \
+ ".cfi_restore_state\n\t"
+#else
+# define __FRAME_POINTER
+# define VALGRIND_CFI_PROLOGUE
+# define VALGRIND_CFI_EPILOGUE
+#endif

+/* Macros to save and align the stack before making a function
+ call and restore it afterwards as gcc may not keep the stack
+ pointer aligned if it doesn't realise calls are being made
+ to other functions. */

+#define VALGRIND_ALIGN_STACK \
+ "movq %%rsp,%%r14\n\t" \
+ "andq $0xfffffffffffffff0,%%rsp\n\t"
+#define VALGRIND_RESTORE_STACK \
+ "movq %%r14,%%rsp\n\t"

+/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
+ long) == 8. */

+/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_
+ macros. In order not to trash the stack redzone, we need to drop
+ %rsp by 128 before the hidden call, and restore afterwards. The
+ nastiness is that it is only by luck that the stack still appears
+ to be unwindable during the hidden call - since then the behaviour
+ of any routine using this macro does not match what the CFI data
+ says. Sigh.

+ Why is this important? Imagine that a wrapper has a stack-allocated
+ local, and passes to the hidden call a pointer to it.
+ Because gcc does not know about the hidden call, it may allocate
+ that local in the redzone. Unfortunately the hidden call may then
+ trash it before it comes to use it. So we must step clear of the
+ redzone, for the duration of the hidden call, to make it safe.

+ Probably the same problem afflicts the other redzone-style ABIs too
+ (ppc64-linux); but for those, the stack is
+ self-describing (none of this CFI nonsense) so at least messing
+ with the stack pointer doesn't give a danger of non-unwindable
+ stack. 
*/
+
+#define CALL_FN_W_v(lval, orig) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[1]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      __asm__ volatile( \
+         VALGRIND_CFI_PROLOGUE \
+         VALGRIND_ALIGN_STACK \
+         "subq $128,%%rsp\n\t" \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
+         VALGRIND_CALL_NOREDIR_RAX \
+         VALGRIND_RESTORE_STACK \
+         VALGRIND_CFI_EPILOGUE \
+         : /*out*/ "=a" (_res) \
+         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[2]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)(arg1); \
+      __asm__ volatile( \
+         VALGRIND_CFI_PROLOGUE \
+         VALGRIND_ALIGN_STACK \
+         "subq $128,%%rsp\n\t" \
+         "movq 8(%%rax), %%rdi\n\t" \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
+         VALGRIND_CALL_NOREDIR_RAX \
+         VALGRIND_RESTORE_STACK \
+         VALGRIND_CFI_EPILOGUE \
+         : /*out*/ "=a" (_res) \
+         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)(arg1); \
+      _argvec[2] = (uintptr_t)(arg2); \
+      __asm__ volatile( \
+         VALGRIND_CFI_PROLOGUE \
+         VALGRIND_ALIGN_STACK \
+         "subq $128,%%rsp\n\t" \
+         "movq 16(%%rax), %%rsi\n\t" \
+         "movq 8(%%rax), %%rdi\n\t" \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
+         VALGRIND_CALL_NOREDIR_RAX \
+         VALGRIND_RESTORE_STACK \
+         VALGRIND_CFI_EPILOGUE \
+         : /*out*/ "=a" (_res) \
+         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[4]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)(arg1); \
+      _argvec[2] = (uintptr_t)(arg2); \
+      _argvec[3] = (uintptr_t)(arg3); \
+      __asm__ volatile( \
+         VALGRIND_CFI_PROLOGUE \
+         VALGRIND_ALIGN_STACK \
+         "subq $128,%%rsp\n\t" \
+         "movq 24(%%rax), %%rdx\n\t" \
+         "movq 16(%%rax), %%rsi\n\t" \
+         "movq 8(%%rax), %%rdi\n\t" \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
+         VALGRIND_CALL_NOREDIR_RAX \
+         VALGRIND_RESTORE_STACK \
+         VALGRIND_CFI_EPILOGUE \
+         : /*out*/ "=a" (_res) \
+         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[5]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)(arg1); \
+      _argvec[2] = (uintptr_t)(arg2); \
+      _argvec[3] = (uintptr_t)(arg3); \
+      _argvec[4] = (uintptr_t)(arg4); \
+      __asm__ volatile( \
+         VALGRIND_CFI_PROLOGUE \
+         VALGRIND_ALIGN_STACK \
+         "subq $128,%%rsp\n\t" \
+         "movq 32(%%rax), %%rcx\n\t" \
+         "movq 24(%%rax), %%rdx\n\t" \
+         "movq 16(%%rax), %%rsi\n\t" \
+         "movq 8(%%rax), %%rdi\n\t" \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
+         VALGRIND_CALL_NOREDIR_RAX \
+         VALGRIND_RESTORE_STACK \
+         VALGRIND_CFI_EPILOGUE \
+         : /*out*/ "=a" (_res) \
+         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[6]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)(arg1); \
+      _argvec[2] = (uintptr_t)(arg2); \
+      _argvec[3] = (uintptr_t)(arg3); \
+      _argvec[4] = (uintptr_t)(arg4); \
+      _argvec[5] = (uintptr_t)(arg5); \
+      __asm__ volatile( \
+         VALGRIND_CFI_PROLOGUE \
+         VALGRIND_ALIGN_STACK \
+         "subq $128,%%rsp\n\t" \
+         "movq 40(%%rax), %%r8\n\t" \
+         "movq 32(%%rax), %%rcx\n\t" \
+         "movq 24(%%rax), %%rdx\n\t" \
+         "movq 16(%%rax), %%rsi\n\t" \
+         "movq 8(%%rax), %%rdi\n\t" \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
+         VALGRIND_CALL_NOREDIR_RAX \
+         VALGRIND_RESTORE_STACK \
+         VALGRIND_CFI_EPILOGUE \
+         : /*out*/ "=a" (_res) \
+         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[7]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)(arg1); \
+      _argvec[2] = (uintptr_t)(arg2); \
+      _argvec[3] = (uintptr_t)(arg3); \
+      _argvec[4] = (uintptr_t)(arg4); \
+      _argvec[5] = (uintptr_t)(arg5); \
+      _argvec[6] = (uintptr_t)(arg6); \
+      __asm__ volatile( \
+         VALGRIND_CFI_PROLOGUE \
+         VALGRIND_ALIGN_STACK \
+         "subq $128,%%rsp\n\t" \
+         "movq 48(%%rax), %%r9\n\t" \
+         "movq 40(%%rax), %%r8\n\t" \
+         "movq 32(%%rax), %%rcx\n\t" \
+         "movq 24(%%rax), %%rdx\n\t" \
+         "movq 16(%%rax), %%rsi\n\t" \
+         "movq 8(%%rax), %%rdi\n\t" \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
+         VALGRIND_CALL_NOREDIR_RAX \
+         VALGRIND_RESTORE_STACK \
+         VALGRIND_CFI_EPILOGUE \
+         : /*out*/ "=a" (_res) \
+         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                     arg7) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[8]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)(arg1); \
+      _argvec[2] = (uintptr_t)(arg2); \
+      _argvec[3] = (uintptr_t)(arg3); \
+      _argvec[4] = (uintptr_t)(arg4); \
+      _argvec[5] = (uintptr_t)(arg5); \
+      _argvec[6] = (uintptr_t)(arg6); \
+      _argvec[7] = (uintptr_t)(arg7); \
+      __asm__ volatile( \
+         VALGRIND_CFI_PROLOGUE \
+         VALGRIND_ALIGN_STACK \
+         "subq $136,%%rsp\n\t" \
+         "pushq 56(%%rax)\n\t" \
+         "movq 48(%%rax), %%r9\n\t" \
+         "movq 40(%%rax), %%r8\n\t" \
+         "movq 32(%%rax), %%rcx\n\t" \
+         "movq 24(%%rax), %%rdx\n\t" \
+         "movq 16(%%rax), %%rsi\n\t" \
+         "movq 8(%%rax), %%rdi\n\t" \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
+         VALGRIND_CALL_NOREDIR_RAX \
+         VALGRIND_RESTORE_STACK \
+         VALGRIND_CFI_EPILOGUE \
+         : /*out*/ "=a" (_res) \
+         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                     arg7,arg8) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[9]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)(arg1); \
+      _argvec[2] = (uintptr_t)(arg2); \
+      _argvec[3] = (uintptr_t)(arg3); \
+      _argvec[4] = (uintptr_t)(arg4); \
+      _argvec[5] = (uintptr_t)(arg5); \
+      _argvec[6] = (uintptr_t)(arg6); \
+      _argvec[7] = (uintptr_t)(arg7); \
+      _argvec[8] = (uintptr_t)(arg8); \
+      __asm__ volatile( \
+         VALGRIND_CFI_PROLOGUE \
+         VALGRIND_ALIGN_STACK \
+         "subq $128,%%rsp\n\t" \
+         "pushq 64(%%rax)\n\t" \
+         "pushq 56(%%rax)\n\t" \
+         "movq 48(%%rax), %%r9\n\t" \
+         "movq 40(%%rax), %%r8\n\t" \
+         "movq 32(%%rax), %%rcx\n\t" \
+         "movq 24(%%rax), %%rdx\n\t" \
+         "movq 16(%%rax), %%rsi\n\t" \
+         "movq 8(%%rax), %%rdi\n\t" \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
+         VALGRIND_CALL_NOREDIR_RAX \
+         VALGRIND_RESTORE_STACK \
+         VALGRIND_CFI_EPILOGUE \
+         : /*out*/ "=a" (_res) \
+         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                     arg7,arg8,arg9) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[10]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)(arg1); \
+      _argvec[2] = (uintptr_t)(arg2); \
+      _argvec[3] = (uintptr_t)(arg3); \
+      _argvec[4] = (uintptr_t)(arg4); \
+      _argvec[5] = (uintptr_t)(arg5); \
+      _argvec[6] = (uintptr_t)(arg6); \
+      _argvec[7] = (uintptr_t)(arg7); \
+      _argvec[8] = (uintptr_t)(arg8); \
+      _argvec[9] = (uintptr_t)(arg9); \
+      __asm__ volatile( \
+         VALGRIND_CFI_PROLOGUE \
+         VALGRIND_ALIGN_STACK \
+         "subq $136,%%rsp\n\t" \
+         "pushq 72(%%rax)\n\t" \
+         "pushq 64(%%rax)\n\t" \
+         "pushq 56(%%rax)\n\t" \
+         "movq 48(%%rax), %%r9\n\t" \
+         "movq 40(%%rax), %%r8\n\t" \
+         "movq 32(%%rax), %%rcx\n\t" \
+         "movq 24(%%rax), %%rdx\n\t" \
+         "movq 16(%%rax), %%rsi\n\t" \
+         "movq 8(%%rax), %%rdi\n\t" \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
+         VALGRIND_CALL_NOREDIR_RAX \
+         VALGRIND_RESTORE_STACK \
+         VALGRIND_CFI_EPILOGUE \
+         : /*out*/ "=a" (_res) \
+         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                      arg7,arg8,arg9,arg10) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[11]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)(arg1); \
+      _argvec[2] = (uintptr_t)(arg2); \
+      _argvec[3] = (uintptr_t)(arg3); \
+      _argvec[4] = (uintptr_t)(arg4); \
+      _argvec[5] = (uintptr_t)(arg5); \
+      _argvec[6] = (uintptr_t)(arg6); \
+      _argvec[7] = (uintptr_t)(arg7); \
+      _argvec[8] = (uintptr_t)(arg8); \
+      _argvec[9] = (uintptr_t)(arg9); \
+      _argvec[10] = (uintptr_t)(arg10); \
+      __asm__ volatile( \
+         VALGRIND_CFI_PROLOGUE \
+         VALGRIND_ALIGN_STACK \
+         "subq $128,%%rsp\n\t" \
+         "pushq 80(%%rax)\n\t" \
+         "pushq 72(%%rax)\n\t" \
+         "pushq 64(%%rax)\n\t" \
+         "pushq 56(%%rax)\n\t" \
+         "movq 48(%%rax), %%r9\n\t" \
+         "movq 40(%%rax), %%r8\n\t" \
+         "movq 32(%%rax), %%rcx\n\t" \
+         "movq 24(%%rax), %%rdx\n\t" \
+         "movq 16(%%rax), %%rsi\n\t" \
+         "movq 8(%%rax), %%rdi\n\t" \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
+         VALGRIND_CALL_NOREDIR_RAX \
+         VALGRIND_RESTORE_STACK \
+         VALGRIND_CFI_EPILOGUE \
+         : /*out*/ "=a" (_res) \
+         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                      arg7,arg8,arg9,arg10,arg11) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[12]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)(arg1); \
+      _argvec[2] = (uintptr_t)(arg2); \
+      _argvec[3] = (uintptr_t)(arg3); \
+      _argvec[4] = (uintptr_t)(arg4); \
+      _argvec[5] = (uintptr_t)(arg5); \
+      _argvec[6] = (uintptr_t)(arg6); \
+      _argvec[7] = (uintptr_t)(arg7); \
+      _argvec[8] = (uintptr_t)(arg8); \
+      _argvec[9] = (uintptr_t)(arg9); \
+      _argvec[10] = (uintptr_t)(arg10); \
+      _argvec[11] = (uintptr_t)(arg11); \
+      __asm__ volatile( \
+         VALGRIND_CFI_PROLOGUE \
+         VALGRIND_ALIGN_STACK \
+         "subq $136,%%rsp\n\t" \
+         "pushq 88(%%rax)\n\t" \
+         "pushq 80(%%rax)\n\t" \
+         "pushq 72(%%rax)\n\t" \
+         "pushq 64(%%rax)\n\t" \
+         "pushq 56(%%rax)\n\t" \
+         "movq 48(%%rax), %%r9\n\t" \
+         "movq 40(%%rax), %%r8\n\t" \
+         "movq 32(%%rax), %%rcx\n\t" \
+         "movq 24(%%rax), %%rdx\n\t" \
+         "movq 16(%%rax), %%rsi\n\t" \
+         "movq 8(%%rax), %%rdi\n\t" \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
+         VALGRIND_CALL_NOREDIR_RAX \
+         VALGRIND_RESTORE_STACK \
+         VALGRIND_CFI_EPILOGUE \
+         : /*out*/ "=a" (_res) \
+         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                      arg7,arg8,arg9,arg10,arg11,arg12) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[13]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)(arg1); \
+      _argvec[2] = (uintptr_t)(arg2); \
+      _argvec[3] = (uintptr_t)(arg3); \
+      _argvec[4] = (uintptr_t)(arg4); \
+      _argvec[5] = (uintptr_t)(arg5); \
+      _argvec[6] = (uintptr_t)(arg6); \
+      _argvec[7] = (uintptr_t)(arg7); \
+      _argvec[8] = (uintptr_t)(arg8); \
+      _argvec[9] = (uintptr_t)(arg9); \
+      _argvec[10] = (uintptr_t)(arg10); \
+      _argvec[11] = (uintptr_t)(arg11); \
+      _argvec[12] = (uintptr_t)(arg12); \
+      __asm__ volatile( \
+         VALGRIND_CFI_PROLOGUE \
+         VALGRIND_ALIGN_STACK \
+         "subq $128,%%rsp\n\t" \
+         "pushq 96(%%rax)\n\t" \
+         "pushq 88(%%rax)\n\t" \
+         "pushq 80(%%rax)\n\t" \
+         "pushq 72(%%rax)\n\t" \
+         "pushq 64(%%rax)\n\t" \
+         "pushq 56(%%rax)\n\t" \
+         "movq 48(%%rax), %%r9\n\t" \
+         "movq 40(%%rax), %%r8\n\t" \
+         "movq 32(%%rax), %%rcx\n\t" \
+         "movq 24(%%rax), %%rdx\n\t" \
+         "movq 16(%%rax), %%rsi\n\t" \
+         "movq 8(%%rax), %%rdi\n\t" \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */ \
+         VALGRIND_CALL_NOREDIR_RAX \
+         VALGRIND_RESTORE_STACK \
+         VALGRIND_CFI_EPILOGUE \
+         : /*out*/ "=a" (_res) \
+         : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+/* This is useful for finding out about the on-stack stuff:
+
+   extern int f9  ( int,int,int,int,int,int,int,int,int );
+   extern int f10 ( int,int,int,int,int,int,int,int,int,int );
+   extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
+   extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
+
+   int g9 ( void ) {
+      return f9(11,22,33,44,55,66,77,88,99);
+   }
+   int g10 ( void ) {
+      return f10(11,22,33,44,55,66,77,88,99,110);
+   }
+   int g11 ( void ) {
+      return f11(11,22,33,44,55,66,77,88,99,110,121);
+   }
+   int g12 ( void ) {
+      return f12(11,22,33,44,55,66,77,88,99,110,121,132);
+   }
+*/
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+   "lr", "ctr", "xer", \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+   "r11", "r12", "r13"
+
+/* Macros to save and align the stack before making a function
+   call and restore it afterwards as gcc may not keep the stack
+   pointer aligned if it doesn't realise calls are being made
+   to other functions. */
+
+#define VALGRIND_ALIGN_STACK \
+   "mr 28,1\n\t" \
+   "rlwinm 1,1,0,0,27\n\t"
+#define VALGRIND_RESTORE_STACK \
+   "mr 1,28\n\t"
+
+/* These CALL_FN_ macros assume that on ppc32-linux,
+   sizeof(uintptr_t) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[1]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "lwz 11,0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         VALGRIND_RESTORE_STACK \
+         "mr %0,3" \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[0]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[2]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)arg1; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
+         "lwz 11,0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         VALGRIND_RESTORE_STACK \
+         "mr %0,3" \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[0]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)arg1; \
+      _argvec[2] = (uintptr_t)arg2; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
+         "lwz 4,8(11)\n\t" \
+         "lwz 11,0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         VALGRIND_RESTORE_STACK \
+         "mr %0,3" \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[0]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[4]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)arg1; \
+      _argvec[2] = (uintptr_t)arg2; \
+      _argvec[3] = (uintptr_t)arg3; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
+         "lwz 4,8(11)\n\t" \
+         "lwz 5,12(11)\n\t" \
+         "lwz 11,0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         VALGRIND_RESTORE_STACK \
+         "mr %0,3" \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[0]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[5]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)arg1; \
+      _argvec[2] = (uintptr_t)arg2; \
+      _argvec[3] = (uintptr_t)arg3; \
+      _argvec[4] = (uintptr_t)arg4; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
+         "lwz 4,8(11)\n\t" \
+         "lwz 5,12(11)\n\t" \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
+         "lwz 11,0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         VALGRIND_RESTORE_STACK \
+         "mr %0,3" \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[0]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[6]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)arg1; \
+      _argvec[2] = (uintptr_t)arg2; \
+      _argvec[3] = (uintptr_t)arg3; \
+      _argvec[4] = (uintptr_t)arg4; \
+      _argvec[5] = (uintptr_t)arg5; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
+         "lwz 4,8(11)\n\t" \
+         "lwz 5,12(11)\n\t" \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
+         "lwz 7,20(11)\n\t" \
+         "lwz 11,0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         VALGRIND_RESTORE_STACK \
+         "mr %0,3" \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[0]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[7]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)arg1; \
+      _argvec[2] = (uintptr_t)arg2; \
+      _argvec[3] = (uintptr_t)arg3; \
+      _argvec[4] = (uintptr_t)arg4; \
+      _argvec[5] = (uintptr_t)arg5; \
+      _argvec[6] = (uintptr_t)arg6; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
+         "lwz 4,8(11)\n\t" \
+         "lwz 5,12(11)\n\t" \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
+         "lwz 7,20(11)\n\t" \
+         "lwz 8,24(11)\n\t" \
+         "lwz 11,0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         VALGRIND_RESTORE_STACK \
+         "mr %0,3" \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[0]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                     arg7) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[8]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)arg1; \
+      _argvec[2] = (uintptr_t)arg2; \
+      _argvec[3] = (uintptr_t)arg3; \
+      _argvec[4] = (uintptr_t)arg4; \
+      _argvec[5] = (uintptr_t)arg5; \
+      _argvec[6] = (uintptr_t)arg6; \
+      _argvec[7] = (uintptr_t)arg7; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
+         "lwz 4,8(11)\n\t" \
+         "lwz 5,12(11)\n\t" \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
+         "lwz 7,20(11)\n\t" \
+         "lwz 8,24(11)\n\t" \
+         "lwz 9,28(11)\n\t" \
+         "lwz 11,0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         VALGRIND_RESTORE_STACK \
+         "mr %0,3" \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[0]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                     arg7,arg8) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[9]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)arg1; \
+      _argvec[2] = (uintptr_t)arg2; \
+      _argvec[3] = (uintptr_t)arg3; \
+      _argvec[4] = (uintptr_t)arg4; \
+      _argvec[5] = (uintptr_t)arg5; \
+      _argvec[6] = (uintptr_t)arg6; \
+      _argvec[7] = (uintptr_t)arg7; \
+      _argvec[8] = (uintptr_t)arg8; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
+         "lwz 4,8(11)\n\t" \
+         "lwz 5,12(11)\n\t" \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
+         "lwz 7,20(11)\n\t" \
+         "lwz 8,24(11)\n\t" \
+         "lwz 9,28(11)\n\t" \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+         "lwz 11,0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         VALGRIND_RESTORE_STACK \
+         "mr %0,3" \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[0]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                     arg7,arg8,arg9) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[10]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)arg1; \
+      _argvec[2] = (uintptr_t)arg2; \
+      _argvec[3] = (uintptr_t)arg3; \
+      _argvec[4] = (uintptr_t)arg4; \
+      _argvec[5] = (uintptr_t)arg5; \
+      _argvec[6] = (uintptr_t)arg6; \
+      _argvec[7] = (uintptr_t)arg7; \
+      _argvec[8] = (uintptr_t)arg8; \
+      _argvec[9] = (uintptr_t)arg9; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "addi 1,1,-16\n\t" \
+         /* arg9 */ \
+         "lwz 3,36(11)\n\t" \
+         "stw 3,8(1)\n\t" \
+         /* args1-8 */ \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
+         "lwz 4,8(11)\n\t" \
+         "lwz 5,12(11)\n\t" \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
+         "lwz 7,20(11)\n\t" \
+         "lwz 8,24(11)\n\t" \
+         "lwz 9,28(11)\n\t" \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+         "lwz 11,0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         VALGRIND_RESTORE_STACK \
+         "mr %0,3" \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[0]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                      arg7,arg8,arg9,arg10) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[11]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)arg1; \
+      _argvec[2] = (uintptr_t)arg2; \
+      _argvec[3] = (uintptr_t)arg3; \
+      _argvec[4] = (uintptr_t)arg4; \
+      _argvec[5] = (uintptr_t)arg5; \
+      _argvec[6] = (uintptr_t)arg6; \
+      _argvec[7] = (uintptr_t)arg7; \
+      _argvec[8] = (uintptr_t)arg8; \
+      _argvec[9] = (uintptr_t)arg9; \
+      _argvec[10] = (uintptr_t)arg10; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "addi 1,1,-16\n\t" \
+         /* arg10 */ \
+         "lwz 3,40(11)\n\t" \
+         "stw 3,12(1)\n\t" \
+         /* arg9 */ \
+         "lwz 3,36(11)\n\t" \
+         "stw 3,8(1)\n\t" \
+         /* args1-8 */ \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
+         "lwz 4,8(11)\n\t" \
+         "lwz 5,12(11)\n\t" \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
+         "lwz 7,20(11)\n\t" \
+         "lwz 8,24(11)\n\t" \
+         "lwz 9,28(11)\n\t" \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+         "lwz 11,0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         VALGRIND_RESTORE_STACK \
+         "mr %0,3" \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[0]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                      arg7,arg8,arg9,arg10,arg11) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[12]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)arg1; \
+      _argvec[2] = (uintptr_t)arg2; \
+      _argvec[3] = (uintptr_t)arg3; \
+      _argvec[4] = (uintptr_t)arg4; \
+      _argvec[5] = (uintptr_t)arg5; \
+      _argvec[6] = (uintptr_t)arg6; \
+      _argvec[7] = (uintptr_t)arg7; \
+      _argvec[8] = (uintptr_t)arg8; \
+      _argvec[9] = (uintptr_t)arg9; \
+      _argvec[10] = (uintptr_t)arg10; \
+      _argvec[11] = (uintptr_t)arg11; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "addi 1,1,-32\n\t" \
+         /* arg11 */ \
+         "lwz 3,44(11)\n\t" \
+         "stw 3,16(1)\n\t" \
+         /* arg10 */ \
+         "lwz 3,40(11)\n\t" \
+         "stw 3,12(1)\n\t" \
+         /* arg9 */ \
+         "lwz 3,36(11)\n\t" \
+         "stw 3,8(1)\n\t" \
+         /* args1-8 */ \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
+         "lwz 4,8(11)\n\t" \
+         "lwz 5,12(11)\n\t" \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
+         "lwz 7,20(11)\n\t" \
+         "lwz 8,24(11)\n\t" \
+         "lwz 9,28(11)\n\t" \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+         "lwz 11,0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         VALGRIND_RESTORE_STACK \
+         "mr %0,3" \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[0]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                      arg7,arg8,arg9,arg10,arg11,arg12) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[13]; \
+      volatile uintptr_t _res; \
+      _argvec[0] = (uintptr_t)_orig.nraddr; \
+      _argvec[1] = (uintptr_t)arg1; \
+      _argvec[2] = (uintptr_t)arg2; \
+      _argvec[3] = (uintptr_t)arg3; \
+      _argvec[4] = (uintptr_t)arg4; \
+      _argvec[5] = (uintptr_t)arg5; \
+      _argvec[6] = (uintptr_t)arg6; \
+      _argvec[7] = (uintptr_t)arg7; \
+      _argvec[8] = (uintptr_t)arg8; \
+      _argvec[9] = (uintptr_t)arg9; \
+      _argvec[10] = (uintptr_t)arg10; \
+      _argvec[11] = (uintptr_t)arg11; \
+      _argvec[12] = (uintptr_t)arg12; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "addi 1,1,-32\n\t" \
+         /* arg12 */ \
+         "lwz 3,48(11)\n\t" \
+         "stw 3,20(1)\n\t" \
+         /* arg11 */ \
+         "lwz 3,44(11)\n\t" \
+         "stw 3,16(1)\n\t" \
+         /* arg10 */ \
+         "lwz 3,40(11)\n\t" \
+         "stw 3,12(1)\n\t" \
+         /* arg9 */ \
+         "lwz 3,36(11)\n\t" \
+         "stw 3,8(1)\n\t" \
+         /* args1-8 */ \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
+         "lwz 4,8(11)\n\t" \
+         "lwz 5,12(11)\n\t" \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
+         "lwz 7,20(11)\n\t" \
+         "lwz 8,24(11)\n\t" \
+         "lwz 9,28(11)\n\t" \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+         "lwz 11,0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         VALGRIND_RESTORE_STACK \
+         "mr %0,3" \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[0]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#endif /* PLAT_ppc32_linux */
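The g9..g12 probes in the comment at the top of this section mirror what the 9W..12W macros do by hand: the first eight words travel in r3-r10 and the rest go below the frame, which is why CALL_FN_W_9W opens a 16-byte frame with addi 1,1,-16 and spills arg9 to 8(1). A hedged sketch of a nine-argument wrapper exercising that stack-spill path (f9 is hypothetical, echoing the probe comment):

    #include "valgrind.h"

    int I_WRAP_SONAME_FN_ZU(NONE, f9)(int a1, int a2, int a3, int a4,
                                      int a5, int a6, int a7, int a8,
                                      int a9)
    {
        OrigFn fn;
        int r;
        VALGRIND_GET_ORIG_FN(fn);
        /* args 1-8 are loaded into r3-r10; arg9 is stored on the stack */
        CALL_FN_W_9W(r, fn, a1, a2, a3, a4, a5, a6, a7, a8, a9);
        return r;
    }
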
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64be_linux)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+   "lr", "ctr", "xer", \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+   "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+   "r11", "r12", "r13"
+
+/* Macros to save and align the stack before making a function
+   call and restore it afterwards as gcc may not keep the stack
+   pointer aligned if it doesn't realise calls are being made
+   to other functions. */
+
+#define VALGRIND_ALIGN_STACK \
+   "mr 28,1\n\t" \
+   "rldicr 1,1,0,59\n\t"
+#define VALGRIND_RESTORE_STACK \
+   "mr 1,28\n\t"
+
+/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
+   long) == 8. */
+
+#define CALL_FN_W_v(lval, orig) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+0]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "std 2,-16(11)\n\t" /* save tocptr */ \
+         "ld 2,-8(11)\n\t"   /* use nraddr's tocptr */ \
+         "ld 11, 0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         "mr 11,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(11)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+1]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "std 2,-16(11)\n\t" /* save tocptr */ \
+         "ld 2,-8(11)\n\t"   /* use nraddr's tocptr */ \
+         "ld 3, 8(11)\n\t"   /* arg1->r3 */ \
+         "ld 11, 0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         "mr 11,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(11)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+2]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "std 2,-16(11)\n\t" /* save tocptr */ \
+         "ld 2,-8(11)\n\t"   /* use nraddr's tocptr */ \
+         "ld 3, 8(11)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(11)\n\t"  /* arg2->r4 */ \
+         "ld 11, 0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         "mr 11,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(11)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+3]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "std 2,-16(11)\n\t" /* save tocptr */ \
+         "ld 2,-8(11)\n\t"   /* use nraddr's tocptr */ \
+         "ld 3, 8(11)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(11)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(11)\n\t"  /* arg3->r5 */ \
+         "ld 11, 0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         "mr 11,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(11)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+4]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "std 2,-16(11)\n\t" /* save tocptr */ \
+         "ld 2,-8(11)\n\t"   /* use nraddr's tocptr */ \
+         "ld 3, 8(11)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(11)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(11)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(11)\n\t"  /* arg4->r6 */ \
+         "ld 11, 0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         "mr 11,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(11)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+5]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      _argvec[2+5] = (uintptr_t)arg5; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "std 2,-16(11)\n\t" /* save tocptr */ \
+         "ld 2,-8(11)\n\t"   /* use nraddr's tocptr */ \
+         "ld 3, 8(11)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(11)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(11)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(11)\n\t"  /* arg4->r6 */ \
+         "ld 7, 40(11)\n\t"  /* arg5->r7 */ \
+         "ld 11, 0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         "mr 11,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(11)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+6]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      _argvec[2+5] = (uintptr_t)arg5; \
+      _argvec[2+6] = (uintptr_t)arg6; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "std 2,-16(11)\n\t" /* save tocptr */ \
+         "ld 2,-8(11)\n\t"   /* use nraddr's tocptr */ \
+         "ld 3, 8(11)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(11)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(11)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(11)\n\t"  /* arg4->r6 */ \
+         "ld 7, 40(11)\n\t"  /* arg5->r7 */ \
+         "ld 8, 48(11)\n\t"  /* arg6->r8 */ \
+         "ld 11, 0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         "mr 11,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(11)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                     arg7) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+7]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      _argvec[2+5] = (uintptr_t)arg5; \
+      _argvec[2+6] = (uintptr_t)arg6; \
+      _argvec[2+7] = (uintptr_t)arg7; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "std 2,-16(11)\n\t" /* save tocptr */ \
+         "ld 2,-8(11)\n\t"   /* use nraddr's tocptr */ \
+         "ld 3, 8(11)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(11)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(11)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(11)\n\t"  /* arg4->r6 */ \
+         "ld 7, 40(11)\n\t"  /* arg5->r7 */ \
+         "ld 8, 48(11)\n\t"  /* arg6->r8 */ \
+         "ld 9, 56(11)\n\t"  /* arg7->r9 */ \
+         "ld 11, 0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         "mr 11,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(11)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                     arg7,arg8) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+8]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      _argvec[2+5] = (uintptr_t)arg5; \
+      _argvec[2+6] = (uintptr_t)arg6; \
+      _argvec[2+7] = (uintptr_t)arg7; \
+      _argvec[2+8] = (uintptr_t)arg8; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "std 2,-16(11)\n\t" /* save tocptr */ \
+         "ld 2,-8(11)\n\t"   /* use nraddr's tocptr */ \
+         "ld 3, 8(11)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(11)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(11)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(11)\n\t"  /* arg4->r6 */ \
+         "ld 7, 40(11)\n\t"  /* arg5->r7 */ \
+         "ld 8, 48(11)\n\t"  /* arg6->r8 */ \
+         "ld 9, 56(11)\n\t"  /* arg7->r9 */ \
+         "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+         "ld 11, 0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         "mr 11,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(11)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                     arg7,arg8,arg9) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+9]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      _argvec[2+5] = (uintptr_t)arg5; \
+      _argvec[2+6] = (uintptr_t)arg6; \
+      _argvec[2+7] = (uintptr_t)arg7; \
+      _argvec[2+8] = (uintptr_t)arg8; \
+      _argvec[2+9] = (uintptr_t)arg9; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "std 2,-16(11)\n\t" /* save tocptr */ \
+         "ld 2,-8(11)\n\t"   /* use nraddr's tocptr */ \
+         "addi 1,1,-128\n\t" /* expand stack frame */ \
+         /* arg9 */ \
+         "ld 3,72(11)\n\t" \
+         "std 3,112(1)\n\t" \
+         /* args1-8 */ \
+         "ld 3, 8(11)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(11)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(11)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(11)\n\t"  /* arg4->r6 */ \
+         "ld 7, 40(11)\n\t"  /* arg5->r7 */ \
+         "ld 8, 48(11)\n\t"  /* arg6->r8 */ \
+         "ld 9, 56(11)\n\t"  /* arg7->r9 */ \
+         "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+         "ld 11, 0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         "mr 11,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(11)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                      arg7,arg8,arg9,arg10) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+10]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      _argvec[2+5] = (uintptr_t)arg5; \
+      _argvec[2+6] = (uintptr_t)arg6; \
+      _argvec[2+7] = (uintptr_t)arg7; \
+      _argvec[2+8] = (uintptr_t)arg8; \
+      _argvec[2+9] = (uintptr_t)arg9; \
+      _argvec[2+10] = (uintptr_t)arg10; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "std 2,-16(11)\n\t" /* save tocptr */ \
+         "ld 2,-8(11)\n\t"   /* use nraddr's tocptr */ \
+         "addi 1,1,-128\n\t" /* expand stack frame */ \
+         /* arg10 */ \
+         "ld 3,80(11)\n\t" \
+         "std 3,120(1)\n\t" \
+         /* arg9 */ \
+         "ld 3,72(11)\n\t" \
+         "std 3,112(1)\n\t" \
+         /* args1-8 */ \
+         "ld 3, 8(11)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(11)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(11)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(11)\n\t"  /* arg4->r6 */ \
+         "ld 7, 40(11)\n\t"  /* arg5->r7 */ \
+         "ld 8, 48(11)\n\t"  /* arg6->r8 */ \
+         "ld 9, 56(11)\n\t"  /* arg7->r9 */ \
+         "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+         "ld 11, 0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         "mr 11,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(11)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                      arg7,arg8,arg9,arg10,arg11) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+11]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      _argvec[2+5] = (uintptr_t)arg5; \
+      _argvec[2+6] = (uintptr_t)arg6; \
+      _argvec[2+7] = (uintptr_t)arg7; \
+      _argvec[2+8] = (uintptr_t)arg8; \
+      _argvec[2+9] = (uintptr_t)arg9; \
+      _argvec[2+10] = (uintptr_t)arg10; \
+      _argvec[2+11] = (uintptr_t)arg11; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "std 2,-16(11)\n\t" /* save tocptr */ \
+         "ld 2,-8(11)\n\t"   /* use nraddr's tocptr */ \
+         "addi 1,1,-144\n\t" /* expand stack frame */ \
+         /* arg11 */ \
+         "ld 3,88(11)\n\t" \
+         "std 3,128(1)\n\t" \
+         /* arg10 */ \
+         "ld 3,80(11)\n\t" \
+         "std 3,120(1)\n\t" \
+         /* arg9 */ \
+         "ld 3,72(11)\n\t" \
+         "std 3,112(1)\n\t" \
+         /* args1-8 */ \
+         "ld 3, 8(11)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(11)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(11)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(11)\n\t"  /* arg4->r6 */ \
+         "ld 7, 40(11)\n\t"  /* arg5->r7 */ \
+         "ld 8, 48(11)\n\t"  /* arg6->r8 */ \
+         "ld 9, 56(11)\n\t"  /* arg7->r9 */ \
+         "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+         "ld 11, 0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         "mr 11,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(11)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                      arg7,arg8,arg9,arg10,arg11,arg12) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+12]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      _argvec[2+5] = (uintptr_t)arg5; \
+      _argvec[2+6] = (uintptr_t)arg6; \
+      _argvec[2+7] = (uintptr_t)arg7; \
+      _argvec[2+8] = (uintptr_t)arg8; \
+      _argvec[2+9] = (uintptr_t)arg9; \
+      _argvec[2+10] = (uintptr_t)arg10; \
+      _argvec[2+11] = (uintptr_t)arg11; \
+      _argvec[2+12] = (uintptr_t)arg12; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 11,%1\n\t" \
+         "std 2,-16(11)\n\t" /* save tocptr */ \
+         "ld 2,-8(11)\n\t"   /* use nraddr's tocptr */ \
+         "addi 1,1,-144\n\t" /* expand stack frame */ \
+         /* arg12 */ \
+         "ld 3,96(11)\n\t" \
+         "std 3,136(1)\n\t" \
+         /* arg11 */ \
+         "ld 3,88(11)\n\t" \
+         "std 3,128(1)\n\t" \
+         /* arg10 */ \
+         "ld 3,80(11)\n\t" \
+         "std 3,120(1)\n\t" \
+         /* arg9 */ \
+         "ld 3,72(11)\n\t" \
+         "std 3,112(1)\n\t" \
+         /* args1-8 */ \
+         "ld 3, 8(11)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(11)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(11)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(11)\n\t"  /* arg4->r6 */ \
+         "ld 7, 40(11)\n\t"  /* arg5->r7 */ \
+         "ld 8, 48(11)\n\t"  /* arg6->r8 */ \
+         "ld 9, 56(11)\n\t"  /* arg7->r9 */ \
+         "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+         "ld 11, 0(11)\n\t"  /* target->r11 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+         "mr 11,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(11)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#endif /* PLAT_ppc64be_linux */
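On ppc64 the extra wrinkle is the TOC pointer: OrigFn carries the callee's r2 alongside nraddr, and the macros above park the caller's r2 in _argvec (addressed at -16 off &_argvec[2]) so it can be restored after the hidden call. From a wrapper's point of view none of this is visible; the usual soname-matched pattern still applies, e.g. the classic malloc interposer (libcZdsoZa is the Z-encoding of "libc.so*"):

    #include <stdlib.h>
    #include "valgrind.h"

    void *I_WRAP_SONAME_FN_ZU(libcZdsoZa, malloc)(size_t n)
    {
        OrigFn fn;
        void *p;
        VALGRIND_GET_ORIG_FN(fn);  /* TOC handling happens inside CALL_FN_W_W */
        CALL_FN_W_W(p, fn, n);
        VALGRIND_PRINTF("malloc(%lu) -> %p\n", (unsigned long)n, p);
        return p;
    }
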
+
+/* ------------------------- ppc64le-linux ----------------------- */
+#if defined(PLAT_ppc64le_linux)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+   "lr", "ctr", "xer", \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+   "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+   "r11", "r12", "r13"
+
+/* Macros to save and align the stack before making a function
+   call and restore it afterwards as gcc may not keep the stack
+   pointer aligned if it doesn't realise calls are being made
+   to other functions. */
+
+#define VALGRIND_ALIGN_STACK \
+   "mr 28,1\n\t" \
+   "rldicr 1,1,0,59\n\t"
+#define VALGRIND_RESTORE_STACK \
+   "mr 1,28\n\t"
+
+/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
+   long) == 8. */
+
+#define CALL_FN_W_v(lval, orig) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+0]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 12,%1\n\t" \
+         "std 2,-16(12)\n\t" /* save tocptr */ \
+         "ld 2,-8(12)\n\t"   /* use nraddr's tocptr */ \
+         "ld 12, 0(12)\n\t"  /* target->r12 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+         "mr 12,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(12)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+1]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 12,%1\n\t" \
+         "std 2,-16(12)\n\t" /* save tocptr */ \
+         "ld 2,-8(12)\n\t"   /* use nraddr's tocptr */ \
+         "ld 3, 8(12)\n\t"   /* arg1->r3 */ \
+         "ld 12, 0(12)\n\t"  /* target->r12 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+         "mr 12,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(12)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+2]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 12,%1\n\t" \
+         "std 2,-16(12)\n\t" /* save tocptr */ \
+         "ld 2,-8(12)\n\t"   /* use nraddr's tocptr */ \
+         "ld 3, 8(12)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(12)\n\t"  /* arg2->r4 */ \
+         "ld 12, 0(12)\n\t"  /* target->r12 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+         "mr 12,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(12)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+3]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 12,%1\n\t" \
+         "std 2,-16(12)\n\t" /* save tocptr */ \
+         "ld 2,-8(12)\n\t"   /* use nraddr's tocptr */ \
+         "ld 3, 8(12)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(12)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(12)\n\t"  /* arg3->r5 */ \
+         "ld 12, 0(12)\n\t"  /* target->r12 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+         "mr 12,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(12)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+4]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 12,%1\n\t" \
+         "std 2,-16(12)\n\t" /* save tocptr */ \
+         "ld 2,-8(12)\n\t"   /* use nraddr's tocptr */ \
+         "ld 3, 8(12)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(12)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(12)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(12)\n\t"  /* arg4->r6 */ \
+         "ld 12, 0(12)\n\t"  /* target->r12 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+         "mr 12,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(12)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+5]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      _argvec[2+5] = (uintptr_t)arg5; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 12,%1\n\t" \
+         "std 2,-16(12)\n\t" /* save tocptr */ \
+         "ld 2,-8(12)\n\t"   /* use nraddr's tocptr */ \
+         "ld 3, 8(12)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(12)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(12)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(12)\n\t"  /* arg4->r6 */ \
+         "ld 7, 40(12)\n\t"  /* arg5->r7 */ \
+         "ld 12, 0(12)\n\t"  /* target->r12 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+         "mr 12,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(12)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+6]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      _argvec[2+5] = (uintptr_t)arg5; \
+      _argvec[2+6] = (uintptr_t)arg6; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 12,%1\n\t" \
+         "std 2,-16(12)\n\t" /* save tocptr */ \
+         "ld 2,-8(12)\n\t"   /* use nraddr's tocptr */ \
+         "ld 3, 8(12)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(12)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(12)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(12)\n\t"  /* arg4->r6 */ \
+         "ld 7, 40(12)\n\t"  /* arg5->r7 */ \
+         "ld 8, 48(12)\n\t"  /* arg6->r8 */ \
+         "ld 12, 0(12)\n\t"  /* target->r12 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+         "mr 12,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(12)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                     arg7) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+7]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      _argvec[2+5] = (uintptr_t)arg5; \
+      _argvec[2+6] = (uintptr_t)arg6; \
+      _argvec[2+7] = (uintptr_t)arg7; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 12,%1\n\t" \
+         "std 2,-16(12)\n\t" /* save tocptr */ \
+         "ld 2,-8(12)\n\t"   /* use nraddr's tocptr */ \
+         "ld 3, 8(12)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(12)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(12)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(12)\n\t"  /* arg4->r6 */ \
+         "ld 7, 40(12)\n\t"  /* arg5->r7 */ \
+         "ld 8, 48(12)\n\t"  /* arg6->r8 */ \
+         "ld 9, 56(12)\n\t"  /* arg7->r9 */ \
+         "ld 12, 0(12)\n\t"  /* target->r12 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+         "mr 12,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(12)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                     arg7,arg8) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+8]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      _argvec[2+5] = (uintptr_t)arg5; \
+      _argvec[2+6] = (uintptr_t)arg6; \
+      _argvec[2+7] = (uintptr_t)arg7; \
+      _argvec[2+8] = (uintptr_t)arg8; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 12,%1\n\t" \
+         "std 2,-16(12)\n\t" /* save tocptr */ \
+         "ld 2,-8(12)\n\t"   /* use nraddr's tocptr */ \
+         "ld 3, 8(12)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(12)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(12)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(12)\n\t"  /* arg4->r6 */ \
+         "ld 7, 40(12)\n\t"  /* arg5->r7 */ \
+         "ld 8, 48(12)\n\t"  /* arg6->r8 */ \
+         "ld 9, 56(12)\n\t"  /* arg7->r9 */ \
+         "ld 10, 64(12)\n\t" /* arg8->r10 */ \
+         "ld 12, 0(12)\n\t"  /* target->r12 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+         "mr 12,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(12)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                     arg7,arg8,arg9) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+9]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      _argvec[2+5] = (uintptr_t)arg5; \
+      _argvec[2+6] = (uintptr_t)arg6; \
+      _argvec[2+7] = (uintptr_t)arg7; \
+      _argvec[2+8] = (uintptr_t)arg8; \
+      _argvec[2+9] = (uintptr_t)arg9; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 12,%1\n\t" \
+         "std 2,-16(12)\n\t" /* save tocptr */ \
+         "ld 2,-8(12)\n\t"   /* use nraddr's tocptr */ \
+         "addi 1,1,-128\n\t" /* expand stack frame */ \
+         /* arg9 */ \
+         "ld 3,72(12)\n\t" \
+         "std 3,96(1)\n\t" \
+         /* args1-8 */ \
+         "ld 3, 8(12)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(12)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(12)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(12)\n\t"  /* arg4->r6 */ \
+         "ld 7, 40(12)\n\t"  /* arg5->r7 */ \
+         "ld 8, 48(12)\n\t"  /* arg6->r8 */ \
+         "ld 9, 56(12)\n\t"  /* arg7->r9 */ \
+         "ld 10, 64(12)\n\t" /* arg8->r10 */ \
+         "ld 12, 0(12)\n\t"  /* target->r12 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+         "mr 12,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(12)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                      arg7,arg8,arg9,arg10) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+10]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      _argvec[2+5] = (uintptr_t)arg5; \
+      _argvec[2+6] = (uintptr_t)arg6; \
+      _argvec[2+7] = (uintptr_t)arg7; \
+      _argvec[2+8] = (uintptr_t)arg8; \
+      _argvec[2+9] = (uintptr_t)arg9; \
+      _argvec[2+10] = (uintptr_t)arg10; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 12,%1\n\t" \
+         "std 2,-16(12)\n\t" /* save tocptr */ \
+         "ld 2,-8(12)\n\t"   /* use nraddr's tocptr */ \
+         "addi 1,1,-128\n\t" /* expand stack frame */ \
+         /* arg10 */ \
+         "ld 3,80(12)\n\t" \
+         "std 3,104(1)\n\t" \
+         /* arg9 */ \
+         "ld 3,72(12)\n\t" \
+         "std 3,96(1)\n\t" \
+         /* args1-8 */ \
+         "ld 3, 8(12)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(12)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(12)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(12)\n\t"  /* arg4->r6 */ \
+         "ld 7, 40(12)\n\t"  /* arg5->r7 */ \
+         "ld 8, 48(12)\n\t"  /* arg6->r8 */ \
+         "ld 9, 56(12)\n\t"  /* arg7->r9 */ \
+         "ld 10, 64(12)\n\t" /* arg8->r10 */ \
+         "ld 12, 0(12)\n\t"  /* target->r12 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+         "mr 12,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(12)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                      arg7,arg8,arg9,arg10,arg11) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+11]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      _argvec[2+5] = (uintptr_t)arg5; \
+      _argvec[2+6] = (uintptr_t)arg6; \
+      _argvec[2+7] = (uintptr_t)arg7; \
+      _argvec[2+8] = (uintptr_t)arg8; \
+      _argvec[2+9] = (uintptr_t)arg9; \
+      _argvec[2+10] = (uintptr_t)arg10; \
+      _argvec[2+11] = (uintptr_t)arg11; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 12,%1\n\t" \
+         "std 2,-16(12)\n\t" /* save tocptr */ \
+         "ld 2,-8(12)\n\t"   /* use nraddr's tocptr */ \
+         "addi 1,1,-144\n\t" /* expand stack frame */ \
+         /* arg11 */ \
+         "ld 3,88(12)\n\t" \
+         "std 3,112(1)\n\t" \
+         /* arg10 */ \
+         "ld 3,80(12)\n\t" \
+         "std 3,104(1)\n\t" \
+         /* arg9 */ \
+         "ld 3,72(12)\n\t" \
+         "std 3,96(1)\n\t" \
+         /* args1-8 */ \
+         "ld 3, 8(12)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(12)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(12)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(12)\n\t"  /* arg4->r6 */ \
+         "ld 7, 40(12)\n\t"  /* arg5->r7 */ \
+         "ld 8, 48(12)\n\t"  /* arg6->r8 */ \
+         "ld 9, 56(12)\n\t"  /* arg7->r9 */ \
+         "ld 10, 64(12)\n\t" /* arg8->r10 */ \
+         "ld 12, 0(12)\n\t"  /* target->r12 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+         "mr 12,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(12)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+                      arg7,arg8,arg9,arg10,arg11,arg12) \
+   do { \
+      volatile OrigFn _orig = (orig); \
+      volatile uintptr_t _argvec[3+12]; \
+      volatile uintptr_t _res; \
+      /* _argvec[0] holds current r2 across the call */ \
+      _argvec[1] = (uintptr_t)_orig.r2; \
+      _argvec[2] = (uintptr_t)_orig.nraddr; \
+      _argvec[2+1] = (uintptr_t)arg1; \
+      _argvec[2+2] = (uintptr_t)arg2; \
+      _argvec[2+3] = (uintptr_t)arg3; \
+      _argvec[2+4] = (uintptr_t)arg4; \
+      _argvec[2+5] = (uintptr_t)arg5; \
+      _argvec[2+6] = (uintptr_t)arg6; \
+      _argvec[2+7] = (uintptr_t)arg7; \
+      _argvec[2+8] = (uintptr_t)arg8; \
+      _argvec[2+9] = (uintptr_t)arg9; \
+      _argvec[2+10] = (uintptr_t)arg10; \
+      _argvec[2+11] = (uintptr_t)arg11; \
+      _argvec[2+12] = (uintptr_t)arg12; \
+      __asm__ volatile( \
+         VALGRIND_ALIGN_STACK \
+         "mr 12,%1\n\t" \
+         "std 2,-16(12)\n\t" /* save tocptr */ \
+         "ld 2,-8(12)\n\t"   /* use nraddr's tocptr */ \
+         "addi 1,1,-144\n\t" /* expand stack frame */ \
+         /* arg12 */ \
+         "ld 3,96(12)\n\t" \
+         "std 3,120(1)\n\t" \
+         /* arg11 */ \
+         "ld 3,88(12)\n\t" \
+         "std 3,112(1)\n\t" \
+         /* arg10 */ \
+         "ld 3,80(12)\n\t" \
+         "std 3,104(1)\n\t" \
+         /* arg9 */ \
+         "ld 3,72(12)\n\t" \
+         "std 3,96(1)\n\t" \
+         /* args1-8 */ \
+         "ld 3, 8(12)\n\t"   /* arg1->r3 */ \
+         "ld 4, 16(12)\n\t"  /* arg2->r4 */ \
+         "ld 5, 24(12)\n\t"  /* arg3->r5 */ \
+         "ld 6, 32(12)\n\t"  /* arg4->r6 */ \
+         "ld 7, 40(12)\n\t"  /* arg5->r7 */ \
+         "ld 8, 48(12)\n\t"  /* arg6->r8 */ \
+         "ld 9, 56(12)\n\t"  /* arg7->r9 */ \
+         "ld 10, 64(12)\n\t" /* arg8->r10 */ \
+         "ld 12, 0(12)\n\t"  /* target->r12 */ \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
+         "mr 12,%1\n\t" \
+         "mr %0,3\n\t" \
+         "ld 2,-16(12)\n\t"  /* restore tocptr */ \
+         VALGRIND_RESTORE_STACK \
+         : /*out*/ "=r" (_res) \
+         : /*in*/ "r" (&_argvec[2]) \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
+      ); \
+      lval = (__typeof__(lval)) _res; \
+   } while (0)
+
+#endif /* PLAT_ppc64le_linux */
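The ppc64le block above is the same dance as ppc64be with r12 in place of r11, since the ELFv2 ABI expects the callee's entry address in r12 at the point of call. One more usage note: for wrapped functions that return nothing, valgrind.h normally layers CALL_FN_v_* convenience macros over these (defined after the per-platform blocks; assumed present in this copy of the header), e.g.:

    #include <stdlib.h>
    #include "valgrind.h"

    void I_WRAP_SONAME_FN_ZU(libcZdsoZa, free)(void *p)
    {
        OrigFn fn;
        VALGRIND_GET_ORIG_FN(fn);
        VALGRIND_PRINTF("free(%p)\n", p);
        CALL_FN_v_W(fn, p);  /* CALL_FN_W_W with the result discarded */
    }
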
CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[4]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[5]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[6]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #4 \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "push {r0} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[7]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "push {r0, r1} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do 
{ \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[8]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #4 \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "push {r0, r1, r2} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[9]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "push {r0, r1, r2, r3} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[10]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #4 \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[11]; \ + 
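/* slot 0 holds the call target, slots 1-10 the ten arguments */ \ +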
volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + _argvec[10] = (uintptr_t)(arg10); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #40] \n\t" \ + "push {r0} \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[12]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + _argvec[10] = (uintptr_t)(arg10); \ + _argvec[11] = (uintptr_t)(arg11); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #4 \n\t" \ + "ldr r0, [%1, #40] \n\t" \ + "ldr r1, [%1, #44] \n\t" \ + "push {r0, r1} \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[13]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + _argvec[10] = (uintptr_t)(arg10); \ + _argvec[11] = (uintptr_t)(arg11); \ + _argvec[12] = (uintptr_t)(arg12); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #40] \n\t" \ + "ldr r1, [%1, #44] \n\t" \ + "ldr r2, [%1, #48] \n\t" \ + "push {r0, r1, r2} \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + 
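/* r0-r4 hold args 5-9 at this point; spill them, as AAPCS passes only four args in registers */ \ +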
"push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_arm_linux */ + +/* ------------------------ arm64-linux ------------------------ */ + +#if defined(PLAT_arm64_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS \ + "x0", "x1", "x2", "x3","x4", "x5", "x6", "x7", "x8", "x9", \ + "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", \ + "x18", "x19", "x20", "x30", \ + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", \ + "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", \ + "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", \ + "v26", "v27", "v28", "v29", "v30", "v31" + +/* x21 is callee-saved, so we can use it to save and restore SP around + the hidden call. */ +#define VALGRIND_ALIGN_STACK \ + "mov x21, sp\n\t" \ + "bic sp, x21, #15\n\t" +#define VALGRIND_RESTORE_STACK \ + "mov sp, x21\n\t" + +/* These CALL_FN_ macros assume that on arm64-linux, + sizeof(uintptr_t) == 8. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[1]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[2]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[3]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[4]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + 
"ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[5]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[6]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[7]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[8]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + __asm__ volatile( \ 
+ VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[9]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[10]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #0x20 \n\t" \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1, #72] \n\t" \ + "str x8, [sp, #0] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[11]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + _argvec[10] = (uintptr_t)(arg10); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #0x20 \n\t" \ + "ldr 
x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1, #72] \n\t" \ + "str x8, [sp, #0] \n\t" \ + "ldr x8, [%1, #80] \n\t" \ + "str x8, [sp, #8] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[12]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + _argvec[10] = (uintptr_t)(arg10); \ + _argvec[11] = (uintptr_t)(arg11); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #0x30 \n\t" \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1, #72] \n\t" \ + "str x8, [sp, #0] \n\t" \ + "ldr x8, [%1, #80] \n\t" \ + "str x8, [sp, #8] \n\t" \ + "ldr x8, [%1, #88] \n\t" \ + "str x8, [sp, #16] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11, \ + arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[13]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + _argvec[10] = (uintptr_t)(arg10); \ + _argvec[11] = (uintptr_t)(arg11); \ + _argvec[12] = (uintptr_t)(arg12); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #0x30 \n\t" \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1, #72] \n\t" \ + "str x8, [sp, #0] \n\t" \ + "ldr x8, [%1, #80] \n\t" \ + "str x8, [sp, #8] \n\t" \ + "ldr x8, [%1, #88] \n\t" \ + "str x8, [sp, #16] \n\t" \ + "ldr x8, [%1, #96] \n\t" \ + "str x8, [sp, #24] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" 
\ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_arm64_linux */ + +/* ------------------------- s390x-linux ------------------------- */ + +#if defined(PLAT_s390x_linux) + +/* Similar workaround as amd64 (see above), but we use r11 as frame + pointer and save the old r11 in r7. r11 might be used for + argvec, therefore we copy argvec in r1 since r1 is clobbered + after the call anyway. */ +#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM) +# define __FRAME_POINTER \ + ,"d"(__builtin_dwarf_cfa()) +# define VALGRIND_CFI_PROLOGUE \ + ".cfi_remember_state\n\t" \ + "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \ + "lgr 7,11\n\t" \ + "lgr 11,%2\n\t" \ + ".cfi_def_cfa r11, 0\n\t" +# define VALGRIND_CFI_EPILOGUE \ + "lgr 11, 7\n\t" \ + ".cfi_restore_state\n\t" +#else +# define __FRAME_POINTER +# define VALGRIND_CFI_PROLOGUE \ + "lgr 1,%1\n\t" +# define VALGRIND_CFI_EPILOGUE +#endif + +/* Nb: On s390 the stack pointer is properly aligned *at all times* + according to the s390 GCC maintainer. (The ABI specification is not + precise in this regard.) Therefore, VALGRIND_ALIGN_STACK and + VALGRIND_RESTORE_STACK are not defined here. */ + +/* These regs are trashed by the hidden call. Note that we overwrite + r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the + function a proper return address. All others are ABI defined call + clobbers. */ +#if defined(__VX__) || defined(__S390_VX__) +#define __CALLER_SAVED_REGS "0", "1", "2", "3", "4", "5", "14", \ + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \ + "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \ + "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \ + "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" +#else +#define __CALLER_SAVED_REGS "0", "1", "2", "3", "4", "5", "14", \ + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7" +#endif + +/* Nb: Although r11 is modified in the asm snippets below (inside + VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for + two reasons: + (1) r11 is restored in VALGRIND_CFI_EPILOGUE, so effectively it is not + modified + (2) GCC will complain that r11 cannot appear inside a clobber section, + when compiled with -O -fno-omit-frame-pointer + */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[1]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 1, 0(1)\n\t" /* target->r1 */ \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +/* The call abi has the arguments in r2-r6 and stack */ +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[2]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)arg1; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1, arg2) \ 
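+ /* s390x ELF ABI: integer args arrive in r2-r6, the result comes back in r2 */ \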
+ do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[3]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)arg1; \ + _argvec[2] = (uintptr_t)arg2; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[4]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)arg1; \ + _argvec[2] = (uintptr_t)arg2; \ + _argvec[3] = (uintptr_t)arg3; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[5]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)arg1; \ + _argvec[2] = (uintptr_t)arg2; \ + _argvec[3] = (uintptr_t)arg3; \ + _argvec[4] = (uintptr_t)arg4; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[6]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)arg1; \ + _argvec[2] = (uintptr_t)arg2; \ + _argvec[3] = (uintptr_t)arg3; \ + _argvec[4] = (uintptr_t)arg4; \ + _argvec[5] = (uintptr_t)arg5; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[7]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)arg1; \ + _argvec[2] = (uintptr_t)arg2; \ + _argvec[3] = (uintptr_t)arg3; \ + _argvec[4] = (uintptr_t)arg4; \ + _argvec[5] = (uintptr_t)arg5; \ + _argvec[6] = (uintptr_t)arg6; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + 
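/* 168 = 160-byte ABI register save area + 8 bytes for the stacked sixth arg */ \ +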
"aghi 15,-168\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,168\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[8]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)arg1; \ + _argvec[2] = (uintptr_t)arg2; \ + _argvec[3] = (uintptr_t)arg3; \ + _argvec[4] = (uintptr_t)arg4; \ + _argvec[5] = (uintptr_t)arg5; \ + _argvec[6] = (uintptr_t)arg6; \ + _argvec[7] = (uintptr_t)arg7; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-176\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,176\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[9]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)arg1; \ + _argvec[2] = (uintptr_t)arg2; \ + _argvec[3] = (uintptr_t)arg3; \ + _argvec[4] = (uintptr_t)arg4; \ + _argvec[5] = (uintptr_t)arg5; \ + _argvec[6] = (uintptr_t)arg6; \ + _argvec[7] = (uintptr_t)arg7; \ + _argvec[8] = (uintptr_t)arg8; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-184\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,184\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8, arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[10]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)arg1; \ + _argvec[2] = (uintptr_t)arg2; \ + _argvec[3] = (uintptr_t)arg3; \ + _argvec[4] = (uintptr_t)arg4; \ + _argvec[5] = (uintptr_t)arg5; \ + _argvec[6] = (uintptr_t)arg6; \ + _argvec[7] = (uintptr_t)arg7; \ + _argvec[8] = (uintptr_t)arg8; \ + _argvec[9] = (uintptr_t)arg9; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-192\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "mvc 184(8,15), 72(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,192\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : 
/*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8, arg9, arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[11]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)arg1; \ + _argvec[2] = (uintptr_t)arg2; \ + _argvec[3] = (uintptr_t)arg3; \ + _argvec[4] = (uintptr_t)arg4; \ + _argvec[5] = (uintptr_t)arg5; \ + _argvec[6] = (uintptr_t)arg6; \ + _argvec[7] = (uintptr_t)arg7; \ + _argvec[8] = (uintptr_t)arg8; \ + _argvec[9] = (uintptr_t)arg9; \ + _argvec[10] = (uintptr_t)arg10; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-200\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "mvc 184(8,15), 72(1)\n\t" \ + "mvc 192(8,15), 80(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,200\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8, arg9, arg10, arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[12]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)arg1; \ + _argvec[2] = (uintptr_t)arg2; \ + _argvec[3] = (uintptr_t)arg3; \ + _argvec[4] = (uintptr_t)arg4; \ + _argvec[5] = (uintptr_t)arg5; \ + _argvec[6] = (uintptr_t)arg6; \ + _argvec[7] = (uintptr_t)arg7; \ + _argvec[8] = (uintptr_t)arg8; \ + _argvec[9] = (uintptr_t)arg9; \ + _argvec[10] = (uintptr_t)arg10; \ + _argvec[11] = (uintptr_t)arg11; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-208\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "mvc 184(8,15), 72(1)\n\t" \ + "mvc 192(8,15), 80(1)\n\t" \ + "mvc 200(8,15), 88(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,208\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8, arg9, arg10, arg11, arg12)\ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[13]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)arg1; \ + _argvec[2] = (uintptr_t)arg2; \ + _argvec[3] = (uintptr_t)arg3; \ + _argvec[4] = (uintptr_t)arg4; \ + _argvec[5] = (uintptr_t)arg5; \ + _argvec[6] = (uintptr_t)arg6; \ + _argvec[7] = (uintptr_t)arg7; \ + _argvec[8] = (uintptr_t)arg8; \ + _argvec[9] = (uintptr_t)arg9; \ + _argvec[10] = (uintptr_t)arg10; \ + _argvec[11] = (uintptr_t)arg11; \ + _argvec[12] = (uintptr_t)arg12; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-216\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + 
"lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "mvc 184(8,15), 72(1)\n\t" \ + "mvc 192(8,15), 80(1)\n\t" \ + "mvc 200(8,15), 88(1)\n\t" \ + "mvc 208(8,15), 96(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,216\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + + +#endif /* PLAT_s390x_linux */ + +/* ------------------------- mips32-linux ----------------------- */ + +#if defined(PLAT_mips32_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \ +"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ +"$25", "$31" + +/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned + long) == 4. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[1]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16\n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[2]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $4, 4(%1) \n\t" /* arg1*/ \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[3]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[4]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + 
__asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[5]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[6]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 24\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 24 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[7]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 32\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "nop\n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 32 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : 
/*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[8]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 32\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 32 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[9]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 40\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 40 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[10]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 40\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 
28($29) \n\t" \ + "lw $4, 36(%1) \n\t" \ + "sw $4, 32($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 40 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[11]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + _argvec[10] = (uintptr_t)(arg10); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 48\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 36(%1) \n\t" \ + "sw $4, 32($29) \n\t" \ + "lw $4, 40(%1) \n\t" \ + "sw $4, 36($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 48 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[12]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + _argvec[10] = (uintptr_t)(arg10); \ + _argvec[11] = (uintptr_t)(arg11); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 48\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 36(%1) \n\t" \ + "sw $4, 32($29) \n\t" \ + "lw $4, 40(%1) \n\t" \ + "sw $4, 36($29) \n\t" \ + "lw $4, 44(%1) \n\t" \ + "sw $4, 40($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 48 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", 
__CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[13]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + _argvec[10] = (uintptr_t)(arg10); \ + _argvec[11] = (uintptr_t)(arg11); \ + _argvec[12] = (uintptr_t)(arg12); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 56\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 36(%1) \n\t" \ + "sw $4, 32($29) \n\t" \ + "lw $4, 40(%1) \n\t" \ + "sw $4, 36($29) \n\t" \ + "lw $4, 44(%1) \n\t" \ + "sw $4, 40($29) \n\t" \ + "lw $4, 48(%1) \n\t" \ + "sw $4, 44($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 56 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_mips32_linux */ + +/* ------------------------- nanomips-linux -------------------- */ + +#if defined(PLAT_nanomips_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS "$t4", "$t5", "$a0", "$a1", "$a2", \ +"$a3", "$a4", "$a5", "$a6", "$a7", "$t0", "$t1", "$t2", "$t3", \ +"$t8","$t9", "$at" + +/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned + long) == 4. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[1]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[2]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[3]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[4]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[5]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[6]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + "lw $a4,20(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : 
/*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[7]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + "lw $a4,20(%1)\n\t" \ + "lw $a5,24(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[8]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + "lw $a4,20(%1)\n\t" \ + "lw $a5,24(%1)\n\t" \ + "lw $a6,28(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[9]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + "lw $a4,20(%1)\n\t" \ + "lw $a5,24(%1)\n\t" \ + "lw $a6,28(%1)\n\t" \ + "lw $a7,32(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[10]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + __asm__ volatile( \ + "addiu $sp, $sp, -16 \n\t" \ + "lw $t9,36(%1) \n\t" \ + "sw $t9, 0($sp) \n\t" \ + "lw $t9, 0(%1) \n\t" \ + "lw $a0, 4(%1) \n\t" \ + "lw $a1, 8(%1) \n\t" \ 
+ "lw $a2,12(%1) \n\t" \ + "lw $a3,16(%1) \n\t" \ + "lw $a4,20(%1) \n\t" \ + "lw $a5,24(%1) \n\t" \ + "lw $a6,28(%1) \n\t" \ + "lw $a7,32(%1) \n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0 \n\t" \ + "addiu $sp, $sp, 16 \n\t" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[11]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + _argvec[10] = (uintptr_t)(arg10); \ + __asm__ volatile( \ + "addiu $sp, $sp, -16 \n\t" \ + "lw $t9,36(%1) \n\t" \ + "sw $t9, 0($sp) \n\t" \ + "lw $t9,40(%1) \n\t" \ + "sw $t9, 4($sp) \n\t" \ + "lw $t9, 0(%1) \n\t" \ + "lw $a0, 4(%1) \n\t" \ + "lw $a1, 8(%1) \n\t" \ + "lw $a2,12(%1) \n\t" \ + "lw $a3,16(%1) \n\t" \ + "lw $a4,20(%1) \n\t" \ + "lw $a5,24(%1) \n\t" \ + "lw $a6,28(%1) \n\t" \ + "lw $a7,32(%1) \n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0 \n\t" \ + "addiu $sp, $sp, 16 \n\t" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[12]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + _argvec[10] = (uintptr_t)(arg10); \ + _argvec[11] = (uintptr_t)(arg11); \ + __asm__ volatile( \ + "addiu $sp, $sp, -16 \n\t" \ + "lw $t9,36(%1) \n\t" \ + "sw $t9, 0($sp) \n\t" \ + "lw $t9,40(%1) \n\t" \ + "sw $t9, 4($sp) \n\t" \ + "lw $t9,44(%1) \n\t" \ + "sw $t9, 8($sp) \n\t" \ + "lw $t9, 0(%1) \n\t" \ + "lw $a0, 4(%1) \n\t" \ + "lw $a1, 8(%1) \n\t" \ + "lw $a2,12(%1) \n\t" \ + "lw $a3,16(%1) \n\t" \ + "lw $a4,20(%1) \n\t" \ + "lw $a5,24(%1) \n\t" \ + "lw $a6,28(%1) \n\t" \ + "lw $a7,32(%1) \n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0 \n\t" \ + "addiu $sp, $sp, 16 \n\t" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[13]; \ + volatile uintptr_t _res; \ + _argvec[0] = (uintptr_t)_orig.nraddr; \ + _argvec[1] = (uintptr_t)(arg1); \ + _argvec[2] = (uintptr_t)(arg2); \ + _argvec[3] = (uintptr_t)(arg3); \ + _argvec[4] = (uintptr_t)(arg4); \ + _argvec[5] = (uintptr_t)(arg5); \ + _argvec[6] = (uintptr_t)(arg6); \ + _argvec[7] = (uintptr_t)(arg7); \ + _argvec[8] = (uintptr_t)(arg8); \ + _argvec[9] = (uintptr_t)(arg9); \ + 
_argvec[10] = (uintptr_t)(arg10); \ + _argvec[11] = (uintptr_t)(arg11); \ + _argvec[12] = (uintptr_t)(arg12); \ + __asm__ volatile( \ + "addiu $sp, $sp, -16 \n\t" \ + "lw $t9,36(%1) \n\t" \ + "sw $t9, 0($sp) \n\t" \ + "lw $t9,40(%1) \n\t" \ + "sw $t9, 4($sp) \n\t" \ + "lw $t9,44(%1) \n\t" \ + "sw $t9, 8($sp) \n\t" \ + "lw $t9,48(%1) \n\t" \ + "sw $t9,12($sp) \n\t" \ + "lw $t9, 0(%1) \n\t" \ + "lw $a0, 4(%1) \n\t" \ + "lw $a1, 8(%1) \n\t" \ + "lw $a2,12(%1) \n\t" \ + "lw $a3,16(%1) \n\t" \ + "lw $a4,20(%1) \n\t" \ + "lw $a5,24(%1) \n\t" \ + "lw $a6,28(%1) \n\t" \ + "lw $a7,32(%1) \n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0 \n\t" \ + "addiu $sp, $sp, 16 \n\t" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_nanomips_linux */ + +/* ------------------------- mips64-linux ------------------------- */ + +#if defined(PLAT_mips64_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \ +"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ +"$25", "$31" + +/* These CALL_FN_ macros assume that on mips64-linux, + sizeof(long long) == 8. */ + +#define MIPS64_LONG2REG_CAST(x) ((uintptr_t)x) + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[1]; \ + volatile uintptr_t _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + __asm__ volatile( \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[2]; \ + volatile uintptr_t _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" /* arg1*/ \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[3]; \ + volatile uintptr_t _res; \ + _argvec[0] = _orig.nraddr; \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[4]; \ + volatile uintptr_t _res; \ + _argvec[0] = _orig.nraddr; \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS 
\ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[5]; \ + volatile uintptr_t _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[6]; \ + volatile uintptr_t _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[7]; \ + volatile uintptr_t _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[8]; \ + volatile uintptr_t _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = 
(__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[9]; \ + volatile uintptr_t _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[10]; \ + volatile uintptr_t _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ + _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ + __asm__ volatile( \ + "dsubu $29, $29, 8\n\t" \ + "ld $4, 72(%1)\n\t" \ + "sd $4, 0($29)\n\t" \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "daddu $29, $29, 8\n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[11]; \ + volatile uintptr_t _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ + _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ + _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \ + __asm__ volatile( \ + "dsubu $29, $29, 16\n\t" \ + "ld $4, 72(%1)\n\t" \ + "sd $4, 0($29)\n\t" \ + "ld $4, 80(%1)\n\t" \ + "sd $4, 8($29)\n\t" \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "daddu $29, $29, 16\n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" 
(&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[12]; \ + volatile uintptr_t _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ + _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ + _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \ + _argvec[11] = MIPS64_LONG2REG_CAST(arg11); \ + __asm__ volatile( \ + "dsubu $29, $29, 24\n\t" \ + "ld $4, 72(%1)\n\t" \ + "sd $4, 0($29)\n\t" \ + "ld $4, 80(%1)\n\t" \ + "sd $4, 8($29)\n\t" \ + "ld $4, 88(%1)\n\t" \ + "sd $4, 16($29)\n\t" \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "daddu $29, $29, 24\n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile uintptr_t _argvec[13]; \ + volatile uintptr_t _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ + _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ + _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \ + _argvec[11] = MIPS64_LONG2REG_CAST(arg11); \ + _argvec[12] = MIPS64_LONG2REG_CAST(arg12); \ + __asm__ volatile( \ + "dsubu $29, $29, 32\n\t" \ + "ld $4, 72(%1)\n\t" \ + "sd $4, 0($29)\n\t" \ + "ld $4, 80(%1)\n\t" \ + "sd $4, 8($29)\n\t" \ + "ld $4, 88(%1)\n\t" \ + "sd $4, 16($29)\n\t" \ + "ld $4, 96(%1)\n\t" \ + "sd $4, 24($29)\n\t" \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "daddu $29, $29, 32\n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_mips64_linux */ + +/* ------------------------------------------------------------------ */ +/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */ +/* */ +/* ------------------------------------------------------------------ */ + +/* Some request codes. There are many more of these, but most are not + exposed to end-user view. These are the public ones, all of the + form 0x1000 + small_number. 
+ + Core ones are in the range 0x00000000--0x0000ffff. The non-public + ones start at 0x2000. +*/ + +/* These macros are used by tools -- they must be public, but don't + embed them into other programs. */ +#define VG_USERREQ_TOOL_BASE(a,b) \ + ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16)) +#define VG_IS_TOOL_USERREQ(a, b, v) \ + (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000)) + +/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! + This enum comprises an ABI exported by Valgrind to programs + which use client requests. DO NOT CHANGE THE NUMERIC VALUES OF THESE + ENTRIES, NOR DELETE ANY -- add new ones at the end of the most + relevant group. */ +typedef + enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001, + VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002, + + /* These allow any function to be called from the simulated + CPU but run on the real CPU. Nb: the first arg passed to + the function is always the ThreadId of the running + thread! So CLIENT_CALL0 actually requires a 1 arg + function, etc. */ + VG_USERREQ__CLIENT_CALL0 = 0x1101, + VG_USERREQ__CLIENT_CALL1 = 0x1102, + VG_USERREQ__CLIENT_CALL2 = 0x1103, + VG_USERREQ__CLIENT_CALL3 = 0x1104, + + /* Can be useful in regression testing suites -- eg. can + send Valgrind's output to /dev/null and still count + errors. */ + VG_USERREQ__COUNT_ERRORS = 0x1201, + + /* Allows the client program and/or gdbserver to execute a monitor + command. */ + VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202, + + /* Allows the client program to change a dynamic command line + option. */ + VG_USERREQ__CLO_CHANGE = 0x1203, + + /* These are useful and can be interpreted by any tool that + tracks malloc() et al, by using vg_replace_malloc.c. */ + VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301, + VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b, + VG_USERREQ__FREELIKE_BLOCK = 0x1302, + /* Memory pool support. */ + VG_USERREQ__CREATE_MEMPOOL = 0x1303, + VG_USERREQ__DESTROY_MEMPOOL = 0x1304, + VG_USERREQ__MEMPOOL_ALLOC = 0x1305, + VG_USERREQ__MEMPOOL_FREE = 0x1306, + VG_USERREQ__MEMPOOL_TRIM = 0x1307, + VG_USERREQ__MOVE_MEMPOOL = 0x1308, + VG_USERREQ__MEMPOOL_CHANGE = 0x1309, + VG_USERREQ__MEMPOOL_EXISTS = 0x130a, + + /* Allow printfs to valgrind log. */ + /* The first two pass the va_list argument by value, which + assumes it is the same size as or smaller than a UWord, + which generally isn't the case. Hence are deprecated. + The second two pass the vargs by reference and so are + immune to this problem. */ + /* both :: char* fmt, va_list vargs (DEPRECATED) */ + VG_USERREQ__PRINTF = 0x1401, + VG_USERREQ__PRINTF_BACKTRACE = 0x1402, + /* both :: char* fmt, va_list* vargs */ + VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403, + VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404, + + /* Stack support. */ + VG_USERREQ__STACK_REGISTER = 0x1501, + VG_USERREQ__STACK_DEREGISTER = 0x1502, + VG_USERREQ__STACK_CHANGE = 0x1503, + + /* Wine support */ + VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601, + + /* Querying of debug info. */ + VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701, + + /* Disable/enable error reporting level. Takes a single + Word arg which is the delta to this thread's error + disablement indicator. Hence 1 disables or further + disables errors, and -1 moves back towards enablement. + Other values are not allowed. */ + VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801, + + /* Some requests used for Valgrind internal, such as + self-test or self-hosting. 
*/ + /* Initialise IR injection */ + VG_USERREQ__VEX_INIT_FOR_IRI = 0x1901, + /* Used by Inner Valgrind to inform Outer Valgrind where to + find the list of inner guest threads */ + VG_USERREQ__INNER_THREADS = 0x1902 + } Vg_ClientRequest; + +#if !defined(__GNUC__) +# define __extension__ /* */ +#endif + + +/* Returns the number of Valgrinds this code is running under. That + is, 0 if running natively, 1 if running under Valgrind, 2 if + running under Valgrind which is running under another Valgrind, + etc. */ +#define RUNNING_ON_VALGRIND \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \ + VG_USERREQ__RUNNING_ON_VALGRIND, \ + 0, 0, 0, 0, 0) \ + + +/* Discard translation of code in the range [_qzz_addr .. _qzz_addr + + _qzz_len - 1]. Useful if you are debugging a JITter or some such, + since it provides a way to make sure valgrind will retranslate the + invalidated area. Returns no value. */ +#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DISCARD_TRANSLATIONS, \ + _qzz_addr, _qzz_len, 0, 0, 0) + +#define VALGRIND_INNER_THREADS(_qzz_addr) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__INNER_THREADS, \ + _qzz_addr, 0, 0, 0, 0) + + +/* These requests are for getting Valgrind itself to print something. + Possibly with a backtrace. This is a really ugly hack. The return value + is the number of characters printed, excluding the "**<pid>** " part at the + start and the backtrace (if present). */ + +#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER) +/* Modern GCC will optimize the static routine out if unused, + and unused attribute will shut down warnings about it. */ +static int VALGRIND_PRINTF(const char *format, ...) + __attribute__((format(__printf__, 1, 2), __unused__)); +#endif +static int +#if defined(_MSC_VER) +__inline +#endif +VALGRIND_PRINTF(const char *format, ...) +{ +#if defined(NVALGRIND) + (void)format; + return 0; +#else /* NVALGRIND */ +#if defined(_MSC_VER) || defined(__MINGW64__) + uintptr_t _qzz_res; +#else + uintptr_t _qzz_res; +#endif + va_list vargs; + va_start(vargs, format); +#if defined(_MSC_VER) || defined(__MINGW64__) + _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, + VG_USERREQ__PRINTF_VALIST_BY_REF, + (uintptr_t)format, + (uintptr_t)&vargs, + 0, 0, 0); +#else + _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, + VG_USERREQ__PRINTF_VALIST_BY_REF, + (uintptr_t)format, + (uintptr_t)&vargs, + 0, 0, 0); +#endif + va_end(vargs); + return (int)_qzz_res; +#endif /* NVALGRIND */ +} + +#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER) +static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...) + __attribute__((format(__printf__, 1, 2), __unused__)); +#endif +static int +#if defined(_MSC_VER) +__inline +#endif +VALGRIND_PRINTF_BACKTRACE(const char *format, ...) 
+{ +#if defined(NVALGRIND) + (void)format; + return 0; +#else /* NVALGRIND */ +#if defined(_MSC_VER) || defined(__MINGW64__) + uintptr_t _qzz_res; +#else + uintptr_t _qzz_res; +#endif + va_list vargs; + va_start(vargs, format); +#if defined(_MSC_VER) || defined(__MINGW64__) + _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, + VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, + (uintptr_t)format, + (uintptr_t)&vargs, + 0, 0, 0); +#else + _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, + VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, + (uintptr_t)format, + (uintptr_t)&vargs, + 0, 0, 0); +#endif + va_end(vargs); + return (int)_qzz_res; +#endif /* NVALGRIND */ +} + + +/* These requests allow control to move from the simulated CPU to the + real CPU, calling an arbitrary function. + + Note that the current ThreadId is inserted as the first argument. + So this call: + + VALGRIND_NON_SIMD_CALL2(f, arg1, arg2) + + requires f to have this signature: + + Word f(Word tid, Word arg1, Word arg2) + + where "Word" is a word-sized type. + + Note that these client requests are not entirely reliable. For example, + if you call a function with them that subsequently calls printf(), + there's a high chance Valgrind will crash. Generally, your prospects of + these working are made higher if the called function does not refer to + any global variables, and does not refer to any libc or other functions + (printf et al). Any kind of entanglement with libc or dynamic linking is + likely to have a bad outcome, for tricky reasons which we've grappled + with a lot in the past. +*/ +#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CLIENT_CALL0, \ + _qyy_fn, \ + 0, 0, 0, 0) + +#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CLIENT_CALL1, \ + _qyy_fn, \ + _qyy_arg1, 0, 0, 0) + +#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CLIENT_CALL2, \ + _qyy_fn, \ + _qyy_arg1, _qyy_arg2, 0, 0) + +#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CLIENT_CALL3, \ + _qyy_fn, \ + _qyy_arg1, _qyy_arg2, \ + _qyy_arg3, 0) + + +/* Counts the number of errors that have been recorded by a tool. Nb: + the tool must record the errors with VG_(maybe_record_error)() or + VG_(unique_error)() for them to be counted. */ +#define VALGRIND_COUNT_ERRORS \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + 0 /* default return */, \ + VG_USERREQ__COUNT_ERRORS, \ + 0, 0, 0, 0, 0) + +/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing + when heap blocks are allocated in order to give accurate results. This + happens automatically for the standard allocator functions such as + malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete, + delete[], etc. + + But if your program uses a custom allocator, this doesn't automatically + happen, and Valgrind will not do as well. For example, if you allocate + superblocks with mmap() and then allocates chunks of the superblocks, all + Valgrind's observations will be at the mmap() level and it won't know that + the chunks should be considered separate entities. In Memcheck's case, + that means you probably won't get heap block overrun detection (because + there won't be redzones marked as unaddressable) and you definitely won't + get any leak detection. 
+ + The following client requests allow a custom allocator to be annotated so + that it can be handled accurately by Valgrind. + + VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated + by a malloc()-like function. For Memcheck (an illustrative case), this + does two things: + + - It records that the block has been allocated. This means any addresses + within the block mentioned in error messages will be + identified as belonging to the block. It also means that if the block + isn't freed it will be detected by the leak checker. + + - It marks the block as being addressable and undefined (if 'is_zeroed' is + not set), or addressable and defined (if 'is_zeroed' is set). This + controls how accesses to the block by the program are handled. + + 'addr' is the start of the usable block (ie. after any + redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator + can apply redzones -- these are blocks of padding at the start and end of + each block. Adding redzones is recommended as it makes it much more likely + Valgrind will spot block overruns. `is_zeroed' indicates if the memory is + zeroed (or filled with another predictable value), as is the case for + calloc(). + + VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a + heap block -- that will be used by the client program -- is allocated. + It's best to put it at the outermost level of the allocator if possible; + for example, if you have a function my_alloc() which calls + internal_alloc(), and the client request is put inside internal_alloc(), + stack traces relating to the heap block will contain entries for both + my_alloc() and internal_alloc(), which is probably not what you want. + + For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out + custom blocks from within a heap block, B, that has been allocated with + malloc/calloc/new/etc, then block B will be *ignored* during leak-checking + -- the custom blocks will take precedence. + + VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For + Memcheck, it does two things: + + - It records that the block has been deallocated. This assumes that the + block was annotated as having been allocated via + VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued. + + - It marks the block as being unaddressable. + + VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a + heap block is deallocated. + + VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For + Memcheck, it does four things: + + - It records that the size of a block has been changed. This assumes that + the block was annotated as having been allocated via + VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued. + + - If the block shrunk, it marks the freed memory as being unaddressable. + + - If the block grew, it marks the new area as undefined and defines a red + zone past the end of the new block. + + - The V-bits of the overlap between the old and the new block are preserved. + + VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block + and before deallocation of the old block. + + In many cases, these three client requests will not be enough to get your + allocator working well with Memcheck. More specifically, if your allocator + writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call + will be necessary to mark the memory as addressable just before the zeroing + occurs, otherwise you'll get a lot of invalid write errors. 
For example, + you'll need to do this if your allocator recycles freed blocks, but it + zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK). + Alternatively, if your allocator reuses freed blocks for allocator-internal + data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary. + + Really, what's happening is a blurring of the lines between the client + program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the + memory should be considered unaddressable to the client program, but the + allocator knows more than the rest of the client program and so may be able + to safely access it. Extra client requests are necessary for Valgrind to + understand the distinction between the allocator and the rest of the + program. + + Ignored if addr == 0. +*/ +#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MALLOCLIKE_BLOCK, \ + addr, sizeB, rzB, is_zeroed, 0) + +/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details. + Ignored if addr == 0. +*/ +#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__RESIZEINPLACE_BLOCK, \ + addr, oldSizeB, newSizeB, rzB, 0) + +/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details. + Ignored if addr == 0. +*/ +#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__FREELIKE_BLOCK, \ + addr, rzB, 0, 0, 0) + +/* Create a memory pool. */ +#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \ + pool, rzB, is_zeroed, 0, 0) + +/* Create a memory pool with some flags specifying extended behaviour. + When flags is zero, the behaviour is identical to VALGRIND_CREATE_MEMPOOL. + + The flag VALGRIND_MEMPOOL_METAPOOL specifies that the pieces of memory + associated with the pool using VALGRIND_MEMPOOL_ALLOC will be used + by the application as superblocks to dole out MALLOC_LIKE blocks using + VALGRIND_MALLOCLIKE_BLOCK. In other words, a meta pool is a "2 levels" + pool : first level is the blocks described by VALGRIND_MEMPOOL_ALLOC. + The second level blocks are described using VALGRIND_MALLOCLIKE_BLOCK. + Note that the association between the pool and the second level blocks + is implicit : second level blocks will be located inside first level + blocks. It is necessary to use the VALGRIND_MEMPOOL_METAPOOL flag + for such 2 levels pools, as otherwise valgrind will detect overlapping + memory blocks, and will abort execution (e.g. during leak search). + + Such a meta pool can also be marked as an 'auto free' pool using the flag + VALGRIND_MEMPOOL_AUTO_FREE, which must be OR-ed together with the + VALGRIND_MEMPOOL_METAPOOL. For an 'auto free' pool, VALGRIND_MEMPOOL_FREE + will automatically free the second level blocks that are contained + inside the first level block freed with VALGRIND_MEMPOOL_FREE. + In other words, calling VALGRIND_MEMPOOL_FREE will cause implicit calls + to VALGRIND_FREELIKE_BLOCK for all the second level blocks included + in the first level block. + Note: it is an error to use the VALGRIND_MEMPOOL_AUTO_FREE flag + without the VALGRIND_MEMPOOL_METAPOOL flag. +*/ +#define VALGRIND_MEMPOOL_AUTO_FREE 1 +#define VALGRIND_MEMPOOL_METAPOOL 2 +#define VALGRIND_CREATE_MEMPOOL_EXT(pool, rzB, is_zeroed, flags) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \ + pool, rzB, is_zeroed, flags, 0) + +/* Destroy a memory pool. 
*/ +#define VALGRIND_DESTROY_MEMPOOL(pool) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DESTROY_MEMPOOL, \ + pool, 0, 0, 0, 0) + +/* Associate a piece of memory with a memory pool. */ +#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_ALLOC, \ + pool, addr, size, 0, 0) + +/* Disassociate a piece of memory from a memory pool. */ +#define VALGRIND_MEMPOOL_FREE(pool, addr) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_FREE, \ + pool, addr, 0, 0, 0) + +/* Disassociate any pieces outside a particular range. */ +#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_TRIM, \ + pool, addr, size, 0, 0) + +/* Resize and/or move a piece associated with a memory pool. */ +#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MOVE_MEMPOOL, \ + poolA, poolB, 0, 0, 0) + +/* Resize and/or move a piece associated with a memory pool. */ +#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_CHANGE, \ + pool, addrA, addrB, size, 0) + +/* Return 1 if a mempool exists, else 0. */ +#define VALGRIND_MEMPOOL_EXISTS(pool) \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__MEMPOOL_EXISTS, \ + pool, 0, 0, 0, 0) + +/* Mark a piece of memory as being a stack. Returns a stack id. + start is the lowest addressable stack byte, end is the highest + addressable stack byte. */ +#define VALGRIND_STACK_REGISTER(start, end) \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__STACK_REGISTER, \ + start, end, 0, 0, 0) + +/* Unmark the piece of memory associated with a stack id as being a + stack. */ +#define VALGRIND_STACK_DEREGISTER(id) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_DEREGISTER, \ + id, 0, 0, 0, 0) + +/* Change the start and end address of the stack id. + start is the new lowest addressable stack byte, end is the new highest + addressable stack byte. */ +#define VALGRIND_STACK_CHANGE(id, start, end) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_CHANGE, \ + id, start, end, 0, 0) + +/* Load PDB debug info for Wine PE image_map. */ +#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LOAD_PDB_DEBUGINFO, \ + fd, ptr, total_size, delta, 0) + +/* Map a code address to a source file name and line number. buf64 + must point to a 64-byte buffer in the caller's address space. The + result will be dumped in there and is guaranteed to be zero + terminated. If no info is found, the first byte is set to zero. */ +#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__MAP_IP_TO_SRCLOC, \ + addr, buf64, 0, 0, 0) + +/* Disable error reporting for this thread. Behaves in a stack like + way, so you can safely call this multiple times provided that + VALGRIND_ENABLE_ERROR_REPORTING is called the same number of times + to re-enable reporting. The first call of this macro disables + reporting. Subsequent calls have no effect except to increase the + number of VALGRIND_ENABLE_ERROR_REPORTING calls needed to re-enable + reporting. Child threads do not inherit this setting from their + parents -- they are always created with reporting enabled. */ +#define VALGRIND_DISABLE_ERROR_REPORTING \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \ + 1, 0, 0, 0, 0) + +/* Re-enable error reporting, as per comments on + VALGRIND_DISABLE_ERROR_REPORTING. 
*/ +#define VALGRIND_ENABLE_ERROR_REPORTING \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \ + -1, 0, 0, 0, 0) + +/* Execute a monitor command from the client program. + If a connection is opened with GDB, the output will be sent + according to the output mode set for vgdb. + If no connection is opened, output will go to the log output. + Returns 1 if command not recognised, 0 otherwise. */ +#define VALGRIND_MONITOR_COMMAND(command) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__GDB_MONITOR_COMMAND, \ + command, 0, 0, 0, 0) + + +/* Change the value of a dynamic command line option. + Note that unknown or not dynamically changeable options + will cause a warning message to be output. */ +#define VALGRIND_CLO_CHANGE(option) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CLO_CHANGE, \ + option, 0, 0, 0, 0) + + +#undef PLAT_x86_darwin +#undef PLAT_amd64_darwin +#undef PLAT_x86_win32 +#undef PLAT_amd64_win64 +#undef PLAT_x86_linux +#undef PLAT_amd64_linux +#undef PLAT_ppc32_linux +#undef PLAT_ppc64be_linux +#undef PLAT_ppc64le_linux +#undef PLAT_arm_linux +#undef PLAT_s390x_linux +#undef PLAT_mips32_linux +#undef PLAT_mips64_linux +#undef PLAT_nanomips_linux +#undef PLAT_x86_solaris +#undef PLAT_amd64_solaris + +#endif /* __VALGRIND_H */
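For reference, a minimal usage sketch of the client-request macros defined above (illustrative only, not part of the imported header):

    #include <stdio.h>
    #include <valgrind/valgrind.h>

    int main(void)
    {
        /* RUNNING_ON_VALGRIND is the nesting depth: 0 when running natively. */
        if (RUNNING_ON_VALGRIND)
            VALGRIND_PRINTF("running under %u level(s) of Valgrind\n",
                            (unsigned)RUNNING_ON_VALGRIND);
        else
            printf("running natively\n");
        return 0;
    }

The client requests compile down to a magic no-op instruction preamble, so an annotated binary still runs normally outside of Valgrind.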
From "[PATCH 09/13] ntdll: Import valgrind headers for PE side ntdll.":
One of the motivating factors for 9951f2f43 was to allow including headers like valgrind's from PE code. I'm not immediately sure how to solve the problem of detecting the header, but it doesn't seem obviously intractable, and it would potentially be preferable to do that rather than import more actively developed external code into the project.
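For illustration, the detection could look roughly like this, assuming the existing HAVE_VALGRIND_VALGRIND_H configure check could somehow be made visible to the PE build (hypothetical sketch, not what this series does):

    #ifdef HAVE_VALGRIND_VALGRIND_H
    # include <valgrind/valgrind.h>
    #else
    /* Compile the annotations out when the header is unavailable. */
    # define RUNNING_ON_VALGRIND 0
    # define VALGRIND_DISCARD_TRANSLATIONS(addr, len) do { } while (0)
    #endif

The difficulty is that PE objects are normally built against Windows headers only, so the result of the check would have to be plumbed through to that side of the build separately.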
From: Rémi Bernon rbernon@codeweavers.com
--- dlls/ntdll/unix/file.c | 6 ++++++ dlls/ntdll/unix/process.c | 8 +++++++- dlls/ntdll/unix/virtual.c | 6 +++++- 3 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/dlls/ntdll/unix/file.c b/dlls/ntdll/unix/file.c index b8eb6941abd..4c3395da5b4 100644 --- a/dlls/ntdll/unix/file.c +++ b/dlls/ntdll/unix/file.c @@ -105,6 +105,12 @@ #endif #include <time.h> #include <unistd.h> +#ifdef HAVE_VALGRIND_VALGRIND_H +# include <valgrind/valgrind.h> +#endif +#ifdef HAVE_VALGRIND_MEMCHECK_H +# include <valgrind/memcheck.h> +#endif
#include "ntstatus.h" #define WIN32_NO_STATUS diff --git a/dlls/ntdll/unix/process.c b/dlls/ntdll/unix/process.c index ba3c9691dfa..ea0375d2974 100644 --- a/dlls/ntdll/unix/process.c +++ b/dlls/ntdll/unix/process.c @@ -60,6 +60,12 @@ #ifdef HAVE_MACH_MACH_H # include <mach/mach.h> #endif +#ifdef HAVE_VALGRIND_VALGRIND_H +# include <valgrind/valgrind.h> +#endif +#ifdef HAVE_VALGRIND_MEMCHECK_H +# include <valgrind/memcheck.h> +#endif
#include "ntstatus.h" #define WIN32_NO_STATUS @@ -1612,7 +1618,7 @@ NTSTATUS WINAPI NtSetInformationProcess( HANDLE handle, PROCESSINFOCLASS class, if (!ret) { #ifdef VALGRIND_STACK_REGISTER - VALGRIND_STACK_REGISTER( addr, (char *)addr + reserve ); + VALGRIND_DISCARD( VALGRIND_STACK_REGISTER( addr, (char *)addr + reserve ) ); #endif stack->StackBase = addr; } diff --git a/dlls/ntdll/unix/virtual.c b/dlls/ntdll/unix/virtual.c index 5611c6d1350..0ceb2d81b39 100644 --- a/dlls/ntdll/unix/virtual.c +++ b/dlls/ntdll/unix/virtual.c @@ -32,6 +32,7 @@ #include <string.h> #include <stdlib.h> #include <signal.h> +#include <stdint.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/stat.h> @@ -59,6 +60,9 @@ #ifdef HAVE_VALGRIND_VALGRIND_H # include <valgrind/valgrind.h> #endif +#ifdef HAVE_VALGRIND_MEMCHECK_H +# include <valgrind/memcheck.h> +#endif #if defined(__APPLE__) # include <mach/mach_init.h> # include <mach/mach_vm.h> @@ -3118,7 +3122,7 @@ NTSTATUS virtual_alloc_thread_stack( INITIAL_TEB *stack, ULONG_PTR zero_bits, SI goto done;
#ifdef VALGRIND_STACK_REGISTER - VALGRIND_STACK_REGISTER( view->base, (char *)view->base + view->size ); + VALGRIND_DISCARD( VALGRIND_STACK_REGISTER( view->base, (char *)view->base + view->size ) ); #endif
/* setup no access guard page */
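A note on the VALGRIND_DISCARD change above: VALGRIND_DISCARD comes from memcheck.h rather than valgrind.h, which is presumably why the memcheck.h includes are added alongside. It apparently serves to explicitly throw away the stack id returned by VALGRIND_STACK_REGISTER. If these stacks were ever torn down explicitly, the id would instead be kept and paired with VALGRIND_STACK_DEREGISTER, roughly like this (sketch; base and size are hypothetical names):

    unsigned stack_id = VALGRIND_STACK_REGISTER( base, (char *)base + size );
    /* ... thread runs on the registered stack ... */
    VALGRIND_STACK_DEREGISTER( stack_id );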
From: Rémi Bernon rbernon@codeweavers.com
--- tools/valgrind.supp | 46 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 tools/valgrind.supp
diff --git a/tools/valgrind.supp b/tools/valgrind.supp new file mode 100644 index 00000000000..c9d384297bc --- /dev/null +++ b/tools/valgrind.supp @@ -0,0 +1,46 @@ +{ + mingw_stack_grow + Memcheck:Addr8 + fun:___chkstk_ms + ... +} +{ + mingw_stack_grow + Memcheck:Addr4 + fun:___chkstk_ms + ... +} +{ + wine_syscall_stack_copy + Memcheck:Addr8 + fun:__wine_syscall_dispatcher +} +{ + wine_syscall_stack_copy + Memcheck:Addr4 + fun:__wine_syscall_dispatcher +} +{ + radeonsi_leaks + Memcheck:Leak + ... + obj:/usr/lib/x86_64-linux-gnu/dri/radeonsi_dri.so +} +{ + llvm_leaks + Memcheck:Leak + ... + obj:/usr/lib/x86_64-linux-gnu/libLLVM-11.so.1 +} +{ + fontconfig_leaks + Memcheck:Leak + ... + obj:/usr/lib/x86_64-linux-gnu/libfontconfig.so.1.12.0 +} +{ + winetest_leaks + Memcheck:Leak + ... + fun:winetest_set_location +}
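New entries for this file can be generated by running the target under Valgrind with --gen-suppressions=all, which prints a ready-to-paste suppression block after each report:

    valgrind --gen-suppressions=all --suppressions=tools/valgrind.supp <command>

The generated frames are usually more specific than necessary, so trimming them down to the top one or two fun: entries, as done above, keeps the suppressions robust across rebuilds.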
From: Rémi Bernon rbernon@codeweavers.com
Wrapping stack reads in a single place so we can silence Valgrind false positives when reading arguments from the user stack on x86_64. --- dlls/ntdll/unix/loader.c | 71 +++++++++++++++++++++++++++++++++ dlls/ntdll/unix/signal_i386.c | 33 +++++++-------- dlls/ntdll/unix/signal_x86_64.c | 38 +++++++----------- dlls/ntdll/unix/unix_private.h | 1 + tools/valgrind.supp | 12 ++++++ 5 files changed, 112 insertions(+), 43 deletions(-)
diff --git a/dlls/ntdll/unix/loader.c b/dlls/ntdll/unix/loader.c index 500c9884121..aaf47ad69cc 100644 --- a/dlls/ntdll/unix/loader.c +++ b/dlls/ntdll/unix/loader.c @@ -1330,6 +1330,77 @@ already_loaded: }
+/*********************************************************************** + * ntdll_dispatch_syscall + */ +UINT_PTR ntdll_dispatch_syscall( SYSTEM_SERVICE_TABLE *tables, UINT_PTR code, UINT_PTR *arg03, UINT_PTR *arg4n ) +{ + typedef UINT_PTR (WINAPIV *syscall_n)(UINT_PTR arg0, ...); + typedef UINT_PTR (WINAPIV *syscall_0)(void); + + SYSTEM_SERVICE_TABLE *table = tables + (code & 0x3000) / 0x1000; + UINT_PTR ret = STATUS_INVALID_PARAMETER; + int argc, index = code & 0xfff; + syscall_n call; + + if (code >= 0x4000 || index >= table->ServiceLimit) return STATUS_INVALID_PARAMETER; + call = (syscall_n)table->ServiceTable[index]; + + switch ((argc = table->ArgumentTable[index] / sizeof(UINT_PTR))) + { + case 0: ret = ((syscall_0)table->ServiceTable[index])(); break; + case 1: ret = call( arg03[0] ); break; + case 2: ret = call( arg03[0], arg03[1] ); break; + case 3: ret = call( arg03[0], arg03[1], arg03[2] ); break; + case 4: ret = call( arg03[0], arg03[1], arg03[2], arg03[3] ); break; + case 5: ret = call( arg03[0], arg03[1], arg03[2], arg03[3], arg4n[0] ); break; + case 6: ret = call( arg03[0], arg03[1], arg03[2], arg03[3], arg4n[0], arg4n[1] ); break; + case 7: ret = call( arg03[0], arg03[1], arg03[2], arg03[3], arg4n[0], arg4n[1], arg4n[2] ); break; + case 8: ret = call( arg03[0], arg03[1], arg03[2], arg03[3], arg4n[0], arg4n[1], arg4n[2], arg4n[3] ); break; + case 9: + ret = call( arg03[0], arg03[1], arg03[2], arg03[3], arg4n[0], arg4n[1], arg4n[2], arg4n[3], + arg4n[4] ); + break; + case 10: + ret = call( arg03[0], arg03[1], arg03[2], arg03[3], arg4n[0], arg4n[1], arg4n[2], arg4n[3], + arg4n[4], arg4n[5] ); + break; + case 11: + ret = call( arg03[0], arg03[1], arg03[2], arg03[3], arg4n[0], arg4n[1], arg4n[2], arg4n[3], + arg4n[4], arg4n[5], arg4n[6] ); + break; + case 12: + ret = call( arg03[0], arg03[1], arg03[2], arg03[3], arg4n[0], arg4n[1], arg4n[2], arg4n[3], + arg4n[4], arg4n[5], arg4n[6], arg4n[7] ); + break; + case 13: + ret = call( arg03[0], arg03[1], arg03[2], arg03[3], arg4n[0], arg4n[1], arg4n[2], arg4n[3], + arg4n[4], arg4n[5], arg4n[6], arg4n[7], arg4n[8] ); + break; + case 14: + ret = call( arg03[0], arg03[1], arg03[2], arg03[3], arg4n[0], arg4n[1], arg4n[2], arg4n[3], + arg4n[4], arg4n[5], arg4n[6], arg4n[7], arg4n[8], arg4n[9] ); + break; + case 15: + ret = call( arg03[0], arg03[1], arg03[2], arg03[3], arg4n[0], arg4n[1], arg4n[2], arg4n[3], + arg4n[4], arg4n[5], arg4n[6], arg4n[7], arg4n[8], arg4n[9], arg4n[10] ); + break; + case 16: + ret = call( arg03[0], arg03[1], arg03[2], arg03[3], arg4n[0], arg4n[1], arg4n[2], arg4n[3], + arg4n[4], arg4n[5], arg4n[6], arg4n[7], arg4n[8], arg4n[9], arg4n[10], arg4n[11] ); + break; + case 17: + ret = call( arg03[0], arg03[1], arg03[2], arg03[3], arg4n[0], arg4n[1], arg4n[2], arg4n[3], + arg4n[4], arg4n[5], arg4n[6], arg4n[7], arg4n[8], arg4n[9], arg4n[10], arg4n[11], + arg4n[12] ); + break; + default: ERR( "syscall with %u arguments, not implemented!\n", argc ); break; + } + + return ret; +} + + /*********************************************************************** * ntdll_init_syscalls */ diff --git a/dlls/ntdll/unix/signal_i386.c b/dlls/ntdll/unix/signal_i386.c index d5dd77b43ff..9250f863145 100644 --- a/dlls/ntdll/unix/signal_i386.c +++ b/dlls/ntdll/unix/signal_i386.c @@ -2499,11 +2499,6 @@ __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher, __ASM_CFI(".cfi_rel_offset %edi,-0x08\n\t") __ASM_CFI(".cfi_rel_offset %esi,-0x04\n\t") __ASM_CFI(".cfi_rel_offset %ebp,-0x00\n\t") - "leal 4(%esp),%esi\n\t" /* first argument */ - "movl 
%eax,%ebx\n\t" - "shrl $8,%ebx\n\t" - "andl $0x30,%ebx\n\t" /* syscall table number */ - "addl 0x38(%ecx),%ebx\n\t" /* frame->syscall_table */ "testl $3,(%ecx)\n\t" /* frame->syscall_flags & (SYSCALL_HAVE_XSAVE | SYSCALL_HAVE_XSAVEC) */ "jz 2f\n\t" "movl $7,%eax\n\t" @@ -2536,21 +2531,21 @@ __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher, "jmp 4f\n" "3:\tfnsave 0x40(%ecx)\n\t" "fwait\n" - "4:\tmovl %ecx,%esp\n\t" - "movl 0x1c(%esp),%edx\n\t" /* frame->eax */ - "andl $0xfff,%edx\n\t" /* syscall number */ - "cmpl 8(%ebx),%edx\n\t" /* table->ServiceLimit */ - "jae 6f\n\t" - "movl 12(%ebx),%eax\n\t" /* table->ArgumentTable */ - "movzbl (%eax,%edx,1),%ecx\n\t" - "movl (%ebx),%eax\n\t" /* table->ServiceTable */ - "subl %ecx,%esp\n\t" - "shrl $2,%ecx\n\t" + "4:\t" + + /* switch stack and dispatch syscall */ + "leal 4(%esp),%esi\n\t" /* 1st argument */ + "movl 0x1c(%ecx),%eax\n\t" /* frame->eax */ + "movl 0x38(%ecx),%ebx\n\t" /* frame->syscall_table */ + "movl %ecx,%esp\n\t" "andl $~15,%esp\n\t" - "movl %esp,%edi\n\t" - "cld\n\t" - "rep; movsl\n\t" - "call *(%eax,%edx,4)\n\t" + "leal 16(%esi),%ecx\n\t" /* 5th, ... arguments */ + "pushl %ecx\n\t" + "pushl %esi\n\t" + "pushl %eax\n\t" + "pushl %ebx\n\t" + "call ntdll_dispatch_syscall\n\t" + "leal -0x34(%ebp),%esp\n" "5:\tmovl 0(%esp),%ecx\n\t" /* frame->syscall_flags + (frame->restore_flags << 16) */ "testl $0x68 << 16,%ecx\n\t" /* CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS | CONTEXT_XSAVE */ diff --git a/dlls/ntdll/unix/signal_x86_64.c b/dlls/ntdll/unix/signal_x86_64.c index e2b7fa73a36..98152194e5f 100644 --- a/dlls/ntdll/unix/signal_x86_64.c +++ b/dlls/ntdll/unix/signal_x86_64.c @@ -3407,32 +3407,22 @@ __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher, __ASM_CFI(".cfi_rel_offset %rip,-0x28\n\t") __ASM_CFI(".cfi_rel_offset %rsp,-0x10\n\t") __ASM_CFI(".cfi_rel_offset %rbp,-0x00\n\t") - "leaq 0x28(%rsp),%rsi\n\t" /* first argument */ + + /* switch stack and dispatch syscall */ + "movq 0xa8(%rcx),%rdi\n\t" /* frame->syscall_table */ + "movq 0x00(%rcx),%rsi\n\t" + "movq 0x18(%rcx),%rdx\n\t" /* 2nd argument */ + "leaq 0x28(%rsp),%rax\n\t" /* 5th, ... 
arguments */ "movq %rcx,%rsp\n\t" - "movq 0x00(%rcx),%rax\n\t" - "movq 0x18(%rcx),%rdx\n\t" - "movl %eax,%ebx\n\t" - "shrl $8,%ebx\n\t" - "andl $0x30,%ebx\n\t" /* syscall table number */ - "movq 0xa8(%rcx),%rcx\n\t" /* frame->syscall_table */ - "leaq (%rcx,%rbx,2),%rbx\n\t" - "andl $0xfff,%eax\n\t" /* syscall number */ - "cmpq 16(%rbx),%rax\n\t" /* table->ServiceLimit */ - "jae 5f\n\t" - "movq 24(%rbx),%rcx\n\t" /* table->ArgumentTable */ - "movzbl (%rcx,%rax),%ecx\n\t" - "subq $0x20,%rcx\n\t" - "jbe 1f\n\t" - "subq %rcx,%rsp\n\t" - "shrq $3,%rcx\n\t" "andq $~15,%rsp\n\t" - "movq %rsp,%rdi\n\t" - "cld\n\t" - "rep; movsq\n" - "1:\tmovq %r10,%rcx\n\t" - "subq $0x20,%rsp\n\t" - "movq (%rbx),%r10\n\t" /* table->ServiceTable */ - "callq *(%r10,%rax,8)\n\t" + "movq %rax,%rcx\n\t" + "pushq %r9\n\t" + "pushq %r8\n\t" + "pushq %rdx\n\t" + "pushq %r10\n\t" + "movq %rsp,%rdx\n\t" + "callq ntdll_dispatch_syscall\n\t" + "leaq -0x98(%rbp),%rcx\n" __ASM_CFI(".cfi_endproc\n\t") __ASM_CFI(".cfi_startproc\n\t") diff --git a/dlls/ntdll/unix/unix_private.h b/dlls/ntdll/unix/unix_private.h index 7165dd694a9..fbb4c5151c3 100644 --- a/dlls/ntdll/unix/unix_private.h +++ b/dlls/ntdll/unix/unix_private.h @@ -241,6 +241,7 @@ extern void DECLSPEC_NORETURN signal_start_thread( PRTL_THREAD_START_ROUTINE ent extern void thread_start( PRTL_THREAD_START_ROUTINE entry, void *arg, BOOL suspend, TEB *teb ) DECLSPEC_HIDDEN; extern SYSTEM_SERVICE_TABLE KeServiceDescriptorTable[4] DECLSPEC_HIDDEN; +extern UINT_PTR ntdll_dispatch_syscall( SYSTEM_SERVICE_TABLE *tables, UINT_PTR code, UINT_PTR *arg03, UINT_PTR *arg4n ) DECLSPEC_HIDDEN; extern void __wine_syscall_dispatcher(void) DECLSPEC_HIDDEN; extern void WINAPI DECLSPEC_NORETURN __wine_syscall_dispatcher_return( void *frame, ULONG_PTR retval ) DECLSPEC_HIDDEN; extern NTSTATUS signal_set_full_context( CONTEXT *context ) DECLSPEC_HIDDEN; diff --git a/tools/valgrind.supp b/tools/valgrind.supp index c9d384297bc..a61767b56c8 100644 --- a/tools/valgrind.supp +++ b/tools/valgrind.supp @@ -20,6 +20,18 @@ Memcheck:Addr4 fun:__wine_syscall_dispatcher } +{ + wine_syscall_stack_args + Memcheck:Addr8 + fun:ntdll_dispatch_syscall + fun:__wine_syscall_dispatcher +} +{ + wine_syscall_stack_args + Memcheck:Addr4 + fun:ntdll_dispatch_syscall + fun:__wine_syscall_dispatcher +} { radeonsi_leaks Memcheck:Leak
From "[PATCH 12/13] ntdll: Introduce a new ntdll_dispatch_syscall helper.":
Wrapping stack reads in a single place so we can silence Valgrind false positives when reading arguments from the user stack on x86_64.
What kind of false positives?
diff --git a/dlls/ntdll/unix/signal_i386.c b/dlls/ntdll/unix/signal_i386.c
index d5dd77b43ff..9250f863145 100644
--- a/dlls/ntdll/unix/signal_i386.c
+++ b/dlls/ntdll/unix/signal_i386.c
@@ -2499,11 +2499,6 @@ __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher,
     __ASM_CFI(".cfi_rel_offset %edi,-0x08\n\t")
     __ASM_CFI(".cfi_rel_offset %esi,-0x04\n\t")
     __ASM_CFI(".cfi_rel_offset %ebp,-0x00\n\t")
-    "leal 4(%esp),%esi\n\t"          /* first argument */
-    "movl %eax,%ebx\n\t"
-    "shrl $8,%ebx\n\t"
-    "andl $0x30,%ebx\n\t"            /* syscall table number */
-    "addl 0x38(%ecx),%ebx\n\t"       /* frame->syscall_table */
     "testl $3,(%ecx)\n\t"            /* frame->syscall_flags & (SYSCALL_HAVE_XSAVE | SYSCALL_HAVE_XSAVEC) */
     "jz 2f\n\t"
     "movl $7,%eax\n\t"
@@ -2536,21 +2531,21 @@ __ASM_GLOBAL_FUNC( __wine_syscall_dispatcher,
     "jmp 4f\n"
     "3:\tfnsave 0x40(%ecx)\n\t"
     "fwait\n"
-    "4:\tmovl %ecx,%esp\n\t"
-    "movl 0x1c(%esp),%edx\n\t"       /* frame->eax */
-    "andl $0xfff,%edx\n\t"           /* syscall number */
-    "cmpl 8(%ebx),%edx\n\t"          /* table->ServiceLimit */
-    "jae 6f\n\t"
-    "movl 12(%ebx),%eax\n\t"         /* table->ArgumentTable */
-    "movzbl (%eax,%edx,1),%ecx\n\t"
-    "movl (%ebx),%eax\n\t"           /* table->ServiceTable */
-    "subl %ecx,%esp\n\t"
-    "shrl $2,%ecx\n\t"
+    "4:\t"
+    /* switch stack and dispatch syscall */
+    "leal 4(%esp),%esi\n\t"          /* 1st argument */
+    "movl 0x1c(%ecx),%eax\n\t"       /* frame->eax */
+    "movl 0x38(%ecx),%ebx\n\t"       /* frame->syscall_table */
+    "movl %ecx,%esp\n\t"
     "andl $~15,%esp\n\t"
-    "movl %esp,%edi\n\t"
-    "cld\n\t"
-    "rep; movsl\n\t"
-    "call *(%eax,%edx,4)\n\t"
+    "leal 16(%esi),%ecx\n\t"         /* 5th, ... arguments */
+    "pushl %ecx\n\t"
+    "pushl %esi\n\t"
+    "pushl %eax\n\t"
+    "pushl %ebx\n\t"
+    "call ntdll_dispatch_syscall\n\t"
+    "leal -0x34(%ebp),%esp\n"
     "5:\tmovl 0(%esp),%ecx\n\t"      /* frame->syscall_flags + (frame->restore_flags << 16) */
     "testl $0x68 << 16,%ecx\n\t"     /* CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS | CONTEXT_XSAVE */
I'm concerned about the prospect of introducing another call in the syscall dispatcher. Some syscalls are in a very hot path. Maybe another direct call doesn't make things worse, but we're also in a place where we are counting individual instructions... do we know that this doesn't make a difference?
From: Rémi Bernon rbernon@codeweavers.com
This is a hack, but valgrind heuristics are having a hard time with the stack manipulation around here. This silences some annoying reports.
---
 dlls/ntdll/unix/signal_i386.c   |  3 ++-
 dlls/ntdll/unix/signal_x86_64.c | 23 +++++++++++++++++++++--
 2 files changed, 23 insertions(+), 3 deletions(-)
diff --git a/dlls/ntdll/unix/signal_i386.c b/dlls/ntdll/unix/signal_i386.c
index 9250f863145..484e9c8c72e 100644
--- a/dlls/ntdll/unix/signal_i386.c
+++ b/dlls/ntdll/unix/signal_i386.c
@@ -1605,10 +1605,11 @@ NTSTATUS WINAPI KeUserModeCallback( ULONG id, const void *args, ULONG len, void
     *(--stack) = len;
     *(--stack) = (ULONG_PTR)args_data;
     *(--stack) = id;
-    *(--stack) = 0xdeadbabe;
+    *(--stack) = *(ULONG_PTR *)frame->esp;

     callback_frame.frame.esp = (ULONG_PTR)stack;
     callback_frame.frame.eip = (ULONG_PTR)pKiUserCallbackDispatcher;
+    callback_frame.frame.ebp = frame->ebp;
     callback_frame.frame.eflags = 0x202;
     callback_frame.frame.syscall_flags = frame->syscall_flags;
     callback_frame.frame.syscall_table = frame->syscall_table;
diff --git a/dlls/ntdll/unix/signal_x86_64.c b/dlls/ntdll/unix/signal_x86_64.c
index 98152194e5f..0b7d7bca7bc 100644
--- a/dlls/ntdll/unix/signal_x86_64.c
+++ b/dlls/ntdll/unix/signal_x86_64.c
@@ -64,6 +64,12 @@
 #ifdef __APPLE__
 # include <mach/mach.h>
 #endif
+#ifdef HAVE_VALGRIND_VALGRIND_H
+# include <valgrind/valgrind.h>
+#endif
+#ifdef HAVE_VALGRIND_MEMCHECK_H
+# include <valgrind/memcheck.h>
+#endif

 #define NONAMELESSUNION
 #define NONAMELESSSTRUCT
@@ -2404,18 +2410,31 @@ NTSTATUS WINAPI KeUserModeCallback( ULONG id, const void *args, ULONG len, void
 {
     struct syscall_frame *frame = amd64_thread_data()->syscall_frame;
     void *args_data = (void *)((frame->rsp - len) & ~15);
+    ULONG_PTR *stack = args_data;
+
+#if defined(VALGRIND_MAKE_MEM_UNDEFINED)
+    VALGRIND_MAKE_MEM_UNDEFINED( (char *)stack - 0x100, (char *)frame->rsp - (char *)stack + 0x100 );
+#elif defined(VALGRIND_MAKE_WRITABLE)
+    VALGRIND_MAKE_WRITABLE( (char *)stack - 0x100, (char *)frame->rsp - (char *)stack + 0x100 );
+#endif

     memcpy( args_data, args, len );
+    *(--stack) = 0;
+    *(--stack) = 0;
+    *(--stack) = 0;
+    *(--stack) = 0;
+    *(--stack) = *(ULONG_PTR *)frame->rsp;

     callback_frame.frame.rcx = id;
-    callback_frame.frame.rdx = (ULONG_PTR)args;
+    callback_frame.frame.rdx = (ULONG_PTR)args_data;
     callback_frame.frame.r8  = len;
     callback_frame.frame.cs  = cs64_sel;
     callback_frame.frame.fs  = amd64_thread_data()->fs;
     callback_frame.frame.gs  = ds64_sel;
     callback_frame.frame.ss  = ds64_sel;
-    callback_frame.frame.rsp = (ULONG_PTR)args_data - 0x28;
+    callback_frame.frame.rsp = (ULONG_PTR)stack;
     callback_frame.frame.rip = (ULONG_PTR)pKiUserCallbackDispatcher;
+    callback_frame.frame.rbp = frame->rbp;
     callback_frame.frame.eflags = 0x200;
     callback_frame.frame.restore_flags = CONTEXT_CONTROL | CONTEXT_INTEGER;
     callback_frame.frame.prev_frame = frame;
Hi,
It looks like your patch introduced the new failures shown below. Please investigate and fix them before resubmitting your patch. If they are not new, fixing them anyway would help a lot. Otherwise please ask for the known failures list to be updated.
The tests also ran into some preexisting test failures. If you know how to fix them that would be helpful. See the TestBot job for the details:
The full results can be found at: https://testbot.winehq.org/JobDetails.pl?Key=125036
Your paranoid android.
=== debian11 (32 bit report) ===
ddraw:
ddraw7.c:15663: Test failed: Expected unsynchronised map for flags 0x1000.
ddraw7.c:15663: Test failed: Expected unsynchronised map for flags 0x3000.
Report validation errors:
kernel32:sync has no test summary line (early exit of the main process?)
=== debian11 (build log) ===
Use of uninitialized value $Flaky in addition (+) at /home/testbot/lib/WineTestBot/LogUtils.pm line 720, <$LogFile> line 24850.
Use of uninitialized value $Flaky in addition (+) at /home/testbot/lib/WineTestBot/LogUtils.pm line 720, <$LogFile> line 24850.
Use of uninitialized value $Flaky in addition (+) at /home/testbot/lib/WineTestBot/LogUtils.pm line 720, <$LogFile> line 24850.
From "[PATCH 13/13] ntdll: Tweak KeUserModeCallback to please Valgrind and GDB."
On 10/16/22 12:24, Rémi Bernon wrote:
This is a hack, but valgrind heuristics are having a hard time with the stack manipulation around here. This silences some annoying reports.
What heuristics? Can they be fixed on the valgrind side, and if not, why?
(And if not, shouldn't we add comments on the ntdll side so this doesn't get broken again?)
On 10/16/22 12:24, Rémi Bernon (@rbernon) wrote:
Valgrind support requires a fork, which I've published to https://gitlab.winehq.org/rbernon/valgrind. The fork implements loading DWARF debug info from PE files, instead of the old and broken upstream PDB support. I've tried to upstream these changes a long time ago but didn't receive any feedback.
I think we could maybe consider keeping a fork, which I'm happy to maintain, as the changes aren't too large. We may want to investigate adding 32-on-64 support, which may require a bit more changes (to VEX specifically, because its amd64 guest doesn't support segment register manipulation).
FWIW, I asked on #valgrind-dev, and was told that patches on the valgrind-developers mailing list may get forgotten, and that a more reliable path is to attach them to a bug at https://bugs.kde.org/describecomponents.cgi?product=valgrind. So there may be hope upstreaming these yet.
On Sun Oct 16 21:56:01 2022 +0000, **** wrote:
Zebediah Figura replied on the mailing list:
> FWIW, I asked on #valgrind-dev, and was told that patches on the valgrind-developers mailing list may get forgotten, and that a more reliable path is to attach them to a bug at https://bugs.kde.org/describecomponents.cgi?product=valgrind. So there may be hope upstreaming these yet.
Which I did, at the time, here https://bugs.kde.org/show_bug.cgi?id=211031, without any sort of feedback from upstream either.
On Sun Oct 16 22:15:26 2022 +0000, **** wrote:
Zebediah Figura replied on the mailing list:
From "[PATCH 09/13] ntdll: Import valgrind headers for PE side ntdll.": One of the motivating factors for 9951f2f43 was to allow including headers like valgrind from PE code. I'm not immediately sure how to solve the problem of detecting the header, but it doesn't seem obviously intractable, and potentially preferable to do that rather than import more actively developed external code into the project.
You can't do that, because these headers use 64-bit `long` types. This is the Valgrind ABI and it's very unlikely to change: it's been pretty much untouched for 20 years now, with only a few new client requests added.
Given the complete absence of feedback on the other changes, I wouldn't qualify this as "actively developed external code", either.
On 10/16/22 17:23, Rémi Bernon (@rbernon) wrote:
> You can't do that, because these headers use 64-bit `long` types. This is the Valgrind ABI and it's very unlikely to change.
> Given the complete absence of feedback on the other changes, I wouldn't qualify this as "actively developed external code", either.
I think there's room for the ABI to change if the current ABI is simply completely broken with a given configuration. I suppose working around that, whether on a temporary or permanent basis, can be deemed worthwhile (though opinions may vary), but it'd at least be useful to put that information somewhere (say, the commit message). As it is, I had no way of knowing that this header was modified, period.
On Sun Oct 16 22:04:22 2022 +0000, **** wrote:
Zebediah Figura replied on the mailing list:
Regarding "[PATCH 08/13] ntdll: Allocate a truly separate stack for the kernel stack.": Why do we need to do this? Specifically, do we know why this is broken in Valgrind? I ask because I would guess a priori that this should be fixed on the valgrind side. Valgrind should have all of the information regarding our stacks, so I wouldn't expect it to need to resort to heuristics.
Valgrind keeps track of stack pointer changes (some of them, at least) to update the memory attributes below it and flag invalid and uninitialized memory accesses. It detects shrinking and growing stacks as long as the stack pointer stays within the registered "stack" region, and in our case this currently includes both the user and the kernel stack.
Our syscalls update the stack pointer from a lower (user stack) address to a higher (kernel stack) address, effectively shrinking the stack by a large amount, and then access the syscall parameters at the lower address to copy them to the kernel stack. Valgrind doesn't like that, and even complains that we're likely switching stacks.
Having two separately allocated stacks is 1) much cleaner anyway, and 2) allows Valgrind to correctly track the two separate stack pointers (though it still complains in some cases about cross-stack accesses).
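For reference, valgrind/valgrind.h exposes client requests for exactly this: telling the tool about separately allocated stacks. A hedged sketch of how a dedicated kernel stack could be registered (the allocation and size are made up; this is not code from the patch series):

```c
#include <stdlib.h>
#include <valgrind/valgrind.h>

#define KERNEL_STACK_SIZE 0x10000  /* made-up size for the example */

int main(void)
{
    void *kernel_stack = malloc( KERNEL_STACK_SIZE );

    /* VALGRIND_STACK_REGISTER takes the lowest and highest addresses of
     * the region and returns an id; it is a no-op outside Valgrind */
    unsigned int id = VALGRIND_STACK_REGISTER( kernel_stack,
                                               (char *)kernel_stack + KERNEL_STACK_SIZE );

    /* ... run code that switches onto the registered stack ... */

    VALGRIND_STACK_DEREGISTER( id );
    free( kernel_stack );
    return 0;
}
```

Registering the user and kernel stacks as two separate regions like this is what would allow Valgrind to track the two stack pointers independently, which is the suggestion discussed below.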
On 10/16/22 17:33, Rémi Bernon (@rbernon) wrote:
> It detects shrinking and growing stacks as long as the stack pointer stays within the registered "stack" region, and in our case this currently includes both the user and the kernel stack. [...]
> Having two separately allocated stacks is 1) much cleaner anyway, and 2) allows Valgrind to correctly track the two separate stack pointers (though it still complains in some cases about cross-stack accesses).
Ah right, I remember this detail. IIRC the solution should be to register the stack in two pieces, though.
When I tried that I still got errors, but it's not clear to me that's not a (fixable) Valgrind bug.
On Sun Oct 16 22:08:54 2022 +0000, **** wrote:
Zebediah Figura replied on the mailing list:
From "[PATCH 13/13] ntdll: Tweak KeUserModeCallback to please Valgrind and GDB." On 10/16/22 12:24, Rémi Bernon wrote: > This is a hack, but valgrind heuristics are having a hard time with > the stack manipulation around here. This silents some annoying reports. What heuristics? Can they be fixed on the valgrind side, and if not, why? (And if not, shouldn't we add comments in the ntdll side so this doesn't get broken again?)
In the same way as for the kernel stack, and even with separate stacks, it either misses some stack pointer changes and considers the accessed memory as invalid, or doesn't like accesses from/to the other, inactive stack pointer somehow.
`KeUserModeCallback` is quite broken regarding unwinding anyway, even like this, though this improves the experience in GDB a bit.
On 10/16/22 17:37, Rémi Bernon (@rbernon) wrote:
> In the same way as for the kernel stack, and even with separate stacks, it either misses some stack pointer changes and considers the accessed memory as invalid, or doesn't like accesses from/to the other, inactive stack pointer somehow.
Does Valgrind prohibit all access to alternative stacks? That seems like it would break any time a thread passes a stack pointer to another thread, or (if it's only within threads) a fiber to another fiber. If this really is broken, can it be fixed on the Valgrind side?
On 16 Oct 2022, at 17:41, Rémi Bernon
`KeUserModeCallback` is quite broken regarding unwinding anyway, even like this, though this improves the experience in GDB a bit.
AFAIK KiUserCallbackDispatcher (and thus inherently KeUserModeCallback) doesn't match the Windows ABI now; it should maybe look closer to KiUserExceptionDispatcher (although I don't currently know what exactly its ABI is). Shouldn't we fix that before making gdb and valgrind happier? Otherwise, whatever is done to it in the current state will probably need to be redone from scratch after the ABI is fixed.
In fact, I think this is one specific part of a more general question: are we ready to officially support Valgrind? That would mean making sure any future changes in these areas do not break it, and accepting valgrind-related bugs and regressions. My feeling is it would create more burden than it removes.
On 10/16/22 19:28, Paul Gofman wrote:
> In fact, I think this is one specific part of a more general question: are we ready to officially support Valgrind? That would mean making sure any future changes in these areas do not break it, and accepting valgrind-related bugs and regressions. My feeling is it would create more burden than it removes.
Personally, I'd cast my vote for supporting it. Valgrind has been an invaluable tool in the past, and I think it's worth bending over backwards regarding things like the PE/Unix split to make sure it keeps working.
Of course, I also think that we should make a good effort to fix bugs in the right place, and at least some of these are probably valgrind bugs, but the ultimate point is the same.
I think at the very least we should keep the correct Win ABI and performance as a priority. E.g., if we can express the unwind through the syscall dispatcher with CFI we can do that, but IMO building this whole thing around Valgrind is wrong. Maybe there are other ways to do it, keeping Valgrind solely on the Unix side (I don't know if that is possible). Or maybe we can develop some parts of the tool which explicitly support Wine specifics.
On 10/16/22 20:26, Paul Gofman wrote:
> I think at the very least we should keep the correct Win ABI and performance as a priority. [...] Maybe there are other ways to do it, keeping Valgrind solely on the Unix side (I don't know if that is possible). Or maybe we can develop some parts of the tool which explicitly support Wine specifics.
Sure, I definitely agree with that part. I don't think we should sacrifice performance when valgrind isn't enabled, but I do think we should try to make sure it works in at least some configuration. (And if that means using different code paths based on RUNNING_ON_VALGRIND, so be it...)
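For completeness, `RUNNING_ON_VALGRIND` is a client request from valgrind/valgrind.h that evaluates to zero when the program is not running under Valgrind, so such a split can be done at run time; a minimal sketch (both code paths are placeholders):

```c
#include <valgrind/valgrind.h>

void dispatch(void)
{
    if (RUNNING_ON_VALGRIND)
    {
        /* slower path that keeps Valgrind's stack tracking happy */
    }
    else
    {
        /* hand-tuned fast path, where individual instructions count */
    }
}
```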
On Mon Oct 17 00:00:23 2022 +0000, **** wrote:
Paul Gofman replied on the mailing list:
> AFAIK KiUserCallbackDispatcher (and thus inherently KeUserModeCallback) doesn't match the Windows ABI now [...] Shouldn't we fix that before making gdb and valgrind happier? Otherwise, whatever is done to it in the current state will probably need to be redone from scratch after the ABI is fixed.
Perhaps. FWIW this is a Draft MR for sharing the feature, which I believe is useful, and I don't have any intention of upstreaming most of these changes.