From: Julian Seward
Date: Wed, 19 Jun 2002 10:17:40 +0000 (+0000)
Subject: Implement pause().
X-Git-Tag: svn/VALGRIND_1_0_3~46
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=2bdcd29d63260252a7e3352424d60241aa8e25ad;p=thirdparty%2Fvalgrind.git

Implement pause().

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@442
---
diff --git a/tests/Makefile.am b/tests/Makefile.am
index f873b9f66b..8eadc56c60 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -30,4 +30,5 @@ EXTRA_DIST = \
 	pth_once.c weirdioctl.c pth_signal1.c pth_signal2.c \
 	discard.c pth_semaphore1.c new_override.cpp pth_yield.c \
 	sigaltstack.c erringfds.c sigwait_all.c \
-	pth_cancel1.c pth_cancel2.c pth_signal_gober.c nanoleak.c
+	pth_cancel1.c pth_cancel2.c pth_signal_gober.c nanoleak.c \
+	pth_pause.c
diff --git a/tests/pth_pause.c b/tests/pth_pause.c
new file mode 100644
index 0000000000..bca0a823dc
--- /dev/null
+++ b/tests/pth_pause.c
@@ -0,0 +1,29 @@
+
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+void hdlr ( int sig )
+{
+   printf("signal %d arrived\n", sig);
+}
+
+int main ( void )
+{
+   int res;
+   /* Force use of libpthread here */
+   pthread_testcancel();
+
+   printf("installing handler\n");
+   signal(SIGINT, hdlr);
+   printf("installing handler done; please do Control-C\n");
+
+   res = pause();
+   printf("pause done; res = %d, errno = %d\n", res, errno);
+
+   printf("bye\n");
+
+   return 0;
+}
diff --git a/vg_include.h b/vg_include.h
index 182cb3cb4e..bdafd3f8e6 100644
--- a/vg_include.h
+++ b/vg_include.h
@@ -505,6 +505,7 @@ extern Bool VG_(is_empty_arena) ( ArenaId aid );
 #define VG_USERREQ__GET_KEY_D_AND_S         0x3022
 #define VG_USERREQ__NUKE_OTHER_THREADS      0x3023
+#define VG_USERREQ__GET_N_SIGS_RETURNED     0x3024
 
 /* Cosmetic ... */
@@ -644,6 +645,12 @@ typedef
       is the set of signals for which we are sigwait()ing. */
    vki_ksigset_t sigs_waited_for;
 
+   /* Counts the number of times a signal handler for this thread
+      has returned.  This makes it easy to implement pause(), by
+      polling this value, of course interspersed with nanosleeps,
+      and waiting till it changes. */
+   UInt n_signals_returned;
+
    /* Stacks.  When a thread slot is freed, we don't deallocate its
       stack; we just leave it lying around for the next use of the
      slot.  If the next use of the slot requires a larger stack,
diff --git a/vg_libpthread.c b/vg_libpthread.c
index dc66c7eb96..b373e10ee2 100644
--- a/vg_libpthread.c
+++ b/vg_libpthread.c
@@ -75,6 +75,10 @@
 static
 void wait_for_fd_to_be_readable_or_erring ( int fd );
 
+static
+int my_do_syscall2 ( int syscallno,
+                     int arg1, int arg2 );
+
 
 /* ---------------------------------------------------------------------
    Helpers.  We have to be pretty self-sufficient.
@@ -1147,6 +1151,41 @@ int raise (int sig)
 }
 
 
+int pause ( void )
+{
+   unsigned int n_orig, n_now;
+   struct vki_timespec nanosleep_interval;
+   ensure_valgrind("pause");
+
+   /* This is surely a cancellation point. */
+   __my_pthread_testcancel();
+
+   VALGRIND_MAGIC_SEQUENCE(n_orig, 0xFFFFFFFF /* default */,
+                           VG_USERREQ__GET_N_SIGS_RETURNED,
+                           0, 0, 0, 0);
+   my_assert(n_orig != 0xFFFFFFFF);
+
+   while (1) {
+      VALGRIND_MAGIC_SEQUENCE(n_now, 0xFFFFFFFF /* default */,
+                              VG_USERREQ__GET_N_SIGS_RETURNED,
+                              0, 0, 0, 0);
+      my_assert(n_now != 0xFFFFFFFF);
+      my_assert(n_now >= n_orig);
+      if (n_now != n_orig) break;
+
+      nanosleep_interval.tv_sec  = 0;
+      nanosleep_interval.tv_nsec = 52 * 1000 * 1000; /* 52 milliseconds */
+      /* It's critical here that valgrind's nanosleep implementation
+         is nonblocking. */
+      (void)my_do_syscall2(__NR_nanosleep,
+                           (int)(&nanosleep_interval), (int)NULL);
+   }
+
+   * (__errno_location()) = EINTR;
+   return -1;
+}
+
+
 /* ---------------------------------------------------
    THREAD-SPECIFICs
    ------------------------------------------------ */
diff --git a/vg_libpthread_unimp.c b/vg_libpthread_unimp.c
index 43e81149b6..6f8f75d236 100644
--- a/vg_libpthread_unimp.c
+++ b/vg_libpthread_unimp.c
@@ -206,7 +206,7 @@ weak_alias(_IO_ftrylockfile, ftrylockfile)
 //__attribute__((weak)) void pread ( void ) { vgPlain_unimp("pread"); }
 //__attribute__((weak)) void pwrite ( void ) { vgPlain_unimp("pwrite"); }
 //__attribute__((weak)) void msync ( void ) { vgPlain_unimp("msync"); }
-__attribute__((weak)) void pause ( void ) { vgPlain_unimp("pause"); }
+//__attribute__((weak)) void pause ( void ) { vgPlain_unimp("pause"); }
 //__attribute__((weak)) void recvfrom ( void ) { vgPlain_unimp("recvfrom"); }
 //__attribute__((weak)) void recvmsg ( void ) { vgPlain_unimp("recvmsg"); }
 //__attribute__((weak)) void sendmsg ( void ) { vgPlain_unimp("sendmsg"); }
diff --git a/vg_scheduler.c b/vg_scheduler.c
index 7bc9de586b..52f1b16108 100644
--- a/vg_scheduler.c
+++ b/vg_scheduler.c
@@ -543,6 +543,7 @@ void mostly_clear_thread_record ( ThreadId tid )
    VG_(threads)[tid].cancel_ty   = True; /* PTHREAD_CANCEL_DEFERRED */
    VG_(threads)[tid].cancel_pend = NULL; /* not pending */
    VG_(threads)[tid].custack_used = 0;
+   VG_(threads)[tid].n_signals_returned = 0;
    VG_(ksigemptyset)(&VG_(threads)[tid].sig_mask);
    VG_(ksigemptyset)(&VG_(threads)[tid].sigs_waited_for);
    for (j = 0; j < VG_N_THREAD_KEYS; j++)
@@ -653,6 +654,10 @@ Bool fd_is_valid ( Int fd )
    False = request not done.  A more capable but slower mechanism will
    deal with it.
+
+   2002-06-19: the division between this dispatcher and the one at the
+   end of the file is completely artificial and should be got rid of.
+   There is no longer any good reason for it.
 */
 static
 Bool maybe_do_trivial_clientreq ( ThreadId tid )
@@ -748,6 +753,9 @@ Bool maybe_do_trivial_clientreq ( ThreadId tid )
          do__testcancel ( tid );
          return True;
 
+      case VG_USERREQ__GET_N_SIGS_RETURNED:
+         SIMPLE_RETURN(VG_(threads)[tid].n_signals_returned);
+
       default:
          /* Too hard; wimp out. */
         return False;
@@ -801,6 +809,10 @@ void handle_signal_return ( ThreadId tid )
 
    vg_assert(VG_(is_valid_tid)(tid));
 
+   /* Increment signal-returned counter.  Used only to implement
+      pause(). */
+   VG_(threads)[tid].n_signals_returned++;
+
    restart_blocked_syscalls = VG_(signal_returns)(tid);
 
    if (restart_blocked_syscalls)
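
Note (not part of the commit): the sketch below shows the same polling idea in a
minimal standalone C program.  It is illustrative only; a plain volatile counter
stands in for the per-thread n_signals_returned field that the real pause() reads
through the VG_USERREQ__GET_N_SIGS_RETURNED client request, and an ordinary
nanosleep() stands in for the my_do_syscall2(__NR_nanosleep, ...) call.

/* Illustrative sketch of the polling scheme used by the new pause() above. */
#define _POSIX_C_SOURCE 199309L
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

static volatile sig_atomic_t n_signals_returned = 0;

static void hdlr ( int sig )
{
   /* Plays the role of handle_signal_return() in vg_scheduler.c. */
   n_signals_returned++;
}

static int sketch_pause ( void )
{
   sig_atomic_t n_orig = n_signals_returned;
   struct timespec interval = { 0, 52 * 1000 * 1000 };  /* 52 ms, as above */

   /* Poll until a signal handler has run.  The real implementation depends
      on Valgrind's nanosleep being nonblocking for the process as a whole. */
   while (n_signals_returned == n_orig)
      nanosleep(&interval, NULL);

   errno = EINTR;   /* pause() always returns -1 with errno set to EINTR */
   return -1;
}

int main ( void )
{
   int res;
   signal(SIGINT, hdlr);
   printf("sketch: handler installed; please do Control-C\n");
   res = sketch_pause();
   printf("sketch_pause returned %d, errno = %d\n", res, errno);
   return 0;
}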