From: Tom Hughes
Date: Wed, 17 May 2006 14:24:12 +0000 (+0000)
Subject: Implement the set_robust_list and get_robust_list system calls.
X-Git-Tag: svn/VALGRIND_3_2_0~54
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=297aa09b43ff96edc441e7e6ae1f15b20c339a28;p=thirdparty%2Fvalgrind.git

Implement the set_robust_list and get_robust_list system calls.

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@5905
---

diff --git a/coregrind/m_syswrap/priv_syswrap-linux.h b/coregrind/m_syswrap/priv_syswrap-linux.h
index 0837c1a533..cb7ca97c9f 100644
--- a/coregrind/m_syswrap/priv_syswrap-linux.h
+++ b/coregrind/m_syswrap/priv_syswrap-linux.h
@@ -74,6 +74,8 @@ DECL_TEMPLATE(linux, sys_prctl);
 DECL_TEMPLATE(linux, sys_sendfile);
 DECL_TEMPLATE(linux, sys_sendfile64);
 DECL_TEMPLATE(linux, sys_futex);
+DECL_TEMPLATE(linux, sys_set_robust_list);
+DECL_TEMPLATE(linux, sys_get_robust_list);
 DECL_TEMPLATE(linux, sys_pselect6);
 DECL_TEMPLATE(linux, sys_ppoll);
 
diff --git a/coregrind/m_syswrap/syswrap-amd64-linux.c b/coregrind/m_syswrap/syswrap-amd64-linux.c
index 35854a2dab..b6a23707ad 100644
--- a/coregrind/m_syswrap/syswrap-amd64-linux.c
+++ b/coregrind/m_syswrap/syswrap-amd64-linux.c
@@ -1328,6 +1328,8 @@ const SyscallTableEntry ML_(syscall_table)[] = {
 //   LINX_(__NR_pselect6,       sys_ni_syscall),        // 270
 //   LINXY(__NR_ppoll,          sys_ni_syscall),        // 271
 //   LINX_(__NR_unshare,        sys_unshare),           // 272
+   LINX_(__NR_set_robust_list, sys_set_robust_list),    // 273
+   LINXY(__NR_get_robust_list, sys_get_robust_list),    // 274
 };
 
 const UInt ML_(syscall_table_size) =
diff --git a/coregrind/m_syswrap/syswrap-linux.c b/coregrind/m_syswrap/syswrap-linux.c
index 6f4098fa31..328b87cbd9 100644
--- a/coregrind/m_syswrap/syswrap-linux.c
+++ b/coregrind/m_syswrap/syswrap-linux.c
@@ -799,6 +799,37 @@ POST(sys_futex)
    }
 }
 
+PRE(sys_set_robust_list)
+{
+   PRINT("sys_set_robust_list ( %p, %d )", ARG1,ARG2);
+   PRE_REG_READ2(long, "set_robust_list",
+                 struct vki_robust_list_head *, head, vki_size_t, len);
+
+   /* Just check the robust_list_head structure is readable - don't
+      try and chase the list as the kernel will only read it when
+      the thread exits so the current contents is irrelevant. */
+   if (ARG1 != 0)
+      PRE_MEM_READ("set_robust_list(head)", ARG1, ARG2);
+}
+
+PRE(sys_get_robust_list)
+{
+   PRINT("sys_get_robust_list ( %d, %p, %d )", ARG1,ARG2,ARG3);
+   PRE_REG_READ3(long, "get_robust_list",
+                 int, pid,
+                 struct vki_robust_list_head **, head_ptr,
+                 vki_size_t *, len_ptr);
+   PRE_MEM_WRITE("get_robust_list(head_ptr)",
+                 ARG2, sizeof(struct vki_robust_list_head *));
+   PRE_MEM_WRITE("get_robust_list(len_ptr)",
+                 ARG3, sizeof(struct vki_size_t *));
+}
+POST(sys_get_robust_list)
+{
+   POST_MEM_WRITE(ARG2, sizeof(struct vki_robust_list_head *));
+   POST_MEM_WRITE(ARG3, sizeof(struct vki_size_t *));
+}
+
 PRE(sys_pselect6)
 {
    *flags |= SfMayBlock;
diff --git a/coregrind/m_syswrap/syswrap-x86-linux.c b/coregrind/m_syswrap/syswrap-x86-linux.c
index c1f714f0a5..c3be552f7d 100644
--- a/coregrind/m_syswrap/syswrap-x86-linux.c
+++ b/coregrind/m_syswrap/syswrap-x86-linux.c
@@ -2172,6 +2172,8 @@ const SyscallTableEntry ML_(syscall_table)[] = {
    LINXY(__NR_ppoll,            sys_ppoll),             // 309
 
 //   LINX_(__NR_unshare,        sys_unshare),           // 310
+   LINX_(__NR_set_robust_list, sys_set_robust_list),    // 311
+   LINXY(__NR_get_robust_list, sys_get_robust_list),    // 312
 };
 
 const UInt ML_(syscall_table_size) =
diff --git a/include/vki-linux.h b/include/vki-linux.h
index 1d6034261b..4057e0c238 100644
--- a/include/vki-linux.h
+++ b/include/vki-linux.h
@@ -1126,6 +1126,37 @@ struct vki_seminfo {
 #define VKI_FUTEX_REQUEUE (3)
 #define VKI_FUTEX_CMP_REQUEUE (4)
 
+struct vki_robust_list {
+   struct vki_robust_list __user *next;
+};
+
+struct vki_robust_list_head {
+   /*
+    * The head of the list. Points back to itself if empty:
+    */
+   struct vki_robust_list list;
+
+   /*
+    * This relative offset is set by user-space, it gives the kernel
+    * the relative position of the futex field to examine. This way
+    * we keep userspace flexible, to freely shape its data-structure,
+    * without hardcoding any particular offset into the kernel:
+    */
+   long futex_offset;
+
+   /*
+    * The death of the thread may race with userspace setting
+    * up a lock's links. So to handle this race, userspace first
+    * sets this field to the address of the to-be-taken lock,
+    * then does the lock acquire, and then adds itself to the
+    * list, and then clears this field. Hence the kernel will
+    * always have full knowledge of all locks that the thread
+    * _might_ have taken. We check the owner TID in any case,
+    * so only truly owned locks will be handled.
+    */
+   struct vki_robust_list __user *list_op_pending;
+};
+
 //----------------------------------------------------------------------
 // From linux-2.6.8.1/include/linux/errno.h
 //----------------------------------------------------------------------
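
A note on exercising these wrappers: glibc provides no C wrappers for
set_robust_list and get_robust_list, so a test program has to invoke
them through syscall(2). Below is a minimal sketch, not part of the
commit, assuming a Linux system whose <sys/syscall.h> defines
SYS_set_robust_list and SYS_get_robust_list and whose <linux/futex.h>
defines struct robust_list_head:

#include <stdio.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>   /* struct robust_list, struct robust_list_head */

int main(void)
{
   static struct robust_list_head head;

   /* An empty list: the head points back to itself, as the comment
      imported into vki-linux.h above describes. */
   head.list.next = &head.list;
   head.futex_offset = 0;
   head.list_op_pending = NULL;

   /* Register the list. Caveat: this replaces the head that glibc
      registered at thread start, so a real program should not touch
      robust pthread mutexes afterwards. */
   if (syscall(SYS_set_robust_list, &head, sizeof(head)) != 0) {
      perror("set_robust_list");
      return 1;
   }

   /* pid 0 queries the calling thread. The kernel writes the head
      pointer and length back, which is exactly the memory the
      POST(sys_get_robust_list) wrapper marks as initialised. */
   struct robust_list_head *got;
   size_t len;
   if (syscall(SYS_get_robust_list, 0, &got, &len) != 0) {
      perror("get_robust_list");
      return 1;
   }
   printf("robust list head at %p, length %zu\n", (void *)got, len);
   return 0;
}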
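
The comments carried over into vki-linux.h specify an ordering protocol
rather than just a data structure, and that ordering is why
PRE(sys_set_robust_list) only checks that the head is readable instead
of chasing the list: the kernel walks it when the thread exits, not at
registration time. Here is a simplified sketch of the protocol the
list_op_pending comment describes; the lock layout and function names
are invented for illustration, and the real implementation is glibc's
robust pthread mutex code:

#include <stddef.h>        /* offsetof */
#include <linux/futex.h>   /* struct robust_list, struct robust_list_head */

/* One lock as userspace might lay it out: a list node next to the
   futex word the kernel will inspect on thread death. */
struct sketch_lock {
   struct robust_list node;   /* linkage into the per-thread list */
   int                futex;  /* 0 when free, else the owner's TID */
};

/* head->futex_offset would be set once, telling the kernel how to get
   from a list node to its futex word:
      head->futex_offset = offsetof(struct sketch_lock, futex)
                         - offsetof(struct sketch_lock, node);        */

static void sketch_lock_acquire(struct robust_list_head *head,
                                struct sketch_lock *lk, int my_tid)
{
   /* 1. Announce the lock about to be taken. If the thread dies
         before step 4 completes, the kernel still finds the lock here
         and can mark it FUTEX_OWNER_DIED for the next waiter. */
   head->list_op_pending = &lk->node;

   /* 2. Acquire by installing our TID in the futex word. A real
         implementation would FUTEX_WAIT on contention, not spin. */
   while (!__sync_bool_compare_and_swap(&lk->futex, 0, my_tid))
      ;

   /* 3. Link the lock into the thread's robust list... */
   lk->node.next = head->list.next;
   head->list.next = &lk->node;

   /* 4. ...and only then retract the announcement. */
   head->list_op_pending = NULL;
}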