DECL_TEMPLATE(linux, sys_sendfile);
DECL_TEMPLATE(linux, sys_sendfile64);
DECL_TEMPLATE(linux, sys_futex);
+DECL_TEMPLATE(linux, sys_set_robust_list);
+DECL_TEMPLATE(linux, sys_get_robust_list);
DECL_TEMPLATE(linux, sys_pselect6);
DECL_TEMPLATE(linux, sys_ppoll);
// LINX_(__NR_pselect6, sys_ni_syscall), // 270
// LINXY(__NR_ppoll, sys_ni_syscall), // 271
// LINX_(__NR_unshare, sys_unshare), // 272
+ LINX_(__NR_set_robust_list, sys_set_robust_list), // 273
+ LINXY(__NR_get_robust_list, sys_get_robust_list), // 274
};
const UInt ML_(syscall_table_size) =
}
}
+PRE(sys_set_robust_list)
+{
+ PRINT("sys_set_robust_list ( %p, %d )", ARG1,ARG2);
+ PRE_REG_READ2(long, "set_robust_list",
+ struct vki_robust_list_head *, head, vki_size_t, len);
+
+ /* Just check that the robust_list_head structure is readable - don't
+    try to chase the list, as the kernel only reads it when the thread
+    exits, so its current contents are irrelevant. */
+ if (ARG1 != 0)
+ PRE_MEM_READ("set_robust_list(head)", ARG1, ARG2);
+}
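
/* Illustrative sketch (not part of the patch): the client-side call that
   the PRE wrapper above intercepts.  glibc normally registers its own
   robust list for every thread, so a manual call like this only makes
   sense in a standalone test program. */
#include <linux/futex.h>     /* struct robust_list_head */
#include <stddef.h>          /* NULL */
#include <sys/syscall.h>     /* SYS_set_robust_list */
#include <unistd.h>          /* syscall */

static struct robust_list_head my_head;

static long register_robust_list(void)
{
   my_head.list.next       = &my_head.list;  /* empty list: points at itself */
   my_head.futex_offset    = 0;
   my_head.list_op_pending = NULL;
   /* The wrapper checks that [ARG1, ARG1+ARG2) - i.e. &my_head onwards -
      is readable; it deliberately does not walk the list. */
   return syscall(SYS_set_robust_list, &my_head, sizeof(my_head));
}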
+
+PRE(sys_get_robust_list)
+{
+ PRINT("sys_get_robust_list ( %d, %p, %d )", ARG1,ARG2,ARG3);
+ PRE_REG_READ3(long, "get_robust_list",
+ int, pid,
+ struct vki_robust_list_head **, head_ptr,
+ vki_size_t *, len_ptr);
+ PRE_MEM_WRITE("get_robust_list(head_ptr)",
+ ARG2, sizeof(struct vki_robust_list_head *));
+ PRE_MEM_WRITE("get_robust_list(len_ptr)",
+ ARG3, sizeof(vki_size_t));
+}
+POST(sys_get_robust_list)
+{
+ POST_MEM_WRITE(ARG2, sizeof(struct vki_robust_list_head *));
+ POST_MEM_WRITE(ARG3, sizeof(vki_size_t));
+}
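
/* Illustrative sketch (not part of the patch): reading the registration back
   with get_robust_list, the call covered by the PRE/POST wrappers above.
   A pid of 0 queries the calling thread; the wrappers mark the two output
   locations as written once the kernel has filled them in. */
#include <linux/futex.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static void show_robust_list(void)
{
   struct robust_list_head *head_ptr;
   size_t len;
   if (syscall(SYS_get_robust_list, 0, &head_ptr, &len) == 0)
      printf("robust list head %p, len %zu\n", (void *)head_ptr, len);
}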
+
PRE(sys_pselect6)
{
*flags |= SfMayBlock;
LINXY(__NR_ppoll, sys_ppoll), // 309
// LINX_(__NR_unshare, sys_unshare), // 310
+ LINX_(__NR_set_robust_list, sys_set_robust_list), // 311
+ LINXY(__NR_get_robust_list, sys_get_robust_list), // 312
};
const UInt ML_(syscall_table_size) =
#define VKI_FUTEX_REQUEUE (3)
#define VKI_FUTEX_CMP_REQUEUE (4)
+struct vki_robust_list {
+ struct vki_robust_list __user *next;
+};
+
+struct vki_robust_list_head {
+ /*
+ * The head of the list. Points back to itself if empty:
+ */
+ struct vki_robust_list list;
+
+ /*
+ * This relative offset is set by user-space, it gives the kernel
+ * the relative position of the futex field to examine. This way
+ * we keep userspace flexible, to freely shape its data-structure,
+ * without hardcoding any particular offset into the kernel:
+ */
+ long futex_offset;
+
+ /*
+ * The death of the thread may race with userspace setting
+ * up a lock's links. So to handle this race, userspace first
+ * sets this field to the address of the to-be-taken lock,
+ * then does the lock acquire, and then adds itself to the
+ * list, and then clears this field. Hence the kernel will
+ * always have full knowledge of all locks that the thread
+ * _might_ have taken. We check the owner TID in any case,
+ * so only truly owned locks will be handled.
+ */
+ struct vki_robust_list __user *list_op_pending;
+};
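
/* Illustrative sketch (not part of the patch): the user-space side of the
   protocol described in the comments above, roughly what a robust-mutex
   implementation does on lock.  It uses the exported <linux/futex.h> types
   that the vki_ definitions above mirror; struct robust_mutex, lock_robust
   and the spinning acquire are invented for the example - only the ordering
   of the list_op_pending / list updates reflects the documented protocol. */
#include <linux/futex.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

struct robust_mutex {
   int futex;                  /* 0 = free, otherwise holds the owner's TID  */
   struct robust_list link;    /* entry the kernel walks when the thread dies */
};

/* Per-thread head (one thread here; real code makes it __thread), registered
   earlier with set_robust_list().  futex_offset tells the kernel how to get
   from a list entry back to its futex word. */
static struct robust_list_head thread_head = {
   .list         = { &thread_head.list },
   .futex_offset = (long)offsetof(struct robust_mutex, futex)
                   - (long)offsetof(struct robust_mutex, link),
};

static void lock_robust(struct robust_mutex *m)
{
   pid_t tid = (pid_t)syscall(SYS_gettid);
   /* 1. Announce the lock we are about to take, so that dying between the
         acquire and the list insertion is still visible to the kernel. */
   thread_head.list_op_pending = &m->link;
   while (!__sync_bool_compare_and_swap(&m->futex, 0, tid))
      ;                        /* spin; a real lock would FUTEX_WAIT here */
   /* 2. Link the lock into the per-thread list, then retire the marker. */
   m->link.next = thread_head.list.next;
   thread_head.list.next = &m->link;
   thread_head.list_op_pending = NULL;
}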
+
//----------------------------------------------------------------------
// From linux-2.6.8.1/include/linux/errno.h
//----------------------------------------------------------------------