Always use INIT bounds in __tls_get_addr.
Set bounds manually in _Unwind_Resume.
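The __tls_get_addr hunk below widens the incoming TLS descriptor pointer to INIT bounds, i.e. bounds that permit access to the whole address space, presumably because the compiler-generated TLS sequences that reach it do not set up a bound register for the argument. A minimal sketch of what INIT bounds mean, assuming the patch's __bnd_init_ptr_bounds behaves like GCC's documented __builtin___bnd_init_ptr_bounds:

/* Sketch only, not part of the patch: widening a pointer to INIT
   bounds under GCC's -fcheck-pointer-bounds instrumentation.  */
void *
widen_to_init_bounds (void *p)
{
#ifdef __CHKP__
  /* Return P associated with INIT bounds [0, ~0]; subsequent accesses
     through the result always pass the bounds check, whatever narrow
     or stale bounds P arrived with.  */
  p = __builtin___bnd_init_ptr_bounds (p);
#endif
  return p;
}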
void *
__tls_get_addr (GET_ADDR_ARGS)
{
+#ifdef __CHKP__
+ GET_ADDR_PARAM = __bnd_init_ptr_bounds (GET_ADDR_PARAM);
+#endif
dtv_t *dtv = THREAD_DTV ();
if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
je .Lreltmo
#endif
+#if defined __CHKP__ || defined __CHKWR__
+ /* Bound registers are call-clobbered: load the bounds of the pointer
+    arguments from the bounds table and spill them to the frame so they
+    can be reloaded after each call below.  */
+ bndldx (%esp,%ebx,1), %bnd0
+ bndldx 28(%esp,%ebp,1), %bnd2
+ bndmov %bnd0, 48(%esp)
+ bndmov %bnd2, 80(%esp)
+#endif
+
/* Get internal lock. */
movl $1, %edx
xorl %eax, %eax
different value in there this is a bad user bug. */
2: cmpl $-1, dep_mutex(%ebx)
movl 24(%esp), %eax
+#if defined __CHKP__ || defined __CHKWR__
+ /* Load and spill the mutex pointer's bounds as well.  */
+ bndldx 4(%esp,%eax,1), %bnd1
+ bndmov %bnd1, 64(%esp)
+#endif
je 17f
movl %eax, dep_mutex(%ebx)
/* Unlock the mutex. */
17: xorl %edx, %edx
+#if defined __CHKP__ || defined __CHKWR__
+ /* The mutex is the call's pointer argument: its bounds go in %bnd0.  */
+ bndmov %bnd1, %bnd0
+#endif
call __pthread_mutex_unlock_usercnt
+#if defined __CHKP__ || defined __CHKWR__
+ /* Reload the bounds the call may have clobbered.  */
+ bndmov 48(%esp), %bnd0
+ bndmov 64(%esp), %bnd1
+ bndmov 80(%esp), %bnd2
+#endif
testl %eax, %eax
jne 16f
should always succeed or else the kernel did not lock the mutex
correctly. */
movl dep_mutex(%ebx), %eax
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
call __pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 48(%esp), %bnd0
+ bndmov 64(%esp), %bnd1
+ bndmov 80(%esp), %bnd2
+#endif
xorl %edx, %edx
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
call __pthread_mutex_unlock_usercnt
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 48(%esp), %bnd0
+ bndmov 64(%esp), %bnd1
+ bndmov 80(%esp), %bnd2
+#endif
jmp 8b
28: addl $1, wakeup_seq(%ebx)
movl 16(%esp), %ecx
testl %ecx, %ecx
jnz 27f
-
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
call __pthread_mutex_cond_lock
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 48(%esp), %bnd0
+ bndmov 64(%esp), %bnd1
+ bndmov 80(%esp), %bnd2
+#endif
26: addl $FRAME_SIZE, %esp
cfi_adjust_cfa_offset(-FRAME_SIZE)
cfi_restore_state
-27: call __pthread_mutex_cond_lock_adjust
+27:
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
+ call __pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 48(%esp), %bnd0
+ bndmov 64(%esp), %bnd1
+ bndmov 80(%esp), %bnd2
+#endif
xorl %eax, %eax
jmp 26b
/* Unlock the mutex. */
117: xorl %edx, %edx
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
call __pthread_mutex_unlock_usercnt
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 48(%esp), %bnd0
+ bndmov 64(%esp), %bnd1
+ bndmov 80(%esp), %bnd2
+#endif
testl %eax, %eax
jne 16b
cmpl %ebx, %gs:TID
jne 8f
/* We managed to get the lock. Fix it up before returning. */
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
call __pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 48(%esp), %bnd0
+ bndmov 64(%esp), %bnd1
+ bndmov 80(%esp), %bnd2
+#endif
jmp 9f
-8: call __pthread_mutex_cond_lock
+8:
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
+ call __pthread_mutex_cond_lock
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 48(%esp), %bnd0
+ bndmov 64(%esp), %bnd1
+ bndmov 80(%esp), %bnd2
+#endif
9: movl %esi, (%esp)
.LcallUR:
xorl %esi, %esi
movl 20(%esp), %ebx
+#if defined __CHKP__ || defined __CHKWR__
+ bndldx (%esp,%ebx,1), %bnd0
+ bndmov %bnd0, 32(%esp)
+#endif
LIBC_PROBE (cond_wait, 2, 24(%esp), %ebx)
different value in there this is a bad user bug. */
2: cmpl $-1, dep_mutex(%ebx)
movl 24(%esp), %eax
+#if defined __CHKP__ || defined __CHKWR__
+ bndldx 4(%esp,%eax,1), %bnd1
+ bndmov %bnd1, 48(%esp)
+#endif
je 15f
movl %eax, dep_mutex(%ebx)
/* Unlock the mutex. */
15: xorl %edx, %edx
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
call __pthread_mutex_unlock_usercnt
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 32(%esp), %bnd0
+ bndmov 48(%esp), %bnd1
+#endif
testl %eax, %eax
jne 12f
testl %ecx, %ecx
jnz 21f
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
call __pthread_mutex_cond_lock
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 32(%esp), %bnd0
+ bndmov 48(%esp), %bnd1
+#endif
20: addl $FRAME_SIZE, %esp
cfi_adjust_cfa_offset(-FRAME_SIZE);
cfi_restore_state
-21: call __pthread_mutex_cond_lock_adjust
+21:
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
+ call __pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 32(%esp), %bnd0
+ bndmov 48(%esp), %bnd1
+#endif
xorl %eax, %eax
jmp 20b
should always succeed or else the kernel did not lock the mutex
correctly. */
movl dep_mutex(%ebx), %eax
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
call __pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 32(%esp), %bnd0
+ bndmov 48(%esp), %bnd1
+#endif
xorl %edx, %edx
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
call __pthread_mutex_unlock_usercnt
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 32(%esp), %bnd0
+ bndmov 48(%esp), %bnd1
+#endif
jmp 8b
/* Initial locking failed. */
cmpl %ebx, %gs:TID
jne 8f
/* We managed to get the lock. Fix it up before returning. */
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
call __pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 32(%esp), %bnd0
+ bndmov 48(%esp), %bnd1
+#endif
jmp 9f
-8: call __pthread_mutex_cond_lock
+8:
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
+ call __pthread_mutex_cond_lock
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 32(%esp), %bnd0
+ bndmov 48(%esp), %bnd1
+#endif
9: movl %esi, (%esp)
.LcallUR:
jne 7f
leal 8(%esp), %eax
+#if defined __CHKP__ || defined __CHKWR__
+ bndldx 8(%esp,%eax,1), %bnd0
+#endif
call HIDDEN_JUMPTARGET(__pthread_register_cancel)
/* Call the user-provided initialization function. */
/* Pop the cleanup handler. */
leal 8(%esp), %eax
+#if defined __CHKP__ || defined __CHKWR__
+ bndldx 8(%esp,%eax,1), %bnd0
+#endif
call HIDDEN_JUMPTARGET(__pthread_unregister_cancel)
addl $UNWINDBUFSIZE+8, %esp
cfi_adjust_cfa_offset (-UNWINDBUFSIZE-8)
ENTER_KERNEL
leal 8(%esp), %eax
+#if defined __CHKP__ || defined __CHKWR__
+ bndldx 8(%esp,%eax,1), %bnd0
+#endif
call HIDDEN_JUMPTARGET (__pthread_unwind_next)
/* NOTREACHED */
hlt
movq %rsi, 16(%rsp)
movq %rdx, %r13
+#if defined __CHKP__ || defined __CHKWR__
+ /* Spill the incoming argument bounds for reloading after the calls
+    below.  */
+ bndmov %bnd0, 72(%rsp)
+ bndmov %bnd1, 88(%rsp)
+ bndmov %bnd2, 104(%rsp)
+#endif
+
je 22f
mov %RSI_LP, dep_mutex(%rdi)
/* Unlock the mutex. */
32: movq 16(%rsp), %rdi
xorl %esi, %esi
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
callq __pthread_mutex_unlock_usercnt
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 72(%rsp), %bnd0
+ bndmov 88(%rsp), %bnd1
+ bndmov 104(%rsp), %bnd2
+#endif
testl %eax, %eax
jne 46f
testb %r15b, %r15b
jnz 64f
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
callq __pthread_mutex_cond_lock
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 72(%rsp), %bnd0
+ bndmov 88(%rsp), %bnd1
+ bndmov 104(%rsp), %bnd2
+#endif
63: testq %rax, %rax
cmoveq %r14, %rax
cfi_restore_state
-64: callq __pthread_mutex_cond_lock_adjust
+64:
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
+ callq __pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 72(%rsp), %bnd0
+ bndmov 88(%rsp), %bnd1
+ bndmov 104(%rsp), %bnd2
+#endif
movq %r14, %rax
jmp 48b
/* Unlock the mutex. */
2: movq 16(%rsp), %rdi
xorl %esi, %esi
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
callq __pthread_mutex_unlock_usercnt
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 72(%rsp), %bnd0
+ bndmov 88(%rsp), %bnd1
+ bndmov 104(%rsp), %bnd2
+#endif
testl %eax, %eax
jne 46b
cmpl %eax, %fs:TID
jne 7f
/* We managed to get the lock. Fix it up before returning. */
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
callq __pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 72(%rsp), %bnd0
+ bndmov 88(%rsp), %bnd1
+ bndmov 104(%rsp), %bnd2
+#endif
jmp 8f
7: callq __pthread_mutex_cond_lock
movq %rdi, 8(%rsp)
movq %rsi, 16(%rsp)
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd0, 32(%rsp)
+ bndmov %bnd1, 48(%rsp)
+#endif
+
je 15f
mov %RSI_LP, dep_mutex(%rdi)
/* Unlock the mutex. */
2: movq 16(%rsp), %rdi
xorl %esi, %esi
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
callq __pthread_mutex_unlock_usercnt
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 32(%rsp), %bnd0
+ bndmov 48(%rsp), %bnd1
+#endif
testl %eax, %eax
jne 12f
testb %r8b, %r8b
jnz 18f
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
callq __pthread_mutex_cond_lock
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 32(%rsp), %bnd0
+ bndmov 48(%rsp), %bnd1
+#endif
14: leaq FRAME_SIZE(%rsp), %rsp
cfi_adjust_cfa_offset(-FRAME_SIZE)
cfi_adjust_cfa_offset(FRAME_SIZE)
-18: callq __pthread_mutex_cond_lock_adjust
+18:
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
+ callq __pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 32(%rsp), %bnd0
+ bndmov 48(%rsp), %bnd1
+#endif
xorl %eax, %eax
jmp 14b
cmpl %eax, %fs:TID
jne 7f
/* We managed to get the lock. Fix it up before returning. */
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov %bnd1, %bnd0
+#endif
callq __pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+ bndmov 32(%rsp), %bnd0
+ bndmov 48(%rsp), %bnd1
+#endif
jmp 8f
-
7: callq __pthread_mutex_cond_lock
8: movq 24(%rsp), %rdi
void
_Unwind_Resume (struct _Unwind_Exception *exc)
{
+#ifdef __CHKP__
+ exc = (struct _Unwind_Exception *)
+   __bnd_set_ptr_bounds (exc, sizeof (struct _Unwind_Exception));
+#endif
if (__builtin_expect (libgcc_s_resume == NULL, 0))
init ();
libgcc_s_resume (exc);
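_Unwind_Resume receives the exception object from the unwinder without usable bounds, so the patch attaches them by hand, sized to the unwind header. A minimal sketch of the intrinsic's effect, again assuming __bnd_set_ptr_bounds matches GCC's documented __builtin___bnd_set_ptr_bounds (pointer, size); struct payload and bless_payload are hypothetical names for illustration:

/* Sketch only, not part of the patch: manufacture bounds for a
   pointer whose provenance carries none.  */
struct payload { char bytes[64]; };

struct payload *
bless_payload (void *raw)
{
#ifdef __CHKP__
  /* Associate RAW with the bounds [raw, raw + sizeof (struct payload));
     any access through the result outside that range faults.  */
  raw = __builtin___bnd_set_ptr_bounds (raw, sizeof (struct payload));
#endif
  return (struct payload *) raw;
}

Bounding to sizeof (struct _Unwind_Exception) is presumably enough here because this code only touches the unwind header itself, even though language runtimes embed that header in a larger exception object.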