marked as defined, so as to avoid spurious uninit-value errors.
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@185
like netscape. */
#define VG_PLAUSIBLE_STACK_SIZE 8000000
+/* Needed by the pthreads implementation. */
+#define VGM_WORD_VALID 0
+#define VGM_WORD_INVALID 0xFFFFFFFF
+
/* ---------------------------------------------------------------------
Exports of vg_syscall_mem.c
#define VGM_BYTE_VALID 0
#define VGM_BYTE_INVALID 0xFF
+/* Now in vg_include.h.
#define VGM_WORD_VALID 0
#define VGM_WORD_INVALID 0xFFFFFFFF
+*/
#define VGM_EFLAGS_VALID 0xFFFFFFFE
#define VGM_EFLAGS_INVALID 0xFFFFFFFF
{
# define SIMPLE_RETURN(vvv) \
{ tst->m_edx = (vvv); \
+ tst->sh_edx = VGM_WORD_VALID; \
return True; \
}
#include <errno.h>
#define VG_PTHREAD_STACK_MIN \
- (VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB)
+ (VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB)
/* /usr/include/bits/pthreadtypes.h:
typedef unsigned long int pthread_t;
*/
+/* Write a value to the client's %EDX (request return value register)
+ and set the shadow to indicate it is defined. */
+#define SET_EDX(zztid, zzval) \
+ do { vg_threads[zztid].m_edx = (zzval); \
+ vg_threads[zztid].sh_edx = VGM_WORD_VALID; \
+ } while (0)
+
/* -----------------------------------------------------------
Thread CREATION, JOINAGE and CANCELLATION.
if (!is_valid_tid(tid_cancellee)
|| vg_threads[tid_cancellee].status == VgTs_Empty) {
- vg_threads[tid].m_edx = ESRCH;
+ SET_EDX(tid, ESRCH);
return;
}
vg_threads[tid_cancellee].status = VgTs_Runnable;
/* We return with success (0). */
- vg_threads[tid].m_edx = 0;
+ SET_EDX(tid, 0);
}
jnr_thread_return = (void**)(jnr_args[2]);
if (jnr_thread_return != NULL)
*jnr_thread_return = vg_threads[tid].retval;
- vg_threads[jnr].m_edx = 0; /* success */
+ SET_EDX(jnr, 0); /* success */
vg_threads[jnr].status = VgTs_Runnable;
vg_threads[tid].status = VgTs_Empty; /* bye! */
if (VG_(clo_instrument) && tid != 0)
vg_assert(vg_threads[tid].status == VgTs_Runnable);
if (jee == tid) {
- vg_threads[tid].m_edx = EDEADLK; /* libc constant, not a kernel one */
+ SET_EDX(tid, EDEADLK); /* libc constant, not a kernel one */
vg_threads[tid].status = VgTs_Runnable;
return;
}
|| jee >= VG_N_THREADS
|| vg_threads[jee].status == VgTs_Empty) {
/* Invalid thread to join to. */
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
vg_threads[tid].status = VgTs_Runnable;
return;
}
if (vg_threads[jee].joiner != VG_INVALID_THREADID) {
/* Someone already did join on this thread */
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
vg_threads[tid].status = VgTs_Runnable;
return;
}
free it properly (also above). */
if (vg_threads[jee].status == VgTs_WaitJoiner) {
vg_assert(vg_threads[jee].joiner == VG_INVALID_THREADID);
- vg_threads[tid].m_edx = 0; /* success */
- if (thread_return != NULL)
+ SET_EDX(tid, 0); /* success */
+ if (thread_return != NULL) {
*thread_return = vg_threads[jee].retval;
+ /* Not really right, since it makes the thread's return value
+ appear to be defined even if it isn't. */
+ if (VG_(clo_instrument))
+ VGM_(make_readable)( (Addr)thread_return, sizeof(void*) );
+ }
vg_threads[tid].status = VgTs_Runnable;
vg_threads[jee].status = VgTs_Empty; /* bye! */
if (VG_(clo_instrument) && jee != 0)
// if (VG_(clo_instrument))
// ***** CHECK *thread is writable
*thread = (pthread_t)tid;
+ if (VG_(clo_instrument))
+ VGM_(make_readable)( (Addr)thread, sizeof(pthread_t) );
vg_threads[tid].associated_mx = NULL;
vg_threads[tid].associated_cv = NULL;
vg_threads[tid].specifics[i] = NULL;
/* return zero */
- vg_threads[tid].m_edx = 0; /* success */
+ SET_EDX(tid, 0); /* success */
}
/* POSIX doesn't mandate this, but for sanity ... */
if (mutex == NULL) {
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
if (mutex->__m_count >= 0) break;
/* else fall thru */
default:
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
if (mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
/* return 0 (success). */
mutex->__m_count++;
- vg_threads[tid].m_edx = 0;
+ SET_EDX(tid, 0);
if (0)
VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
tid, mutex, mutex->__m_count);
return;
} else {
if (is_trylock)
- vg_threads[tid].m_edx = EBUSY;
+ SET_EDX(tid, EBUSY);
else
- vg_threads[tid].m_edx = EDEADLK;
+ SET_EDX(tid, EDEADLK);
return;
}
} else {
/* GUARD: __m_count > 0 && __m_owner is valid */
if (is_trylock) {
/* caller is polling; so return immediately. */
- vg_threads[tid].m_edx = EBUSY;
+ SET_EDX(tid, EBUSY);
} else {
vg_threads[tid].status = VgTs_WaitMX;
vg_threads[tid].associated_mx = mutex;
- vg_threads[tid].m_edx = 0; /* pth_mx_lock success value */
+ SET_EDX(tid, 0); /* pth_mx_lock success value */
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
caller, mutex );
mutex->__m_owner = (_pthread_descr)tid;
vg_assert(vg_threads[tid].associated_mx == NULL);
/* return 0 (success). */
- vg_threads[tid].m_edx = 0;
+ SET_EDX(tid, 0);
}
}
&& vg_threads[tid].status == VgTs_Runnable);
if (mutex == NULL) {
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
if (mutex->__m_count >= 0) break;
/* else fall thru */
default:
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
/* Barf if we don't currently hold the mutex. */
if (mutex->__m_count == 0 /* nobody holds it */
|| (ThreadId)mutex->__m_owner != tid /* we don't hold it */) {
- vg_threads[tid].m_edx = EPERM;
+ SET_EDX(tid, EPERM);
return;
}
if (mutex->__m_count > 1) {
vg_assert(mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
mutex->__m_count --;
- vg_threads[tid].m_edx = 0; /* success */
+ SET_EDX(tid, 0); /* success */
return;
}
release_one_thread_waiting_on_mutex ( mutex, "pthread_mutex_lock" );
/* Our (tid's) pth_unlock() returns with 0 (success). */
- vg_threads[tid].m_edx = 0; /* Success. */
+ SET_EDX(tid, 0); /* Success. */
}
/* Currently unheld; hand it out to thread tid. */
vg_assert(mx->__m_count == 0);
vg_threads[tid].status = VgTs_Runnable;
- vg_threads[tid].m_edx = ETIMEDOUT;
- /* pthread_cond_wait return value */
+ SET_EDX(tid, ETIMEDOUT); /* pthread_cond_wait return value */
vg_threads[tid].associated_cv = NULL;
vg_threads[tid].associated_mx = NULL;
mx->__m_owner = (_pthread_descr)tid;
mx->__m_count = 1;
if (VG_(clo_trace_pthread_level) >= 1) {
- VG_(sprintf)(msg_buf, "pthread_cond_timedwai cv %p: TIMEOUT with mx %p",
- cv, mx );
+ VG_(sprintf)(msg_buf,
+ "pthread_cond_timedwai cv %p: TIMEOUT with mx %p",
+ cv, mx );
print_pthread_event(tid, msg_buf);
}
} else {
/* Currently held. Make thread tid be blocked on it. */
vg_assert(mx->__m_count > 0);
vg_threads[tid].status = VgTs_WaitMX;
- vg_threads[tid].m_edx = ETIMEDOUT;
- /* pthread_cond_wait return value */
+ SET_EDX(tid, ETIMEDOUT); /* pthread_cond_wait return value */
vg_threads[tid].associated_cv = NULL;
vg_threads[tid].associated_mx = mx;
if (VG_(clo_trace_pthread_level) >= 1) {
vg_threads[i].status = VgTs_WaitMX;
vg_threads[i].associated_cv = NULL;
vg_threads[i].associated_mx = mx;
- vg_threads[i].m_edx = 0; /* pth_cond_wait success value */
+ SET_EDX(i, 0); /* pth_cond_wait success value */
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
&& vg_threads[tid].status == VgTs_Runnable);
if (mutex == NULL || cond == NULL) {
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
if (mutex->__m_count >= 0) break;
/* else fall thru */
default:
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
/* Barf if we don't currently hold the mutex. */
if (mutex->__m_count == 0 /* nobody holds it */
|| (ThreadId)mutex->__m_owner != tid /* we don't hold it */) {
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
&& vg_threads[tid].status == VgTs_Runnable);
if (cond == NULL) {
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
caller
);
- vg_threads[tid].m_edx = 0; /* success */
+ SET_EDX(tid, 0); /* success */
}
break;
if (i == VG_N_THREAD_KEYS) {
- /* vg_threads[tid].m_edx = EAGAIN;
+ /* SET_EDX(tid, EAGAIN);
return;
*/
VG_(panic)("pthread_key_create: VG_N_THREAD_KEYS is too low;"
}
vg_thread_keys[i].inuse = True;
+
/* TODO: check key for addressability */
*key = i;
- vg_threads[tid].m_edx = 0;
+ if (VG_(clo_instrument))
+ VGM_(make_readable)( (Addr)key, sizeof(pthread_key_t) );
+
+ SET_EDX(tid, 0);
}
&& vg_threads[tid].status == VgTs_Runnable);
if (!is_valid_key(key)) {
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
&& vg_threads[tid].status == VgTs_Runnable);
if (!is_valid_key(key)) {
- vg_threads[tid].m_edx = (UInt)NULL;
+ SET_EDX(tid, (UInt)NULL);
return;
}
- vg_threads[tid].m_edx = (UInt)vg_threads[tid].specifics[key];
+ SET_EDX(tid, (UInt)vg_threads[tid].specifics[key]);
}
&& vg_threads[tid].status == VgTs_Runnable);
if (!is_valid_key(key)) {
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
vg_threads[tid].specifics[key] = pointer;
- vg_threads[tid].m_edx = 0;
+ SET_EDX(tid, 0);
}
case VG_USERREQ__MAKE_NOACCESS_STACK:
case VG_USERREQ__RUNNING_ON_VALGRIND:
case VG_USERREQ__DO_LEAK_CHECK:
- vg_threads[tid].m_edx
- = VG_(handle_client_request) ( &vg_threads[tid], arg );
+ SET_EDX(
+ tid,
+ VG_(handle_client_request) ( &vg_threads[tid], arg )
+ );
break;
case VG_USERREQ__SIGNAL_RETURNS:
like netscape. */
#define VG_PLAUSIBLE_STACK_SIZE 8000000
+/* Needed by the pthreads implementation. */
+#define VGM_WORD_VALID 0
+#define VGM_WORD_INVALID 0xFFFFFFFF
+
/* ---------------------------------------------------------------------
Exports of vg_syscall_mem.c
#define VGM_BYTE_VALID 0
#define VGM_BYTE_INVALID 0xFF
+/* Now in vg_include.h.
#define VGM_WORD_VALID 0
#define VGM_WORD_INVALID 0xFFFFFFFF
+*/
#define VGM_EFLAGS_VALID 0xFFFFFFFE
#define VGM_EFLAGS_INVALID 0xFFFFFFFF
{
# define SIMPLE_RETURN(vvv) \
{ tst->m_edx = (vvv); \
+ tst->sh_edx = VGM_WORD_VALID; \
return True; \
}
#include <errno.h>
#define VG_PTHREAD_STACK_MIN \
- (VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB)
+ (VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB)
/* /usr/include/bits/pthreadtypes.h:
typedef unsigned long int pthread_t;
*/
+/* Write a value to the client's %EDX (request return value register)
+ and set the shadow to indicate it is defined. */
+#define SET_EDX(zztid, zzval) \
+ do { vg_threads[zztid].m_edx = (zzval); \
+ vg_threads[zztid].sh_edx = VGM_WORD_VALID; \
+ } while (0)
+
/* -----------------------------------------------------------
Thread CREATION, JOINAGE and CANCELLATION.
if (!is_valid_tid(tid_cancellee)
|| vg_threads[tid_cancellee].status == VgTs_Empty) {
- vg_threads[tid].m_edx = ESRCH;
+ SET_EDX(tid, ESRCH);
return;
}
vg_threads[tid_cancellee].status = VgTs_Runnable;
/* We return with success (0). */
- vg_threads[tid].m_edx = 0;
+ SET_EDX(tid, 0);
}
jnr_thread_return = (void**)(jnr_args[2]);
if (jnr_thread_return != NULL)
*jnr_thread_return = vg_threads[tid].retval;
- vg_threads[jnr].m_edx = 0; /* success */
+ SET_EDX(jnr, 0); /* success */
vg_threads[jnr].status = VgTs_Runnable;
vg_threads[tid].status = VgTs_Empty; /* bye! */
if (VG_(clo_instrument) && tid != 0)
vg_assert(vg_threads[tid].status == VgTs_Runnable);
if (jee == tid) {
- vg_threads[tid].m_edx = EDEADLK; /* libc constant, not a kernel one */
+ SET_EDX(tid, EDEADLK); /* libc constant, not a kernel one */
vg_threads[tid].status = VgTs_Runnable;
return;
}
|| jee >= VG_N_THREADS
|| vg_threads[jee].status == VgTs_Empty) {
/* Invalid thread to join to. */
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
vg_threads[tid].status = VgTs_Runnable;
return;
}
if (vg_threads[jee].joiner != VG_INVALID_THREADID) {
/* Someone already did join on this thread */
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
vg_threads[tid].status = VgTs_Runnable;
return;
}
free it properly (also above). */
if (vg_threads[jee].status == VgTs_WaitJoiner) {
vg_assert(vg_threads[jee].joiner == VG_INVALID_THREADID);
- vg_threads[tid].m_edx = 0; /* success */
- if (thread_return != NULL)
+ SET_EDX(tid, 0); /* success */
+ if (thread_return != NULL) {
*thread_return = vg_threads[jee].retval;
+ /* Not really right, since it makes the thread's return value
+ appear to be defined even if it isn't. */
+ if (VG_(clo_instrument))
+ VGM_(make_readable)( (Addr)thread_return, sizeof(void*) );
+ }
vg_threads[tid].status = VgTs_Runnable;
vg_threads[jee].status = VgTs_Empty; /* bye! */
if (VG_(clo_instrument) && jee != 0)
// if (VG_(clo_instrument))
// ***** CHECK *thread is writable
*thread = (pthread_t)tid;
+ if (VG_(clo_instrument))
+ VGM_(make_readable)( (Addr)thread, sizeof(pthread_t) );
vg_threads[tid].associated_mx = NULL;
vg_threads[tid].associated_cv = NULL;
vg_threads[tid].specifics[i] = NULL;
/* return zero */
- vg_threads[tid].m_edx = 0; /* success */
+ SET_EDX(tid, 0); /* success */
}
/* POSIX doesn't mandate this, but for sanity ... */
if (mutex == NULL) {
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
if (mutex->__m_count >= 0) break;
/* else fall thru */
default:
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
if (mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP) {
/* return 0 (success). */
mutex->__m_count++;
- vg_threads[tid].m_edx = 0;
+ SET_EDX(tid, 0);
if (0)
VG_(printf)("!!!!!! tid %d, mx %p -> locked %d\n",
tid, mutex, mutex->__m_count);
return;
} else {
if (is_trylock)
- vg_threads[tid].m_edx = EBUSY;
+ SET_EDX(tid, EBUSY);
else
- vg_threads[tid].m_edx = EDEADLK;
+ SET_EDX(tid, EDEADLK);
return;
}
} else {
/* GUARD: __m_count > 0 && __m_owner is valid */
if (is_trylock) {
/* caller is polling; so return immediately. */
- vg_threads[tid].m_edx = EBUSY;
+ SET_EDX(tid, EBUSY);
} else {
vg_threads[tid].status = VgTs_WaitMX;
vg_threads[tid].associated_mx = mutex;
- vg_threads[tid].m_edx = 0; /* pth_mx_lock success value */
+ SET_EDX(tid, 0); /* pth_mx_lock success value */
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
caller, mutex );
mutex->__m_owner = (_pthread_descr)tid;
vg_assert(vg_threads[tid].associated_mx == NULL);
/* return 0 (success). */
- vg_threads[tid].m_edx = 0;
+ SET_EDX(tid, 0);
}
}
&& vg_threads[tid].status == VgTs_Runnable);
if (mutex == NULL) {
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
if (mutex->__m_count >= 0) break;
/* else fall thru */
default:
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
/* Barf if we don't currently hold the mutex. */
if (mutex->__m_count == 0 /* nobody holds it */
|| (ThreadId)mutex->__m_owner != tid /* we don't hold it */) {
- vg_threads[tid].m_edx = EPERM;
+ SET_EDX(tid, EPERM);
return;
}
if (mutex->__m_count > 1) {
vg_assert(mutex->__m_kind == PTHREAD_MUTEX_RECURSIVE_NP);
mutex->__m_count --;
- vg_threads[tid].m_edx = 0; /* success */
+ SET_EDX(tid, 0); /* success */
return;
}
release_one_thread_waiting_on_mutex ( mutex, "pthread_mutex_lock" );
/* Our (tid's) pth_unlock() returns with 0 (success). */
- vg_threads[tid].m_edx = 0; /* Success. */
+ SET_EDX(tid, 0); /* Success. */
}
/* Currently unheld; hand it out to thread tid. */
vg_assert(mx->__m_count == 0);
vg_threads[tid].status = VgTs_Runnable;
- vg_threads[tid].m_edx = ETIMEDOUT;
- /* pthread_cond_wait return value */
+ SET_EDX(tid, ETIMEDOUT); /* pthread_cond_wait return value */
vg_threads[tid].associated_cv = NULL;
vg_threads[tid].associated_mx = NULL;
mx->__m_owner = (_pthread_descr)tid;
mx->__m_count = 1;
if (VG_(clo_trace_pthread_level) >= 1) {
- VG_(sprintf)(msg_buf, "pthread_cond_timedwai cv %p: TIMEOUT with mx %p",
- cv, mx );
+ VG_(sprintf)(msg_buf,
+ "pthread_cond_timedwai cv %p: TIMEOUT with mx %p",
+ cv, mx );
print_pthread_event(tid, msg_buf);
}
} else {
/* Currently held. Make thread tid be blocked on it. */
vg_assert(mx->__m_count > 0);
vg_threads[tid].status = VgTs_WaitMX;
- vg_threads[tid].m_edx = ETIMEDOUT;
- /* pthread_cond_wait return value */
+ SET_EDX(tid, ETIMEDOUT); /* pthread_cond_wait return value */
vg_threads[tid].associated_cv = NULL;
vg_threads[tid].associated_mx = mx;
if (VG_(clo_trace_pthread_level) >= 1) {
vg_threads[i].status = VgTs_WaitMX;
vg_threads[i].associated_cv = NULL;
vg_threads[i].associated_mx = mx;
- vg_threads[i].m_edx = 0; /* pth_cond_wait success value */
+ SET_EDX(i, 0); /* pth_cond_wait success value */
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf, "%s cv %p: BLOCK for mx %p",
&& vg_threads[tid].status == VgTs_Runnable);
if (mutex == NULL || cond == NULL) {
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
if (mutex->__m_count >= 0) break;
/* else fall thru */
default:
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
/* Barf if we don't currently hold the mutex. */
if (mutex->__m_count == 0 /* nobody holds it */
|| (ThreadId)mutex->__m_owner != tid /* we don't hold it */) {
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
&& vg_threads[tid].status == VgTs_Runnable);
if (cond == NULL) {
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
caller
);
- vg_threads[tid].m_edx = 0; /* success */
+ SET_EDX(tid, 0); /* success */
}
break;
if (i == VG_N_THREAD_KEYS) {
- /* vg_threads[tid].m_edx = EAGAIN;
+ /* SET_EDX(tid, EAGAIN);
return;
*/
VG_(panic)("pthread_key_create: VG_N_THREAD_KEYS is too low;"
}
vg_thread_keys[i].inuse = True;
+
/* TODO: check key for addressability */
*key = i;
- vg_threads[tid].m_edx = 0;
+ if (VG_(clo_instrument))
+ VGM_(make_readable)( (Addr)key, sizeof(pthread_key_t) );
+
+ SET_EDX(tid, 0);
}
&& vg_threads[tid].status == VgTs_Runnable);
if (!is_valid_key(key)) {
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
&& vg_threads[tid].status == VgTs_Runnable);
if (!is_valid_key(key)) {
- vg_threads[tid].m_edx = (UInt)NULL;
+ SET_EDX(tid, (UInt)NULL);
return;
}
- vg_threads[tid].m_edx = (UInt)vg_threads[tid].specifics[key];
+ SET_EDX(tid, (UInt)vg_threads[tid].specifics[key]);
}
&& vg_threads[tid].status == VgTs_Runnable);
if (!is_valid_key(key)) {
- vg_threads[tid].m_edx = EINVAL;
+ SET_EDX(tid, EINVAL);
return;
}
vg_threads[tid].specifics[key] = pointer;
- vg_threads[tid].m_edx = 0;
+ SET_EDX(tid, 0);
}
case VG_USERREQ__MAKE_NOACCESS_STACK:
case VG_USERREQ__RUNNING_ON_VALGRIND:
case VG_USERREQ__DO_LEAK_CHECK:
- vg_threads[tid].m_edx
- = VG_(handle_client_request) ( &vg_threads[tid], arg );
+ SET_EDX(
+ tid,
+ VG_(handle_client_request) ( &vg_threads[tid], arg )
+ );
break;
case VG_USERREQ__SIGNAL_RETURNS: