/* Hook for replace_malloc.o to get malloc functions */
#define VG_USERREQ__GET_MALLOCFUNCS 0x3030
+/* Get stack information for a thread. */
+#define VG_USERREQ__GET_STACK_INFO 0x3033
+
/* Cosmetic ... */
#define VG_USERREQ__GET_PTHREAD_TRACE_LEVEL 0x3101
/* Log a pthread error from client-space. Cosmetic. */
}
CleanupType;
+/* Information on a thread's stack.  Passed between the client-side
+   pthread library and the scheduler via VG_USERREQ__GET_STACK_INFO and
+   VG_USERREQ__APPLY_IN_NEW_THREAD. */
+typedef
+   struct {
+      /* Lowest address of the stack area. */
+      Addr base;
+      /* Usable stack size in bytes (excludes the redzone and the
+         guard area -- see do__get_stack_info). */
+      UInt size;
+      /* Size of the guard area in bytes. */
+      UInt guardsize;
+   }
+   StackInfo;
+
/* An entry in a threads's cleanup stack. */
typedef
struct {
*/
Addr stack_base;
+ /* The allocated size of this thread's stack's guard area (permanently
+ zero if this is ThreadId == 0, since we didn't allocate its stack) */
+ UInt stack_guard_size;
+
/* Address of the highest legitimate word in this stack. This is
used for error messages only -- not critical for execution
      correctness. It is set for all stacks, specifically including
/* Linuxthreads sets this field to the value __getpagesize(), so I
guess the following is OK. */
vg_attr->__vg_guardsize = VKI_BYTES_PER_PAGE;
+ /* No special stack yet. */
+ vg_attr->__vg_stackaddr = 0;
+ vg_attr->__vg_stacksize = VG_PTHREAD_STACK_SIZE;
return 0;
}
{
vg_pthread_attr_t* vg_attr;
CONVERT(attr, attr, vg_attr);
-
*detachstate = vg_attr->__vg_detachstate;
return 0;
}
}
WEAK
-int pthread_attr_setstacksize (pthread_attr_t *__attr,
-                               size_t __stacksize)
+int pthread_attr_setstacksize (pthread_attr_t *attr,
+                               size_t stacksize)
{
-   size_t limit;
-   char buf[1024];
-
-   ensure_valgrind("pthread_attr_setstacksize");
-   limit = VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
-           - 1000; /* paranoia */
-   if (__stacksize < limit)
-      return 0;
-   snprintf(buf, sizeof(buf), "pthread_attr_setstacksize: "
-            "requested size %d >= VG_PTHREAD_STACK_SIZE\n   "
-            "edit vg_include.h and rebuild.", __stacksize);
-   buf[sizeof(buf)-1] = '\0'; /* Make sure it is zero terminated */
-   barf(buf);
+   /* Stacks are now sized per-thread at creation time, so simply record
+      the requested size; do__apply_in_new_thread honours it.
+      NOTE(review): no lower bound is enforced here (POSIX permits EINVAL
+      for sizes below PTHREAD_STACK_MIN) -- confirm this is intentional. */
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+   vg_attr->__vg_stacksize = stacksize;
+   return 0;
}
/* Pretty bogus. Avoid if possible. */
int pthread_getattr_np (pthread_t thread, pthread_attr_t *attr)
{
+   StackInfo si;
+   int res;
   int detached;
-   size_t limit;
   vg_pthread_attr_t* vg_attr;
   CONVERT(attr, attr, vg_attr);
   ensure_valgrind("pthread_getattr_np");
   kludged("pthread_getattr_np", NULL);
-   limit = VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
-           - 1000; /* paranoia */
   vg_attr->__vg_detachstate = PTHREAD_CREATE_JOINABLE;
   vg_attr->__vg_schedpolicy = SCHED_OTHER;
   vg_attr->__vg_schedparam.sched_priority = 0;
   vg_attr->__vg_inheritsched = PTHREAD_EXPLICIT_SCHED;
   vg_attr->__vg_scope = PTHREAD_SCOPE_SYSTEM;
+   /* Ask the scheduler for the thread's real stack layout. */
+   VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
+                           VG_USERREQ__GET_STACK_INFO,
+                           thread, &si, 0, 0 );
+   /* If the request failed (unknown thread), si was never filled in --
+      don't copy uninitialised garbage into the caller's attr. */
+   if (res != 0)
+      return ESRCH;
+   vg_attr->__vg_guardsize = si.guardsize;
+   vg_attr->__vg_stackaddr = (void *)si.base;
   vg_attr->__vg_stackaddr_set = 0;
-   vg_attr->__vg_stacksize = limit;
+   vg_attr->__vg_stacksize = si.size;
   VALGRIND_MAGIC_SEQUENCE(detached, (-1) /* default */,
                           VG_USERREQ__SET_OR_GET_DETACH,
                           2 /* get */, thread, 0, 0);
}
-/* Bogus ... */
+/* Report the stack address and size recorded in the attribute object. */
+WEAK
+int pthread_attr_getstack ( const pthread_attr_t * attr,
+                            void ** stackaddr,
+                            size_t *stacksize )
+{
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+   ensure_valgrind("pthread_attr_getstack");
+   /* Either out-parameter may be NULL; fill in only what was asked for. */
+   if (stackaddr)
+      *stackaddr = vg_attr->__vg_stackaddr;
+   if (stacksize)
+      *stacksize = vg_attr->__vg_stacksize;
+   return 0;
+}
+
+
WEAK
int pthread_attr_getstackaddr ( const pthread_attr_t * attr,
                                void ** stackaddr )
{
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
   ensure_valgrind("pthread_attr_getstackaddr");
-   kludged("pthread_attr_getstackaddr", "(it always sets stackaddr to zero)");
+   /* Report the address stored in the attribute (zero unless one was
+      explicitly set; pthread_attr_init defaults it to 0). */
   if (stackaddr)
-      *stackaddr = NULL;
+      *stackaddr = vg_attr->__vg_stackaddr;
   return 0;
}
-/* Not bogus (!) */
WEAK
-int pthread_attr_getstacksize ( const pthread_attr_t * _attr,
-                                size_t * __stacksize )
+int pthread_attr_getstacksize ( const pthread_attr_t * attr,
+                                size_t * stacksize )
{
-   size_t limit;
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
   ensure_valgrind("pthread_attr_getstacksize");
-   limit = VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
-           - 1000; /* paranoia */
-   if (__stacksize)
-      *__stacksize = limit;
+   /* Report the stack size stored in the attribute object. */
+   if (stacksize)
+      *stacksize = vg_attr->__vg_stacksize;
   return 0;
}
}
-/* This is completely bogus. We reject all attempts to change it from
- VKI_BYTES_PER_PAGE. I don't have a clue what it's for so it seems
- safest to be paranoid. */
WEAK
int pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
{
-   static int moans = N_MOANS;
-
-   if (guardsize == VKI_BYTES_PER_PAGE)
-      return 0;
-
-   if (moans-- > 0)
-      ignored("pthread_attr_setguardsize",
-              "(it ignores any guardsize != 4096)");
-
+   /* Guard areas are now allocated per-thread, so just record the
+      requested size; it is applied when the thread's stack is set up
+      in do__apply_in_new_thread. */
+   vg_pthread_attr_t* vg_attr;
+   CONVERT(attr, attr, vg_attr);
+   vg_attr->__vg_guardsize = guardsize;
   return 0;
}
-/* A straight copy of the LinuxThreads code. */
WEAK
int pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
{
int tid_child;
NewThreadInfo* info;
int gs;
+ StackInfo si;
vg_pthread_attr_t* __vg_attr;
CONVERT(attr, __attr, __vg_attr);
info->arg = __arg;
sigprocmask(SIG_SETMASK, NULL, &info->sigmask);
+ if (__attr) {
+ si.base = (Addr)__vg_attr->__vg_stackaddr;
+ si.size = __vg_attr->__vg_stacksize;
+ si.guardsize = __vg_attr->__vg_guardsize;
+ } else {
+ si.base = (Addr)NULL;
+ si.size = VG_PTHREAD_STACK_SIZE;
+ si.guardsize = VKI_BYTES_PER_PAGE;
+ }
+
VALGRIND_MAGIC_SEQUENCE(tid_child, VG_INVALID_THREADID /* default */,
VG_USERREQ__APPLY_IN_NEW_THREAD,
- &thread_wrapper, info, 0, 0);
+ &thread_wrapper, info, &si, 0);
my_assert(tid_child != VG_INVALID_THREADID);
if (__thredd)
//__attribute__((weak)) void pthread_attr_getguardsize ( void )
// { vgPlain_unimp("pthread_attr_getguardsize"); }
-__attribute__((weak)) void pthread_attr_getstack ( void )
- { vgPlain_unimp("pthread_attr_getstack"); }
+//__attribute__((weak)) void pthread_attr_getstack ( void )
+// { vgPlain_unimp("pthread_attr_getstack"); }
__attribute__((weak)) void pthread_attr_getstackaddr ( void )
{ vgPlain_unimp("pthread_attr_getstackaddr"); }
__attribute__((weak)) void pthread_attr_getstacksize ( void )
mostly_clear_thread_record(i);
VG_(threads)[i].stack_size = 0;
VG_(threads)[i].stack_base = (Addr)NULL;
+ VG_(threads)[i].stack_guard_size = 0;
VG_(threads)[i].stack_highest_word = (Addr)NULL;
}
#include <pthread.h>
#include <errno.h>
-#define VG_PTHREAD_STACK_MIN \
- (VG_PTHREAD_STACK_SIZE - VG_AR_CLIENT_STACKBASE_REDZONE_SZB)
-
/* /usr/include/bits/pthreadtypes.h:
typedef unsigned long int pthread_t;
*/
static
void do__apply_in_new_thread ( ThreadId parent_tid,
                               void* (*fn)(void *),
-                              void* arg )
+                              void* arg,
+                              StackInfo *si )
{
   Addr new_stack;
   UInt new_stk_szb;
   /* Consider allocating the child a stack, if the one it already has
      is inadequate. */
-   new_stk_szb = VG_PTHREAD_STACK_MIN;
+   new_stk_szb = si->size + VG_AR_CLIENT_STACKBASE_REDZONE_SZB + si->guardsize;
+   /* Round the total up to a whole number of pages.  The mask must be
+      ~(pagesize-1), not ~pagesize: the latter clears only the single
+      page-size bit and leaves low-order bits set, so the result would
+      not be page-aligned. */
+   new_stk_szb = (new_stk_szb + VKI_BYTES_PER_PAGE - 1) & ~(VKI_BYTES_PER_PAGE - 1);
+
+   VG_(threads)[tid].stack_guard_size = si->guardsize;
   if (new_stk_szb > VG_(threads)[tid].stack_size) {
      /* Again, for good measure :) We definitely don't want to be
         allocating a stack for the main thread. */
      vg_assert(tid != 1);
-      /* for now, we don't handle the case of anything other than
-         assigning it for the first time. */
-      vg_assert(VG_(threads)[tid].stack_size == 0);
-      vg_assert(VG_(threads)[tid].stack_base == (Addr)NULL);
+      /* A recycled thread slot may still own an old, too-small stack;
+         release it before allocating the bigger one. */
+      if (VG_(threads)[tid].stack_size > 0)
+         VG_(client_free)(VG_(threads)[tid].stack_base);
      new_stack = VG_(client_alloc)(0, new_stk_szb,
                     VKI_PROT_READ | VKI_PROT_WRITE | VKI_PROT_EXEC,
                     SF_STACK);
-                                 VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
   VG_TRACK ( die_mem_stack, VG_(threads)[tid].stack_base,
-             + new_stk_szb - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
+             VG_(threads)[tid].stack_size
+             - VG_AR_CLIENT_STACKBASE_REDZONE_SZB);
   VG_TRACK ( ban_mem_stack, VG_(threads)[tid].m_esp,
              VG_AR_CLIENT_STACKBASE_REDZONE_SZB );
VG_TRACK( post_mem_write, (Addr)fh, sizeof(ForkHandlerEntry) );
}
+
+/* Scheduler-side handler for VG_USERREQ__GET_STACK_INFO: fill *si with
+   the stack layout of thread 'which', on behalf of requesting thread
+   'tid'.  Sets the request's return value to 0 on success, -1 if
+   'which' is not a valid thread.
+   NOTE(review): 'size' is computed as stack_size - redzone - guardsize;
+   for a thread whose stack Valgrind did not allocate (e.g. the root
+   thread, whose guardsize stays 0), confirm the subtraction cannot wrap
+   below zero. */
+static
+void do__get_stack_info ( ThreadId tid, ThreadId which, StackInfo* si )
+{
+   Char msg_buf[100];
+
+   /* Only a valid, runnable thread may issue scheduler requests. */
+   vg_assert(VG_(is_valid_tid)(tid)
+             && VG_(threads)[tid].status == VgTs_Runnable);
+
+   if (VG_(clo_trace_sched)) {
+      VG_(sprintf)(msg_buf, "get_stack_info for tid %d", which );
+      print_pthread_event(tid, msg_buf);
+   }
+
+   /* Reject queries about nonexistent threads. */
+   if (!VG_(is_valid_tid)(which)) {
+      SET_PTHREQ_RETVAL(tid, -1);
+      return;
+   }
+
+   si->base = VG_(threads)[which].stack_base;
+   si->size = VG_(threads)[which].stack_size
+              - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
+              - VG_(threads)[which].stack_guard_size;
+   si->guardsize = VG_(threads)[which].stack_guard_size;
+
+   SET_PTHREQ_RETVAL(tid, 0);
+}
+
+
/* ---------------------------------------------------------------------
Specifying shadow register values
------------------------------------------------------------------ */
case VG_USERREQ__APPLY_IN_NEW_THREAD:
do__apply_in_new_thread ( tid, (void*(*)(void*))arg[1],
- (void*)arg[2] );
+ (void*)arg[2], (StackInfo*)(arg[3]) );
break;
case VG_USERREQ__GET_KEY_D_AND_S:
handle_signal_return(tid);
break;
+ case VG_USERREQ__GET_STACK_INFO:
+ do__get_stack_info( tid, (Int)(arg[1]), (StackInfo*)(arg[2]) );
+ break;
+
case VG_USERREQ__GET_SIGRT_MIN:
SET_PTHREQ_RETVAL(tid, VG_(sig_rtmin));
Int
stack_used = (Addr)VG_(threads)[i].stack_highest_word
- (Addr)VG_(threads)[i].m_esp;
-
+ Int
+ stack_avail = VG_(threads)[i].stack_size
+ - VG_AR_CLIENT_STACKBASE_REDZONE_SZB
+ - VG_(threads)[i].stack_guard_size;
/* This test is a bit bogus - it doesn't take into account
alternate signal stacks, for a start. Also, if a thread
has it's stack pointer somewhere strange, killing Valgrind
isn't the right answer. */
if (0 && i > 1 /* not the root thread */
- && stack_used
- >= (VG_PTHREAD_STACK_MIN - 1000 /* paranoia */)) {
+ && stack_used >= stack_avail) {
VG_(message)(Vg_UserMsg,
"Error: STACK OVERFLOW: "
"thread %d: stack used %d, available %d",
- i, stack_used, VG_PTHREAD_STACK_MIN );
+ i, stack_used, stack_avail );
VG_(message)(Vg_UserMsg,
"Terminating Valgrind. If thread(s) "
"really need more stack, increase");