/* -*- mode: C; c-basic-offset: 3; -*- */

/*--------------------------------------------------------------------*/
/*--- Wrappers for generic Unix system calls                       ---*/
/*--------------------------------------------------------------------*/

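/* Handle the client's brk() request: 'newbrk' is the address the
   client wants the end of its data segment moved to.  Shrink requests
   are handled lazily (the excess pages stay mapped); grow requests
   extend the anonymous client data segment.  Requests that cannot be
   honoured take the 'bad' exit. */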
static Addr do_brk ( Addr newbrk )
{
   NSegment const* aseg;
   Addr newbrkP;
   SizeT delta;
   Bool debug = False;

   if (debug)
      VG_(printf)("\ndo_brk: brk_base=%#lx brk_limit=%#lx newbrk=%#lx\n",
                  VG_(brk_base), VG_(brk_limit), newbrk);

   if (0) VG_(am_show_nsegments)(0, "in_brk");

   if (newbrk < VG_(brk_base))
      /* Clearly impossible. */
      goto bad;
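
   /* Below the current limit means the client is shrinking its data
      segment; at or above it (handled after this block) means growing
      it. */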

   if (newbrk < VG_(brk_limit)) {
      /* shrinking the data segment.  Be lazy and don't munmap the
         excess area. */
      NSegment const * seg = VG_(am_find_nsegment)(newbrk);
      vg_assert(seg);

      if (seg->hasT)
         VG_(discard_translations)( newbrk, VG_(brk_limit) - newbrk,
                                    "do_brk(shrink)" );
      /* Since we're being lazy and not unmapping pages, we have to
         zero out the area, so that if it later comes back into
         circulation it reads as zeroes, just as if it had really been
         unmapped and remapped.  Be a bit paranoid and try hard to
         ensure we're not going to segfault by doing the write - check
         that both ends of the range are in the same segment and that
         the segment is writable. */
      NSegment const * seg2;

      if (VG_(brk_limit) > VG_(brk_base))
         seg2 = VG_(am_find_nsegment)( VG_(brk_limit)-1 );
      else
         seg2 = VG_(am_find_nsegment)( VG_(brk_limit) );
      vg_assert(seg2);

      if (seg == seg2 && seg->hasW)
         VG_(memset)( (void*)newbrk, 0, VG_(brk_limit) - newbrk );
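
      /* The excess pages above the new limit stay mapped; only the
         recorded limit moves. */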
      VG_(brk_limit) = newbrk;
      return newbrk;
   }

   /* Otherwise we're growing the data segment.  Find the segment
      holding its current end. */
   if (VG_(brk_limit) > VG_(brk_base))
      /* Look at the segment containing the last in-use byte. */
      aseg = VG_(am_find_nsegment)( VG_(brk_limit)-1 );
   else
      /* The segment is currently empty; look at the one containing
         the brk base itself. */
      aseg = VG_(am_find_nsegment)( VG_(brk_limit) );

   /* These should be assured by setup_client_dataseg in m_main. */
   vg_assert(aseg);
   vg_assert(aseg->kind == SkAnonC);

   if (newbrk <= aseg->end + 1) {
      /* still fits within the anon segment. */
      VG_(brk_limit) = newbrk;
      return newbrk;
   }
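
   /* The request goes past the end of the anon segment, so the segment
      has to be extended.  Round the new break up to a page boundary
      and work out how much extra mapping is needed. */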

   newbrkP = VG_PGROUNDUP(newbrk);
   delta = newbrkP - (aseg->end + 1);
   vg_assert(delta > 0);
   vg_assert(VG_IS_PAGE_ALIGNED(delta));