349087 Fix UNKNOWN task message [id 3410, to mach_task_self(),
reply 0x........] (task_set_special_port)
349626 Implemented additional Xen hypercalls
+349874 Fix typos in source code
n-i-bz Provide implementations of certain compiler builtins to support
 compilers that may not provide them
n-i-bz Old STABS code is still being compiled, but never used. Remove it.
* This involves a possibly different address, but is handled by
* looking up a BB keyed by (obj_node, file offset).
*
- * bbIn==0 is possible for artifical BB without real code.
+ * bbIn==0 is possible for artificial BB without real code.
* Such a BB is created when returning to an unknown function.
*/
BB* CLG_(get_bb)(Addr addr, IRSB* bbIn, /*OUT*/ Bool *seen_before)
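
/* A self-contained sketch of the (obj_node, file offset) keyed lookup
   described above.  All names here are illustrative stand-ins, not
   Callgrind's real ones. */
#include <stddef.h>
#include <stdint.h>

typedef struct ObjNode ObjNode;          /* one node per mapped object  */
typedef struct BBNode {
   ObjNode*       obj;                   /* object containing the BB    */
   size_t         offset;                /* file offset of the BB       */
   struct BBNode* next;                  /* hash chain                  */
} BBNode;

#define N_BUCKETS 4096
static BBNode* bb_table[N_BUCKETS];

static BBNode* lookup_bb(ObjNode* obj, size_t offset)
{
   size_t h = ((uintptr_t)obj ^ (uintptr_t)offset) % N_BUCKETS;
   for (BBNode* bb = bb_table[h]; bb != NULL; bb = bb->next)
      if (bb->obj == obj && bb->offset == offset)
         return bb;                      /* the *seen_before case       */
   return NULL;                          /* caller allocates a new BB   */
}
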
*/
CLG_ASSERT(jcc->from->bb->obj == jcc->to->bb->obj);
- /* only print if target position info is usefull */
+ /* only print if target position info is useful */
if (!CLG_(clo).dump_instr && !CLG_(clo).dump_bb && target.line==0) {
jcc->call_counter = 0;
return;
}
if (CLG_(clo).dump_bbs) VG_(fprintf)(fp, "\n");
- /* when every cost was immediatly written, we must have done so,
+ /* when every cost was immediately written, we must have done so,
* as this function is only called when there's cost in a BBCC
*/
CLG_ASSERT(something_written);
(void) VG_(get_data_description)( ai->Addr.Variable.descr1,
ai->Addr.Variable.descr2, a );
- /* If there's nothing in descr1/2, free them. Why is it safe to to
+ /* If there's nothing in descr1/2, free them. Why is it safe to
VG_(indexXA) at zero here? Because VG_(get_data_description)
guarantees to zero terminate descr1/2 regardless of the outcome
of the call. So there's always at least one element in each XA
case SkResvn: {
if (seg->smode != SmUpper) return False;
- /* If the the abutting segment towards higher addresses is an SkAnonC
+ /* If the abutting segment towards higher addresses is an SkAnonC
segment, then ADDR is a future stack pointer. */
const NSegment *next = VG_(am_next_nsegment)(seg, /*forward*/ True);
if (next == NULL || next->kind != SkAnonC) return False;
}
/* Ok, success with the kernel. Update our structures. */
- nsegments[segR].start += delta;
- nsegments[segA].end += delta;
- aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
+ NSegment seg_copy = nsegments[segA];
+ seg_copy.end += delta;
+ add_segment(&seg_copy);
} else {
}
/* Ok, success with the kernel. Update our structures. */
- nsegments[segR].end -= delta;
- nsegments[segA].start -= delta;
- aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
+ NSegment seg_copy = nsegments[segA];
+ seg_copy.start -= delta;
+ add_segment(&seg_copy);
}
AM_SANITY_CHECK;
Free slots are chained together in a singly linked list. An index of
zero indicates the end of the chain. Note that zero cannot conflict
- with an index into the string table as the minumum index is at least
+ with an index into the string table as the minimum index is at least
four (see above).
The typical way to traverse the segment names is:
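
/* A rough, self-contained illustration of the free-slot chain just
   described; the exact slot layout (a 32-bit 'next' index stored at
   the start of each free slot) is an assumption made for this sketch. */
#include <string.h>

static char         strtab[65536];   /* the string table                */
static unsigned int freelist = 0;    /* index of first free slot; 0=end */

static void walk_free_slots(void)
{
   unsigned int ix = freelist;
   while (ix != 0) {                 /* index 0 terminates the chain    */
      unsigned int next;             /* the chained "next" index        */
      memcpy(&next, strtab + ix, sizeof next);
      /* ... a real allocator would inspect or reuse the slot here ...  */
      ix = next;
   }
}
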
static DebugInfo* debugInfo_list = NULL;
-/* Find 'di' in the debugInfo_list and move it one step closer the the
+/* Find 'di' in the debugInfo_list and move it one step closer to the
front of the list, so as to make subsequent searches for it
cheaper. When used in a controlled way, makes a major improvement
in some DebugInfo-search-intensive situations, most notably stack
It seems to me that the Intel Fortran compiler generates bad
DWARF2 line info code: It sets "is_stmt" of the state machine in
- the the line info reader to be always false. Thus, there is
- never a statement boundary generated and therefore never a
- instruction range/line number mapping generated for valgrind.
+ the line info reader to be always false. Thus, there is never
+ a statement boundary generated and therefore never an instruction
+ range/line number mapping generated for valgrind.
Please have a look at the DWARF2 specification, Ch. 6.2
(x86.ddj.com/ftp/manuals/tools/dwarf.pdf). Perhaps I understand
I just had a look at the GDB DWARF2 reader... They completely
ignore "is_stmt" when recording line info ;-) That's the reason
- "objdump -S" works on files from the the intel fortran compiler.
+ "objdump -S" works on files from the intel fortran compiler.
Therefore: */
info.li_default_is_stmt = True;
}
}
- /* Move on the the next DIE. */
+ /* Move on to the next DIE. */
die_offset += die_szb;
} /* Looping over DIEs */
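
/* A schematic version of the loop this hunk sits in: walk a
   .debug_info buffer DIE by DIE, advancing by each DIE's size.
   'read_die_size' is a hypothetical stand-in for the real DWARF
   decoding. */
#include <stddef.h>

extern size_t read_die_size(const unsigned char* die);  /* hypothetical */

static void walk_dies(const unsigned char* buf, size_t buf_szb)
{
   size_t die_offset = 0;
   while (die_offset < buf_szb) {      /* looping over DIEs             */
      size_t die_szb = read_die_size(buf + die_offset);
      if (die_szb == 0) break;         /* malformed input; give up      */
      /* ... process the DIE at buf + die_offset here ...               */
      die_offset += die_szb;           /* move on to the next DIE       */
   }
}
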
(DW_AT_subprogram), and for those, we also note the GExpr
derived from its DW_AT_frame_base attribute, if any.
Consequently it should be possible to find, for any
- variable's DIE, the GExpr for the the containing function's
+ variable's DIE, the GExpr for the containing function's
DW_AT_frame_base by scanning back through the stack to find
the nearest entry associated with a function. This somewhat
elaborate scheme is provided so as to make it possible to
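
/* A self-contained sketch of the scan-back described above: walk the
   parse stack from the top towards the bottom and return the GExpr of
   the nearest enclosing function entry.  The types shown are
   illustrative, not the real readdwarf3.c ones. */
typedef struct {
   int   is_function;        /* entry comes from a DW_TAG_subprogram?  */
   void* frame_base_gexpr;   /* GExpr from DW_AT_frame_base, or NULL   */
} StackEnt;

static void* find_frame_base(const StackEnt* stack, int top)
{
   for (int i = top; i >= 0; i--)       /* scan back through the stack */
      if (stack[i].is_function)
         return stack[i].frame_base_gexpr;
   return 0;                            /* no enclosing function found */
}
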
if (is_decl && (!is_spec)) {
/* It's a DW_AT_declaration. We require the name but
nothing else. */
- /* JRS 2012-06-28: following discussion w/ tromey, if the the
- type doesn't have name, just make one up, and accept it.
+ /* JRS 2012-06-28: following discussion w/ tromey, if the
+ type doesn't have a name, just make one up, and accept it.
It might be referred to by other DIEs, so ignoring it
doesn't seem like a safe option. */
GET_EXTAB_U32(data, extbl_data);
if (!(data & ARM_EXIDX_COMPACT)) {
// This denotes a "generic model" handler. That will involve
- // executing arbitary machine code, which is something we
+ // executing arbitrary machine code, which is something we
// can't represent here; hence reject it.
return ExCantRepresent;
}
extbl_data++;
}
- // Now look at the the handler table entry. The first word is
- // |data| and subsequent words start at |*extbl_data|. The number
- // of extra words to use is |extra|, provided that the personality
+ // Now look at the handler table entry. The first word is |data|
+ // and subsequent words start at |*extbl_data|. The number of
+ // extra words to use is |extra|, provided that the personality
// allows extra words. Even if it does, none may be available --
// extra_allowed is the maximum number of extra words allowed. */
if (pers == 0) {
/* This file manages the data structures built by the debuginfo
system. These are: the top level SegInfo list. For each SegInfo,
- there are tables for for address-to-symbol mappings,
+ there are tables for address-to-symbol mappings,
address-to-src-file/line mappings, and address-to-CFI-info
mappings.
*/
/* Iterate over occurrences of __, allowing names and types to have a
"__" sequence in them. We must start with the first (not the last)
- occurrence, since "__" most often occur between independent mangled
- parts, hence starting at the last occurence inside a signature
+ occurrence, since "__" most often occurs between independent mangled
+ parts, hence starting at the last occurrence inside a signature
might get us a "successful" demangling of the signature. */
while (scan[2])
}
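
/* A tiny illustration of why the scan must start at the first "__":
   with a name like "pkg__proc__t", splitting at the last occurrence
   would "demangle" only the trailing fragment.  This example input is
   hypothetical, not the demangler's real format. */
#include <stdio.h>
#include <string.h>

int main(void)
{
   const char* sym   = "pkg__proc__t";
   const char* first = strstr(sym, "__");          /* first "__"        */
   const char* last  = strrchr(sym, '_') - 1;      /* start of last "__" */
   printf("first at %d, last at %d\n",
          (int)(first - sym), (int)(last - sym));
   return 0;
}
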
/* busy > 0 when gdbserver is currently being called.
- busy is used to to avoid vgdb invoking gdbserver
- while gdbserver by Valgrind. */
+ busy is used to avoid vgdb invoking gdbserver
+ while gdbserver is being invoked by Valgrind. */
static volatile int busy = 0;
" v.do expensive_sanity_check_general : do an expensive sanity check now\n"
" v.info gdbserver_status : show gdbserver status\n"
" v.info memory [aspacemgr] : show valgrind heap memory stats\n"
-" (with aspacemgr arg, also shows valgrind segments on log ouput)\n"
+" (with aspacemgr arg, also shows valgrind segments on log output)\n"
" v.info exectxt : show stacktraces and stats of all execontexts\n"
" v.info scheduler : show valgrind thread state and stacktrace\n"
" v.info stats : show various valgrind and tool stats\n"
Returns True if the address of the variable could be found.
*tls_addr is then set to this address.
Returns False if tls support is not available for this arch, or
- if an error occured. *tls_addr is set to NULL. */
+ if an error occurred. *tls_addr is set to NULL. */
extern Bool valgrind_get_tls_addr (ThreadState *tst,
CORE_ADDR offset,
CORE_ADDR lm,
}
/* If we're unlucky, the alignment constraints for the fast case
- above won't apply, and we'll have to to it all here. Hence the
+ above won't apply, and we'll have to do it all here. Hence the
unrolling. */
while (sz >= 4) {
d[0] = s[0];
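
/* A self-contained sketch of the slow path referred to above: copy
   four bytes per iteration, then mop up the 0..3 trailing bytes. */
#include <stddef.h>

static void copy_bytes_unrolled(unsigned char* d,
                                const unsigned char* s, size_t sz)
{
   while (sz >= 4) {            /* unrolled: four bytes per trip        */
      d[0] = s[0];
      d[1] = s[1];
      d[2] = s[2];
      d[3] = s[3];
      d += 4; s += 4; sz -= 4;
   }
   while (sz > 0) {             /* remaining tail, one byte at a time   */
      *d++ = *s++;
      sz--;
   }
}
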
/* Helper function for VG_(machine_get_hwcaps), assumes the SIGILL/etc
- * handlers are installed. Determines the the sizes affected by dcbz
+ * handlers are installed. Determines the sizes affected by dcbz
* and dcbzl instructions and updates the given VexArchInfo structure
* accordingly.
*
/*
The idea for malloc_aligned() is to allocate a big block, base, and
- then split it into two parts: frag, which is returned to the the
- free pool, and align, which is the bit we're really after. Here's
+ then split it into two parts: frag, which is returned to the free
+ pool, and align, which is the bit we're really after. Here's
a picture. L and H denote the block lower and upper overheads, in
bytes. The details are gruesome. Note it is slightly complicated
because the initial request to generate base may return a bigger
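
/* A simplified, self-contained sketch of the base/frag/align split
   described above.  The real allocator hands 'frag' back to its free
   pool and accounts for the L and H overheads; here only the address
   arithmetic is shown, for a power-of-two 'align'. */
#include <stdint.h>
#include <stdlib.h>

static void* malloc_aligned_sketch(size_t align, size_t size)
{
   /* Over-allocate so that an aligned block of 'size' bytes must fit
      somewhere inside 'base'. */
   uintptr_t base = (uintptr_t)malloc(size + align - 1);
   if (base == 0)
      return NULL;
   /* Round up to the next multiple of 'align'; the skipped bytes
      below this point are the 'frag' part of the picture. */
   uintptr_t aligned = (base + align - 1) & ~((uintptr_t)align - 1);
   return (void*)aligned;
}
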
}
/* Note that the (void*) casts below are a kludge which stops
- compilers complaining about the fact that the the replacement
+ compilers complaining about the fact that the replacement
functions aren't really of the right type. */
static vki_malloc_zone_t vg_default_zone = {
NULL, // reserved1
struct vki_ucontext *ucp = &frame->rs_uc;
if (0)
- VG_(printf)("destory signal frame; sp = %lx, "
+ VG_(printf)("destroy signal frame; sp = %lx, "
" %pc = %lx, status=%d\n",
(Addr)frame, tst->arch.vex.guest_pc, tst->status);
So what we have to do, when doing any syscall which SfMayBlock, is to
quickly switch in the SCSS-specified signal mask just before the
syscall, and switch it back just afterwards, and hope that we don't
- get caught up in some wierd race condition. This is the primary
+ get caught up in some weird race condition. This is the primary
purpose of the ultra-magical pieces of assembly code in
coregrind/m_syswrap/syscall-<plat>.S
*/
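
/* A plain-C rendering of the mask dance those assembly fragments
   perform.  The C version is only an illustration: the switch must be
   atomic with respect to signal delivery, which is precisely why the
   real code lives in syscall-<plat>.S. */
#include <signal.h>

static long do_blocking_syscall(const sigset_t* scss_mask,
                                long (*the_syscall)(void))
{
   sigset_t saved;
   long     res;
   sigprocmask(SIG_SETMASK, scss_mask, &saved);   /* switch mask in    */
   res = the_syscall();                           /* may block here    */
   sigprocmask(SIG_SETMASK, &saved, NULL);        /* switch it back    */
   return res;
}
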
static Stack *current_stack;
-/* Find 'st' in the stacks_list and move it one step closer the the
+/* Find 'st' in the stacks_list and move it one step closer to the
front of the list, so as to make subsequent searches for it
cheaper. */
static void move_Stack_one_step_forward ( Stack* st )
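
/* A generic, self-contained version of the move-one-step-forward
   heuristic used here and by the debugInfo_list helper above: swap the
   found node with its predecessor, so frequently searched nodes
   migrate towards the head of the list. */
typedef struct Node { struct Node* next; /* plus payload */ } Node;

static void move_one_step_forward(Node** head, Node* n)
{
   if (*head == n) return;              /* already at the front        */
   Node* prev_prev = 0;
   Node* prev      = *head;
   while (prev && prev->next != n) {    /* find n's predecessor        */
      prev_prev = prev;
      prev      = prev->next;
   }
   if (!prev) return;                   /* n is not in the list        */
   prev->next = n->next;                /* unlink n ...                */
   n->next    = prev;                   /* ... and put it before prev  */
   if (prev_prev) prev_prev->next = n;
   else           *head = n;
}
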
/*
When a client clones, we need to keep track of the new thread. This means:
- 1. allocate a ThreadId+ThreadState+stack for the the thread
+ 1. allocate a ThreadId+ThreadState+stack for the thread
2. initialize the thread's new VCPU state
/*
When a client clones, we need to keep track of the new thread. This means:
- 1. allocate a ThreadId+ThreadState+stack for the the thread
+ 1. allocate a ThreadId+ThreadState+stack for the thread
2. initialize the thread's new VCPU state
/*
When a client clones, we need to keep track of the new thread. This means:
- 1. allocate a ThreadId+ThreadState+stack for the the thread
+ 1. allocate a ThreadId+ThreadState+stack for the thread
2. initialize the thread's new VCPU state
UWord arg4, UWord arg5, Off64T arg6);
/*
When a client clones, we need to keep track of the new thread. This means:
- 1. allocate a ThreadId+ThreadState+stack for the the thread
+ 1. allocate a ThreadId+ThreadState+stack for the thread
2. initialize the thread's new VCPU state
3. create the thread using the same args as the client requested,
but using the scheduler entrypoint for IP, and a separate stack
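
/* A minimal user-level illustration of step 3 using the raw Linux
   clone(2) wrapper: run an entry function on a separately allocated
   stack.  Valgrind's syswrap code does the same kind of thing, but
   substitutes the scheduler entry point and its own stack. */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdlib.h>

static int thread_main(void* arg) { return 0; }  /* the new thread      */

static int spawn_thread(void)
{
   size_t stack_szB = 64 * 1024;
   char*  stack     = malloc(stack_szB);
   if (stack == NULL) return -1;
   /* Stacks grow downwards on the usual targets, so pass the high
      end of the allocation as the initial stack pointer. */
   return clone(thread_main, stack + stack_szB,
                CLONE_VM | CLONE_FS | CLONE_FILES | SIGCHLD, NULL);
}
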
static SysRes sys_set_tls ( ThreadId tid, Addr tlsptr);
/* When a client clones, we need to keep track of the new thread. This means:
- 1. allocate a ThreadId+ThreadState+stack for the the thread
+ 1. allocate a ThreadId+ThreadState+stack for the thread
2. initialize the thread's new VCPU state
/*
When a client clones, we need to keep track of the new thread. This means:
- 1. allocate a ThreadId+ThreadState+stack for the the thread
+ 1. allocate a ThreadId+ThreadState+stack for the thread
2. initialize the thread's new VCPU state
/*
When a client clones, we need to keep track of the new thread. This means:
- 1. allocate a ThreadId+ThreadState+stack for the the thread
+ 1. allocate a ThreadId+ThreadState+stack for the thread
2. initialize the thread's new VCPU state
/*
When a client clones, we need to keep track of the new thread. This means:
- 1. allocate a ThreadId+ThreadState+stack for the the thread
+ 1. allocate a ThreadId+ThreadState+stack for the thread
2. initialize the thread's new VCPU state
static SysRes sys_set_tls ( ThreadId tid, Addr tlsptr );
/*
When a client clones, we need to keep track of the new thread. This means:
- 1. allocate a ThreadId+ThreadState+stack for the the thread
+ 1. allocate a ThreadId+ThreadState+stack for the thread
2. initialize the thread's new VCPU state
3. create the thread using the same args as the client requested,
but using the scheduler entrypoint for IP, and a separate stack
/*
When a client clones, we need to keep track of the new thread. This means:
- 1. allocate a ThreadId+ThreadState+stack for the the thread
+ 1. allocate a ThreadId+ThreadState+stack for the thread
2. initialize the thread's new VCPU state
// else print all errors and suppressions used.
extern void VG_(show_all_errors) ( Int verbosity, Bool xml );
-/* Print (in readable format) the last error that occured. */
+/* Print (in readable format) the last error that occurred. */
extern void VG_(show_last_error) ( void );
extern void VG_(show_error_counts_as_XML) ( void );
Note that gdbserver assumes that software breakpoint is supported
(as this will be done by re-instrumenting the code).
- Note that len is ignored for sofware breakpoints. hardware_breakpoint
- are not supported.
+ Note that len is ignored for software breakpoints. Hardware breakpoints
+ are not supported.
Returns True if the point has properly been inserted or removed
}
- /* Stops the given pid, wait for the process to be stopped.
- Returns True if succesful, False otherwise.
+ /* Stops the given pid and waits for the process to be stopped.
+ Returns True if successful, False otherwise.
msg is used in tracing and error reporting. */
static
Bool stop (pid_t pid, const char *msg)
}
- /* Attaches to given pid, wait for the process to be stopped.
- Returns True if succesful, False otherwise.
+ /* Attaches to the given pid and waits for the process to be stopped.
+ Returns True if successful, False otherwise.
msg is used in tracing and error reporting. */
static
Bool attach (pid_t pid, const char *msg)
res = ptrace (PTRACE_GETREGSET, pid, NT_PRSTATUS, &iovec);
if (res == 0) {
if (has_working_ptrace_getregset == -1) {
- // First call to PTRACE_GETREGSET succesful =>
+ // First call to PTRACE_GETREGSET successful =>
has_working_ptrace_getregset = 1;
DEBUG(1, "detected a working PTRACE_GETREGSET\n");
}
res = ptrace (PTRACE_GETREGS, pid, NULL, regs);
if (res == 0) {
if (has_working_ptrace_getregs == -1) {
- // First call to PTRACE_GETREGS succesful =>
+ // First call to PTRACE_GETREGS successful =>
has_working_ptrace_getregs = 1;
DEBUG(1, "detected a working PTRACE_GETREGS\n");
}
last invoke. */
if (invoked_written != written_by_vgdb_before_sleep) {
if (invoker_invoke_gdbserver(pid)) {
- /* If invoke succesful, no need to invoke again
+ /* If the invocation was successful, no need to invoke again
for the same value of written_by_vgdb_before_sleep. */
invoked_written = written_by_vgdb_before_sleep;
}
tl_assert(dri->size > 0);
(void) VG_(get_data_description)(descr1, descr2, dri->addr);
- /* If there's nothing in descr1/2, free them. Why is it safe to to
+ /* If there's nothing in descr1/2, free them. Why is it safe to
VG_(indexXA) at zero here? Because VG_(get_data_description)
guarantees to zero terminate descr1/2 regardless of the outcome
of the call. So there's always at least one element in each XA
/**
* Initialize the memory 'p' points at as a semaphore_info structure for the
- * client semaphore at client addres 'semaphore'.
+ * client semaphore at client address 'semaphore'.
*/
static
void drd_semaphore_initialize(struct semaphore_info* const p,
/**
* Create a new segment for thread tid and update the vector clock of the last
- * segment of this thread with the the vector clock of segment sg. Call this
+ * segment of this thread with the vector clock of segment sg. Call this
* function after thread tid had to wait because of thread synchronization
* until the memory accesses in the segment sg finished.
*/
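
/* A self-contained sketch of the vector clock update implied here:
   after the synchronisation, the new segment's clock is the
   element-wise maximum of the thread's clock and sg's clock.  The
   fixed-size array is an illustration; DRD's clocks grow on demand. */
#define N_THREADS 8

static void vc_combine(unsigned own[N_THREADS],
                       const unsigned other[N_THREADS])
{
   for (int i = 0; i < N_THREADS; i++)
      if (other[i] > own[i])
         own[i] = other[i];     /* element-wise maximum                */
}
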
1: "approx": collect one stack trace per (notional) segment, that
is, collect a stack trace for a thread every time its vector
- clock changes. This faciliates showing the bounds of the
+ clock changes. This facilitates showing the bounds of the
conflicting segment(s), with relatively small overhead.
2: "full": collect a stack trace every time the constraints for a
if (xe->XE.Race.h1_ct_mbsegendEC) {
VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
} else {
- emit( " <auxwhat>(the end of the the thread)</auxwhat>\n" );
+ emit( " <auxwhat>(the end of the thread)</auxwhat>\n" );
}
}
if (xe->XE.Race.h1_ct_mbsegendEC) {
VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
} else {
- emit( " (the end of the the thread)\n" );
+ emit( " (the end of the thread)\n" );
}
}
{
/* TID declares that any happens-before edges notionally stored in
USERTAG can be deleted. If (as would normally be the case) a
- SO is associated with USERTAG, then the assocation is removed
+ SO is associated with USERTAG, then the association is removed
and all resources associated with SO are freed. Importantly,
that frees up any VTSs stored in SO. */
if (SHOW_EVENTS >= 1)
a pair, (Thr*, ULong), but that takes 16 bytes on a 64-bit target.
We pack it into 64 bits by representing the Thr* using a ThrID, a
small integer (18 bits), and a 46 bit integer for the timestamp
- number. The 46/18 split is arbitary, but has the effect that
+ number. The 46/18 split is arbitrary, but has the effect that
Helgrind can only handle programs that create 2^18 or fewer threads
over their entire lifetime, and have no more than 2^46 timestamp
ticks (synchronisation operations on the same thread).
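
/* A self-contained sketch of the 46/18 packing just described; the
   field order (ThrID in the top 18 bits) is one possible layout, not
   necessarily libhb's. */
#include <stdint.h>

#define THRID_BITS 18
#define TYM_BITS   46          /* 18 + 46 == 64                        */

typedef uint64_t ScalarTS;     /* packed (ThrID, timestamp) pair       */

static ScalarTS pack_ts(uint32_t thrid, uint64_t tym)
{
   return ((uint64_t)thrid << TYM_BITS)
          | (tym & ((1ULL << TYM_BITS) - 1));
}

static uint32_t ts_thrid(ScalarTS ts) { return (uint32_t)(ts >> TYM_BITS); }
static uint64_t ts_tym  (ScalarTS ts) { return ts & ((1ULL << TYM_BITS) - 1); }
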
/* Now figure out when the next GC should be. We'll allow the
number of VTSs to double before GCing again. Except of course
that since we can't (or, at least, don't) shrink vts_tab, we
- can't set the threshhold value smaller than it. */
+ can't set the threshold value smaller than it. */
tl_assert(nFreed <= nTab);
nLive = nTab - nFreed;
tl_assert(nLive >= 0 && nLive <= nTab);
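
/* Schematically, the threshold update described above: let the live
   count double before the next GC, but never set the threshold below
   the current table size, since vts_tab never shrinks. */
static unsigned long next_gc_threshold(unsigned long nLive,
                                       unsigned long nTab)
{
   unsigned long at = 2 * nLive;    /* allow live VTSs to double       */
   return (at < nTab) ? nTab : at;  /* never below the table size      */
}
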
/* Find 'ec' in the RCEC list whose head pointer lives at 'headp' and
- move it one step closer the the front of the list, so as to make
+ move it one step closer to the front of the list, so as to make
subsequent searches for it cheaper. */
static void move_RCEC_one_step_forward ( RCEC** headp, RCEC* ec )
{
return a pointer to the copy. The caller can safely have 'example'
on its stack, since we will always return a pointer to a copy of
it, not to the original. Note that the inserted node will have .rc
- of zero and so the caller must immediatly increment it. */
+ of zero and so the caller must immediately increment it. */
__attribute__((noinline))
static RCEC* ctxt__find_or_add ( RCEC* example )
{
do_RCEC_GC();
/* If there are still no entries available (all the table entries are full),
- and we hit the threshhold point, then do a GC */
+ and we hit the threshold point, then do a GC */
Bool vts_tab_GC = vts_tab_freelist == VtsID_INVALID
&& VG_(sizeXA)( vts_tab ) >= vts_next_GC_at;
if (UNLIKELY (vts_tab_GC))
// 1. Leak check mode (searched == 0).
// -----------------------------------
// Scan a block of memory between [start, start+len). This range may
-// be bogus, inaccessable, or otherwise strange; we deal with it. For each
-// valid aligned word we assume it's a pointer to a chunk a push the chunk
+// be bogus, inaccessible, or otherwise strange; we deal with it. For each
+// valid aligned word we assume it's a pointer to a chunk and push the chunk
// onto the mark stack if so.
// clique is the "highest level clique" in which indirectly leaked blocks have
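
/* A self-contained sketch of the scan described above: visit every
   aligned word in [start, start+len) and hand plausible pointers to a
   chunk lookup.  'find_chunk' and 'push_mark_stack' are hypothetical
   stand-ins for memcheck's real helpers, and the real code also
   guards the load against unmapped pages. */
#include <stddef.h>
#include <stdint.h>

extern void* find_chunk(uintptr_t p);        /* hypothetical           */
extern void  push_mark_stack(void* chunk);   /* hypothetical           */

static void scan_range(uintptr_t start, size_t len)
{
   uintptr_t a   = (start + sizeof(uintptr_t) - 1)
                   & ~(uintptr_t)(sizeof(uintptr_t) - 1);
   uintptr_t end = start + len;
   for (; a + sizeof(uintptr_t) <= end; a += sizeof(uintptr_t)) {
      uintptr_t w  = *(const uintptr_t*)a;   /* read one aligned word  */
      void*     ch = find_chunk(w);          /* pointer to a chunk?    */
      if (ch)
         push_mark_stack(ch);                /* mark it reachable      */
   }
}
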
/* Optimisation: the loop below will check for each begin
of SM chunk if the chunk is fully unaddressable. The idea is to
skip efficiently such fully unaddressable SM chunks.
- So, we preferrably start the loop on a chunk boundary.
+ So we prefer to start the loop on a chunk boundary.
If the chunk is not fully unaddressable, we might be in
an unaddressable page. Again, the idea is to skip efficiently
- such unaddressable page : this is the "else" part.
+ such an unaddressable page: this is the "else" part.
* Some obscure uses of x86/amd64 byte registers can cause lossage
or confusion of origins. %AH .. %DH are treated as different
from, and unrelated to, their parent registers, %EAX .. %EDX.
- So some wierd sequences like
+ So some weird sequences like
movb undefined-value, %AH
movb defined-value, %AL
di->guard = guard;
/* Ideally the didn't-happen return value here would be all-ones
(all-undefined), so it'd be obvious if it got used
- inadvertantly. We can get by with the IR-mandated default
+ inadvertently. We can get by with the IR-mandated default
value (0b01 repeating, 0x55 etc) as that'll still look pretty
undefined if it ever leaks out. */
}
di->guard = guard;
/* Ideally the didn't-happen return value here would be
all-zeroes (unknown-origin), so it'd be harmless if it got
- used inadvertantly. We slum it out with the IR-mandated
+ used inadvertently. We slum it out with the IR-mandated
default value (0b01 repeating, 0x55 etc) as that'll probably
trump all legitimate otags via Max32, and it's pretty
obviously bogus. */
/* Check that a syscall's POST function gets called if it completes
due to being interrupted. nanosleep is used here, because it
writes a result even if it fails. wait*() could also be used,
- because they successully complete if interrupted by SIGCHLD.
+ because they successfully complete if interrupted by SIGCHLD.
*/
static void handler(int s)
{
}
printf ("after %d loops, last size block requested %lu\n", loop, bigsize);
- // verify if superblock fragmentation occured
+ // verify if superblock fragmentation occurred
// We consider that an arena of up to 3 times more than bigsize is ok.
{
#if defined(HAVE_MALLINFO)
fl.l_start = 0;
fl.l_len = 1;
- /* I'm assuming noone else tries to lock this! */
+ /* I'm assuming no one else tries to lock this! */
if (fcntl(fd, F_SETLK, &fl) != 0)
err(1, "Locking %s", file);
DEF_FP(mul)
DEF_ASM(fcom)
- DEF_ASM(fcom_1) /* non existant op, just to have a regular table */
+ DEF_ASM(fcom_1) /* non-existent op, just to have a regular table */
DEF_FP1(com)
DEF_FP(comp)
DEF_FP(mul)
DEF_ASM(fcom)
- DEF_ASM(fcom_1) /* non existant op, just to have a regular table */
+ DEF_ASM(fcom_1) /* non-existent op, just to have a regular table */
DEF_FP1(com)
DEF_FP(comp)
}
}
-/* generate prolog and epilog code for asm statment */
+/* generate prolog and epilog code for asm statement */
static void asm_gen_code(ASMOperand *operands, int nb_operands,
int nb_outputs, int is_output,
uint8_t *clobber_regs,