From 7a474c94552cd783e25d62f1ce88df10753c4eb7 Mon Sep 17 00:00:00 2001 From: Florian Krohm Date: Sun, 5 Jul 2015 21:53:33 +0000 Subject: [PATCH] Fix typos in source code. Patch by Dmitriy (olshevskiy87@bk.ru). Fixes BZ #349874 git-svn-id: svn://svn.valgrind.org/valgrind/trunk@15394 --- NEWS | 1 + callgrind/bb.c | 2 +- callgrind/dump.c | 4 ++-- coregrind/m_addrinfo.c | 2 +- coregrind/m_aspacemgr/aspacemgr-linux.c | 14 +++++++------- coregrind/m_aspacemgr/aspacemgr-segnames.c | 2 +- coregrind/m_debuginfo/debuginfo.c | 2 +- coregrind/m_debuginfo/readdwarf.c | 10 +++++----- coregrind/m_debuginfo/readdwarf3.c | 4 ++-- coregrind/m_debuginfo/readexidx.c | 8 ++++---- coregrind/m_debuginfo/storage.c | 2 +- coregrind/m_demangle/cplus-dem.c | 2 +- coregrind/m_gdbserver/m_gdbserver.c | 2 +- coregrind/m_gdbserver/server.c | 2 +- coregrind/m_gdbserver/target.h | 2 +- coregrind/m_libcbase.c | 2 +- coregrind/m_machine.c | 2 +- coregrind/m_mallocfree.c | 4 ++-- coregrind/m_replacemalloc/vg_replace_malloc.c | 2 +- coregrind/m_sigframe/sigframe-tilegx-linux.c | 2 +- coregrind/m_signals.c | 2 +- coregrind/m_stacks.c | 2 +- coregrind/m_syswrap/syswrap-amd64-linux.c | 2 +- coregrind/m_syswrap/syswrap-arm-linux.c | 2 +- coregrind/m_syswrap/syswrap-arm64-linux.c | 2 +- coregrind/m_syswrap/syswrap-mips32-linux.c | 2 +- coregrind/m_syswrap/syswrap-mips64-linux.c | 2 +- coregrind/m_syswrap/syswrap-ppc32-linux.c | 2 +- coregrind/m_syswrap/syswrap-ppc64-linux.c | 2 +- coregrind/m_syswrap/syswrap-s390x-linux.c | 2 +- coregrind/m_syswrap/syswrap-tilegx-linux.c | 2 +- coregrind/m_syswrap/syswrap-x86-linux.c | 2 +- coregrind/pub_core_errormgr.h | 2 +- coregrind/pub_core_gdbserver.h | 2 +- coregrind/vgdb-invoker-ptrace.c | 8 ++++---- coregrind/vgdb.c | 2 +- drd/drd_error.c | 2 +- drd/drd_semaphore.c | 2 +- drd/drd_thread.c | 2 +- helgrind/hg_basics.h | 2 +- helgrind/hg_errors.c | 4 ++-- helgrind/hg_main.c | 2 +- helgrind/libhb_core.c | 10 +++++----- memcheck/mc_leakcheck.c | 4 ++-- 
memcheck/mc_main.c | 2 +- memcheck/mc_translate.c | 4 ++-- memcheck/tests/post-syscall.c | 2 +- memcheck/tests/sbfragment.c | 2 +- none/tests/mmap_fcntl_bug.c | 2 +- perf/tinycc.c | 6 +++--- 50 files changed, 78 insertions(+), 77 deletions(-) diff --git a/NEWS b/NEWS index 9bbb318a7b..85e75299fc 100644 --- a/NEWS +++ b/NEWS @@ -248,6 +248,7 @@ where XXXXXX is the bug number as listed below. 349087 Fix UNKNOWN task message [id 3410, to mach_task_self(), reply 0x........] (task_set_special_port) 349626 Implemented additional Xen hypercalls +349874 Fix typos in source code n-i-bz Provide implementations of certain compiler builtins to support compilers who may not provide those n-i-bz Old STABS code is still being compiled, but never used. Remove it. diff --git a/callgrind/bb.c b/callgrind/bb.c index b9b3812ebf..d6d5efb587 100644 --- a/callgrind/bb.c +++ b/callgrind/bb.c @@ -234,7 +234,7 @@ obj_node* obj_of_address(Addr addr) * This involves a possibly different address, but is handled by * looking up a BB keyed by (obj_node, file offset). * - * bbIn==0 is possible for artifical BB without real code. + * bbIn==0 is possible for artificial BB without real code. * Such a BB is created when returning to an unknown function. 
*/ BB* CLG_(get_bb)(Addr addr, IRSB* bbIn, /*OUT*/ Bool *seen_before) diff --git a/callgrind/dump.c b/callgrind/dump.c index c6d4de11bd..8f3213840b 100644 --- a/callgrind/dump.c +++ b/callgrind/dump.c @@ -589,7 +589,7 @@ static void fprint_jcc(VgFile *fp, jCC* jcc, AddrPos* curr, AddrPos* last, */ CLG_ASSERT(jcc->from->bb->obj == jcc->to->bb->obj); - /* only print if target position info is usefull */ + /* only print if target position info is useful */ if (!CLG_(clo).dump_instr && !CLG_(clo).dump_bb && target.line==0) { jcc->call_counter = 0; return; @@ -817,7 +817,7 @@ static Bool fprint_bbcc(VgFile *fp, BBCC* bbcc, AddrPos* last) } if (CLG_(clo).dump_bbs) VG_(fprintf)(fp, "\n"); - /* when every cost was immediatly written, we must have done so, + /* when every cost was immediately written, we must have done so, * as this function is only called when there's cost in a BBCC */ CLG_ASSERT(something_written); diff --git a/coregrind/m_addrinfo.c b/coregrind/m_addrinfo.c index e6f58ee721..46b4487a07 100644 --- a/coregrind/m_addrinfo.c +++ b/coregrind/m_addrinfo.c @@ -100,7 +100,7 @@ void VG_(describe_addr) ( Addr a, /*OUT*/AddrInfo* ai ) (void) VG_(get_data_description)( ai->Addr.Variable.descr1, ai->Addr.Variable.descr2, a ); - /* If there's nothing in descr1/2, free them. Why is it safe to to + /* If there's nothing in descr1/2, free them. Why is it safe to VG_(indexXA) at zero here? Because VG_(get_data_description) guarantees to zero terminate descr1/2 regardless of the outcome of the call. 
So there's always at least one element in each XA diff --git a/coregrind/m_aspacemgr/aspacemgr-linux.c b/coregrind/m_aspacemgr/aspacemgr-linux.c index a3df416aa7..a41d4dc093 100644 --- a/coregrind/m_aspacemgr/aspacemgr-linux.c +++ b/coregrind/m_aspacemgr/aspacemgr-linux.c @@ -1288,7 +1288,7 @@ Bool VG_(am_addr_is_in_extensible_client_stack)( Addr addr ) case SkResvn: { if (seg->smode != SmUpper) return False; - /* If the the abutting segment towards higher addresses is an SkAnonC + /* If the abutting segment towards higher addresses is an SkAnonC segment, then ADDR is a future stack pointer. */ const NSegment *next = VG_(am_next_nsegment)(seg, /*forward*/ True); if (next == NULL || next->kind != SkAnonC) return False; @@ -2829,9 +2829,9 @@ const NSegment *VG_(am_extend_into_adjacent_reservation_client)( Addr addr, } /* Ok, success with the kernel. Update our structures. */ - nsegments[segR].start += delta; - nsegments[segA].end += delta; - aspacem_assert(nsegments[segR].start <= nsegments[segR].end); + NSegment seg_copy = nsegments[segA]; + seg_copy.end += delta; + add_segment(&seg_copy); } else { @@ -2868,9 +2868,9 @@ const NSegment *VG_(am_extend_into_adjacent_reservation_client)( Addr addr, } /* Ok, success with the kernel. Update our structures. */ - nsegments[segR].end -= delta; - nsegments[segA].start -= delta; - aspacem_assert(nsegments[segR].start <= nsegments[segR].end); + NSegment seg_copy = nsegments[segA]; + seg_copy.start -= delta; + add_segment(&seg_copy); } AM_SANITY_CHECK; diff --git a/coregrind/m_aspacemgr/aspacemgr-segnames.c b/coregrind/m_aspacemgr/aspacemgr-segnames.c index 73a9472006..761608d912 100644 --- a/coregrind/m_aspacemgr/aspacemgr-segnames.c +++ b/coregrind/m_aspacemgr/aspacemgr-segnames.c @@ -92,7 +92,7 @@ Free slots are chained together in a singly linked list. An index of zero indicates the end of the chain. 
Note that zero cannot conflict - with an index into the string table as the minumum index is at least + with an index into the string table as the minimum index is at least four (see above). The typical way to traverse the segment names is: diff --git a/coregrind/m_debuginfo/debuginfo.c b/coregrind/m_debuginfo/debuginfo.c index 0dc78a3420..8f2178e204 100644 --- a/coregrind/m_debuginfo/debuginfo.c +++ b/coregrind/m_debuginfo/debuginfo.c @@ -118,7 +118,7 @@ static void cfsi_m_cache__invalidate ( void ); static DebugInfo* debugInfo_list = NULL; -/* Find 'di' in the debugInfo_list and move it one step closer the the +/* Find 'di' in the debugInfo_list and move it one step closer to the front of the list, so as to make subsequent searches for it cheaper. When used in a controlled way, makes a major improvement in some DebugInfo-search-intensive situations, most notably stack diff --git a/coregrind/m_debuginfo/readdwarf.c b/coregrind/m_debuginfo/readdwarf.c index 4a0f75019c..3909327782 100644 --- a/coregrind/m_debuginfo/readdwarf.c +++ b/coregrind/m_debuginfo/readdwarf.c @@ -454,9 +454,9 @@ void read_dwarf2_lineblock ( struct _DebugInfo* di, It seems to me that the Intel Fortran compiler generates bad DWARF2 line info code: It sets "is_stmt" of the state machine in - the the line info reader to be always false. Thus, there is - never a statement boundary generated and therefore never a - instruction range/line number mapping generated for valgrind. + the line info reader to be always false. Thus, there is never + a statement boundary generated and therefore never an instruction + range/line number mapping generated for valgrind. Please have a look at the DWARF2 specification, Ch. 6.2 (x86.ddj.com/ftp/manuals/tools/dwarf.pdf). Perhaps I understand @@ -464,7 +464,7 @@ void read_dwarf2_lineblock ( struct _DebugInfo* di, I just had a look at the GDB DWARF2 reader... 
They completely ignore "is_stmt" when recording line info ;-) That's the reason - "objdump -S" works on files from the the intel fortran compiler. + "objdump -S" works on files from the intel fortran compiler. Therefore: */ info.li_default_is_stmt = True; @@ -1437,7 +1437,7 @@ void ML_(read_debuginfo_dwarf1) ( } } - /* Move on the the next DIE. */ + /* Move on to the next DIE. */ die_offset += die_szb; } /* Looping over DIEs */ diff --git a/coregrind/m_debuginfo/readdwarf3.c b/coregrind/m_debuginfo/readdwarf3.c index 89f247aa51..85070e4dde 100644 --- a/coregrind/m_debuginfo/readdwarf3.c +++ b/coregrind/m_debuginfo/readdwarf3.c @@ -1614,7 +1614,7 @@ typedef (DW_AT_subprogram), and for those, we also note the GExpr derived from its DW_AT_frame_base attribute, if any. Consequently it should be possible to find, for any - variable's DIE, the GExpr for the the containing function's + variable's DIE, the GExpr for the containing function's DW_AT_frame_base by scanning back through the stack to find the nearest entry associated with a function. This somewhat elaborate scheme is provided so as to make it possible to @@ -3411,7 +3411,7 @@ static void parse_type_DIE ( /*MOD*/XArray* /* of TyEnt */ tyents, if (is_decl && (!is_spec)) { /* It's a DW_AT_declaration. We require the name but nothing else. */ - /* JRS 2012-06-28: following discussion w/ tromey, if the the + /* JRS 2012-06-28: following discussion w/ tromey, if the type doesn't have name, just make one up, and accept it. It might be referred to by other DIEs, so ignoring it doesn't seem like a safe option. */ diff --git a/coregrind/m_debuginfo/readexidx.c b/coregrind/m_debuginfo/readexidx.c index 727deb32c8..35d07c5687 100644 --- a/coregrind/m_debuginfo/readexidx.c +++ b/coregrind/m_debuginfo/readexidx.c @@ -287,7 +287,7 @@ ExExtractResult ExtabEntryExtract ( MemoryRange* mr_exidx, GET_EXTAB_U32(data, extbl_data); if (!(data & ARM_EXIDX_COMPACT)) { // This denotes a "generic model" handler.
That will involve - // executing arbitary machine code, which is something we + // executing arbitrary machine code, which is something we // can't represent here; hence reject it. return ExCantRepresent; } @@ -299,9 +299,9 @@ ExExtractResult ExtabEntryExtract ( MemoryRange* mr_exidx, extbl_data++; } - // Now look at the the handler table entry. The first word is - // |data| and subsequent words start at |*extbl_data|. The number - // of extra words to use is |extra|, provided that the personality + // Now look at the handler table entry. The first word is |data| + // and subsequent words start at |*extbl_data|. The number of + // extra words to use is |extra|, provided that the personality // allows extra words. Even if it does, none may be available -- // extra_allowed is the maximum number of extra words allowed. */ if (pers == 0) { diff --git a/coregrind/m_debuginfo/storage.c b/coregrind/m_debuginfo/storage.c index 89afca60d5..f4b1d2fdcc 100644 --- a/coregrind/m_debuginfo/storage.c +++ b/coregrind/m_debuginfo/storage.c @@ -32,7 +32,7 @@ /* This file manages the data structures built by the debuginfo system. These are: the top level SegInfo list. For each SegInfo, - there are tables for for address-to-symbol mappings, + there are tables for address-to-symbol mappings, address-to-src-file/line mappings, and address-to-CFI-info mappings. */ diff --git a/coregrind/m_demangle/cplus-dem.c b/coregrind/m_demangle/cplus-dem.c index 7fc2722f99..46cc3b5363 100644 --- a/coregrind/m_demangle/cplus-dem.c +++ b/coregrind/m_demangle/cplus-dem.c @@ -2666,7 +2666,7 @@ iterate_demangle_function (struct work_stuff *work, const char **mangled, /* Iterate over occurrences of __, allowing names and types to have a "__" sequence in them. 
We must start with the first (not the last) occurrence, since "__" most often occur between independent mangled - parts, hence starting at the last occurence inside a signature + parts, hence starting at the last occurrence inside a signature might get us a "successful" demangling of the signature. */ while (scan[2]) diff --git a/coregrind/m_gdbserver/m_gdbserver.c b/coregrind/m_gdbserver/m_gdbserver.c index 888340ae09..263bd4acce 100644 --- a/coregrind/m_gdbserver/m_gdbserver.c +++ b/coregrind/m_gdbserver/m_gdbserver.c @@ -773,7 +773,7 @@ static void call_gdbserver ( ThreadId tid , CallReason reason) } /* busy > 0 when gdbserver is currently being called. - busy is used to to avoid vgdb invoking gdbserver + busy is used to avoid vgdb invoking gdbserver while gdbserver by Valgrind. */ static volatile int busy = 0; diff --git a/coregrind/m_gdbserver/server.c b/coregrind/m_gdbserver/server.c index 485050b521..29290e31ae 100644 --- a/coregrind/m_gdbserver/server.c +++ b/coregrind/m_gdbserver/server.c @@ -244,7 +244,7 @@ int handle_gdb_valgrind_command (char *mon, OutputSink *sink_wanted_at_return) " v.do expensive_sanity_check_general : do an expensive sanity check now\n" " v.info gdbserver_status : show gdbserver status\n" " v.info memory [aspacemgr] : show valgrind heap memory stats\n" -" (with aspacemgr arg, also shows valgrind segments on log ouput)\n" +" (with aspacemgr arg, also shows valgrind segments on log output)\n" " v.info exectxt : show stacktraces and stats of all execontexts\n" " v.info scheduler : show valgrind thread state and stacktrace\n" " v.info stats : show various valgrind and tool stats\n" diff --git a/coregrind/m_gdbserver/target.h b/coregrind/m_gdbserver/target.h index 1cacc1eaf1..2ea8de9ca6 100644 --- a/coregrind/m_gdbserver/target.h +++ b/coregrind/m_gdbserver/target.h @@ -211,7 +211,7 @@ extern int valgrind_remove_watchpoint (char type, CORE_ADDR addr, int len); Returns True if the address of the variable could be found. 
*tls_addr is then set to this address. Returns False if tls support is not available for this arch, or - if an error occured. *tls_addr is set to NULL. */ + if an error occurred. *tls_addr is set to NULL. */ extern Bool valgrind_get_tls_addr (ThreadState *tst, CORE_ADDR offset, CORE_ADDR lm, diff --git a/coregrind/m_libcbase.c b/coregrind/m_libcbase.c index 60cad365eb..0f55b08001 100644 --- a/coregrind/m_libcbase.c +++ b/coregrind/m_libcbase.c @@ -653,7 +653,7 @@ void* VG_(memcpy) ( void *dest, const void *src, SizeT sz ) } /* If we're unlucky, the alignment constraints for the fast case - above won't apply, and we'll have to to it all here. Hence the + above won't apply, and we'll have to do it all here. Hence the unrolling. */ while (sz >= 4) { d[0] = s[0]; diff --git a/coregrind/m_machine.c b/coregrind/m_machine.c index 99afb7ddc4..e7b826fe31 100644 --- a/coregrind/m_machine.c +++ b/coregrind/m_machine.c @@ -526,7 +526,7 @@ static void handler_unsup_insn ( Int x ) { /* Helper function for VG_(machine_get_hwcaps), assumes the SIGILL/etc - * handlers are installed. Determines the the sizes affected by dcbz + * handlers are installed. Determines the sizes affected by dcbz * and dcbzl instructions and updates the given VexArchInfo structure * accordingly. * diff --git a/coregrind/m_mallocfree.c b/coregrind/m_mallocfree.c index 1c4ac50454..42f029c8f0 100644 --- a/coregrind/m_mallocfree.c +++ b/coregrind/m_mallocfree.c @@ -2113,8 +2113,8 @@ void VG_(arena_free) ( ArenaId aid, void* ptr ) /* The idea for malloc_aligned() is to allocate a big block, base, and - then split it into two parts: frag, which is returned to the the - free pool, and align, which is the bit we're really after. Here's + then split it into two parts: frag, which is returned to the free + pool, and align, which is the bit we're really after. Here's a picture. L and H denote the block lower and upper overheads, in bytes. The details are gruesome. 
Note it is slightly complicated because the initial request to generate base may return a bigger diff --git a/coregrind/m_replacemalloc/vg_replace_malloc.c b/coregrind/m_replacemalloc/vg_replace_malloc.c index 060cb15a61..fefb8e9d81 100644 --- a/coregrind/m_replacemalloc/vg_replace_malloc.c +++ b/coregrind/m_replacemalloc/vg_replace_malloc.c @@ -1055,7 +1055,7 @@ static size_t my_malloc_size ( void* zone, void* ptr ) } /* Note that the (void*) casts below are a kludge which stops - compilers complaining about the fact that the the replacement + compilers complaining about the fact that the replacement functions aren't really of the right type. */ static vki_malloc_zone_t vg_default_zone = { NULL, // reserved1 diff --git a/coregrind/m_sigframe/sigframe-tilegx-linux.c b/coregrind/m_sigframe/sigframe-tilegx-linux.c index 3448ff8860..4d4fc94c9e 100644 --- a/coregrind/m_sigframe/sigframe-tilegx-linux.c +++ b/coregrind/m_sigframe/sigframe-tilegx-linux.c @@ -262,7 +262,7 @@ void VG_(sigframe_destroy)( ThreadId tid, Bool isRT ) struct vki_ucontext *ucp = &frame->rs_uc; if (0) - VG_(printf)("destory signal frame; sp = %lx, " + VG_(printf)("destroy signal frame; sp = %lx, " " %pc = %lx, status=%d\n", (Addr)frame, tst->arch.vex.guest_pc, tst->status); diff --git a/coregrind/m_signals.c b/coregrind/m_signals.c index 78e2a6d0dc..4aa24f5a55 100644 --- a/coregrind/m_signals.c +++ b/coregrind/m_signals.c @@ -160,7 +160,7 @@ So what we have to do, when doing any syscall which SfMayBlock, is to quickly switch in the SCSS-specified signal mask just before the syscall, and switch it back just afterwards, and hope that we don't - get caught up in some wierd race condition. This is the primary + get caught up in some weird race condition. 
This is the primary purpose of the ultra-magical pieces of assembly code in coregrind/m_syswrap/syscall-.S diff --git a/coregrind/m_stacks.c b/coregrind/m_stacks.c index 9a2319f54f..1a56f41448 100644 --- a/coregrind/m_stacks.c +++ b/coregrind/m_stacks.c @@ -103,7 +103,7 @@ static UWord next_id; /* Next id we hand out to a newly registered stack */ */ static Stack *current_stack; -/* Find 'st' in the stacks_list and move it one step closer the the +/* Find 'st' in the stacks_list and move it one step closer to the front of the list, so as to make subsequent searches for it cheaper. */ static void move_Stack_one_step_forward ( Stack* st ) diff --git a/coregrind/m_syswrap/syswrap-amd64-linux.c b/coregrind/m_syswrap/syswrap-amd64-linux.c index de3288e521..fcdd9b498e 100644 --- a/coregrind/m_syswrap/syswrap-amd64-linux.c +++ b/coregrind/m_syswrap/syswrap-amd64-linux.c @@ -188,7 +188,7 @@ static void setup_child ( ThreadArchState*, ThreadArchState* ); /* When a client clones, we need to keep track of the new thread. This means: - 1. allocate a ThreadId+ThreadState+stack for the the thread + 1. allocate a ThreadId+ThreadState+stack for the thread 2. initialize the thread's new VCPU state diff --git a/coregrind/m_syswrap/syswrap-arm-linux.c b/coregrind/m_syswrap/syswrap-arm-linux.c index 32c41cad4f..4de7c41562 100644 --- a/coregrind/m_syswrap/syswrap-arm-linux.c +++ b/coregrind/m_syswrap/syswrap-arm-linux.c @@ -154,7 +154,7 @@ static SysRes sys_set_tls ( ThreadId tid, Addr tlsptr ); /* When a client clones, we need to keep track of the new thread. This means: - 1. allocate a ThreadId+ThreadState+stack for the the thread + 1. allocate a ThreadId+ThreadState+stack for the thread 2. 
initialize the thread's new VCPU state diff --git a/coregrind/m_syswrap/syswrap-arm64-linux.c b/coregrind/m_syswrap/syswrap-arm64-linux.c index 7551e8ae05..bb75c23cf2 100644 --- a/coregrind/m_syswrap/syswrap-arm64-linux.c +++ b/coregrind/m_syswrap/syswrap-arm64-linux.c @@ -198,7 +198,7 @@ static void assign_guest_tls(ThreadId ctid, Addr tlsptr); /* When a client clones, we need to keep track of the new thread. This means: - 1. allocate a ThreadId+ThreadState+stack for the the thread + 1. allocate a ThreadId+ThreadState+stack for the thread 2. initialize the thread's new VCPU state diff --git a/coregrind/m_syswrap/syswrap-mips32-linux.c b/coregrind/m_syswrap/syswrap-mips32-linux.c index 7a715766cb..d477a9c8ad 100644 --- a/coregrind/m_syswrap/syswrap-mips32-linux.c +++ b/coregrind/m_syswrap/syswrap-mips32-linux.c @@ -227,7 +227,7 @@ static SysRes mips_PRE_sys_mmap (ThreadId tid, UWord arg4, UWord arg5, Off64T arg6); /* When a client clones, we need to keep track of the new thread. This means: - 1. allocate a ThreadId+ThreadState+stack for the the thread + 1. allocate a ThreadId+ThreadState+stack for the thread 2. initialize the thread's new VCPU state 3. create the thread using the same args as the client requested, but using the scheduler entrypoint for IP, and a separate stack diff --git a/coregrind/m_syswrap/syswrap-mips64-linux.c b/coregrind/m_syswrap/syswrap-mips64-linux.c index 3a5e979609..8eea1fb1cd 100644 --- a/coregrind/m_syswrap/syswrap-mips64-linux.c +++ b/coregrind/m_syswrap/syswrap-mips64-linux.c @@ -185,7 +185,7 @@ static void setup_child ( ThreadArchState *, ThreadArchState *); static SysRes sys_set_tls ( ThreadId tid, Addr tlsptr); /* When a client clones, we need to keep track of the new thread. This means: - 1. allocate a ThreadId+ThreadState+stack for the the thread + 1. allocate a ThreadId+ThreadState+stack for the thread 2. 
initialize the thread's new VCPU state diff --git a/coregrind/m_syswrap/syswrap-ppc32-linux.c b/coregrind/m_syswrap/syswrap-ppc32-linux.c index 9c9937bdd4..a80e554515 100644 --- a/coregrind/m_syswrap/syswrap-ppc32-linux.c +++ b/coregrind/m_syswrap/syswrap-ppc32-linux.c @@ -221,7 +221,7 @@ static void setup_child ( ThreadArchState*, ThreadArchState* ); /* When a client clones, we need to keep track of the new thread. This means: - 1. allocate a ThreadId+ThreadState+stack for the the thread + 1. allocate a ThreadId+ThreadState+stack for the thread 2. initialize the thread's new VCPU state diff --git a/coregrind/m_syswrap/syswrap-ppc64-linux.c b/coregrind/m_syswrap/syswrap-ppc64-linux.c index e2432fd7c8..5799b08a71 100644 --- a/coregrind/m_syswrap/syswrap-ppc64-linux.c +++ b/coregrind/m_syswrap/syswrap-ppc64-linux.c @@ -371,7 +371,7 @@ static void setup_child ( ThreadArchState*, ThreadArchState* ); /* When a client clones, we need to keep track of the new thread. This means: - 1. allocate a ThreadId+ThreadState+stack for the the thread + 1. allocate a ThreadId+ThreadState+stack for the thread 2. initialize the thread's new VCPU state diff --git a/coregrind/m_syswrap/syswrap-s390x-linux.c b/coregrind/m_syswrap/syswrap-s390x-linux.c index db374f4a14..9cd68a9c4f 100644 --- a/coregrind/m_syswrap/syswrap-s390x-linux.c +++ b/coregrind/m_syswrap/syswrap-s390x-linux.c @@ -194,7 +194,7 @@ static void setup_child ( /*OUT*/ ThreadArchState *child, /* When a client clones, we need to keep track of the new thread. This means: - 1. allocate a ThreadId+ThreadState+stack for the the thread + 1. allocate a ThreadId+ThreadState+stack for the thread 2. 
initialize the thread's new VCPU state diff --git a/coregrind/m_syswrap/syswrap-tilegx-linux.c b/coregrind/m_syswrap/syswrap-tilegx-linux.c index fbdf4bfd4a..cbc8e3fb57 100644 --- a/coregrind/m_syswrap/syswrap-tilegx-linux.c +++ b/coregrind/m_syswrap/syswrap-tilegx-linux.c @@ -319,7 +319,7 @@ static void setup_child ( ThreadArchState *, ThreadArchState * ); static SysRes sys_set_tls ( ThreadId tid, Addr tlsptr ); /* When a client clones, we need to keep track of the new thread. This means: - 1. allocate a ThreadId+ThreadState+stack for the the thread + 1. allocate a ThreadId+ThreadState+stack for the thread 2. initialize the thread's new VCPU state 3. create the thread using the same args as the client requested, but using the scheduler entrypoint for IP, and a separate stack diff --git a/coregrind/m_syswrap/syswrap-x86-linux.c b/coregrind/m_syswrap/syswrap-x86-linux.c index e08af76ff3..168d4fc5e4 100644 --- a/coregrind/m_syswrap/syswrap-x86-linux.c +++ b/coregrind/m_syswrap/syswrap-x86-linux.c @@ -195,7 +195,7 @@ static SysRes sys_set_thread_area ( ThreadId, vki_modify_ldt_t* ); /* When a client clones, we need to keep track of the new thread. This means: - 1. allocate a ThreadId+ThreadState+stack for the the thread + 1. allocate a ThreadId+ThreadState+stack for the thread 2. initialize the thread's new VCPU state diff --git a/coregrind/pub_core_errormgr.h b/coregrind/pub_core_errormgr.h index 110c206a65..5454882012 100644 --- a/coregrind/pub_core_errormgr.h +++ b/coregrind/pub_core_errormgr.h @@ -57,7 +57,7 @@ extern void VG_(load_suppressions) ( void ); // else print all errors and suppressions used. extern void VG_(show_all_errors) ( Int verbosity, Bool xml ); -/* Print (in readable format) the last error that occured. */ +/* Print (in readable format) the last error that occurred. 
*/ extern void VG_(show_last_error) ( void ); extern void VG_(show_error_counts_as_XML) ( void ); diff --git a/coregrind/pub_core_gdbserver.h b/coregrind/pub_core_gdbserver.h index 6ec9dafc64..71fc711d85 100644 --- a/coregrind/pub_core_gdbserver.h +++ b/coregrind/pub_core_gdbserver.h @@ -83,7 +83,7 @@ void VG_(set_ptracer)(void); Note that gdbserver assumes that software breakpoint is supported (as this will be done by re-instrumenting the code). - Note that len is ignored for sofware breakpoints. hardware_breakpoint + Note that len is ignored for software breakpoints. hardware_breakpoint are not supported. Returns True if the point has properly been inserted or removed diff --git a/coregrind/vgdb-invoker-ptrace.c b/coregrind/vgdb-invoker-ptrace.c index 8e7e42e5af..ca882c35f9 100644 --- a/coregrind/vgdb-invoker-ptrace.c +++ b/coregrind/vgdb-invoker-ptrace.c @@ -329,7 +329,7 @@ Bool waitstopped (pid_t pid, int signal_expected, const char *msg) } /* Stops the given pid, wait for the process to be stopped. - Returns True if succesful, False otherwise. + Returns True if successful, False otherwise. msg is used in tracing and error reporting. */ static Bool stop (pid_t pid, const char *msg) @@ -348,7 +348,7 @@ Bool stop (pid_t pid, const char *msg) } /* Attaches to given pid, wait for the process to be stopped. - Returns True if succesful, False otherwise. + Returns True if successful, False otherwise. msg is used in tracing and error reporting. 
*/ static Bool attach (pid_t pid, const char *msg) @@ -572,7 +572,7 @@ Bool getregs (pid_t pid, void *regs, long regs_bsz) res = ptrace (PTRACE_GETREGSET, pid, NT_PRSTATUS, &iovec); if (res == 0) { if (has_working_ptrace_getregset == -1) { - // First call to PTRACE_GETREGSET succesful => + // First call to PTRACE_GETREGSET successful => has_working_ptrace_getregset = 1; DEBUG(1, "detected a working PTRACE_GETREGSET\n"); } @@ -607,7 +607,7 @@ Bool getregs (pid_t pid, void *regs, long regs_bsz) res = ptrace (PTRACE_GETREGS, pid, NULL, regs); if (res == 0) { if (has_working_ptrace_getregs == -1) { - // First call to PTRACE_GETREGS succesful => + // First call to PTRACE_GETREGS successful => has_working_ptrace_getregs = 1; DEBUG(1, "detected a working PTRACE_GETREGS\n"); } diff --git a/coregrind/vgdb.c b/coregrind/vgdb.c index 9057e468db..a85f26b963 100644 --- a/coregrind/vgdb.c +++ b/coregrind/vgdb.c @@ -290,7 +290,7 @@ void *invoke_gdbserver_in_valgrind(void *v_pid) last invoke. */ if (invoked_written != written_by_vgdb_before_sleep) { if (invoker_invoke_gdbserver(pid)) { - /* If invoke succesful, no need to invoke again + /* If invoke successful, no need to invoke again for the same value of written_by_vgdb_before_sleep. */ invoked_written = written_by_vgdb_before_sleep; } diff --git a/drd/drd_error.c b/drd/drd_error.c index c510b8820d..eb9268957a 100644 --- a/drd/drd_error.c +++ b/drd/drd_error.c @@ -174,7 +174,7 @@ void drd_report_data_race(const Error* const err, tl_assert(dri->size > 0); (void) VG_(get_data_description)(descr1, descr2, dri->addr); - /* If there's nothing in descr1/2, free them. Why is it safe to to + /* If there's nothing in descr1/2, free them. Why is it safe to VG_(indexXA) at zero here? Because VG_(get_data_description) guarantees to zero terminate descr1/2 regardless of the outcome of the call. 
So there's always at least one element in each XA diff --git a/drd/drd_semaphore.c b/drd/drd_semaphore.c index 40662b950f..90d460951f 100644 --- a/drd/drd_semaphore.c +++ b/drd/drd_semaphore.c @@ -90,7 +90,7 @@ void DRD_(semaphore_set_trace)(const Bool trace_semaphore) /** * Initialize the memory 'p' points at as a semaphore_info structure for the - * client semaphore at client addres 'semaphore'. + * client semaphore at client address 'semaphore'. */ static void drd_semaphore_initialize(struct semaphore_info* const p, diff --git a/drd/drd_thread.c b/drd/drd_thread.c index 247dd91eb1..ede5b825a1 100644 --- a/drd/drd_thread.c +++ b/drd/drd_thread.c @@ -1171,7 +1171,7 @@ static void thread_combine_vc_sync(DrdThreadId tid, const Segment* sg) /** * Create a new segment for thread tid and update the vector clock of the last - * segment of this thread with the the vector clock of segment sg. Call this + * segment of this thread with the vector clock of segment sg. Call this * function after thread tid had to wait because of thread synchronization * until the memory accesses in the segment sg finished. */ diff --git a/helgrind/hg_basics.h b/helgrind/hg_basics.h index 737cfa0eba..41ee803e12 100644 --- a/helgrind/hg_basics.h +++ b/helgrind/hg_basics.h @@ -80,7 +80,7 @@ extern Bool HG_(clo_cmp_race_err_addrs); 1: "approx": collect one stack trace per (notional) segment, that is, collect a stack trace for a thread every time its vector - clock changes. This faciliates showing the bounds of the + clock changes. This facilitates showing the bounds of the conflicting segment(s), with relatively small overhead. 
2: "full": collect a stack trace every time the constraints for a diff --git a/helgrind/hg_errors.c b/helgrind/hg_errors.c index d2058d5467..49414549e8 100644 --- a/helgrind/hg_errors.c +++ b/helgrind/hg_errors.c @@ -1212,7 +1212,7 @@ void HG_(pp_Error) ( const Error* err ) if (xe->XE.Race.h1_ct_mbsegendEC) { VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC ); } else { - emit( " (the end of the the thread)\n" ); + emit( " (the end of the thread)\n" ); } } @@ -1256,7 +1256,7 @@ void HG_(pp_Error) ( const Error* err ) if (xe->XE.Race.h1_ct_mbsegendEC) { VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC ); } else { - emit( " (the end of the the thread)\n" ); + emit( " (the end of the thread)\n" ); } } diff --git a/helgrind/hg_main.c b/helgrind/hg_main.c index ccf1a56a57..b1a0aa32ad 100644 --- a/helgrind/hg_main.c +++ b/helgrind/hg_main.c @@ -3310,7 +3310,7 @@ void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag ) { /* TID declares that any happens-before edges notionally stored in USERTAG can be deleted. If (as would normally be the case) a - SO is associated with USERTAG, then the assocation is removed + SO is associated with USERTAG, then the association is removed and all resources associated with SO are freed. Importantly, that frees up any VTSs stored in SO. */ if (SHOW_EVENTS >= 1) diff --git a/helgrind/libhb_core.c b/helgrind/libhb_core.c index bbc19859ea..96f731c172 100644 --- a/helgrind/libhb_core.c +++ b/helgrind/libhb_core.c @@ -143,7 +143,7 @@ typedef ULong SVal; a pair, (Thr*, ULong), but that takes 16 bytes on a 64-bit target. We pack it into 64 bits by representing the Thr* using a ThrID, a small integer (18 bits), and a 46 bit integer for the timestamp - number. The 46/18 split is arbitary, but has the effect that + number. 
The 46/18 split is arbitrary, but has the effect that Helgrind can only handle programs that create 2^18 or fewer threads over their entire lifetime, and have no more than 2^46 timestamp ticks (synchronisation operations on the same thread). @@ -3014,7 +3014,7 @@ static void vts_tab__do_GC ( Bool show_stats ) /* Now figure out when the next GC should be. We'll allow the number of VTSs to double before GCing again. Except of course that since we can't (or, at least, don't) shrink vts_tab, we - can't set the threshhold value smaller than it. */ + can't set the threshold value smaller than it. */ tl_assert(nFreed <= nTab); nLive = nTab - nFreed; tl_assert(nLive >= 0 && nLive <= nTab); @@ -4306,7 +4306,7 @@ static void ctxt__rcinc ( RCEC* ec ) /* Find 'ec' in the RCEC list whose head pointer lives at 'headp' and - move it one step closer the the front of the list, so as to make + move it one step closer to the front of the list, so as to make subsequent searches for it cheaper. */ static void move_RCEC_one_step_forward ( RCEC** headp, RCEC* ec ) { @@ -4354,7 +4354,7 @@ static void move_RCEC_one_step_forward ( RCEC** headp, RCEC* ec ) return a pointer to the copy. The caller can safely have 'example' on its stack, since we will always return a pointer to a copy of it, not to the original. Note that the inserted node will have .rc - of zero and so the caller must immediatly increment it. */ + of zero and so the caller must immediately increment it. 
*/ __attribute__((noinline)) static RCEC* ctxt__find_or_add ( RCEC* example ) { @@ -7067,7 +7067,7 @@ void libhb_maybe_GC ( void ) do_RCEC_GC(); /* If there are still no entries available (all the table entries are full), - and we hit the threshhold point, then do a GC */ + and we hit the threshold point, then do a GC */ Bool vts_tab_GC = vts_tab_freelist == VtsID_INVALID && VG_(sizeXA)( vts_tab ) >= vts_next_GC_at; if (UNLIKELY (vts_tab_GC)) diff --git a/memcheck/mc_leakcheck.c b/memcheck/mc_leakcheck.c index 791a5c1b4f..ffa932b25f 100644 --- a/memcheck/mc_leakcheck.c +++ b/memcheck/mc_leakcheck.c @@ -943,7 +943,7 @@ void scan_all_valid_memory_catcher ( Int sigNo, Addr addr ) // 1. Leak check mode (searched == 0). // ----------------------------------- // Scan a block of memory between [start, start+len). This range may -// be bogus, inaccessable, or otherwise strange; we deal with it. For each +// be bogus, inaccessible, or otherwise strange; we deal with it. For each // valid aligned word we assume it's a pointer to a chunk a push the chunk // onto the mark stack if so. // clique is the "highest level clique" in which indirectly leaked blocks have @@ -995,7 +995,7 @@ lc_scan_memory(Addr start, SizeT len, Bool is_prior_definite, /* Optimisation: the loop below will check for each begin of SM chunk if the chunk is fully unaddressable. The idea is to skip efficiently such fully unaddressable SM chunks. - So, we preferrably start the loop on a chunk boundary. + So, we preferably start the loop on a chunk boundary. If the chunk is not fully unaddressable, we might be in an unaddressable page. Again, the idea is to skip efficiently such unaddressable page : this is the "else" part. 
diff --git a/memcheck/mc_main.c b/memcheck/mc_main.c index 0d6ebcd3a2..386c2319c2 100644 --- a/memcheck/mc_main.c +++ b/memcheck/mc_main.c @@ -2263,7 +2263,7 @@ void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len ) * Some obscure uses of x86/amd64 byte registers can cause lossage or confusion of origins. %AH .. %DH are treated as different from, and unrelated to, their parent registers, %EAX .. %EDX. - So some wierd sequences like + So some weird sequences like movb undefined-value, %AH movb defined-value, %AL diff --git a/memcheck/mc_translate.c b/memcheck/mc_translate.c index 892b43b3be..1501f09694 100644 --- a/memcheck/mc_translate.c +++ b/memcheck/mc_translate.c @@ -4728,7 +4728,7 @@ IRAtom* expr2vbits_Load_WRK ( MCEnv* mce, di->guard = guard; /* Ideally the didn't-happen return value here would be all-ones (all-undefined), so it'd be obvious if it got used - inadvertantly. We can get by with the IR-mandated default + inadvertently. We can get by with the IR-mandated default value (0b01 repeating, 0x55 etc) as that'll still look pretty undefined if it ever leaks out. */ } @@ -6786,7 +6786,7 @@ static IRAtom* gen_guarded_load_b ( MCEnv* mce, Int szB, di->guard = guard; /* Ideally the didn't-happen return value here would be all-zeroes (unknown-origin), so it'd be harmless if it got - used inadvertantly. We slum it out with the IR-mandated + used inadvertently. We slum it out with the IR-mandated default value (0b01 repeating, 0x55 etc) as that'll probably trump all legitimate otags via Max32, and it's pretty obviously bogus. */ diff --git a/memcheck/tests/post-syscall.c b/memcheck/tests/post-syscall.c index cc473503e4..6655ab4985 100644 --- a/memcheck/tests/post-syscall.c +++ b/memcheck/tests/post-syscall.c @@ -9,7 +9,7 @@ /* Check that a syscall's POST function gets called if it completes due to being interrupted. nanosleep is used here, because it writes a result even if it fails. 
wait*() could also be used, - because they successully complete if interrupted by SIGCHLD. + because they successfully complete if interrupted by SIGCHLD. */ static void handler(int s) { diff --git a/memcheck/tests/sbfragment.c b/memcheck/tests/sbfragment.c index 16a9e6af7b..545fe3d0d6 100644 --- a/memcheck/tests/sbfragment.c +++ b/memcheck/tests/sbfragment.c @@ -75,7 +75,7 @@ int main(int argc, char *argv[]) } printf ("after %d loops, last size block requested %lu\n", loop, bigsize); - // verify if superblock fragmentation occured + // verify if superblock fragmentation occurred // We consider that an arena of up to 3 times more than bigsize is ok. { #if defined(HAVE_MALLINFO) diff --git a/none/tests/mmap_fcntl_bug.c b/none/tests/mmap_fcntl_bug.c index f49639a176..8bfae64431 100644 --- a/none/tests/mmap_fcntl_bug.c +++ b/none/tests/mmap_fcntl_bug.c @@ -40,7 +40,7 @@ int main(int argc, char *argv[]) fl.l_start = 0; fl.l_len = 1; - /* I'm assuming noone else tries to lock this! */ + /* I'm assuming no one else tries to lock this! 
*/ if (fcntl(fd, F_SETLK, &fl) != 0) err(1, "Locking %s", file); diff --git a/perf/tinycc.c b/perf/tinycc.c index e4ca37f32d..50479230d4 100644 --- a/perf/tinycc.c +++ b/perf/tinycc.c @@ -3080,7 +3080,7 @@ enum tcc_token { DEF_FP(mul) DEF_ASM(fcom) - DEF_ASM(fcom_1) /* non existant op, just to have a regular table */ + DEF_ASM(fcom_1) /* non existent op, just to have a regular table */ DEF_FP1(com) DEF_FP(comp) @@ -4412,7 +4412,7 @@ static const char tcc_keywords[] = DEF_FP(mul) DEF_ASM(fcom) - DEF_ASM(fcom_1) /* non existant op, just to have a regular table */ + DEF_ASM(fcom_1) /* non existent op, just to have a regular table */ DEF_FP1(com) DEF_FP(comp) @@ -16970,7 +16970,7 @@ static void subst_asm_operand(CString *add_str, } } -/* generate prolog and epilog code for asm statment */ +/* generate prolog and epilog code for asm statement */ static void asm_gen_code(ASMOperand *operands, int nb_operands, int nb_outputs, int is_output, uint8_t *clobber_regs, -- 2.47.2