Adjust shift to compensate. */
UInt lanebits = 8 << size;
shift = lanebits - shift;
- vassert(shift >= 0 && shift < lanebits);
+ vassert(shift < lanebits);
const HChar* nm = NULL;
/**/ if (bitU == 0 && opcode == BITS5(0,1,1,1,0)) nm = "sqshl";
else if (bitU == 1 && opcode == BITS5(0,1,1,1,0)) nm = "uqshl";
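These hunks all drop the ">= 0" half of assertions whose operand is
unsigned: shift, size, dres.len and the others are UInts, so the first
conjunct is vacuously true, and compilers such as GCC diagnose it under
-Wtype-limits. A standalone sketch (not Valgrind code) of the pattern
being cleaned up:

   #include <assert.h>

   typedef unsigned int UInt;

   void check(UInt shift, UInt lanebits)
   {
      /* gcc -Wtype-limits warns that an unsigned ">= 0" comparison is
         always true -- the first conjunct is dead code. */
      assert(shift >= 0 && shift < lanebits);

      /* The equivalent, warning-free form this patch switches to. */
      assert(shift < lanebits);
   }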
Bool isQ = bitQ == 1;
Bool ok = getLaneInfo_IMMH_IMMB(&shift, &size, immh, immb);
if (!ok || (bitQ == 0 && size == X11)) return False;
- vassert(size >= 0 && size <= 3);
+ vassert(size <= 3);
/* The shift encoding has opposite sign for the leftwards case.
Adjust shift to compensate. */
UInt lanebits = 8 << size;
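For the leftwards fixup above, a worked example may help; it assumes (as
the surrounding code suggests) that getLaneInfo_IMMH_IMMB returns the
right-shift reading of the immediate, shift = 2*lanebits - (immh:immb):

   typedef unsigned int UInt;

   /* For 8-bit lanes (size == 0, lanebits == 8) and immh:immb == 10:
      right-shift reading 2*8 - 10 == 6, fixup 8 - 6 == 2, matching the
      architectural left-shift amount (immh:immb) - lanebits == 2. */
   static UInt leftShiftAmount(UInt immhimmb, UInt lanebits)
   {
      UInt shift = 2 * lanebits - immhimmb; /* right-shift reading */
      return lanebits - shift;              /* == immhimmb - lanebits */
   }

The adjusted vassert(shift < lanebits) then holds, since 2 < 8.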
gate = False;
if (gate) {
- vassert(ix >= 0 && ix < 7);
+ vassert(ix < 7);
const HChar* inames[7]
= { "sha1c", "sha1p", "sha1m", "sha1su0",
"sha256h", "sha256h2", "sha256su1" };
len2check = vge->len[i];
/* stay sane */
- vassert(len2check >= 0 && len2check < 2000/*arbitrary*/);
+ vassert(len2check < 2000/*arbitrary*/);
/* Skip the check if the translation involved zero bytes */
if (len2check == 0)
/* stay sane ... */
vassert(dres.whatNext == Dis_StopHere || dres.whatNext == Dis_Continue);
/* ... disassembled insn length is sane ... */
- vassert(dres.len >= 0 && dres.len <= 24);
+ vassert(dres.len <= 24);
/* If the disassembly function passed us a hint, take note of it. */
if (LIKELY(dres.hint == Dis_HintNone)) {
n_frames = VG_(get_StackTrace)( tid, ips, N_FRAMES,
sps, fps, 0/*first_ip_delta*/ );
- vg_assert(n_frames >= 0 && n_frames <= N_FRAMES);
+ vg_assert(n_frames <= N_FRAMES);
for (j = 0; j < n_frames; j++) {
if (consider_vars_in_frame( dname1, dname2,
ep, data_addr,
DiCursor cur = get_AsciiZ(&c);
str = ML_(addStrFromCursor)( cc->di, cur );
dir_xa_ix = get_ULEB128( &c );
- if (dirname_xa != NULL
- && dir_xa_ix >= 0 && dir_xa_ix < VG_(sizeXA) (dirname_xa))
+ if (dirname_xa != NULL && dir_xa_ix < VG_(sizeXA) (dirname_xa))
dirname = *(HChar**)VG_(indexXA) ( dirname_xa, dir_xa_ix );
else
dirname = NULL;
sort_loctab_and_loctab_fndn_ix (di);
for (i = 0; i < ((Word)di->loctab_used)-1; i++) {
- vg_assert(di->loctab[i].size < 10000);
/* If two adjacent entries overlap, truncate the first. */
if (di->loctab[i].addr + di->loctab[i].size > di->loctab[i+1].addr) {
/* Do this in signed int32 because the actual .size fields
// The tool-specific part
ThreadId tid; // Initialised by core
- ExeContext* where; // Initialised by core
ErrorKind ekind; // Used by ALL. Must be in the range (0..)
+ ExeContext* where; // Initialised by core
Addr addr; // Used frequently
const HChar* string; // Used frequently
void* extra; // For any tool-specific extras
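Moving "where" below "ekind" (and, further down, "leak_search_gen" up
beside "match_leak_kinds") plausibly packs the 32-bit members together,
saving pointer-alignment padding on LP64 targets. A sketch with
hypothetical stand-in types:

   #include <stdio.h>

   typedef unsigned int UInt;   /* stand-in for ThreadId / ErrorKind */

   struct Before { UInt tid; void* where; UInt ekind; void* addr; };
   /* LP64: 4 + 4(pad) + 8 + 4 + 4(pad) + 8 = 32 bytes */

   struct After  { UInt tid; UInt ekind; void* where; void* addr; };
   /* LP64: 4 + 4 + 8 + 8 = 24 bytes */

   int main(void)
   {
      printf("%zu %zu\n", sizeof(struct Before), sizeof(struct After));
      return 0;
   }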
static struct valgrind_target_ops low_target = {
-1, // Must be computed at init time.
- regs,
7, //RSP
+ regs,
transfer_register,
get_pc,
set_pc,
static struct valgrind_target_ops low_target = {
num_regs,
- regs,
13, //SP
+ regs,
transfer_register,
get_pc,
set_pc,
static struct valgrind_target_ops low_target = {
num_regs,
- regs,
31, //SP
+ regs,
transfer_register,
get_pc,
set_pc,
static struct valgrind_target_ops low_target = {
num_regs,
- regs,
29, //sp = r29, which is register offset 29 in regs
+ regs,
transfer_register,
get_pc,
set_pc,
static struct valgrind_target_ops low_target = {
num_regs,
- regs,
29, //sp = r29, which is register offset 29 in regs
+ regs,
transfer_register,
get_pc,
set_pc,
static struct valgrind_target_ops low_target = {
num_regs,
- regs,
29, //sp = r29, which is register offset 29 in regs
+ regs,
transfer_register,
get_pc,
set_pc,
static struct valgrind_target_ops low_target = {
num_regs,
- regs,
1, //r1
+ regs,
transfer_register,
get_pc,
set_pc,
static struct valgrind_target_ops low_target = {
num_regs,
- regs,
1, //r1
+ regs,
transfer_register,
get_pc,
set_pc,
static struct valgrind_target_ops low_target = {
-1, // Override at init time.
- regs,
17, //sp = r15, which is register offset 17 in regs
+ regs,
transfer_register,
get_pc,
set_pc,
static struct valgrind_target_ops low_target = {
num_regs,
- regs,
4, //ESP
+ regs,
transfer_register,
get_pc,
set_pc,
struct valgrind_target_ops
{
int num_regs;
- struct reg *reg_defs;
int stack_pointer_regno;
/* register number of the stack pointer register */
-
+
+ struct reg *reg_defs;
+
/* transfer the register regno from/to valgrind (guest state)
to/from buf
according to transfer_direction.
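Every per-architecture low_target above is initialised positionally, so
moving reg_defs within valgrind_target_ops forces the same swap in each
initialiser. C99 designated initialisers would be immune to such
reordering; a toy, self-contained sketch (stand-in struct and values,
not the real valgrind_target_ops):

   #include <stdio.h>

   struct target_ops {
      int   num_regs;
      int   stack_pointer_regno;
      void* reg_defs;
   };

   static int toy_regs[4];

   static struct target_ops low_target = {
      .num_regs            = 4,
      .stack_pointer_regno = 4,        /* ESP on x86, say */
      .reg_defs            = toy_regs, /* order-independent */
   };

   int main(void)
   {
      printf("%d %d\n", low_target.num_regs, low_target.stack_pointer_regno);
      return 0;
   }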
}
else if VG_BOOL_CLOM(cloPD, arg, "--sym-offsets", VG_(clo_sym_offsets)) {}
- else if VG_BINT_CLOM(cloPD, arg, "--progress-interval",
- VG_(clo_progress_interval), 0, 3600) {}
+ else if VG_BUINT_CLOM(cloPD, arg, "--progress-interval",
+ VG_(clo_progress_interval), 3600) {}
else if VG_BOOL_CLO(arg, "--read-inline-info", VG_(clo_read_inline_info)) {}
else if VG_BOOL_CLO(arg, "--read-var-info", VG_(clo_read_var_info)) {}
for (i = 0; i < tteC->n_tte2ec; i++) {
ec_num = tteC->tte2ec_ec[i];
ec_idx = tteC->tte2ec_ix[i];
- vg_assert(ec_num >= 0 && ec_num < ECLASS_N);
+ vg_assert(ec_num < ECLASS_N);
vg_assert(ec_idx >= 0);
vg_assert(ec_idx < sec->ec2tte_used[ec_num]);
/* Assert that the two links point at each other. */
if (sr_isError(res))
return sr_Err(res);
- vg_assert2(sr_Res(res) >= 0 && sr_Res(res) < EXE_HANDLER_COUNT,
+ vg_assert2(sr_Res(res) < EXE_HANDLER_COUNT,
"invalid VG_(pre_exec_check) result");
ret = (*exe_handlers[sr_Res(res)].load_fn)(fd, exe, info);
} \
res;}))
+// As above, but for unsigned int arguments with a lower bound of 0
+#define VG_BUINTN_CLOM(qq_mode, qq_base, qq_arg, qq_option, qq_var, qq_hi) \
+(VG_(check_clom) \
+ (qq_mode, qq_arg, qq_option, \
+ VG_STREQN(VG_(strlen)(qq_option)+1, qq_arg, qq_option"=")) && \
+ ({Bool res = True; \
+ const HChar* val = &(qq_arg)[ VG_(strlen)(qq_option)+1 ]; \
+ HChar* s; \
+ Long n = VG_(strtoll##qq_base)( val, &s ); \
+ (qq_var) = n; \
+ if ('\0' != s[0] || (qq_var) != n) { \
+ VG_(fmsg_bad_option)(qq_arg, \
+ "Invalid integer value '%s'\n", val); \
+ res = False; } \
+ /* Check bounds. */ \
+ if ((qq_var) > (qq_hi)) { \
+ VG_(fmsg_bad_option)(qq_arg, \
+ "'%s' argument must be <= %lld\n", \
+ (qq_option), (Long)(qq_hi)); \
+ res = False; \
+ } \
+ res;}))
+
// Bounded decimal integer arg, eg. --foo=100
#define VG_BINT_CLO(qq_arg, qq_option, qq_var, qq_lo, qq_hi) \
VG_BINTN_CLOM(cloP, 10, (qq_arg), qq_option, (qq_var), (qq_lo), (qq_hi))
#define VG_BINT_CLOM(qq_mode, qq_arg, qq_option, qq_var, qq_lo, qq_hi) \
VG_BINTN_CLOM(qq_mode, 10, (qq_arg), qq_option, (qq_var), (qq_lo), (qq_hi))
+#define VG_BUINT_CLOM(qq_mode, qq_arg, qq_option, qq_var, qq_hi) \
+ VG_BUINTN_CLOM(qq_mode, 10, (qq_arg), qq_option, (qq_var), (qq_hi))
// Bounded hexadecimal integer arg, eg. --foo=0x1fa8
#define VG_BHEX_CLO(qq_arg, qq_option, qq_var, qq_lo, qq_hi) \
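The new VG_BUINTN_CLOM follows the same statement-expression shape as
VG_BINTN_CLOM: parse the text after "option=", assign, then reject on
trailing junk, a value that does not round-trip through the (unsigned)
target variable, or a result above the bound. A standalone sketch of
that logic in plain libc (not the Valgrind macros):

   #include <stdio.h>
   #include <stdlib.h>

   static int parse_bounded_uint(const char* val, unsigned int* out,
                                 long long hi)
   {
      char* s;
      long long n = strtoll(val, &s, 10);
      *out = (unsigned int)n;
      if (*s != '\0' || (long long)*out != n) /* junk, negative, overflow */
         return 0;
      if (*out > hi)                          /* upper bound only */
         return 0;
      return 1;
   }

   int main(void)
   {
      unsigned int v;
      printf("%d\n", parse_bounded_uint("60",   &v, 3600)); /* 1: ok   */
      printf("%d\n", parse_bounded_uint("-1",   &v, 3600)); /* 0: neg  */
      printf("%d\n", parse_bounded_uint("9999", &v, 3600)); /* 0: > hi */
      return 0;
   }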
struct _MC_LeakSuppExtra {
UInt match_leak_kinds;
+ UInt leak_search_gen;
/* Maintains nr of blocks and bytes suppressed with this suppression
during the leak search identified by leak_search_gen.
blocks_suppressed and bytes_suppressed are reset to 0 when
used the first time during a leak search. */
SizeT blocks_suppressed;
SizeT bytes_suppressed;
- UInt leak_search_gen;
};
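The relocated leak_search_gen now sits directly above the comment that
explains it: the two counters are only meaningful for the leak search
named by the generation, and are zeroed lazily on first use in a new
search. A hypothetical sketch of that generation-counter idiom (names
invented for illustration):

   typedef unsigned int UInt;
   typedef unsigned long SizeT;

   typedef struct {
      UInt  gen;                 /* leak search these counts belong to */
      SizeT blocks_suppressed;
      SizeT bytes_suppressed;
   } SuppCounts;

   static void supp_account(SuppCounts* sc, UInt current_gen, SizeT bytes)
   {
      if (sc->gen != current_gen) {   /* first use in this search:  */
         sc->gen = current_gen;       /* adopt the new generation   */
         sc->blocks_suppressed = 0;   /* and reset both counters.   */
         sc->bytes_suppressed  = 0;
      }
      sc->blocks_suppressed += 1;
      sc->bytes_suppressed  += bytes;
   }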
typedef struct {