From: Paul Floyd Date: Fri, 29 Sep 2023 20:45:32 +0000 (+0200) Subject: coverity: lots of checks that unsigned are >= 0 which is always true X-Git-Tag: VALGRIND_3_22_0~41 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=36b194bcec4d349f9631811358e438dc13b212ad;p=thirdparty%2Fvalgrind.git coverity: lots of checks that unsigned are >= 0 which is always true Also put back the isFF flag initialization (used for FreeBSD non-fixed RO ELF segments). I had intended to delete it but in the end kept it for traces but had already deleted the init code. --- diff --git a/VEX/priv/guest_amd64_toIR.c b/VEX/priv/guest_amd64_toIR.c index bb1563dc41..3d4780363d 100644 --- a/VEX/priv/guest_amd64_toIR.c +++ b/VEX/priv/guest_amd64_toIR.c @@ -12266,7 +12266,7 @@ static Long dis_FXRSTOR ( const VexAbiInfo* vbi, static IRTemp math_PINSRW_128 ( IRTemp v128, IRTemp u16, UInt imm8 ) { - vassert(imm8 >= 0 && imm8 <= 7); + vassert(imm8 <= 7); // Create a V128 value which has the selected word in the // specified lane, and zeroes everywhere else. @@ -18892,7 +18892,7 @@ static Long dis_PCMPxSTRx ( const VexAbiInfo* vbi, Prefix pfx, static IRTemp math_PINSRB_128 ( IRTemp v128, IRTemp u8, UInt imm8 ) { - vassert(imm8 >= 0 && imm8 <= 15); + vassert(imm8 <= 15); // Create a V128 value which has the selected byte in the // specified lane, and zeroes everywhere else. diff --git a/VEX/priv/guest_arm64_toIR.c b/VEX/priv/guest_arm64_toIR.c index 16a7e075f0..505b66f30f 100644 --- a/VEX/priv/guest_arm64_toIR.c +++ b/VEX/priv/guest_arm64_toIR.c @@ -2403,7 +2403,7 @@ Bool dbm_DecodeBitMasks ( /*OUT*/ULong* wmask, /*OUT*/ULong* tmask, /* Be careful of these (1ULL << (S+1)) - 1 expressions, and the same below with d. S can be 63 in which case we have an out of range and hence undefined shift. 
*/ - vassert(S >= 0 && S <= 63); + vassert(S <= 63); vassert(esize >= (S+1)); ULong elem_s = // Zeroes(esize-(S+1)):Ones(S+1) //(1ULL << (S+1)) - 1; @@ -2737,7 +2737,7 @@ Bool dis_ARM64_data_processing_immediate(/*MB_OUT*/DisResult* dres, } } - if (is64 && immS >= 0 && immS <= 62 + if (is64 && immS <= 62 && immR == immS + 1 && opc == BITS2(1,0)) { // 64-bit shift left UInt shift = 64 - immR; @@ -3654,7 +3654,7 @@ Bool dis_ARM64_data_processing_register(/*MB_OUT*/DisResult* dres, UInt sz = INSN(11,10); UInt nn = INSN(9,5); UInt dd = INSN(4,0); - vassert(sz >= 0 && sz <= 3); + vassert(sz <= 3); if ((bitSF == 0 && sz <= BITS2(1,0)) || (bitSF == 1 && sz == BITS2(1,1))) { UInt ix = (bitC == 1 ? 4 : 0) | sz; @@ -4430,7 +4430,7 @@ void math_INTERLEAVE2_64( /*OUTx2*/ IRTemp* i0, IRTemp* i1, return; } - vassert(laneSzBlg2 >= 0 && laneSzBlg2 <= 2); + vassert(laneSzBlg2 <= 2); IROp doubler = Iop_INVALID, halver = Iop_INVALID; math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2); @@ -4461,7 +4461,7 @@ void math_INTERLEAVE3_64( return; } - vassert(laneSzBlg2 >= 0 && laneSzBlg2 <= 2); + vassert(laneSzBlg2 <= 2); IROp doubler = Iop_INVALID, halver = Iop_INVALID; math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2); @@ -4497,7 +4497,7 @@ void math_INTERLEAVE4_64( return; } - vassert(laneSzBlg2 >= 0 && laneSzBlg2 <= 2); + vassert(laneSzBlg2 <= 2); IROp doubler = Iop_INVALID, halver = Iop_INVALID; math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2); @@ -4543,7 +4543,7 @@ void math_DEINTERLEAVE2_64( /*OUTx2*/ IRTemp* u0, IRTemp* u1, return; } - vassert(laneSzBlg2 >= 0 && laneSzBlg2 <= 2); + vassert(laneSzBlg2 <= 2); IROp doubler = Iop_INVALID, halver = Iop_INVALID; math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2); @@ -4575,7 +4575,7 @@ void math_DEINTERLEAVE3_64( return; } - vassert(laneSzBlg2 >= 0 && laneSzBlg2 <= 2); + vassert(laneSzBlg2 <= 2); IROp doubler = Iop_INVALID, halver = Iop_INVALID; math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2); 
@@ -4611,7 +4611,7 @@ void math_DEINTERLEAVE4_64( return; } - vassert(laneSzBlg2 >= 0 && laneSzBlg2 <= 2); + vassert(laneSzBlg2 <= 2); IROp doubler = Iop_INVALID, halver = Iop_INVALID; math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2); @@ -8386,7 +8386,7 @@ static IRTemp math_FOLDV ( IRTemp src, IROp op ) static IRTemp math_TBL_TBX ( IRTemp tab[4], UInt len, IRTemp src, IRTemp oor_values ) { - vassert(len >= 0 && len <= 3); + vassert(len <= 3); /* Generate some useful constants as concisely as possible. */ IRTemp half15 = newTemp(Ity_I64); @@ -9027,7 +9027,7 @@ void math_QSHL_IMM ( /*OUT*/IRTemp* res, /* Saturation has occurred if any of the shifted-out bits are different from the top bit of the original value. */ UInt rshift = laneBits - 1 - shift; - vassert(rshift >= 0 && rshift < laneBits-1); + vassert(rshift < laneBits-1); /* qDiff1 is the shifted out bits, and the top bit of the original value, preceded by zeroes. */ assign(*qDiff1, binop(mkVecSHRN(size), mkexpr(src), mkU8(rshift))); @@ -10339,7 +10339,7 @@ Bool dis_AdvSIMD_scalar_shift_by_imm(/*MB_OUT*/DisResult* dres, UInt insn) if (bitU == 0 && (immh & 8) == 8 && opcode == BITS5(0,1,0,1,0)) { /* -------- 0,1xxx,01010 SHL d_d_#imm -------- */ UInt sh = immhb - 64; - vassert(sh >= 0 && sh < 64); + vassert(sh < 64); putQReg128(dd, unop(Iop_ZeroHI64ofV128, sh == 0 ? 
getQReg128(nn) @@ -10580,7 +10580,7 @@ Bool dis_AdvSIMD_scalar_three_different(/*MB_OUT*/DisResult* dres, UInt insn) case BITS4(1,0,1,1): ks = 2; break; default: vassert(0); } - vassert(ks >= 0 && ks <= 2); + vassert(ks <= 2); if (size == X00 || size == X11) return False; vassert(size <= 2); IRTemp vecN, vecM, vecD, res, sat1q, sat1n, sat2q, sat2n; @@ -11687,7 +11687,7 @@ Bool dis_AdvSIMD_shift_by_immediate(/*MB_OUT*/DisResult* dres, UInt insn) Bool isAcc = opcode == BITS5(0,0,0,1,0); Bool ok = getLaneInfo_IMMH_IMMB(&shift, &size, immh, immb); if (!ok || (bitQ == 0 && size == X11)) return False; - vassert(size >= 0 && size <= 3); + vassert(size <= 3); UInt lanebits = 8 << size; vassert(shift >= 1 && shift <= lanebits); IROp op = isU ? mkVecSHRN(size) : mkVecSARN(size); @@ -12299,7 +12299,7 @@ Bool dis_AdvSIMD_three_different(/*MB_OUT*/DisResult* dres, UInt insn) case BITS4(1,0,1,0): ks = 2; break; default: vassert(0); } - vassert(ks >= 0 && ks <= 2); + vassert(ks <= 2); if (size == X11) return False; vassert(size <= 2); Bool isU = bitU == 1; @@ -14253,7 +14253,7 @@ Bool dis_AdvSIMD_vector_x_indexed_elem(/*MB_OUT*/DisResult* dres, UInt insn) case BITS4(0,1,1,0): ks = 2; break; default: vassert(0); } - vassert(ks >= 0 && ks <= 2); + vassert(ks <= 2); Bool isU = bitU == 1; Bool is2 = bitQ == 1; UInt mm = 32; // invalid diff --git a/VEX/priv/guest_arm_toIR.c b/VEX/priv/guest_arm_toIR.c index 6027d477e4..cdc1265f0d 100644 --- a/VEX/priv/guest_arm_toIR.c +++ b/VEX/priv/guest_arm_toIR.c @@ -630,7 +630,7 @@ static void putIRegT ( UInt iregNo, /* So, generate either an unconditional or a conditional write to the reg. 
*/ ASSERT_IS_THUMB; - vassert(iregNo >= 0 && iregNo <= 14); + vassert(iregNo <= 14); if (guardT == IRTemp_INVALID) { /* unconditional write */ llPutIReg( iregNo, e ); @@ -1340,6 +1340,7 @@ void setFlags_D1_D2_ND ( UInt cc_op, IRTemp t_dep1, vassert(typeOfIRTemp(irsb->tyenv, t_dep1 == Ity_I32)); vassert(typeOfIRTemp(irsb->tyenv, t_dep2 == Ity_I32)); vassert(typeOfIRTemp(irsb->tyenv, t_ndep == Ity_I32)); + // strictly unsigned cc_op must always be >= 0, keeping for readability vassert(cc_op >= ARMG_CC_OP_COPY && cc_op < ARMG_CC_OP_NUMBER); if (guardT == IRTemp_INVALID) { /* unconditional */ @@ -17794,7 +17795,7 @@ DisResult disInstr_ARM_WRK ( IRTemp tmp = newTemp(Ity_I32); IRTemp res = newTemp(Ity_I32); UInt mask = ((1 << wm1) - 1) + (1 << wm1); - vassert(msb >= 0 && msb <= 31); + vassert(msb <= 31); vassert(mask != 0); // guaranteed by msb being in 0 .. 31 inclusive assign(src, getIRegA(rN)); @@ -22271,7 +22272,7 @@ DisResult disInstr_THUMB_WRK ( IRTemp tmp = newTemp(Ity_I32); IRTemp res = newTemp(Ity_I32); UInt mask = ((1 << wm1) - 1) + (1 << wm1); - vassert(msb >= 0 && msb <= 31); + vassert(msb <= 31); vassert(mask != 0); // guaranteed by msb being in 0 .. 
31 inclusive assign(src, getIRegT(rN)); diff --git a/VEX/priv/guest_ppc_helpers.c b/VEX/priv/guest_ppc_helpers.c index 2914667514..bfdb11b14c 100644 --- a/VEX/priv/guest_ppc_helpers.c +++ b/VEX/priv/guest_ppc_helpers.c @@ -1521,8 +1521,8 @@ void write_ACC_entry (VexGuestPPC64State* gst, UInt offset, UInt acc, UInt reg, { U128* pU128_dst; - vassert( (acc >= 0) && (acc < 8) ); - vassert( (reg >= 0) && (reg < 4) ); + vassert(acc < 8); + vassert(reg < 4); pU128_dst = (U128*) (((UChar*)gst) + offset + acc*4*sizeof(U128) + reg*sizeof(U128)); diff --git a/VEX/priv/guest_ppc_toIR.c b/VEX/priv/guest_ppc_toIR.c index 16181768e4..f0c6ea8564 100644 --- a/VEX/priv/guest_ppc_toIR.c +++ b/VEX/priv/guest_ppc_toIR.c @@ -4086,8 +4086,8 @@ static void putACC( UInt index, UInt reg, IRExpr* src, Bool ACC_mapped_on_VSR) static IRExpr* /* :: Ity_V128 */ getACC ( UInt index, UInt reg, Bool ACC_mapped_on_VSR) { - vassert( (index >= 0) && (index < 8) ); - vassert( (reg >= 0) && (reg < 4) ); + vassert(index < 8); + vassert(reg < 4); return IRExpr_Get( base_acc_addr( ACC_mapped_on_VSR ) + ACC_offset( index, reg), Ity_V128 ); @@ -5656,7 +5656,7 @@ static void setup_fxstate_struct( IRDirty* d, UInt AT, IREffect AT_fx, d->fxState[3].fx = AT_fx; d->fxState[3].size = sizeof(U128); - vassert( (AT >= 0) && (AT < 8)); + vassert(AT < 8); acc_base_address = base_acc_addr( ACC_mapped_on_VSR ); diff --git a/VEX/priv/guest_x86_helpers.c b/VEX/priv/guest_x86_helpers.c index a1d086369d..1764f58f0b 100644 --- a/VEX/priv/guest_x86_helpers.c +++ b/VEX/priv/guest_x86_helpers.c @@ -2820,7 +2820,7 @@ ULong x86g_use_seg_selector ( HWord ldt, HWord gdt, /* Convert the segment selector onto a table index */ seg_selector >>= 3; - vassert(seg_selector >= 0 && seg_selector < 8192); + vassert(seg_selector < 8192); if (tiBit == 0) { diff --git a/VEX/priv/host_amd64_isel.c b/VEX/priv/host_amd64_isel.c index e66c97f64c..5104e7dbe7 100644 --- a/VEX/priv/host_amd64_isel.c +++ b/VEX/priv/host_amd64_isel.c @@ -173,7 +173,6 @@ 
static HReg lookupIRTemp ( ISelEnv* env, IRTemp tmp ) static void lookupIRTempPair ( HReg* vrHI, HReg* vrLO, ISelEnv* env, IRTemp tmp ) { - vassert(tmp >= 0); vassert(tmp < env->n_vregmap); vassert(! hregIsInvalid(env->vregmapHI[tmp])); *vrLO = env->vregmap[tmp]; @@ -574,7 +573,7 @@ void doHelperCall ( /*OUT*/UInt* stackAdjustAfterCall, never see IRExpr_VECRET() at this point, since the return-type check above should ensure all those cases use the slow scheme instead. */ - vassert(n_args >= 0 && n_args <= 6); + vassert(n_args <= 6); for (i = 0; i < n_args; i++) { IRExpr* arg = args[i]; if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg))) { diff --git a/VEX/priv/host_arm_defs.c b/VEX/priv/host_arm_defs.c index 6f972a41b0..8d17238e3c 100644 --- a/VEX/priv/host_arm_defs.c +++ b/VEX/priv/host_arm_defs.c @@ -220,7 +220,7 @@ ARMAMode1* ARMAMode1_RRS ( HReg base, HReg index, UInt shift ) { am->ARMam1.RRS.base = base; am->ARMam1.RRS.index = index; am->ARMam1.RRS.shift = shift; - vassert(0 <= shift && shift <= 3); + vassert(shift <= 3); return am; } @@ -429,8 +429,8 @@ ARMRI84* ARMRI84_I84 ( UShort imm8, UShort imm4 ) { ri84->tag = ARMri84_I84; ri84->ARMri84.I84.imm8 = imm8; ri84->ARMri84.I84.imm4 = imm4; - vassert(imm8 >= 0 && imm8 <= 255); - vassert(imm4 >= 0 && imm4 <= 15); + vassert(imm8 <= 255); + vassert(imm4 <= 15); return ri84; } ARMRI84* ARMRI84_R ( HReg reg ) { diff --git a/VEX/priv/host_arm_isel.c b/VEX/priv/host_arm_isel.c index acbd39ad4d..891c9f68c2 100644 --- a/VEX/priv/host_arm_isel.c +++ b/VEX/priv/host_arm_isel.c @@ -252,7 +252,7 @@ static HReg iselNeonExpr ( ISelEnv* env, const IRExpr* e ); /*---------------------------------------------------------*/ static UInt ROR32 ( UInt x, UInt sh ) { - vassert(sh >= 0 && sh < 32); + vassert(sh < 32); if (sh == 0) return x; else diff --git a/VEX/priv/host_generic_reg_alloc3.c b/VEX/priv/host_generic_reg_alloc3.c index 21129d4a12..f1eb4c9b18 100644 --- a/VEX/priv/host_generic_reg_alloc3.c +++ 
b/VEX/priv/host_generic_reg_alloc3.c @@ -148,7 +148,8 @@ typedef } RRegLRState; -#define IS_VALID_VREGNO(v) ((v) >= 0 && (v) < n_vregs) +/* v is always unsigned, wish we could static assert that */ +#define IS_VALID_VREGNO(v) ((v) < n_vregs) #define IS_VALID_RREGNO(r) ((r) >= 0 && (r) < n_rregs) #define FREE_VREG(v) \ diff --git a/VEX/priv/host_mips_defs.c b/VEX/priv/host_mips_defs.c index 5af3b85659..7641ccf62e 100644 --- a/VEX/priv/host_mips_defs.c +++ b/VEX/priv/host_mips_defs.c @@ -3316,7 +3316,7 @@ static UChar *mkFormS(UChar * p, UInt opc1, UInt rRD, UInt rRS, UInt rRT, vassert(rRS < 0x20); vassert(rRT < 0x20); vassert(opc2 <= 0x3F); - vassert(sa >= 0 && sa <= 0x3F); + vassert(sa <= 0x3F); theInstr = ((opc1 << 26) | (rRS << 21) | (rRT << 16) | (rRD << 11) | ((sa & 0x1F) << 6) | (opc2)); diff --git a/VEX/priv/host_ppc_defs.c b/VEX/priv/host_ppc_defs.c index 4222b47868..ea140621e2 100644 --- a/VEX/priv/host_ppc_defs.c +++ b/VEX/priv/host_ppc_defs.c @@ -4333,7 +4333,7 @@ Int emit_PPCInstr ( /*MB_MOD*/Bool* is_profInc, as that is a handy way to sign extend the lower 32 bits into the upper 32 bits. */ if (mode64) - vassert(n >= 0 && n < 32); + vassert(n < 32); else vassert(n > 0 && n < 32); p = mkFormX(p, 31, r_srcL, r_dst, n, 824, 0, endness_host); diff --git a/VEX/priv/host_ppc_isel.c b/VEX/priv/host_ppc_isel.c index 5ee6d1b6da..75a753f75a 100644 --- a/VEX/priv/host_ppc_isel.c +++ b/VEX/priv/host_ppc_isel.c @@ -320,7 +320,6 @@ static void lookupIRTempQuad ( HReg* vrHi, HReg* vrMedHi, HReg* vrMedLo, HReg* vrLo, ISelEnv* env, IRTemp tmp ) { vassert(!env->mode64); - vassert(tmp >= 0); vassert(tmp < env->n_vregmap); vassert(! 
hregIsInvalid(env->vregmapMedLo[tmp])); *vrHi = env->vregmapHi[tmp]; diff --git a/VEX/priv/host_x86_isel.c b/VEX/priv/host_x86_isel.c index a0f6677143..391b3b4834 100644 --- a/VEX/priv/host_x86_isel.c +++ b/VEX/priv/host_x86_isel.c @@ -197,7 +197,6 @@ static HReg lookupIRTemp ( ISelEnv* env, IRTemp tmp ) static void lookupIRTemp64 ( HReg* vrHI, HReg* vrLO, ISelEnv* env, IRTemp tmp ) { - vassert(tmp >= 0); vassert(tmp < env->n_vregmap); vassert(! hregIsInvalid(env->vregmapHI[tmp])); *vrLO = env->vregmap[tmp]; diff --git a/VEX/priv/ir_defs.c b/VEX/priv/ir_defs.c index 2d82c41a1a..31710eb33a 100644 --- a/VEX/priv/ir_defs.c +++ b/VEX/priv/ir_defs.c @@ -4225,7 +4225,6 @@ IRTemp newIRTemp ( IRTypeEnv* env, IRType ty ) inline IRType typeOfIRTemp ( const IRTypeEnv* env, IRTemp tmp ) { - vassert(tmp >= 0); vassert(tmp < env->types_used); return env->types[tmp]; } diff --git a/cachegrind/cg_arch.c b/cachegrind/cg_arch.c index 8858b0ee6d..9b6071d2c3 100644 --- a/cachegrind/cg_arch.c +++ b/cachegrind/cg_arch.c @@ -240,7 +240,6 @@ maybe_tweak_LLc(cache_t *LLc) power of two. Then, increase the associativity by that factor. Finally, re-calculate the total size so as to make sure it divides exactly between the sets. 
*/ - tl_assert(old_nSets >= 0); UInt new_nSets = floor_power_of_2 ( old_nSets ); tl_assert(new_nSets > 0 && new_nSets < old_nSets); Double factor = (Double)old_nSets / (Double)new_nSets; diff --git a/coregrind/m_aspacemgr/aspacemgr-linux.c b/coregrind/m_aspacemgr/aspacemgr-linux.c index 53d0536de4..232401e60e 100644 --- a/coregrind/m_aspacemgr/aspacemgr-linux.c +++ b/coregrind/m_aspacemgr/aspacemgr-linux.c @@ -1502,7 +1502,8 @@ static void init_nsegment ( /*OUT*/NSegment* seg ) seg->hasR = seg->hasW = seg->hasX = seg->hasT = seg->isCH = False; #if defined(VGO_freebsd) - seg->ignore_offset = False; + seg->isFF = False; + seg->ignore_offset = False; #endif } diff --git a/coregrind/m_debuginfo/image.c b/coregrind/m_debuginfo/image.c index 28dfd0b472..5c9d722467 100644 --- a/coregrind/m_debuginfo/image.c +++ b/coregrind/m_debuginfo/image.c @@ -553,7 +553,7 @@ static void set_CEnt ( const DiImage* img, UInt entNo, DiOffT off ) DiOffT off_orig = off; vg_assert(img != NULL); vg_assert(img->ces_used <= CACHE_N_ENTRIES); - vg_assert(entNo >= 0 && entNo < img->ces_used); + vg_assert(entNo < img->ces_used); vg_assert(off < img->real_size); CEnt* ce = img->ces[entNo]; vg_assert(ce != NULL); diff --git a/coregrind/m_transtab.c b/coregrind/m_transtab.c index 5e82d57d97..ff5b35b53a 100644 --- a/coregrind/m_transtab.c +++ b/coregrind/m_transtab.c @@ -1192,7 +1192,7 @@ void upd_eclasses_after_add ( /*MOD*/Sector* sec, TTEno tteno ) { Int i, r; EClassNo eclasses[3]; - vg_assert(tteno >= 0 && tteno < N_TTES_PER_SECTOR); + vg_assert(tteno < N_TTES_PER_SECTOR); TTEntryH* tteH = &sec->ttH[tteno]; r = vexGuestExtents_to_eclasses( eclasses, tteH ); @@ -1530,7 +1530,7 @@ static TTEno get_empty_tt_slot(SECno sNo) i = sectors[sNo].empty_tt_list; sectors[sNo].empty_tt_list = sectors[sNo].ttC[i].usage.next_empty_tte; - vg_assert (i >= 0 && i < N_TTES_PER_SECTOR); + vg_assert (i < N_TTES_PER_SECTOR); return i; } diff --git a/helgrind/hg_wordset.c b/helgrind/hg_wordset.c index 
0c793d9d41..34978ffc5f 100644 --- a/helgrind/hg_wordset.c +++ b/helgrind/hg_wordset.c @@ -85,7 +85,6 @@ typedef WCache* _cache = &(_zzcache); \ tl_assert(_cache->dynMax >= 1); \ tl_assert(_cache->dynMax <= N_WCACHE_STAT_MAX); \ - tl_assert(_cache->inUse >= 0); \ tl_assert(_cache->inUse <= _cache->dynMax); \ if (_cache->inUse > 0) { \ if (_cache->ent[0].arg1 == _arg1 \ @@ -184,7 +183,6 @@ struct _WordSetU { static WordVec* new_WV_of_size ( WordSetU* wsu, UWord sz ) { WordVec* wv; - tl_assert(sz >= 0); wv = wsu->alloc( wsu->cc, sizeof(WordVec) ); wv->owner = wsu; wv->words = NULL; diff --git a/helgrind/libhb_core.c b/helgrind/libhb_core.c index f660a34ea0..2acf3fbd83 100644 --- a/helgrind/libhb_core.c +++ b/helgrind/libhb_core.c @@ -2434,8 +2434,8 @@ static void VTS__join ( /*OUT*/VTS* out, VTS* a, VTS* b ) from a and b in order, where thrid is the next ThrID occurring in either a or b, and tyma/b are the relevant scalar timestamps, taking into account implicit zeroes. */ - tl_assert(ia >= 0 && ia <= useda); - tl_assert(ib >= 0 && ib <= usedb); + tl_assert(ia <= useda); + tl_assert(ib <= usedb); if (ia == useda && ib == usedb) { /* both empty - done */ @@ -3056,7 +3056,7 @@ static void vts_tab__do_GC ( Bool show_stats ) can't set the threshold value smaller than it. */ tl_assert(nFreed <= nTab); nLive = nTab - nFreed; - tl_assert(nLive >= 0 && nLive <= nTab); + tl_assert(nLive <= nTab); vts_next_GC_at = 2 * nLive; if (vts_next_GC_at < nTab) vts_next_GC_at = nTab; diff --git a/memcheck/mc_main.c b/memcheck/mc_main.c index e86487a57d..2221f8d3d8 100644 --- a/memcheck/mc_main.c +++ b/memcheck/mc_main.c @@ -2704,7 +2704,7 @@ static OCacheLine* find_OCacheLine_SLOW ( Addr a ) UWord setno = (a >> OC_BITS_PER_LINE) & (OC_N_SETS - 1); UWord tagmask = ~((1 << OC_BITS_PER_LINE) - 1); UWord tag = a & tagmask; - tl_assert(setno >= 0 && setno < OC_N_SETS); + tl_assert(setno < OC_N_SETS); /* we already tried line == 0; skip therefore. 
*/ for (line = 1; line < OC_LINES_PER_SET; line++) {