From: Paul Floyd
Date: Sun, 8 Oct 2023 07:21:12 +0000 (+0200)
Subject: coverity: most of the remaining unsigned comparisons >= 0 warnings
X-Git-Tag: VALGRIND_3_22_0~34
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=0ed244b88f4e026cd42a211652277f4d7247cb7d;p=thirdparty%2Fvalgrind.git

coverity: most of the remaining unsigned comparisons >= 0 warnings
---

diff --git a/VEX/priv/guest_arm64_toIR.c b/VEX/priv/guest_arm64_toIR.c
index e6aaf896c9..5777514379 100644
--- a/VEX/priv/guest_arm64_toIR.c
+++ b/VEX/priv/guest_arm64_toIR.c
@@ -10377,7 +10377,7 @@ Bool dis_AdvSIMD_scalar_shift_by_imm(/*MB_OUT*/DisResult* dres, UInt insn)
       UInt shift = 0;
       Bool ok = getLaneInfo_IMMH_IMMB(&shift, &size, immh, immb);
       if (!ok) return False;
-      vassert(size >= 0 && size <= 3);
+      vassert(size <= 3);
       /* The shift encoding has opposite sign for the leftwards case.
          Adjust shift to compensate. */
       UInt lanebits = 8 << size;
@@ -11774,7 +11774,7 @@ Bool dis_AdvSIMD_shift_by_immediate(/*MB_OUT*/DisResult* dres, UInt insn)
       Bool isQ = bitQ == 1;
       Bool ok  = getLaneInfo_IMMH_IMMB(&shift, &size, immh, immb);
       if (!ok || (bitQ == 0 && size == X11)) return False;
-      vassert(size >= 0 && size <= 3);
+      vassert(size <= 3);
       UInt lanebits = 8 << size;
       vassert(shift >= 1 && shift <= lanebits);
       IRExpr* src = getQReg128(nn);
diff --git a/VEX/priv/guest_arm_toIR.c b/VEX/priv/guest_arm_toIR.c
index 58d4ed6b1e..12acfbc865 100644
--- a/VEX/priv/guest_arm_toIR.c
+++ b/VEX/priv/guest_arm_toIR.c
@@ -12797,7 +12797,7 @@ static Bool decode_V8_instruction (
       const HChar* iNames[4]
         = { "aese", "aesd", "aesmc", "aesimc" };
-      vassert(opc >= 0 && opc <= 3);
+      vassert(opc <= 3);
       void*        helper = helpers[opc];
       const HChar* hname  = hNames[opc];
diff --git a/VEX/priv/guest_ppc_helpers.c b/VEX/priv/guest_ppc_helpers.c
index 1a8e9fbf71..2611e52107 100644
--- a/VEX/priv/guest_ppc_helpers.c
+++ b/VEX/priv/guest_ppc_helpers.c
@@ -1545,8 +1545,8 @@ void get_ACC_entry (VexGuestPPC64State* gst, UInt offset, UInt acc, UInt reg,
    acc_word[1] = 0xBAD;
    acc_word[0] = 0xBEEF;

-   vassert( (acc >= 0) && (acc < 8) );
-   vassert( (reg >= 0) && (reg < 4) );
+   vassert(acc < 8);
+   vassert(reg < 4);
    pU128_src = (U128*) (((UChar*)gst) + offset
                         + acc*4*sizeof(U128) + reg*sizeof(U128));
diff --git a/VEX/priv/guest_ppc_toIR.c b/VEX/priv/guest_ppc_toIR.c
index f0c6ea8564..d05cce394d 100644
--- a/VEX/priv/guest_ppc_toIR.c
+++ b/VEX/priv/guest_ppc_toIR.c
@@ -5917,7 +5917,7 @@ static void vector_gen_pvc_mask ( const VexAbiInfo* vbi,
    IRDirty* d;

-   vassert( (VSX_addr >= 0) && (VSX_addr < 64) );
+   vassert(VSX_addr < 64);
    UInt reg_offset = offsetofPPCGuestState( guest_VSR0 )
                      + sizeof(U128) * VSX_addr;
diff --git a/VEX/priv/host_arm64_isel.c b/VEX/priv/host_arm64_isel.c
index 4b75a08672..b4b7d80211 100644
--- a/VEX/priv/host_arm64_isel.c
+++ b/VEX/priv/host_arm64_isel.c
@@ -119,7 +119,6 @@ static HReg lookupIRTemp ( ISelEnv* env, IRTemp tmp )
 static void lookupIRTempPair ( HReg* vrHI, HReg* vrLO,
                                ISelEnv* env, IRTemp tmp )
 {
-   vassert(tmp >= 0);
    vassert(tmp < env->n_vregmap);
    vassert(! hregIsInvalid(env->vregmapHI[tmp]));
    *vrLO = env->vregmap[tmp];
diff --git a/VEX/priv/host_generic_reg_alloc2.c b/VEX/priv/host_generic_reg_alloc2.c
index ceea1f4d61..d94d2c92a7 100644
--- a/VEX/priv/host_generic_reg_alloc2.c
+++ b/VEX/priv/host_generic_reg_alloc2.c
@@ -137,6 +137,7 @@ typedef
 #define INVALID_RREG_NO ((Short)(-1))

 #define IS_VALID_VREGNO(_zz) ((_zz) >= 0 && (_zz) < n_vregs)
+#define IS_VALID_UNSIGNED_VREGNO(_zz) ((_zz) < n_vregs)
 #define IS_VALID_RREGNO(_zz) ((_zz) >= 0 && (_zz) < n_rregs)

@@ -1055,8 +1056,8 @@ HInstrArray* doRegisterAllocation_v2 (
          /* Finally, we can do the coalescing.  It's trivial -- merely
             claim vregS's register for vregD. */
          rreg_state[n].vreg = vregD;
-         vassert(IS_VALID_VREGNO(hregIndex(vregD)));
-         vassert(IS_VALID_VREGNO(hregIndex(vregS)));
+         vassert(IS_VALID_UNSIGNED_VREGNO(hregIndex(vregD)));
+         vassert(IS_VALID_UNSIGNED_VREGNO(hregIndex(vregS)));
          vreg_state[hregIndex(vregD)] = toShort(n);
          vreg_state[hregIndex(vregS)] = INVALID_RREG_NO;
@@ -1080,7 +1081,7 @@ HInstrArray* doRegisterAllocation_v2 (
            if (rreg_state[j].disp != Bound)
               continue;
            UInt vregno = hregIndex(rreg_state[j].vreg);
-           vassert(IS_VALID_VREGNO(vregno));
+           vassert(IS_VALID_UNSIGNED_VREGNO(vregno));
            if (vreg_lrs[vregno].dead_before <= ii) {
               rreg_state[j].disp = Free;
               rreg_state[j].eq_spill_slot = False;
diff --git a/VEX/priv/ir_opt.c b/VEX/priv/ir_opt.c
index 93dd6188ef..f918e9f858 100644
--- a/VEX/priv/ir_opt.c
+++ b/VEX/priv/ir_opt.c
@@ -6371,7 +6371,7 @@ static Bool do_XOR_TRANSFORM_IRSB ( IRSB* sb )
       if (st->tag != Ist_WrTmp)
          continue;
       IRTemp t = st->Ist.WrTmp.tmp;
-      vassert(t >= 0 && t < n_tmps);
+      vassert(t < n_tmps);
       env[t] = st->Ist.WrTmp.data;
    }
diff --git a/auxprogs/valgrind-di-server.c b/auxprogs/valgrind-di-server.c
index 028948db04..1b6506e3da 100644
--- a/auxprogs/valgrind-di-server.c
+++ b/auxprogs/valgrind-di-server.c
@@ -650,7 +650,7 @@ static UInt calc_gnu_debuglink_crc32(/*OUT*/Bool* ok, int fd, ULong size)
    ULong img_szB = size;
    ULong curr_off = 0;
    while (1) {
-      assert(curr_off >= 0 && curr_off <= img_szB);
+      assert(curr_off <= img_szB);
       if (curr_off == img_szB) break;
       ULong avail = img_szB - curr_off;
       assert(avail > 0 && avail <= img_szB);
diff --git a/cachegrind/cg_arch.c b/cachegrind/cg_arch.c
index dac22feb2a..68314c9dbe 100644
--- a/cachegrind/cg_arch.c
+++ b/cachegrind/cg_arch.c
@@ -306,7 +306,7 @@ void VG_(post_clo_init_configure_caches)(cache_t* I1c,
    check_cache_or_override ("LL", LLc, DEFINED(clo_LLc));

    // Then replace with any defined on the command line.  (Already checked in
-   // VG(parse_clo_cache_opt)().)
+   // VG(str_clo_cache_opt)().)
    if (DEFINED(clo_I1c)) { *I1c = *clo_I1c; }
    if (DEFINED(clo_D1c)) { *D1c = *clo_D1c; }
    if (DEFINED(clo_LLc)) { *LLc = *clo_LLc; }
diff --git a/callgrind/main.c b/callgrind/main.c
index 0c2467a90f..4a14d5fe26 100644
--- a/callgrind/main.c
+++ b/callgrind/main.c
@@ -777,7 +777,6 @@ static InstrInfo* next_InstrInfo ( ClgState* clgs, UInt instr_size )
 {
    InstrInfo* ii;

-   tl_assert(clgs->ii_index >= 0);
    tl_assert(clgs->ii_index < clgs->bb->instr_count);

    ii = &clgs->bb->instr[ clgs->ii_index ];
diff --git a/coregrind/m_debuginfo/d3basics.c b/coregrind/m_debuginfo/d3basics.c
index e9e8944af8..bcfd456151 100644
--- a/coregrind/m_debuginfo/d3basics.c
+++ b/coregrind/m_debuginfo/d3basics.c
@@ -648,7 +648,6 @@ GXResult ML_(evaluate_Dwarf3_Expr) ( const UChar* expr, UWord exprszB,
    sp = -1;
    vg_assert(expr);
-   vg_assert(exprszB >= 0);
    limit = expr + exprszB;

    /* Deal with the case where the entire expression is a single
diff --git a/coregrind/m_debuginfo/image.c b/coregrind/m_debuginfo/image.c
index ede8357309..02e5090713 100644
--- a/coregrind/m_debuginfo/image.c
+++ b/coregrind/m_debuginfo/image.c
@@ -775,7 +775,7 @@ static UChar get_slowcase ( DiImage* img, DiOffT off )
       if (!img->ces[i]->fromC)
          break;
    }
-   vg_assert(i >= 0 && i < CACHE_N_ENTRIES);
+   vg_assert(i < CACHE_N_ENTRIES);
    realloc_CEnt(img, i, size, /*fromC?*/cslc != NULL);
    img->ces[i]->size = size;
@@ -1166,7 +1166,7 @@ SizeT ML_(img_get_some)(/*OUT*/void* dst,
       vg_assert(is_in_CEnt(ce, offset));
       SizeT nToCopy = size - 1;
       SizeT nAvail = (SizeT)(ce->used - (offset + 1 - ce->off));
-      vg_assert(nAvail >= 0 && nAvail <= ce->used-1);
+      vg_assert(nAvail <= ce->used-1);
       if (nAvail < nToCopy) nToCopy = nAvail;
       VG_(memcpy)(&dstU[1], &ce->data[offset + 1 - ce->off], nToCopy);
       return nToCopy + 1;
diff --git a/coregrind/m_execontext.c b/coregrind/m_execontext.c
index 9305f316ef..b00db18861 100644
--- a/coregrind/m_execontext.c
+++ b/coregrind/m_execontext.c
@@ -546,7 +546,7 @@ static ExeContext* record_ExeContext_wrk2 ( const Addr* ips, UInt n_ips )

    /* Resize the hash table, maybe? */
    if ( ((ULong)ec_totstored) > ((ULong)ec_htab_size) ) {
-      vg_assert(ec_htab_size_idx >= 0 && ec_htab_size_idx < N_EC_PRIMES);
+      vg_assert(ec_htab_size_idx < N_EC_PRIMES);
       if (ec_htab_size_idx < N_EC_PRIMES-1)
          resize_ec_htab();
    }
diff --git a/coregrind/m_transtab.c b/coregrind/m_transtab.c
index 724ea6b1fc..ce54ce7108 100644
--- a/coregrind/m_transtab.c
+++ b/coregrind/m_transtab.c
@@ -1710,7 +1710,7 @@ static void initialiseSector ( SECno sno )
         if (sector_search_order[ix] == sno)
            break;
      }
-     vg_assert(ix >= 0 && ix < n_sectors);
+     vg_assert(ix < n_sectors);
      if (VG_(clo_verbosity) > 2)
         VG_(message)(Vg_DebugMsg, "TT/TC: recycle sector %d\n", sno);
@@ -2029,7 +2029,7 @@ static void delete_tte ( /*OUT*/Addr* ga_deleted,
    /* sec and secNo are mutually redundant; cross-check. */
    vg_assert(sec == &sectors[secNo]);
-   vg_assert(tteno >= 0 && tteno < N_TTES_PER_SECTOR);
+   vg_assert(tteno < N_TTES_PER_SECTOR);
    TTEntryC* tteC = &sec->ttC[tteno];
    TTEntryH* tteH = &sec->ttH[tteno];
    vg_assert(tteH->status == InUse);
@@ -2104,7 +2104,7 @@ SizeT delete_translations_in_sector_eclass ( /*OUT*/Addr* ga_deleted,
    TTEno tteno;
    SizeT numDeld = 0;

-   vg_assert(ec >= 0 && ec < ECLASS_N);
+   vg_assert(ec < ECLASS_N);

    for (i = 0; i < sec->ec2tte_used[ec]; i++) {
diff --git a/helgrind/hg_main.c b/helgrind/hg_main.c
index a31aacf5c7..6a6e6294df 100644
--- a/helgrind/hg_main.c
+++ b/helgrind/hg_main.c
@@ -685,7 +685,7 @@ static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
    ThreadId tid;
    tl_assert(HG_(is_sane_Thread)(thr));
    /* Check nobody used the invalid-threadid slot */
-   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
+   tl_assert(VG_INVALID_THREADID < VG_N_THREADS);
    tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
    tid = thr->coretid;
    tl_assert(HG_(is_sane_ThreadId)(tid));
diff --git a/helgrind/hg_wordset.c b/helgrind/hg_wordset.c
index 1f9e30ddcc..70164245d9 100644
--- a/helgrind/hg_wordset.c
+++ b/helgrind/hg_wordset.c
@@ -111,7 +111,6 @@ typedef
         WCache* _cache = &(_zzcache);                     \
         tl_assert(_cache->dynMax >= 1);                   \
         tl_assert(_cache->dynMax <= N_WCACHE_STAT_MAX);   \
-        tl_assert(_cache->inUse >= 0);                    \
         tl_assert(_cache->inUse <= _cache->dynMax);       \
         if (_cache->inUse < _cache->dynMax)               \
            _cache->inUse++;                               \
@@ -666,7 +665,7 @@ WordSet HG_(delFromWS) ( WordSetU* wsu, WordSet ws, UWord w )
    }

    /* So w is present in ws, and the new set will be one element
       smaller. */
-   tl_assert(i >= 0 && i < wv->size);
+   tl_assert(i < wv->size);
    tl_assert(wv->size > 0);

    wv_new = new_WV_of_size( wsu, wv->size - 1 );
diff --git a/helgrind/libhb_core.c b/helgrind/libhb_core.c
index cb3daaa139..0a084dd60d 100644
--- a/helgrind/libhb_core.c
+++ b/helgrind/libhb_core.c
@@ -1033,7 +1033,6 @@ static void rcdec_LineZ ( LineZ* lineZ ) {
 inline
 static void write_twobit_array ( UChar* arr, UWord ix, UWord b2 ) {
    Word bix, shft, mask, prep;
-   tl_assert(ix >= 0);
    bix  = ix >> 2;
    shft = 2 * (ix & 3); /* 0, 2, 4 or 6 */
    mask = 3 << shft;
@@ -1044,7 +1043,6 @@ static void write_twobit_array ( UChar* arr, UWord ix, UWord b2 ) {
 inline
 static UWord read_twobit_array ( UChar* arr, UWord ix ) {
    Word bix, shft;
-   tl_assert(ix >= 0);
    bix  = ix >> 2;
    shft = 2 * (ix & 3); /* 0, 2, 4 or 6 */
    return (arr[bix] >> shft) & 3;
@@ -2364,7 +2362,7 @@ static void VTS__tick ( /*OUT*/VTS* out, Thr* me, VTS* vts )
         copy it to the output but increment its timestamp value.  Then
         copy the remaining entries.  (c) is the common case.
    */
-   tl_assert(i >= 0 && i <= n);
+   tl_assert(i <= n);
    if (i == n) { /* case (a) */
       UInt hi = out->usedTS++;
       out->ts[hi].thrid = me_thrid;
diff --git a/memcheck/mc_main.c b/memcheck/mc_main.c
index e6e9ef80aa..3a2a81741a 100644
--- a/memcheck/mc_main.c
+++ b/memcheck/mc_main.c
@@ -2946,8 +2946,7 @@ void make_aligned_word64_undefined_w_otag ( Addr a, UInt otag )
    //// Set the origins for a+0 .. a+7
    { OCacheLine* line;
      UWord lineoff = oc_line_offset(a);
-     tl_assert(lineoff >= 0
-               && lineoff < OC_W32S_PER_LINE -1/*'cos 8-aligned*/);
+     tl_assert(lineoff < OC_W32S_PER_LINE -1/*'cos 8-aligned*/);
      line = find_OCacheLine( a );
      line->u.main.descr[lineoff+0] = 0xF;
      line->u.main.descr[lineoff+1] = 0xF;
@@ -7120,7 +7119,7 @@ static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
           (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
          *ret = 1;
       } else {
-         tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
+         tl_assert(arg[2] < cgb_used);
          cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
         VG_(free)(cgbs[arg[2]].desc);
         cgb_discards++;
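
For context, the pattern behind all of these changes: when a variable has an
unsigned type (UInt, UWord, SizeT, ULong, ...), a test such as 'x >= 0' is
true by definition, so the lower-bound half of a range assert like
'x >= 0 && x < N' is dead code, which is what Coverity reports. Dropping the
tautological half loses nothing: a negative value accidentally converted to
unsigned wraps to a very large number and is still rejected by the
upper-bound check. A minimal standalone sketch of the before/after pattern
(illustrative only; the names lookup_entry and N_ENTRIES are hypothetical,
not from this commit):

   #include <assert.h>

   #define N_ENTRIES 64u

   /* Hypothetical bounds-checked lookup mirroring the asserts above. */
   static int lookup_entry(const int* table, unsigned idx)
   {
      /* Before: 'idx >= 0' is always true for an unsigned 'idx', so the
         first conjunct has no effect (GCC's -Wtype-limits warns about the
         same thing):

            assert(idx >= 0 && idx < N_ENTRIES);
      */

      /* After: the lower bound is guaranteed by the type; only the upper
         bound does any real checking. */
      assert(idx < N_ENTRIES);
      return table[idx];
   }

   int main(void)
   {
      int table[N_ENTRIES] = { 0 };
      return lookup_entry(table, 3u);
   }

Note also that host_generic_reg_alloc2.c gains a separate
IS_VALID_UNSIGNED_VREGNO macro rather than a rewritten IS_VALID_VREGNO: the
original macro, with its '>= 0' conjunct intact, presumably stays in use at
call sites where the argument is signed and the lower-bound check still does
real work.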