useful hints to the compiler and static analyzers.
}
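For reference, a minimal sketch of how such an AssertHint macro could be defined, assuming a GCC/Clang or MSVC toolchain (the ZLIB_DEBUG guard follows the existing Assert convention; the actual definition in the tree may differ):

/* Sketch only: in debug builds, behave like a normal Assert; in release
 * builds, lower to an optimizer/analyzer hint instead of compiling away. */
#ifdef ZLIB_DEBUG
#  define AssertHint(cond, msg) Assert(cond, msg)
#elif defined(_MSC_VER)
   /* MSVC: __assume() feeds the condition to the optimizer and analyzer. */
#  define AssertHint(cond, msg) __assume(cond)
#else
   /* GCC/Clang: marking the failure branch unreachable has the same effect. */
#  define AssertHint(cond, msg) do { if (!(cond)) __builtin_unreachable(); } while (0)
#endif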
Z_INTERNAL void slide_hash_armv6(deflate_state *s) {
- Assert(s->w_size <= UINT16_MAX, "w_size should fit in uint16_t");
+ AssertHint(s->w_size <= UINT16_MAX, "w_size should fit in uint16_t");
uint16_t wsize = (uint16_t)s->w_size;
slide_hash_chain(s->head, HASH_SIZE, wsize);
}
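For reference, the scalar operation these per-architecture variants vectorize is a saturating subtract over every hash-table entry; a minimal sketch, assuming zlib-ng's 16-bit Pos type (illustrative helper, not the tree's actual slide_hash_chain):

/* After the window slides by wsize, rebase every stored position, clamping
 * to zero so entries that pointed into the discarded half become "no match".
 * Keeping wsize in 16 bits is what lets the SIMD versions broadcast it into
 * 16-bit lanes and use saturating-subtract instructions directly. */
static void slide_hash_scalar(Pos *table, size_t entries, uint16_t wsize) {
    for (size_t i = 0; i < entries; i++)
        table[i] = (Pos)((table[i] >= wsize) ? table[i] - wsize : 0);
}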
Z_INTERNAL void slide_hash_neon(deflate_state *s) {
- Assert(s->w_size <= UINT16_MAX, "w_size should fit in uint16_t");
+ AssertHint(s->w_size <= UINT16_MAX, "w_size should fit in uint16_t");
uint16_t wsize = (uint16_t)s->w_size;
slide_hash_chain(s->head, HASH_SIZE, wsize);
#endif
#endif
words += N;
- Assert(comb <= UINT32_MAX, "comb should fit in uint32_t");
+ AssertHint(comb <= UINT32_MAX, "comb should fit in uint32_t");
c = (uint32_t)ZSWAPWORD(comb);
/* Update the pointer to the remaining bytes to process. */
}
void Z_INTERNAL SLIDE_PPC(deflate_state *s) {
- Assert(s->w_size <= UINT16_MAX, "w_size should fit in uint16_t");
+ AssertHint(s->w_size <= UINT16_MAX, "w_size should fit in uint16_t");
uint16_t wsize = (uint16_t)s->w_size;
slide_hash_chain(s->head, HASH_SIZE, wsize);
* loadchunk and storechunk to ensure the result is correct.
*/
static inline uint8_t* CHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {
- Assert(len > 0, "chunkcopy should never have a length 0");
+ AssertHint(len > 0, "chunkcopy should never have a length 0");
int32_t align = ((len - 1) % sizeof(chunk_t)) + 1;
memcpy(out, from, sizeof(chunk_t));
out += align;
}
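The align computation deserves a worked example, since it is what lets every store be a full chunk while the pointers still advance by exactly len bytes overall:

/* Worked example, assuming sizeof(chunk_t) == 16 and len == 20:
 *   align = ((20 - 1) % 16) + 1 = 4
 * The first store writes a full 16-byte chunk but out/from advance only 4,
 * so the next full-chunk store covers bytes 4..19 and the total advance is
 * 4 + 16 = 20 = len. When len < sizeof(chunk_t), the single store still
 * writes a whole chunk, spilling up to sizeof(chunk_t) - 1 bytes past len,
 * which is why callers must guarantee that much slack in the buffer. */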
Z_INTERNAL void slide_hash_rvv(deflate_state *s) {
- Assert(s->w_size <= UINT16_MAX, "w_size should fit in uint16_t");
+ AssertHint(s->w_size <= UINT16_MAX, "w_size should fit in uint16_t");
uint16_t wsize = (uint16_t)s->w_size;
slide_hash_chain(s->head, HASH_SIZE, wsize);
/* DFLTCC-CMPR will write to next_out, so make sure that buffers with
* higher precedence are empty.
*/
- Assert(state->pending == 0, "There must be no pending bytes");
- Assert(state->bi_valid < 8, "There must be less than 8 pending bits");
+ AssertHint(state->pending == 0, "There must be no pending bytes");
+ AssertHint(state->bi_valid < 8, "There must be less than 8 pending bits");
param->sbb = (unsigned int)state->bi_valid;
if (param->sbb > 0)
*strm->next_out = (unsigned char)state->bi_buf;
}
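A concrete scenario may help, assuming sbb is the DFLTCC parameter block's sub-byte boundary field (as this code suggests):

/* Example: if bi_valid == 3, three bits of the next output byte were already
 * produced in software. They are copied from bi_buf into *next_out, and
 * sbb = 3 tells DFLTCC-CMPR to resume emitting at bit offset 3 of that same
 * byte rather than starting a fresh one. */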
Z_INTERNAL void slide_hash_avx2(deflate_state *s) {
- Assert(s->w_size <= UINT16_MAX, "w_size should fit in uint16_t");
+ AssertHint(s->w_size <= UINT16_MAX, "w_size should fit in uint16_t");
uint16_t wsize = (uint16_t)s->w_size;
const __m256i ymm_wsize = _mm256_set1_epi16((short)wsize);
}
Z_INTERNAL void slide_hash_sse2(deflate_state *s) {
- Assert(s->w_size <= UINT16_MAX, "w_size should fit in uint16_t");
+ AssertHint(s->w_size <= UINT16_MAX, "w_size should fit in uint16_t");
uint16_t wsize = (uint16_t)s->w_size;
const __m128i xmm_wsize = _mm_set1_epi16((short)wsize);
reliable. */
#ifndef HAVE_CHUNKCOPY
static inline uint8_t* CHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {
- Assert(len > 0, "chunkcopy should never have a length 0");
+ AssertHint(len > 0, "chunkcopy should never have a length 0");
chunk_t chunk;
int32_t align = ((len - 1) % sizeof(chunk_t)) + 1;
loadchunk(from, &chunk);
static inline uint8_t* CHUNKMEMSET(uint8_t *out, uint8_t *from, unsigned len) {
/* Debug performance-related issues when len < sizeof(uint64_t):
Assert(len >= sizeof(uint64_t), "chunkmemset should be called on larger chunks"); */
- Assert(from != out, "chunkmemset cannot have a distance 0");
+ AssertHint(from != out, "chunkmemset cannot have a distance 0");
chunk_t chunk_load;
uint32_t chunk_mod = 0;
if (s->wrap > 0)
s->wrap = -s->wrap; /* write the trailer only once! */
if (s->pending == 0) {
- Assert(s->bi_valid == 0, "bi_buf not flushed");
+ AssertHint(s->bi_valid == 0, "bi_buf not flushed");
return Z_STREAM_END;
}
return Z_OK;
unsigned int more; /* Amount of free space at the end of the window. */
unsigned int wsize = s->w_size;
- Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");
+ AssertHint(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");
do {
more = s->window_size - s->lookahead - s->strstart;
* Otherwise, window_size == 2*WSIZE so more >= 2.
* If there was sliding, more >= WSIZE. So in all cases, more >= 2.
*/
- Assert(more >= 2, "more < 2");
+ AssertHint(more >= 2, "more < 2");
n = PREFIX(read_buf)(s->strm, s->window + s->strstart + s->lookahead, more);
s->lookahead += n;
}
if (match_len >= WANT_MIN_MATCH) {
- Assert(s->strstart <= UINT16_MAX, "strstart should fit in uint16_t");
- Assert(s->match_start <= UINT16_MAX, "match_start should fit in uint16_t");
+ AssertHint(s->strstart <= UINT16_MAX, "strstart should fit in uint16_t");
+ AssertHint(s->match_start <= UINT16_MAX, "match_start should fit in uint16_t");
check_match(s, (Pos)s->strstart, (Pos)s->match_start, match_len);
bflush = zng_tr_tally_dist(s, s->strstart - s->match_start, match_len - STD_MIN_MATCH);
/* dist: distance of matched string */
/* len: match length-STD_MIN_MATCH */
#ifdef LIT_MEM
- Assert(dist <= UINT16_MAX, "dist should fit in uint16_t");
- Assert(len <= UINT8_MAX, "len should fit in uint8_t");
+ AssertHint(dist <= UINT16_MAX, "dist should fit in uint16_t");
+ AssertHint(len <= UINT8_MAX, "len should fit in uint8_t");
s->d_buf[s->sym_next] = (uint16_t)dist;
s->l_buf[s->sym_next++] = (uint8_t)len;
#else
if (UNLIKELY(match_len > STD_MAX_MATCH))
match_len = STD_MAX_MATCH;
- Assert(s->strstart <= UINT16_MAX, "strstart should fit in uint16_t");
+ AssertHint(s->strstart <= UINT16_MAX, "strstart should fit in uint16_t");
check_match(s, (Pos)s->strstart, hash_head, match_len);
zng_tr_emit_dist(s, static_ltree, static_dtree, match_len - STD_MIN_MATCH, (uint32_t)dist);
/* Emit a match if we have a run of STD_MIN_MATCH or longer, else emit a literal */
if (match_len >= STD_MIN_MATCH) {
- Assert(s->strstart <= UINT16_MAX, "strstart should fit in uint16_t");
+ AssertHint(s->strstart <= UINT16_MAX, "strstart should fit in uint16_t");
check_match(s, (Pos)s->strstart, (Pos)(s->strstart - 1), match_len);
bflush = zng_tr_tally_dist(s, 1, match_len - STD_MIN_MATCH);
unsigned int max_insert = s->strstart + s->lookahead - STD_MIN_MATCH;
/* Do not insert strings in hash table beyond this. */
- Assert((s->strstart-1) <= UINT16_MAX, "strstart-1 should fit in uint16_t");
+ AssertHint((s->strstart-1) <= UINT16_MAX, "strstart-1 should fit in uint16_t");
check_match(s, (Pos)(s->strstart - 1), s->prev_match, s->prev_length);
bflush = zng_tr_tally_dist(s, s->strstart - 1 - s->prev_match, s->prev_length - STD_MIN_MATCH);
* Performance: tzcnt and bsf are identical on Intel CPUs, but tzcnt is faster than bsf on AMD CPUs.
*/
static __forceinline int __builtin_ctz(unsigned int value) {
- Assert(value != 0, "Invalid input value: 0");
+ AssertHint(value != 0, "Invalid input value: 0");
# if defined(X86_FEATURES) && !(_MSC_VER < 1700)
return (int)_tzcnt_u32(value);
# else
* Because of that assumption, trailing_zero is not initialized and the return value is not checked.
*/
static __forceinline int __builtin_ctzll(unsigned long long value) {
- Assert(value != 0, "Invalid input value: 0");
+ AssertHint(value != 0, "Invalid input value: 0");
# if defined(X86_FEATURES) && !(_MSC_VER < 1700)
return (int)_tzcnt_u64(value);
# else
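The fallback branch is elided here, but the comment above about trailing_zero suggests it relies on MSVC's _BitScanForward64; a sketch under that assumption (64-bit targets only):

/* Sketch of the non-tzcnt fallback: the caller guarantees value != 0, so
 * trailing_zero is deliberately left uninitialized and the intrinsic's
 * found/not-found return value is ignored. */
unsigned long trailing_zero;
_BitScanForward64(&trailing_zero, value);
return (int)trailing_zero;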
strm->avail_out = (unsigned)(out < end ? (INFLATE_FAST_MIN_LEFT - 1) + (end - out)
: (INFLATE_FAST_MIN_LEFT - 1) - (out - end));
- Assert(bits <= 32, "Remaining bits greater than 32");
+ AssertHint(bits <= 32, "Remaining bits greater than 32");
state->hold = (uint32_t)hold;
state->bits = bits;
return;
send_code(s, curlen, s->bl_tree, bi_buf, bi_valid);
count--;
}
- Assert(count >= 3 && count <= 6, " 3_6?");
+ AssertHint(count >= 3 && count <= 6, " 3_6?");
send_code(s, REP_3_6, s->bl_tree, bi_buf, bi_valid);
send_bits(s, count-3, 2, bi_buf, bi_valid);
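A worked example of this run-length step (RFC 1951's code 16, which repeats the previous code length 3 to 6 times):

/* Example: a run of 5 repeats emits REP_3_6 followed by the two extra bits
 * count - 3 = 2; the decoder adds the implicit 3 back to recover 5, which
 * is why the assertion pins count to the 3..6 range. */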
static void send_all_trees(deflate_state *s, int lcodes, int dcodes, int blcodes) {
int rank; /* index in bl_order */
- Assert(lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
- Assert(lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, "too many codes");
+ AssertHint(lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
+ AssertHint(lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, "too many codes");
// Temp local variables
uint32_t bi_valid = s->bi_valid;
Z_INTERNAL uint16_t PREFIX(bi_reverse)(unsigned code, int len) {
/* code: the value to invert */
/* len: its bit length */
- Assert(len >= 1 && len <= 15, "code length must be 1-15");
+ AssertHint(len >= 1 && len <= 15, "code length must be 1-15");
#define bitrev8(b) \
(uint8_t)((((uint8_t)(b) * 0x80200802ULL) & 0x0884422110ULL) * 0x0101010101ULL >> 32)
return (bitrev8(code >> 8) | (uint16_t)bitrev8(code) << 8) >> (16 - len);
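The bitrev8 macro is the classic multiply-and-mask byte reversal: the first multiply fans the byte into four shifted copies, the mask keeps each source bit exactly once in its mirrored position, and the second multiply folds the selected bits into bits 32..39, which the final shift extracts. A worked check of the surrounding function:

/* bi_reverse(code = 0x4, len = 3): bitrev8(0x04) = 0x20 (00000100 becomes
 * 00100000) and bitrev8(0x00) = 0x00, so the byte-swapped 16-bit value is
 * 0x2000, and 0x2000 >> (16 - 3) = 0x1: the 3-bit code 100 reversed is 001. */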
/* Send the length code, len is the match length - STD_MIN_MATCH */
code = zng_length_code[lc];
c = code+LITERALS+1;
- Assert(c < L_CODES, "bad l_code");
+ AssertHint(c < L_CODES, "bad l_code");
send_code_trace(s, c);
match_bits = ltree[c].Code;
dist--; /* dist is now the match distance - 1 */
code = d_code(dist);
- Assert(code < D_CODES, "bad d_code");
+ AssertHint(code < D_CODES, "bad d_code");
send_code_trace(s, code);
/* Send the distance code */