#ifdef ARM_NEON
uint32_t adler32_neon(uint32_t adler, const uint8_t *buf, size_t len);
uint32_t adler32_fold_copy_neon(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len);
-uint32_t chunksize_neon(void);
uint8_t* chunkmemset_safe_neon(uint8_t *out, uint8_t *from, unsigned len, unsigned left);
# ifdef HAVE_BUILTIN_CTZLL
# define native_adler32_fold_copy adler32_fold_copy_neon
# undef native_chunkmemset_safe
# define native_chunkmemset_safe chunkmemset_safe_neon
-# undef native_chunksize
-# define native_chunksize chunksize_neon
# undef native_inflate_fast
# define native_inflate_fast inflate_fast_neon
# undef native_slide_hash
uint32_t adler32_fold_copy_c(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len);
uint8_t* chunkmemset_safe_c(uint8_t *out, uint8_t *from, unsigned len, unsigned left);
-uint32_t chunksize_c(void);
uint32_t compare256_c(const uint8_t *src0, const uint8_t *src1);
# define native_adler32 adler32_c
# define native_adler32_fold_copy adler32_fold_copy_c
# define native_chunkmemset_safe chunkmemset_safe_c
-# define native_chunksize chunksize_c
# define native_crc32 crc32_c
# define native_crc32_fold crc32_fold_c
# define native_crc32_fold_copy crc32_fold_copy_c
uint32_t longest_match_lsx(deflate_state *const s, Pos cur_match);
uint32_t longest_match_slow_lsx(deflate_state *const s, Pos cur_match);
# endif
-uint32_t chunksize_lsx(void);
uint8_t* chunkmemset_safe_lsx(uint8_t *out, uint8_t *from, unsigned len, unsigned left);
void inflate_fast_lsx(PREFIX3(stream) *strm, uint32_t start);
#endif
uint32_t longest_match_lasx(deflate_state *const s, Pos cur_match);
uint32_t longest_match_slow_lasx(deflate_state *const s, Pos cur_match);
# endif
-uint32_t chunksize_lasx(void);
uint8_t* chunkmemset_safe_lasx(uint8_t *out, uint8_t *from, unsigned len, unsigned left);
void inflate_fast_lasx(PREFIX3(stream) *strm, uint32_t start);
#endif
# define native_adler32_fold_copy adler32_fold_copy_lsx
# undef native_slide_hash
# define native_slide_hash slide_hash_lsx
-# undef native_chunksize
-# define native_chunksize chunksize_lsx
# undef native_chunkmemset_safe
# define native_chunkmemset_safe chunkmemset_safe_lsx
# undef native_inflate_fast
# define native_adler32_fold_copy adler32_fold_copy_lasx
# undef native_slide_hash
# define native_slide_hash slide_hash_lasx
-# undef native_chunksize
-# define native_chunksize chunksize_lasx
# undef native_chunkmemset_safe
# define native_chunkmemset_safe chunkmemset_safe_lasx
# undef native_inflate_fast
#ifdef POWER8_VSX
uint32_t adler32_power8(uint32_t adler, const uint8_t *buf, size_t len);
-uint32_t chunksize_power8(void);
uint8_t* chunkmemset_safe_power8(uint8_t *out, uint8_t *from, unsigned len, unsigned left);
uint32_t crc32_power8(uint32_t crc, const uint8_t *buf, size_t len);
void slide_hash_power8(deflate_state *s);
# define native_adler32 adler32_power8
# undef native_chunkmemset_safe
# define native_chunkmemset_safe chunkmemset_safe_power8
-# undef native_chunksize
-# define native_chunksize chunksize_power8
# undef native_inflate_fast
# define native_inflate_fast inflate_fast_power8
# undef native_slide_hash
#ifdef RISCV_RVV
uint32_t adler32_rvv(uint32_t adler, const uint8_t *buf, size_t len);
uint32_t adler32_fold_copy_rvv(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len);
-uint32_t chunksize_rvv(void);
uint8_t* chunkmemset_safe_rvv(uint8_t *out, uint8_t *from, unsigned len, unsigned left);
uint32_t compare256_rvv(const uint8_t *src0, const uint8_t *src1);
# define native_adler32_fold_copy adler32_fold_copy_rvv
# undef native_chunkmemset_safe
# define native_chunkmemset_safe chunkmemset_safe_rvv
-# undef native_chunksize
-# define native_chunksize chunksize_rvv
# undef native_compare256
# define native_compare256 compare256_rvv
# undef native_inflate_fast
#endif
#ifdef X86_SSE2
-uint32_t chunksize_sse2(void);
uint8_t* chunkmemset_safe_sse2(uint8_t *out, uint8_t *from, unsigned len, unsigned left);
# ifdef HAVE_BUILTIN_CTZ
#ifdef X86_AVX2
uint32_t adler32_avx2(uint32_t adler, const uint8_t *buf, size_t len);
uint32_t adler32_fold_copy_avx2(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len);
-uint32_t chunksize_avx2(void);
uint8_t* chunkmemset_safe_avx2(uint8_t *out, uint8_t *from, unsigned len, unsigned left);
# ifdef HAVE_BUILTIN_CTZ
#ifdef X86_AVX512
uint32_t adler32_avx512(uint32_t adler, const uint8_t *buf, size_t len);
uint32_t adler32_fold_copy_avx512(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len);
-uint32_t chunksize_avx512(void);
uint8_t* chunkmemset_safe_avx512(uint8_t *out, uint8_t *from, unsigned len, unsigned left);
void inflate_fast_avx512(PREFIX3(stream)* strm, uint32_t start);
# ifdef HAVE_BUILTIN_CTZLL
# if (defined(X86_SSE2) && defined(__SSE2__)) || defined(__x86_64__) || defined(_M_X64) || defined(X86_NOCHECK_SSE2)
# undef native_chunkmemset_safe
# define native_chunkmemset_safe chunkmemset_safe_sse2
-# undef native_chunksize
-# define native_chunksize chunksize_sse2
# undef native_inflate_fast
# define native_inflate_fast inflate_fast_sse2
# undef native_slide_hash
# define native_adler32_fold_copy adler32_fold_copy_avx2
# undef native_chunkmemset_safe
# define native_chunkmemset_safe chunkmemset_safe_avx2
-# undef native_chunksize
-# define native_chunksize chunksize_avx2
# undef native_inflate_fast
# define native_inflate_fast inflate_fast_avx2
# undef native_slide_hash
# define native_adler32_fold_copy adler32_fold_copy_avx512
# undef native_chunkmemset_safe
# define native_chunkmemset_safe chunkmemset_safe_avx512
-# undef native_chunksize
-# define native_chunksize chunksize_avx512
# undef native_inflate_fast
# define native_inflate_fast inflate_fast_avx512
# ifdef HAVE_BUILTIN_CTZLL
#include <stdlib.h>
/* Returns the chunk size in bytes */
-Z_INTERNAL uint32_t CHUNKSIZE(void) {
+static inline size_t CHUNKSIZE(void) {
return sizeof(chunk_t);
}
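/*
 * Minimal sketch of the effect (all names below are illustrative, not from the patch):
 * with the functable pointer gone, CHUNKSIZE() is an inline over sizeof(chunk_t), so the
 * compiler can fold it to a per-architecture constant, e.g. assuming the SSE2 template
 * typedefs chunk_t to __m128i.
 */
#include <stddef.h>
#include <emmintrin.h>                      /* __m128i, for illustration only */
typedef __m128i example_chunk_t;            /* hypothetical stand-in for the arch's chunk_t */
static inline size_t example_chunksize(void) {
    return sizeof(example_chunk_t);         /* 16 bytes, resolved at compile time */
}
static inline int example_use_chunkcopy(size_t dist, size_t len) {
    /* mirrors the dist >= CHUNKSIZE() test in the inflate fast path further below */
    return dist >= len || dist >= example_chunksize();
}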
ft.adler32 = &adler32_c;
ft.adler32_fold_copy = &adler32_fold_copy_c;
ft.chunkmemset_safe = &chunkmemset_safe_c;
- ft.chunksize = &chunksize_c;
ft.crc32 = &crc32_c;
ft.crc32_fold = &crc32_fold_c;
ft.crc32_fold_copy = &crc32_fold_copy_c;
# endif
{
ft.chunkmemset_safe = &chunkmemset_safe_sse2;
- ft.chunksize = &chunksize_sse2;
#if !defined(WITHOUT_CHORBA) && !defined(NO_CHORBA_SSE)
ft.crc32 = &crc32_chorba_sse2;
#endif
ft.adler32 = &adler32_avx2;
ft.adler32_fold_copy = &adler32_fold_copy_avx2;
ft.chunkmemset_safe = &chunkmemset_safe_avx2;
- ft.chunksize = &chunksize_avx2;
ft.inflate_fast = &inflate_fast_avx2;
ft.slide_hash = &slide_hash_avx2;
# ifdef HAVE_BUILTIN_CTZ
ft.adler32 = &adler32_avx512;
ft.adler32_fold_copy = &adler32_fold_copy_avx512;
ft.chunkmemset_safe = &chunkmemset_safe_avx512;
- ft.chunksize = &chunksize_avx512;
ft.inflate_fast = &inflate_fast_avx512;
# ifdef HAVE_BUILTIN_CTZLL
ft.compare256 = &compare256_avx512;
ft.adler32 = &adler32_neon;
ft.adler32_fold_copy = &adler32_fold_copy_neon;
ft.chunkmemset_safe = &chunkmemset_safe_neon;
- ft.chunksize = &chunksize_neon;
ft.inflate_fast = &inflate_fast_neon;
ft.slide_hash = &slide_hash_neon;
# ifdef HAVE_BUILTIN_CTZLL
if (cf.power.has_arch_2_07) {
ft.adler32 = &adler32_power8;
ft.chunkmemset_safe = &chunkmemset_safe_power8;
- ft.chunksize = &chunksize_power8;
ft.inflate_fast = &inflate_fast_power8;
ft.slide_hash = &slide_hash_power8;
}
ft.adler32 = &adler32_rvv;
ft.adler32_fold_copy = &adler32_fold_copy_rvv;
ft.chunkmemset_safe = &chunkmemset_safe_rvv;
- ft.chunksize = &chunksize_rvv;
ft.compare256 = &compare256_rvv;
ft.inflate_fast = &inflate_fast_rvv;
ft.longest_match = &longest_match_rvv;
ft.longest_match = &longest_match_lsx;
ft.longest_match_slow = &longest_match_slow_lsx;
# endif
- ft.chunksize = &chunksize_lsx;
ft.chunkmemset_safe = &chunkmemset_safe_lsx;
ft.inflate_fast = &inflate_fast_lsx;
}
ft.longest_match = &longest_match_lasx;
ft.longest_match_slow = &longest_match_slow_lasx;
# endif
- ft.chunksize = &chunksize_lasx;
ft.chunkmemset_safe = &chunkmemset_safe_lasx;
ft.inflate_fast = &inflate_fast_lasx;
}
FUNCTABLE_ASSIGN(ft, adler32);
FUNCTABLE_ASSIGN(ft, adler32_fold_copy);
FUNCTABLE_ASSIGN(ft, chunkmemset_safe);
- FUNCTABLE_ASSIGN(ft, chunksize);
FUNCTABLE_ASSIGN(ft, compare256);
FUNCTABLE_ASSIGN(ft, crc32);
FUNCTABLE_ASSIGN(ft, crc32_fold);
return functable.chunkmemset_safe(out, from, len, left);
}
-static uint32_t chunksize_stub(void) {
- init_functable();
- return functable.chunksize();
-}
-
static uint32_t compare256_stub(const uint8_t* src0, const uint8_t* src1) {
init_functable();
return functable.compare256(src0, src1);
adler32_stub,
adler32_fold_copy_stub,
chunkmemset_safe_stub,
- chunksize_stub,
compare256_stub,
crc32_stub,
crc32_fold_stub,
uint32_t (* adler32) (uint32_t adler, const uint8_t *buf, size_t len);
uint32_t (* adler32_fold_copy) (uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len);
uint8_t* (* chunkmemset_safe) (uint8_t *out, uint8_t *from, unsigned len, unsigned left);
- uint32_t (* chunksize) (void);
uint32_t (* compare256) (const uint8_t *src0, const uint8_t *src1);
uint32_t (* crc32) (uint32_t crc, const uint8_t *buf, size_t len);
void (* crc32_fold) (struct crc32_fold_s *crc, const uint8_t *src, size_t len, uint32_t init_crc);
state->window = window;
state->wnext = 0;
state->whave = 0;
- state->chunksize = FUNCTABLE_CALL(chunksize)();
#ifdef INFLATE_STRICT
state->dmax = 32768U;
#endif
so unroll and roundoff operations can write beyond `out+len` so long
as they stay within 258 bytes of `out`.
*/
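    /* Note (inferred from this code, not stated in the patch): CHUNKCOPY stores whole
       chunk_t-sized blocks, so it is only taken when the back-reference distance spans at
       least one chunk and no load from `out - dist` can overlap bytes just written; shorter
       distances fall through to CHUNKMEMSET, which replicates the short pattern instead. */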
- if (dist >= len || dist >= state->chunksize)
+ if (dist >= len || dist >= CHUNKSIZE())
out = CHUNKCOPY(out, out - dist, len);
else
out = CHUNKMEMSET(out, out - dist, len);
strm->state = (struct internal_state *)state;
state->strm = strm;
state->mode = HEAD; /* to pass state test in inflateReset2() */
- state->chunksize = FUNCTABLE_CALL(chunksize)();
ret = PREFIX(inflateReset2)(strm, windowBits);
if (ret != Z_OK) {
free_inflate(strm);
uint32_t whave; /* valid bytes in the window */
uint32_t wnext; /* window write index */
unsigned char *window; /* allocated sliding window, if needed */
- uint32_t chunksize; /* size of memory copying chunk */
/* bit accumulator */
uint64_t hold; /* input bit accumulator */