static inline void chunkmemset_2(uint8_t *from, chunk_t *chunk) {
uint16_t tmp;
- zmemcpy_2(&tmp, from);
+ memcpy(&tmp, from, sizeof(tmp));
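+ /* A fixed-size memcpy is lowered to a single load by modern compilers, without the
+  * unaligned-dereference hazards of the old macro; vdupq_n_u16 below then broadcasts
+  * the loaded value into every 16-bit lane. */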
*chunk = vreinterpretq_u8_u16(vdupq_n_u16(tmp));
}
static inline void chunkmemset_4(uint8_t *from, chunk_t *chunk) {
uint32_t tmp;
- zmemcpy_4(&tmp, from);
+ memcpy(&tmp, from, sizeof(tmp));
*chunk = vreinterpretq_u8_u32(vdupq_n_u32(tmp));
}
static inline void chunkmemset_8(uint8_t *from, chunk_t *chunk) {
uint64_t tmp;
- zmemcpy_8(&tmp, from);
+ memcpy(&tmp, from, sizeof(tmp));
*chunk = vreinterpretq_u8_u64(vdupq_n_u64(tmp));
}
/* See note in chunkset_sse41.c for why this is ok */
__msan_unpoison(buf + dist, 16 - dist);
#endif
-
+
/* This version of the table is only available on aarch64 */
#if defined(_M_ARM64) || defined(__aarch64__)
uint8x16_t ret_vec = vld1q_u8(buf);
static inline void chunkmemset_2(uint8_t *from, chunk_t *chunk) {
uint16_t tmp;
- zmemcpy_2(&tmp, from);
+ memcpy(&tmp, from, sizeof(tmp));
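+ /* vec_splats below replicates the 16-bit value into every element of the VSX register. */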
*chunk = (vector unsigned char)vec_splats(tmp);
}
static inline void chunkmemset_4(uint8_t *from, chunk_t *chunk) {
uint32_t tmp;
- zmemcpy_4(&tmp, from);
+ memcpy(&tmp, from, sizeof(tmp));
*chunk = (vector unsigned char)vec_splats(tmp);
}
static inline void chunkmemset_8(uint8_t *from, chunk_t *chunk) {
uint64_t tmp;
- zmemcpy_8(&tmp, from);
+ memcpy(&tmp, from, sizeof(tmp));
*chunk = (vector unsigned char)vec_splats(tmp);
}
static inline void chunkmemset_2(uint8_t *from, chunk_t *chunk) {
int16_t tmp;
- zmemcpy_2(&tmp, from);
+ memcpy(&tmp, from, sizeof(tmp));
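+ /* _mm256_set1_epi16 broadcasts tmp into all sixteen 16-bit lanes of the 256-bit chunk. */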
*chunk = _mm256_set1_epi16(tmp);
}
static inline void chunkmemset_4(uint8_t *from, chunk_t *chunk) {
int32_t tmp;
- zmemcpy_4(&tmp, from);
+ memcpy(&tmp, from, sizeof(tmp));
*chunk = _mm256_set1_epi32(tmp);
}
static inline void chunkmemset_8(uint8_t *from, chunk_t *chunk) {
int64_t tmp;
- zmemcpy_8(&tmp, from);
+ memcpy(&tmp, from, sizeof(tmp));
*chunk = _mm256_set1_epi64x(tmp);
}
static inline void chunkmemset_2(uint8_t *from, chunk_t *chunk) {
int16_t tmp;
- zmemcpy_2(&tmp, from);
+ memcpy(&tmp, from, sizeof(tmp));
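+ /* _mm_set1_epi16 broadcasts tmp into all eight 16-bit lanes of the 128-bit chunk. */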
*chunk = _mm_set1_epi16(tmp);
}
static inline void chunkmemset_4(uint8_t *from, chunk_t *chunk) {
int32_t tmp;
- zmemcpy_4(&tmp, from);
+ memcpy(&tmp, from, sizeof(tmp));
*chunk = _mm_set1_epi32(tmp);
}
static inline void chunkmemset_8(uint8_t *from, chunk_t *chunk) {
int64_t tmp;
- zmemcpy_8(&tmp, from);
+ memcpy(&tmp, from, sizeof(tmp));
*chunk = _mm_set1_epi64x(tmp);
}
static inline void chunkmemset_2(uint8_t *from, chunk_t *chunk) {
int16_t tmp;
- zmemcpy_2(&tmp, from);
+ memcpy(&tmp, from, sizeof(tmp));
*chunk = _mm_set1_epi16(tmp);
}
static inline void chunkmemset_4(uint8_t *from, chunk_t *chunk) {
int32_t tmp;
- zmemcpy_4(&tmp, from);
+ memcpy(&tmp, from, sizeof(tmp));
*chunk = _mm_set1_epi32(tmp);
}
static inline void chunkmemset_8(uint8_t *from, chunk_t *chunk) {
int64_t tmp;
- zmemcpy_8(&tmp, from);
+ memcpy(&tmp, from, sizeof(tmp));
*chunk = _mm_set1_epi64x(tmp);
}
lut_rem_pair lut_rem = perm_idx_lut[dist - 3];
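+ /* perm_idx_lut is indexed from dist == 3; shorter distances are presumably served by the splat helpers above. */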
__m128i perm_vec, ret_vec;
#ifdef Z_MEMORY_SANITIZER
- /* Important to note:
+ /* Important to note:
* This is _not_ to subvert the memory sanitizer but to instead unpoison some
* bytes we willingly and purposefully load uninitialized that we swizzle over
* in a vector register, anyway. If what we assume is wrong about what is used,
static inline void chunkmemset_4(uint8_t *from, chunk_t *chunk) {
uint8_t *dest = (uint8_t *)chunk;
- zmemcpy_4(dest, from);
- zmemcpy_4(dest+4, from);
+ memcpy(dest, from, sizeof(uint32_t));
+ memcpy(dest+4, from, sizeof(uint32_t));
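+ /* chunk_t is 64 bits wide in this generic code (see loadchunk/storechunk below), so the 4-byte pattern is stored twice to fill the chunk. */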
}
static inline void chunkmemset_8(uint8_t *from, chunk_t *chunk) {
- zmemcpy_8(chunk, from);
+ memcpy(chunk, from, sizeof(uint64_t));
}
static inline void loadchunk(uint8_t const *s, chunk_t *chunk) {
- zmemcpy_8(chunk, (uint8_t *)s);
+ memcpy(chunk, s, sizeof(uint64_t));
}
static inline void storechunk(uint8_t *out, chunk_t *chunk) {
- zmemcpy_8(out, chunk);
+ memcpy(out, chunk, sizeof(uint64_t));
}
#define CHUNKSIZE chunksize_c
do {
uint32_t sv, mv, diff;
- zmemcpy_4(&sv, src0);
- zmemcpy_4(&mv, src1);
+ memcpy(&sv, src0, sizeof(sv));
+ memcpy(&mv, src1, sizeof(mv));
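+ /* The XOR below leaves diff nonzero exactly when the two 32-bit blocks differ; the
+  * position of its first set bit (after byte-order adjustment) gives the first
+  * mismatching byte. */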
diff = sv ^ mv;
if (diff) {
do {
uint64_t sv, mv, diff;
- zmemcpy_8(&sv, src0);
- zmemcpy_8(&mv, src1);
+ memcpy(&sv, src0, sizeof(sv));
+ memcpy(&mv, src1, sizeof(mv));
diff = sv ^ mv;
if (diff) {
#if BYTE_ORDER == BIG_ENDIAN
w = ZSWAP16(w);
#endif
- zmemcpy_2(&s->pending_buf[s->pending], &w);
+ memcpy(&s->pending_buf[s->pending], &w, sizeof(w));
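+ /* The conditional swap above makes the stored byte order little-endian on any host;
+  * the MSB variant and the 32- and 64-bit writers below follow the same pattern. */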
s->pending += 2;
}
#if BYTE_ORDER == LITTLE_ENDIAN
w = ZSWAP16(w);
#endif
- zmemcpy_2(&s->pending_buf[s->pending], &w);
+ memcpy(&s->pending_buf[s->pending], &w, sizeof(w));
s->pending += 2;
}
#if BYTE_ORDER == BIG_ENDIAN
dw = ZSWAP32(dw);
#endif
- zmemcpy_4(&s->pending_buf[s->pending], &dw);
+ memcpy(&s->pending_buf[s->pending], &dw, sizeof(dw));
s->pending += 4;
}
#if BYTE_ORDER == LITTLE_ENDIAN
dw = ZSWAP32(dw);
#endif
- zmemcpy_4(&s->pending_buf[s->pending], &dw);
+ memcpy(&s->pending_buf[s->pending], &dw, sizeof(dw));
s->pending += 4;
}
#if BYTE_ORDER == BIG_ENDIAN
lld = ZSWAP64(lld);
#endif
- zmemcpy_8(&s->pending_buf[s->pending], &lld);
+ memcpy(&s->pending_buf[s->pending], &lld, sizeof(lld));
s->pending += 8;
}
/* Load 64 bits from IN and place the bytes at offset BITS in the result. */
static inline uint64_t load_64_bits(const unsigned char *in, unsigned bits) {
uint64_t chunk;
- zmemcpy_8(&chunk, in);
+ memcpy(&chunk, in, sizeof(chunk));
#if BYTE_ORDER == LITTLE_ENDIAN
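+ /* The little-endian load put in[0] in the low byte of chunk, so shifting left by 'bits' places it at that bit offset. */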
return chunk << bits;
}
if (tocopy >= 8) {
- zmemcpy_8(out, from);
+ memcpy(out, from, 8);
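+ /* The guard guarantees at least 8 bytes remain, so a fixed 8-byte copy is safe; the 4- and 2-byte steps below drain the rest of the tail. */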
out += 8;
from += 8;
tocopy -= 8;
}
if (tocopy >= 4) {
- zmemcpy_4(out, from);
+ memcpy(out, from, 4);
out += 4;
from += 4;
tocopy -= 4;
}
if (tocopy >= 2) {
- zmemcpy_2(out, from);
+ memcpy(out, from, 2);
out += 2;
from += 2;
tocopy -= 2;
#ifndef HASH_CALC_READ
# if BYTE_ORDER == LITTLE_ENDIAN
# define HASH_CALC_READ \
- zmemcpy_4(&val, strstart);
+ memcpy(&val, strstart, sizeof(val));
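+/* The big-endian branch below assembles val byte by byte, presumably so the hash sees the same value as this little-endian load. */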
# else
# define HASH_CALC_READ \
val = ((uint32_t)(strstart[0])); \
#endif
#ifdef UNALIGNED64_OK
- zmemcpy_8(scan_start, scan);
- zmemcpy_8(scan_end, scan+offset);
+ memcpy(scan_start, scan, sizeof(uint64_t));
+ memcpy(scan_end, scan+offset, sizeof(uint64_t));
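+ /* Snapshot the first and last bytes of the current best match; candidate matches can
+  * then be rejected with two cheap compares before a full byte-by-byte walk. */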
#elif defined(UNALIGNED_OK)
- zmemcpy_4(scan_start, scan);
- zmemcpy_4(scan_end, scan+offset);
+ memcpy(scan_start, scan, sizeof(uint32_t));
+ memcpy(scan_end, scan+offset, sizeof(uint32_t));
#else
scan_end[0] = *(scan+offset);
scan_end[1] = *(scan+offset+1);
#endif
#ifdef UNALIGNED64_OK
- zmemcpy_8(scan_end, scan+offset);
+ memcpy(scan_end, scan+offset, sizeof(uint64_t));
#elif defined(UNALIGNED_OK)
- zmemcpy_4(scan_end, scan+offset);
+ memcpy(scan_end, scan+offset, sizeof(uint32_t));
#else
scan_end[0] = *(scan+offset);
scan_end[1] = *(scan+offset+1);
# endif
#endif
-/* Force compiler to emit unaligned memory accesses if unaligned access is supported
+/* Force the compiler to emit unaligned memory comparisons when unaligned access is supported
on the architecture, otherwise don't assume unaligned access is supported. Older
- compilers don't optimize memcpy and memcmp calls to unaligned access instructions
- when it is supported on the architecture resulting in significant performance impact.
- Newer compilers might optimize memcpy but not all optimize memcmp for all integer types. */
+   compilers don't optimize memcmp calls for all integer types into unaligned access
+   instructions even when the architecture supports them, resulting in a significant
+   performance impact. */
#ifdef UNALIGNED_OK
-# define zmemcpy_2(dest, src) (*((uint16_t *)(dest)) = *((uint16_t *)(src)))
# define zmemcmp_2(str1, str2) (*((uint16_t *)(str1)) != *((uint16_t *)(str2)))
-# define zmemcpy_4(dest, src) (*((uint32_t *)(dest)) = *((uint32_t *)(src)))
# define zmemcmp_4(str1, str2) (*((uint32_t *)(str1)) != *((uint32_t *)(str2)))
# if defined(UNALIGNED64_OK) && (UINTPTR_MAX == UINT64_MAX)
-# define zmemcpy_8(dest, src) (*((uint64_t *)(dest)) = *((uint64_t *)(src)))
# define zmemcmp_8(str1, str2) (*((uint64_t *)(str1)) != *((uint64_t *)(str2)))
# else
-# define zmemcpy_8(dest, src) (((uint32_t *)(dest))[0] = ((uint32_t *)(src))[0], \
- ((uint32_t *)(dest))[1] = ((uint32_t *)(src))[1])
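+/* Without 64-bit unaligned access (or with 32-bit pointers), the 8-byte compare is split into two 32-bit halves. */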
# define zmemcmp_8(str1, str2) (((uint32_t *)(str1))[0] != ((uint32_t *)(str2))[0] || \
((uint32_t *)(str1))[1] != ((uint32_t *)(str2))[1])
# endif
#else
-# define zmemcpy_2(dest, src) memcpy(dest, src, 2)
# define zmemcmp_2(str1, str2) memcmp(str1, str2, 2)
-# define zmemcpy_4(dest, src) memcpy(dest, src, 4)
# define zmemcmp_4(str1, str2) memcmp(str1, str2, 4)
-# define zmemcpy_8(dest, src) memcpy(dest, src, 8)
# define zmemcmp_8(str1, str2) memcmp(str1, str2, 8)
#endif