lut_rem_pair lut_rem = perm_idx_lut[dist - 3];
*chunk_rem = lut_rem.remval;
-#ifdef Z_MEMORY_SANITIZER
/* See note in chunkset_ssse3.c for why this is ok */
__msan_unpoison(buf + dist, 16 - dist);
-#endif
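/* Only bytes [buf, buf + dist) hold real history here; the call above marks the
 * tail [buf + dist, buf + 16) as defined so that the full 16 byte vector load
 * that follows does not trip the sanitizer. */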
/* This version of the table is only available on aarch64 */
#if defined(_M_ARM64) || defined(__aarch64__)
unsigned char **op1, size_t *len1,
z_const unsigned char **op2, size_t *len2, void *hist) {
unsigned char *t2 = op1 ? *op1 : NULL;
-#ifdef Z_MEMORY_SANITIZER
unsigned char *orig_t2 = t2;
-#endif
size_t t3 = len1 ? *len1 : 0;
z_const unsigned char *t4 = op2 ? *op2 : NULL;
size_t t5 = len2 ? *len2 : 0;
: "cc", "memory");
t2 = r2; t3 = r3; t4 = r4; t5 = r5;
-#ifdef Z_MEMORY_SANITIZER
switch (fn & DFLTCC_FN_MASK) {
case DFLTCC_QAF:
__msan_unpoison(param, DFLTCC_SIZEOF_QAF);
__msan_unpoison(orig_t2, t2 - orig_t2);
break;
}
-#endif
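    /* Stores done by the inline asm above are invisible to MSan, so the switch
     * marks the buffers the instruction filled in as defined by hand.  orig_t2
     * was saved before the call and t2 is reloaded from the output register
     * afterwards, so [orig_t2, t2) covers exactly the output bytes the hardware
     * wrote. */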
if (op1)
*op1 = t2;
* GPRs to begin with the 256 bit load is _probably_ just as inexpensive */
*chunk_rem = lut_rem.remval;
-#ifdef Z_MEMORY_SANITIZER
/* See note in chunkset_ssse3.c for why this is ok */
__msan_unpoison(buf + dist, 32 - dist);
-#endif
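/* Note (intent inferred from the comments in this hunk, not stated in the patch):
 * _mm256_shuffle_epi8 (vpshufb) permutes bytes within each 128 bit lane
 * independently and cannot move bytes across lanes, which is why the dist < 16
 * case below still shuffles in 128 bit lanes and then compensates with a static
 * offset. */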
if (dist < 16) {
/* This simpler case still requires us to shuffle in 128 bit lanes, so we must apply a static offset after
static inline chunk_t GET_CHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) {
lut_rem_pair lut_rem = perm_idx_lut[dist - 3];
__m128i perm_vec, ret_vec;
-#ifdef Z_MEMORY_SANITIZER
/* Important to note:
 * This is _not_ meant to subvert the memory sanitizer; it unpoisons bytes that
 * we willingly and purposefully load uninitialized and then swizzle away in a
 * vector register anyway. If our assumption about which bytes are actually
 * used turns out to be wrong, the memory sanitizer will still usefully flag it */
__msan_unpoison(buf + dist, 16 - dist);
-#endif
ret_vec = _mm_loadu_si128((__m128i*)buf);
*chunk_rem = lut_rem.remval;
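/* The 16 byte load above reads dist initialized bytes plus (16 - dist)
 * uninitialized ones; the permute indices chosen for this dist only ever
 * reference bytes 0..dist-1, so the uninitialized tail gets swizzled away and
 * never reaches the output; that is what makes the unpoison call sound. */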
# endif
#endif
+#ifndef Z_MEMORY_SANITIZER
+# define __msan_unpoison(a, size) do { Z_UNUSED(a); Z_UNUSED(size); } while (0)
+#endif
+
#endif
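
/* Usage sketch (hypothetical call site, not part of the patch): with the
 * fallback above in place, __msan_unpoison can be called unconditionally.
 * Under MSan it forwards to the real sanitizer interface; in a normal build it
 * expands to the Z_UNUSED no-op, so the per-site #ifdef guards removed in the
 * hunks above are no longer needed. */
static inline void mark_chunk_tail_defined(unsigned char *buf, size_t dist, size_t width) {
    /* bytes [buf, buf + dist) hold real history; [buf + dist, buf + width) do not */
    __msan_unpoison(buf + dist, width - dist);
}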