state->total_len += len;
if (state->memsize + len < 32) { /* fill in tmp buffer */
- XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
+ if (input != NULL) {
+ XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
+ }
state->memsize += (U32)len;
return XXH_OK;
}
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
- BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
- /* bounds checks */
- assert(oLitEnd < oMatchEnd);
- RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must fit within dstBuffer");
- RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "try to read beyond literal buffer");
+ /* bounds checks : careful of address space overflow in 32-bit mode */
+ RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
+ RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
+ assert(op < op + sequenceLength);
+ assert(oLitEnd < op + sequenceLength);
/* copy literals */
ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
- BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; /* risk : address space underflow on oend=NULL */
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
- /* Errors and uncommon cases handled here. */
- assert(oLitEnd < oMatchEnd);
- if (UNLIKELY(iLitEnd > litLimit || oMatchEnd > oend_w))
+ assert(op != NULL /* Precondition */);
+ assert(oend_w < oend /* No underflow */);
+ /* Handle edge cases in a slow path:
+ * - Read beyond end of literals
+ * - Match end is within WILDCOPY_OVERLENGTH of oend
+ * - 32-bit mode and the match length overflows
+ */
+ if (UNLIKELY(
+ iLitEnd > litLimit ||
+ oMatchEnd > oend_w ||
+ (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
/* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
+ assert(op <= oLitEnd /* No overflow */);
+ assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
+ assert(oMatchEnd <= oend /* No underflow */);
assert(iLitEnd <= litLimit /* Literal length is in bounds */);
assert(oLitEnd <= oend_w /* Can wildcopy literals */);
assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+ assert(dst != NULL);
ZSTD_STATIC_ASSERT(
BIT_DStream_unfinished < BIT_DStream_completed &&
/* last literal segment */
{ size_t const lastLLSize = litEnd - litPtr;
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
- memcpy(op, litPtr, lastLLSize);
- op += lastLLSize;
+ if (op != NULL) {
+ memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
}
return op-ostart;
seqState.prefixStart = prefixStart;
seqState.pos = (size_t)(op-prefixStart);
seqState.dictEnd = dictEnd;
+ assert(dst != NULL);
assert(iend >= ip);
RETURN_ERROR_IF(
ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
/* last literal segment */
{ size_t const lastLLSize = litEnd - litPtr;
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
- memcpy(op, litPtr, lastLLSize);
- op += lastLLSize;
+ if (op != NULL) {
+ memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
}
return op-ostart;
}
#endif
-
size_t
ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
ip += seqHSize;
srcSize -= seqHSize;
+ RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled");
+
#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
!defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
if ( !usePrefetchDecoder
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const omax = op + maxDstSize;
- BYTE* const olimit = omax-15;
+ BYTE* const olimit = maxDstSize < 15 ? op : omax-15;
const void* ptr = DTable;
const HUF_DElt* const dt = (const HUF_DElt*)(ptr)+1;
static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
- memcpy(dst, src, srcSize);
+ if (srcSize > 0) {
+ memcpy(dst, src, srcSize);
+ }
return srcSize;
}
size_t rleSize = litbp.origSize;
if (rleSize>maxDstSize) return ERROR(dstSize_tooSmall);
if (!srcSize) return ERROR(srcSize_wrong);
- memset(oend - rleSize, *ip, rleSize);
+ if (rleSize > 0) {
+ memset(oend - rleSize, *ip, rleSize);
+ }
*litStart = oend - rleSize;
*litSize = rleSize;
ip++;
{
size_t lastLLSize = litEnd - litPtr;
if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
- if (op != litPtr) memmove(op, litPtr, lastLLSize);
- op += lastLLSize;
+ if (lastLLSize > 0) {
+ if (op != litPtr) memmove(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
}
}
static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
- memcpy(dst, src, srcSize);
+ if (srcSize > 0) {
+ memcpy(dst, src, srcSize);
+ }
return srcSize;
}
size_t lastLLSize = litEnd - litPtr;
if (litPtr > litEnd) return ERROR(corruption_detected);
if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
- if (op != litPtr) memmove(op, litPtr, lastLLSize);
- op += lastLLSize;
+ if (lastLLSize > 0) {
+ if (op != litPtr) memmove(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
}
}
static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
- memcpy(dst, src, srcSize);
+ if (srcSize > 0) {
+ memcpy(dst, src, srcSize);
+ }
return srcSize;
}
size_t lastLLSize = litEnd - litPtr;
if (litPtr > litEnd) return ERROR(corruption_detected);
if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
- if (op != litPtr) memmove(op, litPtr, lastLLSize);
- op += lastLLSize;
+ if (lastLLSize > 0) {
+ if (op != litPtr) memmove(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
}
}
static size_t ZSTD_copyRawBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
- memcpy(dst, src, srcSize);
+ if (srcSize > 0) {
+ memcpy(dst, src, srcSize);
+ }
return srcSize;
}
size_t lastLLSize = litEnd - litPtr;
if (litPtr > litEnd) return ERROR(corruption_detected);
if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
- if (op != litPtr) memcpy(op, litPtr, lastLLSize);
- op += lastLLSize;
+ if (lastLLSize > 0) {
+ if (op != litPtr) memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
}
}
size_t lastLLSize = litEnd - litPtr;
if (litPtr > litEnd) return ERROR(corruption_detected); /* too many literals already used */
if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
- memcpy(op, litPtr, lastLLSize);
- op += lastLLSize;
+ if (lastLLSize > 0) {
+ memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
}
return op-ostart;
{ size_t const lastLLSize = litEnd - litPtr;
if (litPtr > litEnd) return ERROR(corruption_detected); /* too many literals already used */
if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
- memcpy(op, litPtr, lastLLSize);
- op += lastLLSize;
+ if (lastLLSize > 0) {
+ memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
}
return op-ostart;
static size_t ZSTDv07_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
- memcpy(dst, src, srcSize);
+ if (srcSize > 0) {
+ memcpy(dst, src, srcSize);
+ }
return srcSize;
}
{ size_t const lastLLSize = litEnd - litPtr;
/* if (litPtr > litEnd) return ERROR(corruption_detected); */ /* too many literals already used */
if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
- memcpy(op, litPtr, lastLLSize);
- op += lastLLSize;
+ if (lastLLSize > 0) {
+ memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
}
return op-ostart;
static size_t ZSTDv07_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)
{
if (length > dstCapacity) return ERROR(dstSize_tooSmall);
- memset(dst, byte, length);
+ if (length > 0) {
+ memset(dst, byte, length);
+ }
return length;
}
FUZZ_TARGET_FLAGS = $(FUZZ_CPPFLAGS) $(FUZZ_CXXFLAGS) $(FUZZ_LDFLAGS)
FUZZ_HEADERS := fuzz_helpers.h fuzz.h zstd_helpers.h fuzz_data_producer.h
-FUZZ_SRC := $(PRGDIR)/util.c zstd_helpers.c fuzz_data_producer.c
+FUZZ_SRC := $(PRGDIR)/util.c fuzz_helpers.c zstd_helpers.c fuzz_data_producer.c
ZSTDCOMMON_SRC := $(ZSTDDIR)/common/*.c
ZSTDCOMP_SRC := $(ZSTDDIR)/compress/*.c
/* Allocate all buffers and contexts if not already allocated */
if (neededBufSize > bufSize) {
free(rBuf);
- rBuf = malloc(neededBufSize);
+ rBuf = FUZZ_malloc(neededBufSize);
bufSize = neededBufSize;
- FUZZ_ASSERT(rBuf);
}
if (!dctx) {
dctx = ZSTD_createDCtx();
FUZZ_ZASSERT(ret);
if (ret == 0) {
FUZZ_ASSERT(resultCapacity >= srcSize);
- memcpy(result, src, srcSize);
+ if (srcSize > 0) {
+ memcpy(result, src, srcSize);
+ }
return srcSize;
}
ZSTD_decompressBegin(dctx);
if (neededBufSize > bufSize || !cBuf || !rBuf) {
free(cBuf);
free(rBuf);
- cBuf = malloc(neededBufSize);
- rBuf = malloc(neededBufSize);
+ cBuf = FUZZ_malloc(neededBufSize);
+ rBuf = FUZZ_malloc(neededBufSize);
bufSize = neededBufSize;
- FUZZ_ASSERT(cBuf && rBuf);
}
if (!cctx) {
cctx = ZSTD_createCCtx();
cLevel);
FUZZ_ZASSERT(result);
FUZZ_ASSERT_MSG(result == size, "Incorrect regenerated size");
- FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
+ FUZZ_ASSERT_MSG(!FUZZ_memcmp(src, rBuf, size), "Corruption!");
}
FUZZ_dataProducer_free(producer);
#ifndef STATEFUL_FUZZING
{
size_t const bufSize = FUZZ_dataProducer_uint32Range(producer, 0, 10 * size);
- void* rBuf = malloc(bufSize);
- FUZZ_ASSERT(rBuf);
+ void* rBuf = FUZZ_malloc(bufSize);
if (ddict) {
ZSTD_decompress_usingDDict(dctx, rBuf, bufSize, src, size, ddict);
} else {
DEBUGLOG(2, "Dict content type %d", dct);
DEBUGLOG(2, "Dict size %u", (unsigned)size);
- void* const rBuf = malloc(size);
- FUZZ_ASSERT(rBuf);
+ void* const rBuf = FUZZ_malloc(size);
size_t const cBufSize = ZSTD_compressBound(size);
- void* const cBuf = malloc(cBufSize);
- FUZZ_ASSERT(cBuf);
+ void* const cBuf = FUZZ_malloc(cBufSize);
size_t const cSize =
compress(cBuf, cBufSize, src, size, src, size, dlm, dct, refPrefix);
size_t const rSize =
decompress(rBuf, size, cBuf, cSize, src, size, dlm, dct, refPrefix);
FUZZ_ASSERT_MSG(rSize == size, "Incorrect regenerated size");
- FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
+ FUZZ_ASSERT_MSG(!FUZZ_memcmp(src, rBuf, size), "Corruption!");
out:
free(cBuf);
size = FUZZ_dataProducer_reserveDataPrefix(producer);
size_t const rBufSize = size;
- void* rBuf = malloc(rBufSize);
+ void* rBuf = FUZZ_malloc(rBufSize);
size_t cBufSize = ZSTD_compressBound(size) * 2;
void *cBuf;
/* Half of the time fuzz with a 1 byte smaller output size.
* giving us 4 bytes of overhead.
*/
cBufSize -= FUZZ_dataProducer_uint32Range(producer, 0, 1);
- cBuf = malloc(cBufSize);
+ cBuf = FUZZ_malloc(cBufSize);
if (!cctx) {
cctx = ZSTD_createCCtx();
roundTripTest(rBuf, rBufSize, cBuf, cBufSize, src, size, producer);
FUZZ_ZASSERT(result);
FUZZ_ASSERT_MSG(result == size, "Incorrect regenerated size");
- FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
+ FUZZ_ASSERT_MSG(!FUZZ_memcmp(src, rBuf, size), "Corruption!");
}
free(rBuf);
free(cBuf);
};
FUZZ_dataProducer_t *FUZZ_dataProducer_create(const uint8_t *data, size_t size) {
- FUZZ_dataProducer_t *producer = malloc(sizeof(FUZZ_dataProducer_t));
-
- FUZZ_ASSERT(producer != NULL);
+ FUZZ_dataProducer_t *producer = FUZZ_malloc(sizeof(FUZZ_dataProducer_t));
producer->data = data;
producer->size = size;
--- /dev/null
+/*
+ * Copyright (c) 2016-2020, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+#include "fuzz_helpers.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+
+void* FUZZ_malloc(size_t size)
+{
+ if (size > 0) {
+ void* const mem = malloc(size);
+ FUZZ_ASSERT(mem);
+ return mem;
+ }
+ return NULL;
+}
+
+int FUZZ_memcmp(void const* lhs, void const* rhs, size_t size)
+{
+ if (size == 0) {
+ return 0;
+ }
+ return memcmp(lhs, rhs, size);
+}
\ No newline at end of file
#define FUZZ_STATIC static
#endif
+/**
+ * Like malloc, except it returns NULL for zero-sized requests and
+ * FUZZ_ASSERTs that malloc doesn't fail for non-zero sizes.
+ */
+void* FUZZ_malloc(size_t size);
+
+/**
+ * memcmp but accepts NULL.
+ */
+int FUZZ_memcmp(void const* lhs, void const* rhs, size_t size);
+
#ifdef __cplusplus
}
#endif
FUZZ_ASSERT(cctx);
}
- void *rBuf = malloc(bufSize);
- FUZZ_ASSERT(rBuf);
+ void *rBuf = FUZZ_malloc(bufSize);
ZSTD_compressCCtx(cctx, rBuf, bufSize, src, size, cLevel);
free(rBuf);
FUZZ_dataProducer_free(producer);
}
size_t const bufSize = FUZZ_dataProducer_uint32Range(producer, 0, 10 * size);
- void *rBuf = malloc(bufSize);
- FUZZ_ASSERT(rBuf);
+ void *rBuf = FUZZ_malloc(bufSize);
ZSTD_decompressDCtx(dctx, rBuf, bufSize, src, size);
free(rBuf);
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
size_t const rBufSize = size;
- void* rBuf = malloc(rBufSize);
+ void* rBuf = FUZZ_malloc(rBufSize);
size_t cBufSize = ZSTD_compressBound(size);
void* cBuf;
*/
cBufSize -= FUZZ_dataProducer_uint32Range(producer, 0, 1);
- cBuf = malloc(cBufSize);
-
- FUZZ_ASSERT(cBuf && rBuf);
+ cBuf = FUZZ_malloc(cBufSize);
if (!cctx) {
cctx = ZSTD_createCCtx();
roundTripTest(rBuf, rBufSize, cBuf, cBufSize, src, size, producer);
FUZZ_ZASSERT(result);
FUZZ_ASSERT_MSG(result == size, "Incorrect regenerated size");
- FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
+ FUZZ_ASSERT_MSG(!FUZZ_memcmp(src, rBuf, size), "Corruption!");
}
free(rBuf);
free(cBuf);
/* Allocate all buffers and contexts if not already allocated */
if (!buf) {
- buf = malloc(kBufSize);
- FUZZ_ASSERT(buf);
- }
+ buf = FUZZ_malloc(kBufSize);
+ }
if (!dstream) {
dstream = ZSTD_createDStream();
ZSTD_inBuffer in = makeInBuffer(&src, &size, producer, prevInWasZero ? 1 : 0);
prevInWasZero = in.size == 0;
while (in.pos != in.size) {
- if (!stableOutBuffer || FUZZ_dataProducer_uint32Range(producer, 0, 100) == 55) {
+ if (!stableOutBuffer || prevOutWasZero || FUZZ_dataProducer_uint32Range(producer, 0, 100) == 55) {
out = makeOutBuffer(producer, prevOutWasZero ? 1 : 0);
}
prevOutWasZero = out.size == 0;
if (neededBufSize > bufSize) {
free(cBuf);
free(rBuf);
- cBuf = (uint8_t*)malloc(neededBufSize);
- rBuf = (uint8_t*)malloc(neededBufSize);
+ cBuf = (uint8_t*)FUZZ_malloc(neededBufSize);
+ rBuf = (uint8_t*)FUZZ_malloc(neededBufSize);
bufSize = neededBufSize;
- FUZZ_ASSERT(cBuf && rBuf);
}
if (!cctx) {
cctx = ZSTD_createCCtx();
ZSTD_decompressDCtx(dctx, rBuf, neededBufSize, cBuf, cSize);
FUZZ_ZASSERT(rSize);
FUZZ_ASSERT_MSG(rSize == size, "Incorrect regenerated size");
- FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
+ FUZZ_ASSERT_MSG(!FUZZ_memcmp(src, rBuf, size), "Corruption!");
}
FUZZ_dataProducer_free(producer);
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
ZSTD_frameHeader zfh;
+ if (size == 0) {
+ src = NULL;
+ }
/* You can fuzz any helper functions here that are fast, and take zstd
* compressed data as input. E.g. don't expect the input to be a dictionary,
* so don't fuzz ZSTD_getDictID_fromDict().
{
size_t const dictSize = MAX(srcSize / 8, 1024);
size_t const totalSampleSize = dictSize * 11;
- FUZZ_dict_t dict = { malloc(dictSize), dictSize };
- char* const samples = (char*)malloc(totalSampleSize);
+ FUZZ_dict_t dict = { FUZZ_malloc(dictSize), dictSize };
+ char* const samples = (char*)FUZZ_malloc(totalSampleSize);
unsigned nbSamples = 100;
- size_t* const samplesSizes = (size_t*)malloc(sizeof(size_t) * nbSamples);
+ size_t* const samplesSizes = (size_t*)FUZZ_malloc(sizeof(size_t) * nbSamples);
size_t pos = 0;
size_t sample = 0;
ZDICT_fastCover_params_t params;
- FUZZ_ASSERT(dict.buff && samples && samplesSizes);
for (sample = 0; sample < nbSamples; ++sample) {
size_t const remaining = totalSampleSize - pos;
memcpy(samples + pos, src + offset, toCopy);
pos += toCopy;
samplesSizes[sample] = toCopy;
-
}
memset(samples + pos, 0, totalSampleSize - pos);
if (ZSTD_getErrorCode(r) != ZSTD_error_srcSize_wrong) goto _output_error; }
DISPLAYLEVEL(3, "OK \n");
+ DISPLAYLEVEL(3, "test%3i : decompress into NULL buffer : ", testNb++);
+ { size_t const r = ZSTD_decompress(NULL, 0, compressedBuffer, compressedBufferSize);
+ if (!ZSTD_isError(r)) goto _output_error;
+ if (ZSTD_getErrorCode(r) != ZSTD_error_dstSize_tooSmall) goto _output_error; }
+ DISPLAYLEVEL(3, "OK \n");
+
DISPLAYLEVEL(3, "test%3i : ZSTD_decompressBound test with content size missing : ", testNb++);
{ /* create compressed buffer with content size missing */
ZSTD_CCtx* const cctx = ZSTD_createCCtx();