*********************************************************/
#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
#include "../common/compiler.h" /* prefetch */
-#include "../common/cpu.h" /* bmi2 */
#include "../common/mem.h" /* low level memory routines */
+#include <stddef.h> /* size_t, ptrdiff_t */
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
#include "../common/huf.h"
#include "../common/zstd_internal.h"
#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
-#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
#include "zstd_decompress_block.h"
#include "../common/bits.h" /* ZSTD_highbit32 */
ip++;
/* Build DTables */
+ assert(ip <= iend);
{ size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
LLtype, MaxLL, LLFSELog,
- ip, iend-ip,
+ ip, (size_t)(iend-ip),
LL_base, LL_bits,
LL_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq,
ip += llhSize;
}
+ assert(ip <= iend);
{ size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
OFtype, MaxOff, OffFSELog,
- ip, iend-ip,
+ ip, (size_t)(iend-ip),
OF_base, OF_bits,
OF_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq,
ip += ofhSize;
}
+ assert(ip <= iend);
{ size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
MLtype, MaxML, MLFSELog,
- ip, iend-ip,
+ ip, (size_t)(iend-ip),
ML_base, ML_bits,
ML_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq,
}
}
- return ip-istart;
+ return (size_t)(ip-istart);
}
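/* Context for the return value above: this is the tail of ZSTD_decodeSeqHeaders(),
 * and the header size it returns is consumed by the block decoder roughly as
 * sketched below (simplified from the internal caller; not part of this patch): */
{   int nbSeq;
    size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
    if (ZSTD_isError(seqHSize)) return seqHSize;
    ip += seqHSize;
    srcSize -= seqHSize;
    /* ip now points at the FSE-encoded sequences bitstream */
}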
* Precondition: *ip <= *op
 * Postcondition: *op - *ip >= 8
*/
-HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
+HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset)
+{
assert(*ip <= *op);
if (offset < 8) {
/* close range match, overlap */
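/* Illustrative reference for the contract above (hypothetical overlapCopy8_ref,
 * not the shipped implementation, which uses small lookup tables instead).
 * Assumes offset == (size_t)(*op - *ip) >= 1, as at both call sites: a plain
 * byte loop extends the repeating pattern, then the source is re-pointed into
 * already-written bytes so that the gap *op - *ip becomes at least 8. */
static void overlapCopy8_ref(BYTE** op, BYTE const** ip, size_t offset)
{
    size_t n;
    assert(offset >= 1);
    for (n = 0; n < 8; n++) (*op)[n] = (*ip)[n];   /* byte-by-byte: overlap-safe */
    *op += 8;
    if (offset < 8) {
        /* smallest multiple of the pattern period that is >= 8 */
        size_t const dist = offset * ((8 + offset - 1) / offset);
        *ip = *op - dist;   /* dist < 8 + offset, so this never rewinds past the old *ip */
    } else {
        *ip += 8;
    }
    assert((size_t)(*op - *ip) >= 8);
}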
* - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
* The src buffer must be before the dst buffer.
*/
-static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
+static void
+ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, size_t length, ZSTD_overlap_e ovtype)
+{
ptrdiff_t const diff = op - ip;
BYTE* const oend = op + length;
if (ovtype == ZSTD_overlap_src_before_dst) {
/* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
assert(length >= 8);
- ZSTD_overlapCopy8(&op, &ip, diff);
+ assert(diff > 0);
+ ZSTD_overlapCopy8(&op, &ip, (size_t)diff);
length -= 8;
assert(op - ip >= 8);
assert(op <= oend);
if (op <= oend_w) {
/* Wildcopy until we get close to the end. */
assert(oend > oend_w);
- ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
+ ZSTD_wildcopy(op, ip, (size_t)(oend_w - op), ovtype);
ip += oend_w - op;
op += oend_w - op;
}
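/* A minimal reference for ZSTD_safecopy's contract (hypothetical safecopy_ref;
 * the real function above only layers wildcopy fast paths on top of this):
 * a forward byte copy is already correct for both ZSTD_no_overlap and the
 * src-before-dst overlap case, where re-reading just-written bytes is exactly
 * the repeating-pattern semantics an overlapping match requires. */
static void safecopy_ref(BYTE* op, const BYTE* const oend_w, BYTE const* ip,
                         size_t length, ZSTD_overlap_e ovtype)
{
    (void)oend_w; (void)ovtype;      /* these only steer fast paths, not correctness */
    while (length--) *op++ = *ip++;
}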
/* ZSTD_safecopyDstBeforeSrc():
 * This version allows overlap with dst before src; it also handles the non-overlap case with dst after src.
 * Kept separate from the more common ZSTD_safecopy() to avoid any performance impact on that common case. */
-static void ZSTD_safecopyDstBeforeSrc(BYTE* op, const BYTE* ip, ptrdiff_t length) {
+static void ZSTD_safecopyDstBeforeSrc(BYTE* op, const BYTE* ip, size_t length)
+{
ptrdiff_t const diff = op - ip;
BYTE* const oend = op + length;
}
if (op <= oend - WILDCOPY_OVERLENGTH && diff < -WILDCOPY_VECLEN) {
- ZSTD_wildcopy(op, ip, oend - WILDCOPY_OVERLENGTH - op, ZSTD_no_overlap);
+ ZSTD_wildcopy(op, ip, (size_t)(oend - WILDCOPY_OVERLENGTH - op), ZSTD_no_overlap);
ip += oend - WILDCOPY_OVERLENGTH - op;
op += oend - WILDCOPY_OVERLENGTH - op;
}
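/* Self-contained demo of why a forward byte copy is also safe when dst
 * precedes src (illustrative test program, not part of the library): the
 * write cursor trails the read cursor, so every source byte is read before
 * anything can clobber it, matching memmove semantics. */
#include <stdio.h>
int main(void)
{
    char buf[] = "0123456789";
    char* op = buf + 1;              /* dst before src, regions overlap */
    const char* ip = buf + 3;
    size_t n;
    for (n = 0; n < 6; n++) op[n] = ip[n];
    printf("%s\n", buf);             /* prints "0345678789", same as memmove */
    return 0;
}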
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
- { size_t const length1 = dictEnd - match;
- ZSTD_memmove(oLitEnd, match, length1);
- op = oLitEnd + length1;
- sequence.matchLength -= length1;
- match = prefixStart;
+ { size_t const length1 = (size_t)(dictEnd - match);
+ ZSTD_memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
}
}
ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
- { size_t const length1 = dictEnd - match;
- ZSTD_memmove(oLitEnd, match, length1);
- op = oLitEnd + length1;
- sequence.matchLength -= length1;
- match = prefixStart;
+ { size_t const length1 = (size_t)(dictEnd - match);
+ ZSTD_memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
}
}
ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
- { size_t const length1 = dictEnd - match;
- ZSTD_memmove(oLitEnd, match, length1);
- op = oLitEnd + length1;
- sequence.matchLength -= length1;
- match = prefixStart;
+ { size_t const length1 = (size_t)(dictEnd - match);
+ ZSTD_memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
}
}
/* Match within prefix of 1 or more bytes */
 * longer than literals (in general). In Silesia, ~10% of matches are longer
* than 16 bytes.
*/
- ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
+ ZSTD_wildcopy(op, match, sequence.matchLength, ZSTD_no_overlap);
return sequenceLength;
}
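/* Illustrative sketch of the no-overlap wildcopy used above (hypothetical
 * wildcopy_ref; the real ZSTD_wildcopy also handles the overlap mode): copy
 * in fixed 16-byte blocks and deliberately run up to WILDCOPY_OVERLENGTH
 * bytes past `length`. Callers guarantee that slack exists on both buffers;
 * assumes length >= 1. */
static void wildcopy_ref(void* dst, const void* src, size_t length)
{
    BYTE* op = (BYTE*)dst;
    const BYTE* ip = (const BYTE*)src;
    BYTE* const oend = op + length;
    do {
        ZSTD_memcpy(op, ip, 16);     /* may write past oend: that is the point */
        op += 16; ip += 16;
    } while (op < oend);
}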
assert(sequence.offset < WILDCOPY_VECLEN);
/* If the match length is > 8 bytes, then continue with the wildcopy. */
if (sequence.matchLength > 8) {
assert(op < oMatchEnd);
- ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8, ZSTD_overlap_src_before_dst);
+ ZSTD_wildcopy(op, match, sequence.matchLength - 8, ZSTD_overlap_src_before_dst);
}
return sequenceLength;
}
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
- { size_t const length1 = dictEnd - match;
+ { size_t const length1 = (size_t)(dictEnd - match);
ZSTD_memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
 * longer than literals (in general). In Silesia, ~10% of matches are longer
* than 16 bytes.
*/
- ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
+ ZSTD_wildcopy(op, match, sequence.matchLength, ZSTD_no_overlap);
return sequenceLength;
}
assert(sequence.offset < WILDCOPY_VECLEN);
/* If the match length is > 8 bytes, then continue with the wildcopy. */
if (sequence.matchLength > 8) {
assert(op < oMatchEnd);
- ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
+ ZSTD_wildcopy(op, match, sequence.matchLength-8, ZSTD_overlap_src_before_dst);
}
return sequenceLength;
}
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset)
{
- const BYTE* ip = (const BYTE*)seqStart;
- const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst;
- BYTE* const oend = ZSTD_maybeNullPtrAdd(ostart, maxDstSize);
+ BYTE* const oend = ZSTD_maybeNullPtrAdd(ostart, (ptrdiff_t)maxDstSize);
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
const BYTE* litBufferEnd = dctx->litBufferEnd;
dctx->fseEntropy = 1;
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
RETURN_ERROR_IF(
- ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
+ ERR_isError(BIT_initDStream(&seqState.DStream, seqStart, seqSize)),
corruption_detected, "");
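        /* note: on entry the sequences bitstream is exactly (seqStart, seqSize),
         * which is why the former local ip/iend cursors could be dropped */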
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
/* If there are more sequences, they will need to read literals from litExtraBuffer; copy over the remainder from dst and update litPtr and litEnd */
if (nbSeq > 0) {
- const size_t leftoverLit = dctx->litBufferEnd - litPtr;
+ const size_t leftoverLit = (size_t)(dctx->litBufferEnd - litPtr);
+ assert(dctx->litBufferEnd >= litPtr);
DEBUGLOG(6, "There are %i sequences left, and %zu/%zu literals left in buffer", nbSeq, leftoverLit, sequence.litLength);
if (leftoverLit) {
RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset)
{
- const BYTE* ip = (const BYTE*)seqStart;
- const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst;
- BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? ZSTD_maybeNullPtrAdd(ostart, maxDstSize) : dctx->litBuffer;
+ BYTE* const oend = (dctx->litBufferLocation == ZSTD_not_in_dst) ?
+ ZSTD_maybeNullPtrAdd(ostart, (ptrdiff_t)maxDstSize) :
+ dctx->litBuffer;
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
const BYTE* const litEnd = litPtr + dctx->litSize;
dctx->fseEntropy = 1;
{ U32 i; for (i = 0; i < ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
RETURN_ERROR_IF(
- ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend - ip)),
+ ERR_isError(BIT_initDStream(&seqState.DStream, seqStart, seqSize)),
corruption_detected, "");
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
{ const BYTE* const matchBase = (sequence.offset > prefetchPos) ? dictEnd : prefixStart;
/* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
* No consequence though : memory address is only used for prefetching, not for dereferencing */
- const BYTE* const match = ZSTD_wrappedPtrSub(ZSTD_wrappedPtrAdd(matchBase, prefetchPos), sequence.offset);
+ const BYTE* const match = ZSTD_wrappedPtrSub(ZSTD_wrappedPtrAdd(matchBase, (ptrdiff_t)prefetchPos), (ptrdiff_t)sequence.offset);
PREFETCH_L1(match); PREFETCH_L1(match+CACHELINE_SIZE); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
}
return prefetchPos + sequence.matchLength;
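/* The wrapped-pointer helpers exist so that the prefetch-address computation
 * above stays sanitizer-clean even on corrupted offsets. A sketch of one way
 * to get such semantics (hypothetical wrapped_ptr_sub_ref via uintptr_t; the
 * library's actual ZSTD_wrappedPtrAdd/Sub live in common/compiler.h and may
 * instead rely on compiler attributes): */
#include <stdint.h>   /* uintptr_t; assumed available */
static const BYTE* wrapped_ptr_sub_ref(const BYTE* p, size_t sub)
{
    /* computing in integer space avoids undefined pointer overflow; the
     * result is only ever prefetched, never dereferenced */
    return (const BYTE*)((uintptr_t)p - (uintptr_t)sub);
}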
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset)
{
- const BYTE* ip = (const BYTE*)seqStart;
- const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst;
- BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? dctx->litBuffer : ZSTD_maybeNullPtrAdd(ostart, maxDstSize);
+ BYTE* const oend = (dctx->litBufferLocation == ZSTD_in_dst) ?
+ dctx->litBuffer :
+ ZSTD_maybeNullPtrAdd(ostart, (ptrdiff_t)maxDstSize);
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
const BYTE* litBufferEnd = dctx->litBufferEnd;
dctx->fseEntropy = 1;
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
assert(dst != NULL);
- assert(iend >= ip);
RETURN_ERROR_IF(
- ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
+ ERR_isError(BIT_initDStream(&seqState.DStream, seqStart, seqSize)),
corruption_detected, "");
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
if (dctx->litBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd) {
/* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */
- const size_t leftoverLit = dctx->litBufferEnd - litPtr;
- if (leftoverLit)
- {
+ const size_t leftoverLit = (size_t)(dctx->litBufferEnd - litPtr);
+ assert(dctx->litBufferEnd >= litPtr);
+ if (leftoverLit) {
RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength -= leftoverLit;
for ( ; seqNb<nbSeq ; seqNb++) {
seq_t *sequence = &(sequences[seqNb&STORED_SEQS_MASK]);
if (dctx->litBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd) {
- const size_t leftoverLit = dctx->litBufferEnd - litPtr;
+ const size_t leftoverLit = (size_t)(dctx->litBufferEnd - litPtr);
+ assert(dctx->litBufferEnd >= litPtr);
if (leftoverLit) {
RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
/* last literal segment */
if (dctx->litBufferLocation == ZSTD_split) { /* first deplete literal buffer in dst, then copy litExtraBuffer */
- size_t const lastLLSize = litBufferEnd - litPtr;
+ size_t const lastLLSize = (size_t)(litBufferEnd - litPtr);
+ assert(litBufferEnd >= litPtr);
RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
if (op != NULL) {
ZSTD_memmove(op, litPtr, lastLLSize);
litPtr = dctx->litExtraBuffer;
litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
}
- { size_t const lastLLSize = litBufferEnd - litPtr;
+ { size_t const lastLLSize = (size_t)(litBufferEnd - litPtr);
+ assert(litBufferEnd >= litPtr);
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
if (op != NULL) {
ZSTD_memmove(op, litPtr, lastLLSize);
* Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t.
*/
size_t const blockSizeMax = MIN(dstCapacity, ZSTD_blockSizeMax(dctx));
- size_t const totalHistorySize = ZSTD_totalHistorySize(ZSTD_maybeNullPtrAdd((BYTE*)dst, blockSizeMax), (BYTE const*)dctx->virtualStart);
+ size_t const totalHistorySize = ZSTD_totalHistorySize(ZSTD_maybeNullPtrAdd(dst, (ptrdiff_t)blockSizeMax), (BYTE const*)dctx->virtualStart);
/* isLongOffset must be true if there are long offsets.
* Offsets are long if they are larger than ZSTD_maxShortOffset().
* We don't expect that to be the case in 64-bit mode.