{
}
-BinaryTokenizer::BinaryTokenizer(const SBuf &data):
+// expectMore: whether more input octets may arrive after `data`; when false,
+// running out of input is a hard error (Must) rather than InsufficientInput
+BinaryTokenizer::BinaryTokenizer(const SBuf &data, const bool expectMore):
context(""),
data_(data),
parsed_(0),
- syncPoint_(0)
+ syncPoint_(0),
+ expectMore_(expectMore)
{
}
/// debugging helper that prints a "standard" debugs() trailer
#define BinaryTokenizer_tail(size, start) \
- " occupying " << (size) << " bytes @" << (start) << " in " << this;
+ " occupying " << (size) << " bytes @" << (start) << " in " << this << \
+ (expectMore_ ? ';' : '.');
+// NOTE(review): the trailer ends the log line with ';' when more input may
+// arrive and with '.' when the input is complete. The macro keeps its
+// embedded trailing semicolon, so it only works as the last item of a
+// debugs() expression; confirm no caller chains further '<<' after it.
/// logs and throws if fewer than size octets remain; no other side effects
void
if (parsed_ + size > data_.length()) {
debugs(24, 5, (parsed_ + size - data_.length()) << " more bytes for " <<
context << description << BinaryTokenizer_tail(size, parsed_));
+ Must(expectMore_); // truncation is a hard error unless more input may arrive
throw InsufficientInput();
}
}
void
-BinaryTokenizer::reset(const SBuf &data)
+BinaryTokenizer::reset(const SBuf &data, const bool expectMore)
{
+ // forget all accumulated parsing state and start over on the given input
- *this = BinaryTokenizer(data);
+ *this = BinaryTokenizer(data, expectMore);
}
void
typedef uint64_t size_type; // enough for the largest supported offset
BinaryTokenizer();
- explicit BinaryTokenizer(const SBuf &data);
+ explicit BinaryTokenizer(const SBuf &data, const bool expectMore = false);
/// restart parsing from the very beginning
/// this method is for using one BinaryTokenizer to parse independent inputs
+ /// NOTE(review): unlike the constructor above, expectMore has no default
+ /// here, so existing single-argument reset() callers will stop compiling;
+ /// confirm that forcing callers to decide explicitly is intentional.
- void reset(const SBuf &data);
+ void reset(const SBuf &data, const bool expectMore);
- /// change input without changing parsing state
+ /// change input (and the expectMore expectation) without changing parsing state
/// this method avoids append overheads during incremental parsing
- void reinput(const SBuf &data) { data_ = data; }
+ void reinput(const SBuf &data, const bool expectMore) { data_ = data; expectMore_ = expectMore; }
/// make progress: future parsing failures will not rollback beyond this point
void commit();
SBuf data_;
uint64_t parsed_; ///< number of data bytes parsed or skipped
uint64_t syncPoint_; ///< where to re-start the next parsing attempt
+ bool expectMore_; ///< whether more data bytes may arrive in the future
};
#endif // SQUID_BINARY_TOKENIZER_H
if (currentContentType != record.type) {
Must(tkMessages.atEnd()); // no currentContentType leftovers
fragments = record.fragment;
- tkMessages.reset(fragments);
+ tkMessages.reset(fragments, true); // expectMore: later records may carry more fragments
currentContentType = record.type;
} else {
fragments.append(record.fragment);
- tkMessages.reinput(fragments);
+ tkMessages.reinput(fragments, true); // expectMore: later records may carry more fragments
tkMessages.rollback();
}
parseMessages();
Security::HandshakeParser::parseServerHello(const SBuf &data)
{
try {
- tkRecords.reinput(data); // data contains _everything_ read so far
+ // data contains everything read so far; more octets may arrive later,
+ // hence expectMore=true
+ tkRecords.reinput(data, true);
tkRecords.rollback();
while (!tkRecords.atEnd() && !parseDone)
parseRecord();
Security::HandshakeParser::parseClientHello(const SBuf &data)
{
try {
- tkRecords.reinput(data); // data contains _everything_ read so far
+ // data contains everything read so far; more octets may arrive later,
+ // hence expectMore=true
+ tkRecords.reinput(data, true);
tkRecords.rollback();
while (!tkRecords.atEnd() && !parseDone)
parseRecord();