From: Francesco Chemolli
Date: Wed, 18 Feb 2015 11:25:46 +0000 (+0100)
Subject: Instrumented SBuf::compare and many Tokenizer methods with debug statements.
X-Git-Tag: merge-candidate-3-v1~258^2~3
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=72ab4ab573615b7cd0dbabdf8b4d6fd6872e8ac8;p=thirdparty%2Fsquid.git

Instrumented SBuf::compare and many Tokenizer methods with debug statements.
---

diff --git a/src/SBuf.cc b/src/SBuf.cc
index 8b79a3ec06..e1f98837e5 100644
--- a/src/SBuf.cc
+++ b/src/SBuf.cc
@@ -365,23 +365,33 @@ memcasecmp(const char *b1, const char *b2, SBuf::size_type len)
 int
 SBuf::compare(const SBuf &S, const SBufCaseSensitive isCaseSensitive, const size_type n) const
 {
-    if (n != npos && (n > length() || n > S.length()))
+    if (n != npos && (n > length() || n > S.length())) {
+        debugs(24, 8, "length specified. substr and recurse");
         return substr(0,n).compare(S.substr(0,n),isCaseSensitive);
+    }
 
     const size_type byteCompareLen = min(S.length(), length());
     ++stats.compareSlow;
     int rv = 0;
+    debugs(24, 8, "comparing length " << byteCompareLen);
     if (isCaseSensitive == caseSensitive) {
         rv = memcmp(buf(), S.buf(), byteCompareLen);
     } else {
         rv = memcasecmp(buf(), S.buf(), byteCompareLen);
     }
-    if (rv != 0)
+    if (rv != 0) {
+        debugs(24, 8, "result: " << rv);
         return rv;
-    if (length() == S.length())
+    }
+    if (length() == S.length()) {
+        debugs(24, 8, "same contents and length. Equal");
         return 0;
-    if (length() > S.length())
+    }
+    if (length() > S.length()) {
+        debugs(24, 8, "lhs is longer than rhs. Result is 1");
         return 1;
+    }
+    debugs(24, 8, "rhs is longer than lhs. Result is -1");
     return -1;
 }
 
diff --git a/src/parser/Tokenizer.cc b/src/parser/Tokenizer.cc
index c14937f678..bd2e678f9a 100644
--- a/src/parser/Tokenizer.cc
+++ b/src/parser/Tokenizer.cc
@@ -6,7 +6,10 @@
  * Please see the COPYING and CONTRIBUTORS files for details.
  */
 
+// debug section 24
+
 #include "squid.h"
+#include "Debug.h"
 #include "parser/Tokenizer.h"
 
 #include <cerrno>
@@ -41,6 +44,7 @@ SBuf
 Parser::Tokenizer::consume(const SBuf::size_type n)
 {
     // careful: n may be npos!
+    debugs(24, 5, "consuming " << n << " bytes");
     const SBuf result = buf_.consume(n);
     parsed_ += result.length();
     return result;
@@ -60,11 +64,14 @@ Parser::Tokenizer::token(SBuf &returnedToken, const CharacterSet &delimiters)
     skipAll(delimiters);
     const SBuf::size_type tokenLen = buf_.findFirstOf(delimiters); // not found = npos => consume to end
     if (tokenLen == SBuf::npos) {
+        debugs(24, 8, "no token found for delimiters " << delimiters.name);
         *this = saved;
         return false;
     }
     returnedToken = consume(tokenLen); // cannot be empty
     skipAll(delimiters);
+    debugs(24, DBG_DATA, "token found for delimiters " << delimiters.name << ": '" <<
+           returnedToken << '\'');
     return true;
 }
 
@@ -72,12 +79,19 @@ bool
 Parser::Tokenizer::prefix(SBuf &returnedToken, const CharacterSet &tokenChars, const SBuf::size_type limit)
 {
     SBuf::size_type prefixLen = buf_.substr(0,limit).findFirstNotOf(tokenChars);
-    if (prefixLen == 0)
+    if (prefixLen == 0) {
+        debugs(24, 8, "no prefix for set " << tokenChars.name);
         return false;
-    if (prefixLen == SBuf::npos && (atEnd() || limit == 0))
+    }
+    if (prefixLen == SBuf::npos && (atEnd() || limit == 0)) {
+        debugs(24, 8, "no char in set " << tokenChars.name << " while looking for prefix");
         return false;
-    if (prefixLen == SBuf::npos && limit > 0)
+    }
+    if (prefixLen == SBuf::npos && limit > 0) {
+        debugs(24, 8, "whole haystack matched");
         prefixLen = limit;
+    }
+    debugs(24, 8, "found with length " << prefixLen);
     returnedToken = consume(prefixLen); // cannot be empty after the npos check
     return true;
 }
@@ -86,32 +100,44 @@ SBuf::size_type
 Parser::Tokenizer::skipAll(const CharacterSet &tokenChars)
 {
     const SBuf::size_type prefixLen = buf_.findFirstNotOf(tokenChars);
-    if (prefixLen == 0)
+    if (prefixLen == 0) {
+        debugs(24, 8, "no match when trying to skipAll " << tokenChars.name);
         return 0;
+    }
+    debugs(24, 8, "skipping all in " << tokenChars.name << " len " << prefixLen);
     return success(prefixLen);
 }
 
 bool
 Parser::Tokenizer::skipOne(const CharacterSet &chars)
 {
-    if (!buf_.isEmpty() && chars[buf_[0]])
+    if (!buf_.isEmpty() && chars[buf_[0]]) {
+        debugs(24, 8, "skipping one-of " << chars.name);
         return success(1);
+    }
+    debugs(24, 8, "no match while skipping one-of " << chars.name);
     return false;
 }
 
 bool
 Parser::Tokenizer::skip(const SBuf &tokenToSkip)
 {
-    if (buf_.startsWith(tokenToSkip))
+    if (buf_.startsWith(tokenToSkip)) {
+        debugs(24, 8, "skipping " << tokenToSkip.length());
         return success(tokenToSkip.length());
+    }
+    debugs(24, 8, "no match, not skipping '" << tokenToSkip << '\'');
     return false;
 }
 
 bool
 Parser::Tokenizer::skip(const char tokenChar)
 {
-    if (!buf_.isEmpty() && buf_[0] == tokenChar)
+    if (!buf_.isEmpty() && buf_[0] == tokenChar) {
+        debugs(24, 8, "skipping char '" << tokenChar << '\'');
         return success(1);
+    }
+    debugs(24, 8, "no match, not skipping char '" << tokenChar << '\'');
     return false;
 }
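
A small driver along the following lines can exercise the new instrumentation. This is an
illustrative sketch only, not part of the commit: it assumes a build inside the Squid source
tree (squid.h, Debug.h, SBuf.h, base/CharacterSet.h, parser/Tokenizer.h; exact paths may
differ by branch), and the "spaces" delimiter set, the demo input, and the function name are
made up for the example. The level-8 messages appear in cache.log when section 24 is raised
high enough, e.g. "debug_options ALL,1 24,9" in squid.conf; DBG_DATA is level 9, so the token
contents logged by token() show up only at that level.

    // Illustrative sketch, not part of this commit. Header paths and the
    // "spaces" set below are assumptions for the example.
    #include "squid.h"

    #include "base/CharacterSet.h"
    #include "Debug.h"
    #include "parser/Tokenizer.h"
    #include "SBuf.h"

    static void
    tokenizerDebugDemo()
    {
        // Hypothetical delimiter set, built with CharacterSet(label, chars).
        static const CharacterSet spaces("spaces", " ");

        Parser::Tokenizer tok(SBuf("GET /index.html HTTP/1.1"));

        SBuf method;
        // token() now logs its delimiter set at level 8 and, on success,
        // the token itself at DBG_DATA.
        if (tok.token(method, spaces))
            debugs(24, 5, "parsed method: " << method);

        // skip() logs the character it skips: here "skipping char '/'".
        tok.skip('/');

        // Nothing left to skip here, so skipAll() logs
        // "no match when trying to skipAll spaces" and returns 0.
        tok.skipAll(spaces);
    }

Run under "24,9", each Tokenizer call produces one cache.log line, which makes it practical to
follow how a parser consumes its input byte by byte.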