int
SBuf::compare(const SBuf &S, const SBufCaseSensitive isCaseSensitive, const size_type n) const
{
- if (n != npos && (n > length() || n > S.length()))
+ if (n != npos && (n > length() || n > S.length())) {
+ debugs(24, 8, "length specified. substr and recurse");
return substr(0,n).compare(S.substr(0,n),isCaseSensitive);
+ }
const size_type byteCompareLen = min(S.length(), length());
++stats.compareSlow;
int rv = 0;
+ debugs(24, 8, "comparing length " << byteCompareLen);
if (isCaseSensitive == caseSensitive) {
rv = memcmp(buf(), S.buf(), byteCompareLen);
} else {
rv = memcasecmp(buf(), S.buf(), byteCompareLen);
}
- if (rv != 0)
+ if (rv != 0) {
+ debugs(24, 8, "result: " << rv);
return rv;
- if (length() == S.length())
+ }
+ if (length() == S.length()) {
+ debugs(24, 8, "same contents and length. Equal");
return 0;
- if (length() > S.length())
+ }
+ if (length() > S.length()) {
+ debugs(24, 8, "lhs is longer than rhs. Result is 1");
return 1;
+ }
+ debugs(24, 8, "rhs is longer than lhs. Result is -1");
return -1;
}
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

// debug section 24

#include "squid.h"

#include "Debug.h"
#include "parser/Tokenizer.h"

#include <cerrno>
// Consumes (and returns) up to n bytes from the head of the parse buffer,
// advancing the parsed-bytes counter by the amount actually consumed.
// NOTE(review): return-type line and closing brace were lost at a patch hunk
// boundary and remain outside this span.
Parser::Tokenizer::consume(const SBuf::size_type n)
{
    // careful: n may be npos!
    debugs(24, 5, "consuming " << n << " bytes");
    const SBuf result = buf_.consume(n);
    parsed_ += result.length();
    return result;
skipAll(delimiters);
const SBuf::size_type tokenLen = buf_.findFirstOf(delimiters); // not found = npos => consume to end
if (tokenLen == SBuf::npos) {
+ debugs(24, 8, "no token found for delimiters " << delimiters.name);
*this = saved;
return false;
}
returnedToken = consume(tokenLen); // cannot be empty
skipAll(delimiters);
+ debugs(24, DBG_DATA, "token found for delimiters " << delimiters.name << ": '" <<
+ returnedToken << '\'');
return true;
}
// Extracts into returnedToken the longest leading run of characters from
// tokenChars, looking at no more than `limit` bytes. Returns false (and
// consumes nothing) when the buffer does not start with a tokenChars byte.
// NOTE(review): the return-type line was lost at a patch hunk boundary.
Parser::Tokenizer::prefix(SBuf &returnedToken, const CharacterSet &tokenChars, const SBuf::size_type limit)
{
    SBuf::size_type prefixLen = buf_.substr(0,limit).findFirstNotOf(tokenChars);
    if (prefixLen == 0) {
        debugs(24, 8, "no prefix for set " << tokenChars.name);
        return false;
    }
    if (prefixLen == SBuf::npos && (atEnd() || limit == 0)) {
        // nothing to look at: empty buffer or zero-byte window
        debugs(24, 8, "no char in set " << tokenChars.name << " while looking for prefix");
        return false;
    }
    if (prefixLen == SBuf::npos && limit > 0) {
        // every examined byte matched; the prefix is the whole window
        debugs(24, 8, "whole haystack matched");
        prefixLen = limit;
    }
    debugs(24, 8, "found with length " << prefixLen);
    returnedToken = consume(prefixLen); // cannot be empty after the npos check
    return true;
}
// Skips every leading byte that belongs to tokenChars; returns the number of
// bytes skipped (0 when the buffer does not start with such a byte).
// NOTE(review): the return-type line was lost at a patch hunk boundary.
Parser::Tokenizer::skipAll(const CharacterSet &tokenChars)
{
    const SBuf::size_type prefixLen = buf_.findFirstNotOf(tokenChars);
    if (prefixLen == 0) {
        debugs(24, 8, "no match when trying to skipAll " << tokenChars.name);
        return 0;
    }
    debugs(24, 8, "skipping all in " << tokenChars.name << " len " << prefixLen);
    return success(prefixLen); // consume and account for the skipped bytes
}
bool
Parser::Tokenizer::skipOne(const CharacterSet &chars)
{
- if (!buf_.isEmpty() && chars[buf_[0]])
+ if (!buf_.isEmpty() && chars[buf_[0]]) {
+ debugs(24, 8, "skipping one-of " << chars.name);
return success(1);
+ }
+ debugs(24, 8, "no match while skipping one-of " << chars.name);
return false;
}
bool
Parser::Tokenizer::skip(const SBuf &tokenToSkip)
{
- if (buf_.startsWith(tokenToSkip))
+ if (buf_.startsWith(tokenToSkip)) {
+ debugs(24, 8, "skipping " << tokenToSkip.length());
return success(tokenToSkip.length());
+ }
+ debugs(24, 8, "no match, not skipping '" << tokenToSkip << '\'');
return false;
}
bool
Parser::Tokenizer::skip(const char tokenChar)
{
- if (!buf_.isEmpty() && buf_[0] == tokenChar)
+ if (!buf_.isEmpty() && buf_[0] == tokenChar) {
+ debugs(24, 8, "skipping char '" << tokenChar << '\'');
return success(1);
+ }
+ debugs(24, 8, "no match, not skipping char '" << tokenChar << '\'');
return false;
}