/*
 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 24    SBuf */

#include "squid.h"
#include "BinaryTokenizer.h"

14 | BinaryTokenizer::BinaryTokenizer(): BinaryTokenizer(SBuf()) | |
15 | { | |
16 | } | |
17 | ||
18 | BinaryTokenizer::BinaryTokenizer(const SBuf &data): | |
19 | context(""), | |
20 | data_(data), | |
21 | parsed_(0), | |
22 | syncPoint_(0) | |
23 | { | |
24 | } | |
25 | ||
/// debugging helper that prints a "standard" debugs() trailer; this is an
/// expression fragment meant to be appended to a debugs() output stream
// The macro body must not end with a semicolon: a trailing ';' would inject
// an empty statement into every expansion and would break any use of the
// macro in a non-trailing position of an expression.
#define BinaryTokenizer_tail(size, start) \
    " occupying " << (size) << " bytes @" << (start) << " in " << this

30 | /// logs and throws if fewer than size octets remain; no other side effects | |
31 | void | |
32 | BinaryTokenizer::want(uint64_t size, const char *description) const | |
33 | { | |
34 | if (parsed_ + size > data_.length()) { | |
35 | debugs(24, 5, (parsed_ + size - data_.length()) << " more bytes for " << | |
36 | context << description << BinaryTokenizer_tail(size, parsed_)); | |
37 | throw InsufficientInput(); | |
38 | } | |
39 | } | |
40 | ||
41 | /// debugging helper for parsed number fields | |
42 | void | |
43 | BinaryTokenizer::got(uint32_t value, uint64_t size, const char *description) const | |
44 | { | |
45 | debugs(24, 7, context << description << '=' << value << | |
46 | BinaryTokenizer_tail(size, parsed_ - size)); | |
47 | } | |
48 | ||
49 | /// debugging helper for parsed areas/blobs | |
50 | void | |
51 | BinaryTokenizer::got(const SBuf &value, uint64_t size, const char *description) const | |
52 | { | |
53 | debugs(24, 7, context << description << '=' << | |
54 | Raw(nullptr, value.rawContent(), value.length()).hex() << | |
55 | BinaryTokenizer_tail(size, parsed_ - size)); | |
56 | ||
57 | } | |
58 | ||
59 | /// debugging helper for skipped fields | |
60 | void | |
61 | BinaryTokenizer::skipped(uint64_t size, const char *description) const | |
62 | { | |
63 | debugs(24, 7, context << description << BinaryTokenizer_tail(size, parsed_ - size)); | |
64 | ||
65 | } | |
66 | ||
67 | /// Returns the next ready-for-shift byte, adjusting the number of parsed bytes. | |
68 | /// The larger 32-bit return type helps callers shift/merge octets into numbers. | |
69 | /// This internal method does not perform out-of-bounds checks. | |
70 | uint32_t | |
71 | BinaryTokenizer::octet() | |
72 | { | |
73 | // While char may be signed, we view data characters as unsigned, | |
74 | // which helps to arrive at the right 32-bit return value. | |
75 | return static_cast<uint8_t>(data_[parsed_++]); | |
76 | } | |
77 | ||
78 | void | |
79 | BinaryTokenizer::reset(const SBuf &data) | |
80 | { | |
81 | *this = BinaryTokenizer(data); | |
82 | } | |
83 | ||
84 | void | |
85 | BinaryTokenizer::rollback() | |
86 | { | |
87 | parsed_ = syncPoint_; | |
88 | } | |
89 | ||
90 | void | |
91 | BinaryTokenizer::commit() | |
92 | { | |
93 | if (context && *context) | |
94 | debugs(24, 6, context << BinaryTokenizer_tail(parsed_ - syncPoint_, syncPoint_)); | |
95 | syncPoint_ = parsed_; | |
96 | } | |
97 | ||
98 | bool | |
99 | BinaryTokenizer::atEnd() const | |
100 | { | |
101 | return parsed_ >= data_.length(); | |
102 | } | |
103 | ||
104 | uint8_t | |
105 | BinaryTokenizer::uint8(const char *description) | |
106 | { | |
107 | want(1, description); | |
108 | const uint8_t result = octet(); | |
109 | got(result, 1, description); | |
110 | return result; | |
111 | } | |
112 | ||
113 | uint16_t | |
114 | BinaryTokenizer::uint16(const char *description) | |
115 | { | |
116 | want(2, description); | |
117 | const uint16_t result = (octet() << 8) | octet(); | |
118 | got(result, 2, description); | |
119 | return result; | |
120 | } | |
121 | ||
122 | uint32_t | |
123 | BinaryTokenizer::uint24(const char *description) | |
124 | { | |
125 | want(3, description); | |
126 | const uint32_t result = (octet() << 16) | (octet() << 8) | octet(); | |
127 | got(result, 3, description); | |
128 | return result; | |
129 | } | |
130 | ||
131 | uint32_t | |
132 | BinaryTokenizer::uint32(const char *description) | |
133 | { | |
134 | want(4, description); | |
135 | const uint32_t result = (octet() << 24) | (octet() << 16) | (octet() << 8) | octet(); | |
136 | got(result, 4, description); | |
137 | return result; | |
138 | } | |
139 | ||
140 | SBuf | |
141 | BinaryTokenizer::area(uint64_t size, const char *description) | |
142 | { | |
143 | want(size, description); | |
144 | const SBuf result = data_.substr(parsed_, size); | |
145 | parsed_ += size; | |
146 | got(result, size, description); | |
147 | return result; | |
148 | } | |
149 | ||
150 | void | |
151 | BinaryTokenizer::skip(uint64_t size, const char *description) | |
152 | { | |
153 | want(size, description); | |
154 | parsed_ += size; | |
155 | skipped(size, description); | |
156 | } | |
157 |