/*
 * Copyright 2022-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#ifndef OSSL_INTERNAL_RING_BUF_H
# define OSSL_INTERNAL_RING_BUF_H
# pragma once

# include <assert.h>              /* For 'assert' */
# include <string.h>              /* For 'memcpy' */
# include <openssl/e_os2.h>       /* For 'ossl_inline' */
# include <openssl/crypto.h>      /* For OPENSSL_malloc() and friends */
# include "internal/safe_math.h"

/*
 * ==================================================================
 * Byte-wise ring buffer which supports pushing and popping blocks of multiple
 * bytes at a time. The logical offset of each byte for the purposes of a QUIC
 * stream is tracked. Bytes can be popped from the ring buffer in two stages;
 * first they are popped, and then they are culled. Bytes which have been
 * popped but not yet culled will not be overwritten, and can be restored.
 */
struct ring_buf {
    void    *start;
    size_t   alloc;          /* size of buffer allocation in bytes */

    /*
     * Logical offset of the head (where we append to). This is the current
     * size of the QUIC stream. This increases monotonically.
     */
    uint64_t head_offset;

    /*
     * Logical offset of the cull tail. Data is no longer needed and is
     * deallocated as the cull tail advances, which occurs as data is
     * acknowledged. This increases monotonically.
     */
    uint64_t ctail_offset;
};
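
/*
 * Illustrative usage sketch (not part of the API; alloc_size, data and len
 * below are hypothetical). A buffer is initialised empty, given storage with
 * ring_buf_resize(), appended to with ring_buf_push(), read back with
 * ring_buf_get_buf_at() and culled with ring_buf_cpop_range() once the data
 * is no longer needed:
 *
 *   struct ring_buf rb;
 *
 *   ring_buf_init(&rb);
 *   if (!ring_buf_resize(&rb, alloc_size, 0))
 *       return 0;                                // allocation failed
 *
 *   ring_buf_push(&rb, data, len);               // append at the head
 *   ring_buf_cpop_range(&rb, 0, len - 1, 0);     // cull offsets [0, len - 1]
 *   ring_buf_destroy(&rb, 0);
 */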

OSSL_SAFE_MATH_UNSIGNED(u64, uint64_t)

#define MAX_OFFSET (((uint64_t)1) << 62) /* QUIC-imposed limit */

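/* Initialises the ring buffer to an empty, unallocated state. Cannot fail. */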
static ossl_inline int ring_buf_init(struct ring_buf *r)
{
    r->start = NULL;
    r->alloc = 0;
    r->head_offset = r->ctail_offset = 0;
    return 1;
}

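/*
 * Frees the underlying allocation, optionally cleansing it first, and resets
 * the buffer to the unallocated state.
 */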
static ossl_inline void ring_buf_destroy(struct ring_buf *r, int cleanse)
{
    if (cleanse)
        OPENSSL_clear_free(r->start, r->alloc);
    else
        OPENSSL_free(r->start);
    r->start = NULL;
    r->alloc = 0;
}

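/* Returns the number of bytes currently stored (pushed but not yet culled). */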
static ossl_inline size_t ring_buf_used(struct ring_buf *r)
{
    return (size_t)(r->head_offset - r->ctail_offset);
}

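/* Returns the number of bytes which can still be appended to the buffer. */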
static ossl_inline size_t ring_buf_avail(struct ring_buf *r)
{
    return r->alloc - ring_buf_used(r);
}

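/*
 * Writes buf_len bytes at the given logical offset, which must not precede
 * the cull tail. The head offset is advanced if the write extends past it.
 * Returns 1 on success, or 0 if the write would not fit in the buffer or
 * would take the head offset past MAX_OFFSET.
 */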
static ossl_inline int ring_buf_write_at(struct ring_buf *r,
                                         uint64_t logical_offset,
                                         const unsigned char *buf,
                                         size_t buf_len)
{
    size_t avail, idx, l;
    unsigned char *start = r->start;
    int i, err = 0;

    avail = ring_buf_avail(r);
    if (logical_offset < r->ctail_offset
        || safe_add_u64(logical_offset, buf_len, &err)
           > safe_add_u64(r->head_offset, avail, &err)
        || safe_add_u64(r->head_offset, buf_len, &err)
           > MAX_OFFSET
        || err)
        return 0;

    for (i = 0; buf_len > 0 && i < 2; ++i) {
        idx = logical_offset % r->alloc;
        l = r->alloc - idx;
        if (buf_len < l)
            l = buf_len;

        memcpy(start + idx, buf, l);
        if (r->head_offset < logical_offset + l)
            r->head_offset = logical_offset + l;

        logical_offset += l;
        buf += l;
        buf_len -= l;
    }

    assert(buf_len == 0);

    return 1;
}

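/*
 * Appends up to buf_len bytes at the head of the ring buffer, limited by the
 * space available and by MAX_OFFSET. Returns the number of bytes actually
 * appended, which may be less than buf_len (including zero).
 */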
static ossl_inline size_t ring_buf_push(struct ring_buf *r,
                                        const unsigned char *buf,
                                        size_t buf_len)
{
    size_t pushed = 0, avail, idx, l;
    unsigned char *start = r->start;

    for (;;) {
        avail = ring_buf_avail(r);
        if (buf_len > avail)
            buf_len = avail;

        if (buf_len > MAX_OFFSET - r->head_offset)
            buf_len = (size_t)(MAX_OFFSET - r->head_offset);

        if (buf_len == 0)
            break;

        idx = r->head_offset % r->alloc;
        l = r->alloc - idx;
        if (buf_len < l)
            l = buf_len;

        memcpy(start + idx, buf, l);
        r->head_offset += l;
        buf += l;
        buf_len -= l;
        pushed += l;
    }

    return pushed;
}

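/*
 * Returns a pointer to the stored byte at the given logical offset, or NULL
 * if the offset is outside the currently retained range. On success,
 * *max_len is set to the number of contiguous bytes from that position to
 * the end of the underlying allocation.
 */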
static ossl_inline const unsigned char *ring_buf_get_ptr(const struct ring_buf *r,
                                                         uint64_t logical_offset,
                                                         size_t *max_len)
{
    unsigned char *start = r->start;
    size_t idx;

    if (logical_offset >= r->head_offset || logical_offset < r->ctail_offset)
        return NULL;
    idx = logical_offset % r->alloc;
    *max_len = r->alloc - idx;
    return start + idx;
}

/*
 * Retrieves data from the read side of the ring buffer starting at the given
 * logical offset. *buf is set to point to a contiguous span of bytes and
 * *buf_len is set to the number of contiguous bytes. Further bytes, if any,
 * starting at the logical offset (logical_offset + *buf_len) can be retrieved
 * by calling this function again. If the logical offset is outside the range
 * retained by the ring buffer, returns 0, else returns 1. A logical offset at
 * the end of the range retained by the ring buffer is not considered an error
 * and is returned with a *buf_len of 0.
 *
 * The ring buffer state is not changed.
 */
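/*
 * For example (an illustrative sketch only; process() is a hypothetical
 * consumer), all currently retained bytes can be walked by advancing the
 * logical offset by the span length returned on each call:
 *
 *   const unsigned char *p;
 *   size_t n;
 *   uint64_t off = r->ctail_offset;
 *
 *   while (ring_buf_get_buf_at(r, off, &p, &n) && n > 0) {
 *       process(p, n);
 *       off += n;
 *   }
 */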
static ossl_inline int ring_buf_get_buf_at(const struct ring_buf *r,
                                           uint64_t logical_offset,
                                           const unsigned char **buf,
                                           size_t *buf_len)
{
    const unsigned char *start = r->start;
    size_t idx, l;

    if (logical_offset > r->head_offset || logical_offset < r->ctail_offset)
        return 0;

    if (r->alloc == 0) {
        *buf = NULL;
        *buf_len = 0;
        return 1;
    }

    idx = logical_offset % r->alloc;
    l = (size_t)(r->head_offset - logical_offset);
    if (l > r->alloc - idx)
        l = r->alloc - idx;

    *buf = start + idx;
    *buf_len = l;
    return 1;
}

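/*
 * Culls the inclusive range of logical offsets [start, end], optionally
 * cleansing the underlying storage. start must not exceed the current cull
 * tail; ranges ending at or beyond MAX_OFFSET are ignored. The cull tail is
 * advanced to end + 1, and the head is advanced with it if it would
 * otherwise fall behind the cull tail.
 */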
static ossl_inline void ring_buf_cpop_range(struct ring_buf *r,
                                            uint64_t start, uint64_t end,
                                            int cleanse)
{
    assert(end >= start);

    if (start > r->ctail_offset || end >= MAX_OFFSET)
        return;

    if (cleanse && r->alloc > 0 && end > r->ctail_offset) {
        size_t idx = r->ctail_offset % r->alloc;
        uint64_t cleanse_end = end + 1;
        size_t l;

        if (cleanse_end > r->head_offset)
            cleanse_end = r->head_offset;
        l = (size_t)(cleanse_end - r->ctail_offset);
        if (l > r->alloc - idx) {
            OPENSSL_cleanse((unsigned char *)r->start + idx, r->alloc - idx);
            l -= r->alloc - idx;
            idx = 0;
        }
        if (l > 0)
            OPENSSL_cleanse((unsigned char *)r->start + idx, l);
    }

    r->ctail_offset = end + 1;
    /* Allow culling unpushed data */
    if (r->head_offset < r->ctail_offset)
        r->head_offset = r->ctail_offset;
}

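/*
 * Resizes the underlying allocation to num_bytes, preserving the retained
 * data and the logical offsets. Fails (returning 0) if the retained data
 * would not fit in the new size or if allocation fails; returns 1 on
 * success, including when the size is unchanged. If cleanse is nonzero the
 * old allocation is cleansed before being freed.
 */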
static ossl_inline int ring_buf_resize(struct ring_buf *r, size_t num_bytes,
                                       int cleanse)
{
    struct ring_buf rnew = {0};
    const unsigned char *src = NULL;
    size_t src_len = 0, copied = 0;

    if (num_bytes == r->alloc)
        return 1;

    if (num_bytes < ring_buf_used(r))
        return 0;

    rnew.start = OPENSSL_malloc(num_bytes);
    if (rnew.start == NULL)
        return 0;

    rnew.alloc = num_bytes;
    rnew.head_offset = r->head_offset - ring_buf_used(r);
    rnew.ctail_offset = rnew.head_offset;

    for (;;) {
        if (!ring_buf_get_buf_at(r, r->ctail_offset + copied, &src, &src_len)) {
            OPENSSL_free(rnew.start);
            return 0;
        }

        if (src_len == 0)
            break;

        if (ring_buf_push(&rnew, src, src_len) != src_len) {
            OPENSSL_free(rnew.start);
            return 0;
        }

        copied += src_len;
    }

    assert(rnew.head_offset == r->head_offset);
    rnew.ctail_offset = r->ctail_offset;

    ring_buf_destroy(r, cleanse);
    memcpy(r, &rnew, sizeof(*r));
    return 1;
}

#endif /* OSSL_INTERNAL_RING_BUF_H */