/*
 * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#ifndef OSSL_INTERNAL_RING_BUF_H
# define OSSL_INTERNAL_RING_BUF_H
# pragma once

# include <openssl/e_os2.h>    /* For 'ossl_inline' */
# include <openssl/crypto.h>   /* For OPENSSL_malloc(), OPENSSL_cleanse() */
# include <assert.h>           /* For assert() */
# include <string.h>           /* For memcpy() */

/*
 * ==================================================================
 * Byte-wise ring buffer which supports pushing and popping blocks of multiple
 * bytes at a time. The logical offset of each byte for the purposes of a QUIC
 * stream is tracked. Bytes can be popped from the ring buffer in two stages;
 * first they are popped, and then they are culled. Bytes which have been popped
 * but not yet culled will not be overwritten, and can be restored.
 */
struct ring_buf {
    void *start;
    size_t alloc;       /* size of buffer allocation in bytes */

    /*
     * Logical offset of the head (where we append to). This is the current size
     * of the QUIC stream. This increases monotonically.
     */
    uint64_t head_offset;

    /*
     * Logical offset of the cull tail. Data is no longer needed and is
     * deallocated as the cull tail advances, which occurs as data is
     * acknowledged. This increases monotonically.
     */
    uint64_t ctail_offset;
};
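
/*
 * Illustrative usage sketch (not part of the ring_buf API): a hypothetical
 * caller allocates a buffer, appends data, reads it back as a contiguous span
 * and culls it once it is no longer needed. The identifiers example_roundtrip,
 * data and data_len are invented for illustration only.
 *
 *   static int example_roundtrip(const unsigned char *data, size_t data_len)
 *   {
 *       struct ring_buf rb;
 *       const unsigned char *span;
 *       size_t span_len, pushed;
 *
 *       ring_buf_init(&rb);
 *       if (!ring_buf_resize(&rb, 4096, 0))
 *           return 0;
 *
 *       pushed = ring_buf_push(&rb, data, data_len);
 *
 *       if (!ring_buf_get_buf_at(&rb, rb.ctail_offset, &span, &span_len)) {
 *           ring_buf_destroy(&rb, 1);
 *           return 0;
 *       }
 *
 *       if (span_len > 0)
 *           ring_buf_cpop_range(&rb, rb.ctail_offset,
 *                               rb.ctail_offset + span_len - 1, 1);
 *
 *       ring_buf_destroy(&rb, 1);
 *       return pushed == data_len;
 *   }
 */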

static ossl_inline int ring_buf_init(struct ring_buf *r)
{
    r->start = NULL;
    r->alloc = 0;
    r->head_offset = r->ctail_offset = 0;
    return 1;
}

static ossl_inline void ring_buf_destroy(struct ring_buf *r, int cleanse)
{
    if (cleanse)
        OPENSSL_clear_free(r->start, r->alloc);
    else
        OPENSSL_free(r->start);
    r->start = NULL;
    r->alloc = 0;
}

static ossl_inline size_t ring_buf_used(struct ring_buf *r)
{
    return (size_t)(r->head_offset - r->ctail_offset);
}

static ossl_inline size_t ring_buf_avail(struct ring_buf *r)
{
    return r->alloc - ring_buf_used(r);
}

/*
 * Writes buf_len bytes starting at the given logical offset. The offset must
 * not precede the cull tail, and the write must not extend more than
 * ring_buf_avail() bytes past the current head; the head offset is advanced
 * if the write extends past it. Returns 1 on success and 0 if the requested
 * range cannot be written.
 */
static ossl_inline int ring_buf_write_at(struct ring_buf *r,
                                         uint64_t logical_offset,
                                         const unsigned char *buf,
                                         size_t buf_len)
{
    size_t avail, idx, l;
    unsigned char *start = r->start;
    int i;

    avail = ring_buf_avail(r);
    if (logical_offset < r->ctail_offset
        || logical_offset + buf_len > r->head_offset + avail)
        return 0;

    for (i = 0; buf_len > 0 && i < 2; ++i) {
        idx = logical_offset % r->alloc;
        l = r->alloc - idx;
        if (buf_len < l)
            l = buf_len;

        memcpy(start + idx, buf, l);
        if (r->head_offset < logical_offset + l)
            r->head_offset = logical_offset + l;

        logical_offset += l;
        buf += l;
        buf_len -= l;
    }

    assert(buf_len == 0);

    return 1;
}
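
/*
 * Illustrative sketch (not part of the ring_buf API): appending at the
 * current head offset via ring_buf_write_at() behaves like a push of the
 * same bytes, provided they fit into the available space. The identifier
 * example_append_at_head is invented for illustration only.
 *
 *   static int example_append_at_head(struct ring_buf *r,
 *                                     const unsigned char *data,
 *                                     size_t data_len)
 *   {
 *       if (data_len > ring_buf_avail(r))
 *           return 0;
 *
 *       return ring_buf_write_at(r, r->head_offset, data, data_len);
 *   }
 */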

/*
 * Appends up to buf_len bytes to the head of the ring buffer and returns the
 * number of bytes actually pushed, which may be less than buf_len if the
 * buffer is or becomes full.
 */
static ossl_inline size_t ring_buf_push(struct ring_buf *r,
                                        const unsigned char *buf,
                                        size_t buf_len)
{
    size_t pushed = 0, avail, idx, l;
    unsigned char *start = r->start;

    for (;;) {
        avail = ring_buf_avail(r);
        if (buf_len > avail)
            buf_len = avail;

        if (buf_len == 0)
            break;

        idx = r->head_offset % r->alloc;
        l = r->alloc - idx;
        if (buf_len < l)
            l = buf_len;

        memcpy(start + idx, buf, l);
        r->head_offset += l;
        buf += l;
        buf_len -= l;
        pushed += l;
    }

    return pushed;
}
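
/*
 * Worked example (hypothetical values, not part of the ring_buf API):
 * suppose alloc == 100, head_offset == 100 and ctail_offset == 40, so
 * ring_buf_used() == 60 and ring_buf_avail() == 40. A call to
 * ring_buf_push(r, data, 50) then copies only 40 bytes, starting at index
 * head_offset % alloc == 0, returns 40 and leaves head_offset == 140,
 * ring_buf_used() == 100 and ring_buf_avail() == 0. The remaining 10 bytes
 * can be pushed only after ring_buf_cpop_range() has advanced the cull tail.
 */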

/*
 * Returns a pointer to the byte at the given logical offset, or NULL if the
 * offset lies outside the currently retained range. *max_len is set to the
 * number of contiguous bytes from that point to the end of the underlying
 * allocation, which may exceed the number of valid bytes.
 */
static ossl_inline const unsigned char *ring_buf_get_ptr(const struct ring_buf *r,
                                                         uint64_t logical_offset,
                                                         size_t *max_len)
{
    unsigned char *start = r->start;
    size_t idx;

    if (logical_offset >= r->head_offset || logical_offset < r->ctail_offset)
        return NULL;
    idx = logical_offset % r->alloc;
    *max_len = r->alloc - idx;
    return start + idx;
}

/*
 * Retrieves data out of the read side of the ring buffer starting at the given
 * logical offset. *buf is set to point to a contiguous span of bytes and
 * *buf_len is set to the number of contiguous bytes. After this function
 * returns, further bytes may or may not be available at the logical offset
 * (logical_offset + *buf_len); they can be retrieved by calling this function
 * again. If the logical offset is outside the range retained by the ring
 * buffer, 0 is returned, otherwise 1 is returned. A logical offset at the end
 * of the retained range is not considered an error and is reported with a
 * *buf_len of 0.
 *
 * The ring buffer state is not changed.
 */
static ossl_inline int ring_buf_get_buf_at(const struct ring_buf *r,
                                           uint64_t logical_offset,
                                           const unsigned char **buf,
                                           size_t *buf_len)
{
    const unsigned char *start = r->start;
    size_t idx, l;

    if (logical_offset > r->head_offset || logical_offset < r->ctail_offset)
        return 0;

    if (r->alloc == 0) {
        *buf = NULL;
        *buf_len = 0;
        return 1;
    }

    idx = logical_offset % r->alloc;
    l = (size_t)(r->head_offset - logical_offset);
    if (l > r->alloc - idx)
        l = r->alloc - idx;

    *buf = start + idx;
    *buf_len = l;
    return 1;
}
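
/*
 * Illustrative sketch (not part of the ring_buf API): walking all currently
 * retained bytes as a sequence of contiguous spans, as described above. The
 * identifier example_total_retained is invented for illustration only; the
 * total it computes equals ring_buf_used().
 *
 *   static size_t example_total_retained(const struct ring_buf *r)
 *   {
 *       uint64_t off = r->ctail_offset;
 *       const unsigned char *span;
 *       size_t span_len, total = 0;
 *
 *       while (ring_buf_get_buf_at(r, off, &span, &span_len)
 *              && span_len > 0) {
 *           total += span_len;
 *           off += span_len;
 *       }
 *       return total;
 *   }
 */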

/*
 * Culls (and optionally cleanses) all bytes up to and including the logical
 * offset end. The range must begin at or before the current cull tail;
 * otherwise the call is ignored, so data is always culled contiguously from
 * the tail.
 */
static ossl_inline void ring_buf_cpop_range(struct ring_buf *r,
                                            uint64_t start, uint64_t end,
                                            int cleanse)
{
    assert(end >= start);

    if (start > r->ctail_offset)
        return;

    if (cleanse && r->alloc > 0 && end >= r->ctail_offset) {
        size_t idx = r->ctail_offset % r->alloc;
        uint64_t cleanse_end = end + 1;
        size_t l;

        if (cleanse_end > r->head_offset)
            cleanse_end = r->head_offset;
        l = (size_t)(cleanse_end - r->ctail_offset);
        if (l > r->alloc - idx) {
            OPENSSL_cleanse((unsigned char *)r->start + idx, r->alloc - idx);
            l -= r->alloc - idx;
            idx = 0;
        }
        if (l > 0)
            OPENSSL_cleanse((unsigned char *)r->start + idx, l);
    }

    r->ctail_offset = end + 1;
    /* Allow culling unpushed data */
    if (r->head_offset < r->ctail_offset)
        r->head_offset = r->ctail_offset;
}
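
/*
 * Illustrative sketch (not part of the ring_buf API): culling buffered data
 * once it has been acknowledged, cleansing it in the process. The identifiers
 * example_on_acked and ack_last are invented for illustration only; ack_last
 * is the logical offset of the last byte known to be acknowledged.
 *
 *   static void example_on_acked(struct ring_buf *r, uint64_t ack_last)
 *   {
 *       if (ack_last >= r->ctail_offset)
 *           ring_buf_cpop_range(r, r->ctail_offset, ack_last, 1);
 *   }
 */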

/*
 * Resizes the ring buffer to num_bytes bytes of storage, preserving all
 * currently retained data. Returns 1 on success; returns 0 if num_bytes is
 * smaller than the amount of data currently retained or if allocation fails.
 * If cleanse is nonzero, the old allocation is cleansed before being freed.
 */
static ossl_inline int ring_buf_resize(struct ring_buf *r, size_t num_bytes,
                                       int cleanse)
{
    struct ring_buf rnew = {0};
    const unsigned char *src = NULL;
    size_t src_len = 0, copied = 0;

    if (num_bytes == r->alloc)
        return 1;

    if (num_bytes < ring_buf_used(r))
        return 0;

    rnew.start = OPENSSL_malloc(num_bytes);
    if (rnew.start == NULL)
        return 0;

    rnew.alloc = num_bytes;
    rnew.head_offset = r->head_offset - ring_buf_used(r);
    rnew.ctail_offset = rnew.head_offset;

    for (;;) {
        if (!ring_buf_get_buf_at(r, r->ctail_offset + copied, &src, &src_len)) {
            OPENSSL_free(rnew.start);
            return 0;
        }

        if (src_len == 0)
            break;

        if (ring_buf_push(&rnew, src, src_len) != src_len) {
            OPENSSL_free(rnew.start);
            return 0;
        }

        copied += src_len;
    }

    assert(rnew.head_offset == r->head_offset);
    rnew.ctail_offset = r->ctail_offset;

    ring_buf_destroy(r, cleanse);
    memcpy(r, &rnew, sizeof(*r));
    return 1;
}
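
/*
 * Illustrative sketch (not part of the ring_buf API): growing the buffer when
 * more data needs to be retained than currently fits. The identifier
 * example_reserve is invented for illustration only.
 *
 *   static int example_reserve(struct ring_buf *r, size_t needed)
 *   {
 *       if (needed <= ring_buf_avail(r))
 *           return 1;
 *
 *       return ring_buf_resize(r, ring_buf_used(r) + needed, 1);
 *   }
 */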

#endif                          /* OSSL_INTERNAL_RING_BUF_H */