/*
 * Copyright 2022-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
10 #ifndef OSSL_INTERNAL_RING_BUF_H
11 # define OSSL_INTERNAL_RING_BUF_H
14 # include <openssl/e_os2.h> /* For 'ossl_inline' */
15 # include "internal/safe_math.h"
18 * ==================================================================
19 * Byte-wise ring buffer which supports pushing and popping blocks of multiple
20 * bytes at a time. The logical offset of each byte for the purposes of a QUIC
21 * stream is tracked. Bytes can be popped from the ring buffer in two stages;
22 * first they are popped, and then they are culled. Bytes which have been popped
23 * but not yet culled will not be overwritten, and can be restored.
27 size_t alloc
; /* size of buffer allocation in bytes */
30 * Logical offset of the head (where we append to). This is the current size
31 * of the QUIC stream. This increases monotonically.
36 * Logical offset of the cull tail. Data is no longer needed and is
37 * deallocated as the cull tail advances, which occurs as data is
38 * acknowledged. This increases monotonically.
40 uint64_t ctail_offset
;
43 OSSL_SAFE_MATH_UNSIGNED(u64
, uint64_t)
45 #define MAX_OFFSET (((uint64_t)1) << 62) /* QUIC-imposed limit */
47 static ossl_inline
int ring_buf_init(struct ring_buf
*r
)
51 r
->head_offset
= r
->ctail_offset
= 0;
55 static ossl_inline
void ring_buf_destroy(struct ring_buf
*r
, int cleanse
)
58 OPENSSL_clear_free(r
->start
, r
->alloc
);
60 OPENSSL_free(r
->start
);
65 static ossl_inline
size_t ring_buf_used(struct ring_buf
*r
)
67 return (size_t)(r
->head_offset
- r
->ctail_offset
);
70 static ossl_inline
size_t ring_buf_avail(struct ring_buf
*r
)
72 return r
->alloc
- ring_buf_used(r
);
75 static ossl_inline
int ring_buf_write_at(struct ring_buf
*r
,
76 uint64_t logical_offset
,
77 const unsigned char *buf
,
81 unsigned char *start
= r
->start
;
84 avail
= ring_buf_avail(r
);
85 if (logical_offset
< r
->ctail_offset
86 || safe_add_u64(logical_offset
, buf_len
, &err
)
87 > safe_add_u64(r
->head_offset
, avail
, &err
)
88 || safe_add_u64(r
->head_offset
, buf_len
, &err
)
93 for (i
= 0; buf_len
> 0 && i
< 2; ++i
) {
94 idx
= logical_offset
% r
->alloc
;
99 memcpy(start
+ idx
, buf
, l
);
100 if (r
->head_offset
< logical_offset
+ l
)
101 r
->head_offset
= logical_offset
+ l
;
108 assert(buf_len
== 0);
113 static ossl_inline
size_t ring_buf_push(struct ring_buf
*r
,
114 const unsigned char *buf
,
117 size_t pushed
= 0, avail
, idx
, l
;
118 unsigned char *start
= r
->start
;
121 avail
= ring_buf_avail(r
);
125 if (buf_len
> MAX_OFFSET
- r
->head_offset
)
126 buf_len
= (size_t)(MAX_OFFSET
- r
->head_offset
);
131 idx
= r
->head_offset
% r
->alloc
;
136 memcpy(start
+ idx
, buf
, l
);
146 static ossl_inline
const unsigned char *ring_buf_get_ptr(const struct ring_buf
*r
,
147 uint64_t logical_offset
,
150 unsigned char *start
= r
->start
;
153 if (logical_offset
>= r
->head_offset
|| logical_offset
< r
->ctail_offset
)
155 idx
= logical_offset
% r
->alloc
;
156 *max_len
= r
->alloc
- idx
;
161 * Retrieves data out of the read side of the ring buffer starting at the given
162 * logical offset. *buf is set to point to a contiguous span of bytes and
163 * *buf_len is set to the number of contiguous bytes. After this function
164 * returns, there may or may not be more bytes available at the logical offset
165 * of (logical_offset + *buf_len) by calling this function again. If the logical
166 * offset is out of the range retained by the ring buffer, returns 0, else
167 * returns 1. A logical offset at the end of the range retained by the ring
168 * buffer is not considered an error and is returned with a *buf_len of 0.
170 * The ring buffer state is not changed.
172 static ossl_inline
int ring_buf_get_buf_at(const struct ring_buf
*r
,
173 uint64_t logical_offset
,
174 const unsigned char **buf
,
177 const unsigned char *start
= r
->start
;
180 if (logical_offset
> r
->head_offset
|| logical_offset
< r
->ctail_offset
)
189 idx
= logical_offset
% r
->alloc
;
190 l
= (size_t)(r
->head_offset
- logical_offset
);
191 if (l
> r
->alloc
- idx
)
199 static ossl_inline
void ring_buf_cpop_range(struct ring_buf
*r
,
200 uint64_t start
, uint64_t end
,
203 assert(end
>= start
);
205 if (start
> r
->ctail_offset
|| end
>= MAX_OFFSET
)
208 if (cleanse
&& r
->alloc
> 0 && end
> r
->ctail_offset
) {
209 size_t idx
= r
->ctail_offset
% r
->alloc
;
210 uint64_t cleanse_end
= end
+ 1;
213 if (cleanse_end
> r
->head_offset
)
214 cleanse_end
= r
->head_offset
;
215 l
= (size_t)(cleanse_end
- r
->ctail_offset
);
216 if (l
> r
->alloc
- idx
) {
217 OPENSSL_cleanse((unsigned char *)r
->start
+ idx
, r
->alloc
- idx
);
222 OPENSSL_cleanse((unsigned char *)r
->start
+ idx
, l
);
225 r
->ctail_offset
= end
+ 1;
226 /* Allow culling unpushed data */
227 if (r
->head_offset
< r
->ctail_offset
)
228 r
->head_offset
= r
->ctail_offset
;
231 static ossl_inline
int ring_buf_resize(struct ring_buf
*r
, size_t num_bytes
,
234 struct ring_buf rnew
= {0};
235 const unsigned char *src
= NULL
;
236 size_t src_len
= 0, copied
= 0;
238 if (num_bytes
== r
->alloc
)
241 if (num_bytes
< ring_buf_used(r
))
244 rnew
.start
= OPENSSL_malloc(num_bytes
);
245 if (rnew
.start
== NULL
)
248 rnew
.alloc
= num_bytes
;
249 rnew
.head_offset
= r
->head_offset
- ring_buf_used(r
);
250 rnew
.ctail_offset
= rnew
.head_offset
;
253 if (!ring_buf_get_buf_at(r
, r
->ctail_offset
+ copied
, &src
, &src_len
)) {
254 OPENSSL_free(rnew
.start
);
261 if (ring_buf_push(&rnew
, src
, src_len
) != src_len
) {
262 OPENSSL_free(rnew
.start
);
269 assert(rnew
.head_offset
== r
->head_offset
);
270 rnew
.ctail_offset
= r
->ctail_offset
;
272 ring_buf_destroy(r
, cleanse
);
273 memcpy(r
, &rnew
, sizeof(*r
));
277 #endif /* OSSL_INTERNAL_RING_BUF_H */