/* SPDX-License-Identifier: LGPL-2.1-or-later */
#pragma once

#include <string.h>

#include "forward.h"
#include "memory-util-fundamental.h" /* IWYU pragma: export */

size_t page_size(void) _pure_;
#define PAGE_ALIGN(l) ALIGN_TO(l, page_size())
#define PAGE_ALIGN_U64(l) ALIGN_TO_U64(l, page_size())
#define PAGE_ALIGN_DOWN(l) ALIGN_DOWN(l, page_size())
#define PAGE_ALIGN_DOWN_U64(l) ALIGN_DOWN_U64(l, page_size())
#define PAGE_OFFSET(l) ALIGN_OFFSET(l, page_size())
#define PAGE_OFFSET_U64(l) ALIGN_OFFSET_U64(l, page_size())
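
/* Example: assuming 4096-byte pages, PAGE_ALIGN(1) == 4096,
 * PAGE_ALIGN_DOWN(4097) == 4096 and PAGE_OFFSET(4097) == 1. */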

/* Normal memcpy() requires src to be nonnull. We do nothing if n is 0. */
static inline void* memcpy_safe(void *dst, const void *src, size_t n) {
        if (n == 0)
                return dst;
        assert(src);
        return memcpy(dst, src, n);
}
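
/* Example: for any valid buffer "buf", memcpy_safe(buf, NULL, 0) is
 * well-defined and returns "buf", while passing NULL to plain memcpy() is
 * undefined behavior even when n == 0. */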

/* Normal mempcpy() requires src to be nonnull. We do nothing if n is 0. */
static inline void* mempcpy_safe(void *dst, const void *src, size_t n) {
        if (n == 0)
                return dst;
        assert(src);
        return mempcpy(dst, src, n);
}
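
/* Example: like mempcpy(), the return value points one past the last byte
 * written, so consecutive copies into a hypothetical output buffer "p" can
 * be chained:
 *
 *         p = mempcpy_safe(p, a, n_a);
 *         p = mempcpy_safe(p, b, n_b);
 */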

#define _mempcpy_typesafe(dst, src, n, sz)                              \
        ({                                                              \
                size_t sz;                                              \
                assert_se(MUL_SAFE(&sz, sizeof((dst)[0]), n));          \
                (typeof((dst)[0])*) mempcpy_safe(dst, src, sz);         \
        })

#define mempcpy_typesafe(dst, src, n) \
        _mempcpy_typesafe(dst, src, n, UNIQ_T(sz, UNIQ))
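
/* Example (with hypothetical uint32_t buffers "dst32"/"src32"): copies n
 * elements, checking the n * sizeof(uint32_t) size computation for overflow,
 * and returns a correctly typed past-the-end pointer:
 *
 *         uint32_t *q = mempcpy_typesafe(dst32, src32, n);
 */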

/* Normal memcmp() requires s1 and s2 to be nonnull. We do nothing if n is 0. */
static inline int memcmp_safe(const void *s1, const void *s2, size_t n) {
        if (n == 0)
                return 0;
        assert(s1);
        assert(s2);
        return memcmp(s1, s2, n);
}

/* Compare s1 (length n1) with s2 (length n2) in lexicographic order. */
static inline int memcmp_nn(const void *s1, size_t n1, const void *s2, size_t n2) {
        return memcmp_safe(s1, s2, MIN(n1, n2))
                ?: CMP(n1, n2);
}
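
/* Example: memcmp_nn("ab", 2, "abc", 3) first compares the common two-byte
 * prefix (equal), then falls back to comparing the lengths, hence returns a
 * negative value. */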

#define zero(x) (memzero(&(x), sizeof(x)))

bool memeqbyte(uint8_t byte, const void *data, size_t length) _nonnull_if_nonzero_(2, 3);

#define memeqzero(data, length) memeqbyte(0x00, data, length)

#define eqzero(x) memeqzero(x, sizeof(x))
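
/* Example: given "uint8_t id[16];", zero(id) clears the whole array, after
 * which eqzero(id) and memeqzero(id, sizeof(id)) both return true. */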

static inline void* mempset(void *s, int c, size_t n) {
        memset(s, c, n);
        return (uint8_t*) s + n;
}
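
/* Example: like mempcpy(), mempset() returns a pointer one past the filled
 * region, so padding and payload writes compose (illustrative names):
 *
 *         p = mempset(p, 0, n_pad);
 *         p = mempcpy_safe(p, data, n_data);
 */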

/* Normal memmem() requires haystack to be nonnull, which is annoying for zero-length buffers */
static inline void* memmem_safe(const void *haystack, size_t haystacklen, const void *needle, size_t needlelen) {

        if (needlelen <= 0)
                return (void*) haystack;

        if (haystacklen < needlelen)
                return NULL;

        assert(haystack);
        assert(needle);

        return memmem(haystack, haystacklen, needle, needlelen);
}
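
/* Example: an empty needle matches at the start of any haystack, including an
 * empty one, while with "uint8_t h[] = { 1, 2, 3, 4 }" a needle "{ 2, 3 }" of
 * length 2 yields &h[1]. */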

static inline void* mempmem_safe(const void *haystack, size_t haystacklen, const void *needle, size_t needlelen) {
        const uint8_t *p;

        p = memmem_safe(haystack, haystacklen, needle, needlelen);
        if (!p)
                return NULL;

        return (uint8_t*) p + needlelen;
}
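
/* Example: with the same h[] as above, mempmem_safe(h, 4, (uint8_t[]) { 2, 3 }, 2)
 * returns &h[3], i.e. the first byte after the match, which is handy when
 * parsing delimiter-separated records. */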

void* erase_and_free(void *p);

static inline void erase_and_freep(void *p) {
        erase_and_free(*(void**) p);
}
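
/* Example: intended for use with _cleanup_, so that sensitive heap memory is
 * erased before it is freed when the variable goes out of scope:
 *
 *         _cleanup_(erase_and_freep) char *password = NULL;
 */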

/* Use with _cleanup_ to erase a single 'char' when leaving scope */
static inline void erase_char(char *p) {
        explicit_bzero_safe(p, sizeof(char));
}

/* Makes a copy of the buffer with reversed order of bytes */
void* memdup_reverse(const void *mem, size_t size);
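
/* Example: for input bytes { 0x01, 0x02, 0x03 } this returns a newly
 * allocated copy { 0x03, 0x02, 0x01 } (presumably NULL on allocation
 * failure); the caller owns and frees the result. */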