/* src/basic/memory-util.h — from the systemd project (thirdparty/systemd.git);
 * recovered from a git-blame page capture. */
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#pragma once

#include <inttypes.h>
#include <malloc.h>
#include <stdbool.h>
#include <string.h>
#include <sys/types.h>

#include "alloc-util.h"
#include "macro.h"

13size_t page_size(void) _pure_;
14#define PAGE_ALIGN(l) ALIGN_TO((l), page_size())
13933c6b 15#define PAGE_ALIGN_DOWN(l) ((l) & ~(page_size() - 1))
b98f393d 16#define PAGE_OFFSET(l) ((l) & (page_size() - 1))
0a970718
LP
17
18/* Normal memcpy requires src to be nonnull. We do nothing if n is 0. */
19static inline void memcpy_safe(void *dst, const void *src, size_t n) {
20 if (n == 0)
21 return;
22 assert(src);
23 memcpy(dst, src, n);
24}
25
26/* Normal memcmp requires s1 and s2 to be nonnull. We do nothing if n is 0. */
27static inline int memcmp_safe(const void *s1, const void *s2, size_t n) {
28 if (n == 0)
29 return 0;
30 assert(s1);
31 assert(s2);
32 return memcmp(s1, s2, n);
33}
34
35/* Compare s1 (length n1) with s2 (length n2) in lexicographic order. */
36static inline int memcmp_nn(const void *s1, size_t n1, const void *s2, size_t n2) {
37 return memcmp_safe(s1, s2, MIN(n1, n2))
38 ?: CMP(n1, n2);
39}
40
41#define memzero(x,l) \
42 ({ \
43 size_t _l_ = (l); \
3135369c
ZJS
44 if (_l_ > 0) \
45 memset(x, 0, _l_); \
0a970718
LP
46 })
47
48#define zero(x) (memzero(&(x), sizeof(x)))
49
50bool memeqzero(const void *data, size_t length);
51
52#define eqzero(x) memeqzero(x, sizeof(x))
53
54static inline void *mempset(void *s, int c, size_t n) {
55 memset(s, c, n);
56 return (uint8_t*)s + n;
57}
090a9c1e
LP
58
59/* Normal memmem() requires haystack to be nonnull, which is annoying for zero-length buffers */
60static inline void *memmem_safe(const void *haystack, size_t haystacklen, const void *needle, size_t needlelen) {
61
62 if (needlelen <= 0)
63 return (void*) haystack;
64
65 if (haystacklen < needlelen)
66 return NULL;
67
68 assert(haystack);
69 assert(needle);
70
71 return memmem(haystack, haystacklen, needle, needlelen);
72}
73
74#if HAVE_EXPLICIT_BZERO
75static inline void* explicit_bzero_safe(void *p, size_t l) {
76 if (l > 0)
77 explicit_bzero(p, l);
78
79 return p;
80}
81#else
82void *explicit_bzero_safe(void *p, size_t l);
83#endif
e1ed99c8 84
282bde10
LP
85static inline void* erase_and_free(void *p) {
86 size_t l;
87
88 if (!p)
89 return NULL;
90
91 l = malloc_usable_size(p);
92 explicit_bzero_safe(p, l);
e514aa1e 93 return mfree(p);
282bde10
LP
94}
95
96static inline void erase_and_freep(void *p) {
97 erase_and_free(*(void**) p);
44c786f0
ZJS
98}
99
e1ed99c8
LP
100/* Use with _cleanup_ to erase a single 'char' when leaving scope */
101static inline void erase_char(char *p) {
102 explicit_bzero_safe(p, sizeof(char));
103}