/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
static __always_inline void *
__inline_memcpy(void *to, const void *from, size_t n)
{
	unsigned long d0, d1, d2;

	/* Copy n/4 dwords, then any remaining word and/or byte of n. */
	asm volatile("rep ; movsl\n\t"
		     "testb $2,%b4\n\t"
		     "je 1f\n\t"
		     "movsw\n"
		     "1:\ttestb $1,%b4\n\t"
		     "je 2f\n\t"
		     "movsb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}

/* Even with __builtin_ the compiler may decide to use the out of line
   function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#ifndef CONFIG_FORTIFY_SOURCE
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len)					\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = __memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})
#endif
#else
/*
 * kmemcheck becomes very happy if we use the REP instructions unconditionally,
 * because it means that we know both memory operands in advance.
 */
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;

	/* Store n copies of the 16-bit value v using rep stosw. */
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;

	/* Store n copies of the 32-bit value v using rep stosl. */
	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;

	/* Store n copies of the 64-bit value v using rep stosq. */
	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

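/*
 * Usage sketch (illustrative only; the buffer and value names below are
 * made up): unlike memset(), the count passed to memset16/32/64 is a
 * number of elements, not bytes.
 *
 *	u32 canvas[640];
 *
 *	memset32(canvas, 0x00ff00ffu, ARRAY_SIZE(canvas));
 */
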
#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the not-instrumented versions of the mem* functions.
 */
#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif
#endif /* CONFIG_KASAN && !__SANITIZE_ADDRESS__ */

#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * Low level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery; everyone else can just
 * use a regular memcpy.
 *
 * Return 0 for success, -EFAULT for failure.
 */
static __always_inline __must_check int
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return memcpy_mcsafe_unrolled(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}

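/*
 * Usage sketch (illustrative only; the names below are made up): callers
 * are expected to check the return value, since a non-zero result means a
 * machine check was taken and the copy did not complete.
 *
 *	if (memcpy_mcsafe(buf, pmem_addr, len))
 *		return -EIO;
 */
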
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */