} ____cacheline_aligned_in_smp movsl_mask;
#endif
-#define ARCH_HAS_NOCACHE_UACCESS 1
+#define ARCH_HAS_NONTEMPORAL_UACCESS 1
/*
* The "unsafe" user accesses aren't really "unsafe", but the naming
return __copy_user_ll(to, (__force const void *)from, n);
}
-static __always_inline unsigned long
-__copy_from_user_inatomic_nocache(void *to, const void __user *from,
- unsigned long n)
-{
- return __copy_from_user_ll_nocache_nozero(to, from, n);
-}
-
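+/*
+ * Returns the number of bytes that could not be copied. Unlike
+ * copy_from_user(), the destination tail is not zeroed on a fault
+ * (the old "nozero" behaviour), so callers must check the result.
+ */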
+unsigned long __must_check
+copy_from_user_inatomic_nontemporal(void *to, const void __user *from,
+				    unsigned long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
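+/*
+ * Copy from user space with nontemporal stores, so that large one-off
+ * copies do not evict warm cachelines. Returns the number of bytes
+ * left uncopied on fault, 0 on success.
+ */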
-static inline int
-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
-				  unsigned size)
+static inline unsigned long
+copy_from_user_inatomic_nontemporal(void *dst, const void __user *src,
+				    unsigned size)
{
long ret;
kasan_check_write(dst, size);
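+	/* Turn a kernel address into one that is guaranteed to fault */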
+ src = mask_user_address(src);
stac();
ret = copy_to_nontemporal(dst, (__force const void *)src, size);
	clac();
	return ret;
}
EXPORT_SYMBOL(__copy_user_ll);
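+/*
+ * 32-bit nontemporal copy from user space: takes the SSE2 cache-
+ * bypassing path for copies larger than 64 bytes, and the plain cached
+ * __copy_user() otherwise. The destination tail is not zeroed on a
+ * fault; the return value is the number of bytes left uncopied.
+ */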
-unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
+unsigned long copy_from_user_inatomic_nontemporal(void *to, const void __user *from,
unsigned long n)
{
- __uaccess_begin_nospec();
+ if (!user_access_begin(from, n))
+ return n;
#ifdef CONFIG_X86_INTEL_USERCOPY
if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
		n = __copy_user_intel_nocache(to, from, n);
	else
		__copy_user(to, from, n);
#else
	__copy_user(to, from, n);
#endif
- __uaccess_end();
+ user_access_end();
return n;
}
-EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
+EXPORT_SYMBOL(copy_from_user_inatomic_nontemporal);
/* We can use the cpu mem copy function because this is X86. */
vaddr = io_mapping_map_atomic_wc(mapping, base);
- unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
+ unwritten = copy_from_user_inatomic_nontemporal((void __force *)vaddr + offset,
user_data, length);
io_mapping_unmap_atomic(vaddr);
if (unwritten) {
/* TODO copy slow path code from i915 */
fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
- unwritten = __copy_from_user_inatomic_nocache
+ unwritten = copy_from_user_inatomic_nontemporal
(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
u64_to_user_ptr(cmd->command), cmd->command_size);
#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
-#ifndef ARCH_HAS_NOCACHE_UACCESS
+#ifndef ARCH_HAS_NONTEMPORAL_UACCESS
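+/*
+ * Generic fallback for architectures without nontemporal user copies:
+ * an ordinary cached __copy_from_user_inatomic(), with the same
+ * masked-address fast path as the x86 implementations.
+ */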
static inline __must_check unsigned long
-__copy_from_user_inatomic_nocache(void *to, const void __user *from,
+copy_from_user_inatomic_nontemporal(void *to, const void __user *from,
unsigned long n)
{
+ if (can_do_masked_user_access())
+ from = mask_user_address(from);
+	else if (!access_ok(from, n))
+		return n;
return __copy_from_user_inatomic(to, from, n);
}
-#endif /* ARCH_HAS_NOCACHE_UACCESS */
+#endif /* ARCH_HAS_NONTEMPORAL_UACCESS */
extern __must_check int check_zeroed_user(const void __user *from, size_t size);
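+/* iov_iter glue: hand off to the arch nontemporal copy (or the fallback) */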
size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress,
size_t len, void *to, void *priv2)
{
- return __copy_from_user_inatomic_nocache(to + progress, iter_from, len);
+ return copy_from_user_inatomic_nontemporal(to + progress, iter_from, len);
}
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)