/*
 * Berkeley style UIO structures - Alan Cox 1994.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum {
	ITER_IOVEC = 0,
	ITER_KVEC = 2,
	ITER_BVEC = 4,
	ITER_PIPE = 8,
};
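
/*
 * The type field of an iov_iter carries one of the ITER_* flavours above
 * OR'd with the data direction (READ or WRITE) in bit 0, which is why the
 * flavours are distinct powers of two starting at 2.  iov_iter_rw() below
 * masks the direction back out.
 */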

struct iov_iter {
	int type;
	size_t iov_offset;
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			int idx;
			int start_idx;
		};
	};
};

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
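
/*
 * For instance, on a 32-bit kernel two unvalidated segments of 0x80000000
 * bytes each would sum to 0x100000000, which wraps to 0 in a size_t.
 */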

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}

#define iov_for_each(iov, iter, start)				\
	if (!((start).type & (ITER_BVEC | ITER_PIPE)))		\
	for (iter = (start);					\
	     (iter).count &&					\
	     ((iov = iov_iter_iovec(&(iter))), 1);		\
	     iov_iter_advance(&(iter), (iov).iov_len))
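
/*
 * Typical iov_for_each() usage, as a sketch ("src" is assumed to be an
 * already-initialized iovec- or kvec-backed iterator; the loop walks a
 * private copy, so "src" itself is not advanced):
 *
 *	struct iovec iov;
 *	struct iov_iter copy;
 *
 *	iov_for_each(iov, copy, src)
 *		pr_debug("segment at %p, %zu bytes\n", iov.iov_base,
 *			 iov.iov_len);
 */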

unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users like pmem that depend on copy_from_iter_flushcache() having
 * stricter semantics than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
static inline size_t copy_from_iter_flushcache(void *addr, size_t bytes,
					       struct iov_iter *i)
{
	return copy_from_iter_nocache(addr, bytes, i);
}
#endif
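
/*
 * A sketch of the pmem-style caller pattern the comment above describes
 * (illustrative only): the fallback silently degrades to the nocache copy,
 * so callers needing the flush-on-return guarantee must check the config
 * symbol themselves.
 *
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		return -EOPNOTSUPP;	// or arrange an explicit flush
 *	copied = copy_from_iter_flushcache(dst, bytes, iter);
 */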
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
		   unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, int direction, struct pipe_inode_info *pipe,
		   size_t count);
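
/*
 * Constructor usage, as an illustrative sketch (not taken from a real
 * caller): wrap a single kernel buffer in a kvec-backed iterator and copy
 * data out to it.  Note the assumption that with this version of the API
 * the ITER_KVEC flag is OR'd into the direction argument.
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *	size_t copied;
 *
 *	iov_iter_kvec(&iter, ITER_KVEC | READ, &kv, 1, len);
 *	copied = copy_to_iter(src, len, &iter);
 */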
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			   size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
				 size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return !(i->type & (ITER_BVEC | ITER_KVEC | ITER_PIPE));
}

/*
 * Get one of READ or WRITE out of iter->type without any other flags OR'd in
 * with it.
 *
 * The ?: is just for type safety.
 */
#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE))

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
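
/*
 * truncate and reexpand pair up; a sketch of the usual pattern, where a
 * caller temporarily caps the iterator at "part" bytes (assuming "part"
 * does not exceed the initial count), consumes some of it, and then
 * restores whatever was hidden by the truncation:
 *
 *	size_t old_count = iov_iter_count(iter);
 *
 *	iov_iter_truncate(iter, part);
 *	// ...consume at most "part" bytes of iter...
 *	iov_iter_reexpand(iter, old_count - part + iov_iter_count(iter));
 */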
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);
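
/*
 * Typical calling pattern for import_iovec(), sketched from its contract:
 * "iov" starts out pointing at an on-stack fast array; on return it is
 * either NULL (the fast array was used) or a heap allocation, so an
 * unconditional kfree() is safe either way.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ...use iter...
 *	kfree(iov);
 */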

#ifdef CONFIG_COMPAT
struct compat_iovec;
int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
			unsigned nr_segs, unsigned fast_segs,
			struct iovec **iov, struct iov_iter *i);
#endif

int import_single_range(int type, void __user *buf, size_t len,
			struct iovec *iov, struct iov_iter *i);

#endif