/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};
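
/*
 * Example (a minimal sketch, not part of this header): describing a
 * kernel buffer with a kvec and wrapping it in an iov_iter via
 * iov_iter_kvec(), declared below.  @buf and @len are an assumed,
 * valid kernel buffer and its size; READ means data will be copied
 * *into* the buffer.
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);
 */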

enum iter_type {
	/* iter types */
	ITER_IOVEC,
	ITER_KVEC,
	ITER_BVEC,
	ITER_PIPE,
	ITER_XARRAY,
	ITER_DISCARD,
};

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool nofault;
	bool data_source;
	size_t iov_offset;
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct xarray *xarray;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			unsigned int head;
			unsigned int start_head;
		};
		loff_t xarray_start;
	};
};

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}
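
/*
 * Example (a minimal sketch): snapshot an iterator before a copy that
 * may have to be retried, then rewind with iov_iter_restore(),
 * declared below.  do_copy() is a hypothetical helper.
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_copy(iter);
 *	if (ret == -EAGAIN)
 *		iov_iter_restore(iter, &state);
 */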

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}
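
/*
 * Example (a minimal sketch): dispatching on the iterator type to take
 * a fast path only for plain user iovecs.  Both helpers are
 * hypothetical.
 *
 *	if (iter_is_iovec(i))
 *		return do_user_fast_path(i);
 *	return do_generic_path(i);
 */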

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}
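
/*
 * Example (a minimal sketch): WRITE means the iterator is a source of
 * data (e.g. the write(2) path), READ means it is a destination:
 *
 *	if (iov_iter_rw(iter) == WRITE)
 *		handle_outgoing_data(iter);	// hypothetical helper
 */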

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths
 * can overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
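
/*
 * Example (a minimal sketch of the validation mentioned above, using
 * MAX_RW_COUNT from <linux/fs.h> as the cap): the import helpers
 * declared at the end of this header perform this kind of validation
 * for you; open-coded it might look like:
 *
 *	size_t total = 0;
 *
 *	for (seg = 0; seg < nr_segs; seg++) {
 *		if (iov[seg].iov_len > MAX_RW_COUNT - total)
 *			return -EINVAL;
 *		total += iov[seg].iov_len;
 *	}
 */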

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
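
/*
 * Example (a minimal sketch): all-or-nothing copy of a fixed-size
 * header out of an iterator.  struct foo_hdr is hypothetical.  On
 * failure the iterator has already been reverted to its prior
 * position.
 *
 *	struct foo_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *		return -EFAULT;
 */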

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users like pmem that depend on copy_from_iter_flushcache()
 * having stricter semantics than copy_from_iter_nocache() must check
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that
 * the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
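
/*
 * Example (a minimal sketch of the check described above): only rely
 * on the flush-on-return guarantee when the architecture provides it.
 *
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		return -EOPNOTSUPP;	// or fall back to explicit flushing
 *	rem = copy_from_iter_flushcache(dst, len, iter);
 */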

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_flushcache(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_mc_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_mc_to_iter(addr, bytes, i);
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
		   unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
		   size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
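
/*
 * Example (a minimal sketch, assuming <linux/bvec.h> for struct
 * bio_vec): build a bvec-backed iterator over a single, already
 * allocated page and zero it through the iov_iter API.  READ marks
 * the iterator as a destination.
 *
 *	struct bio_vec bv = { .bv_page = page, .bv_len = PAGE_SIZE,
 *			      .bv_offset = 0 };
 *	struct iov_iter iter;
 *
 *	iov_iter_bvec(&iter, READ, &bv, 1, PAGE_SIZE);
 *	iov_iter_zero(PAGE_SIZE, &iter);
 */
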
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			   size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
				 size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just
 * do nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}
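
/*
 * Example (a minimal sketch): cap an iterator for a bounded operation,
 * then re-expand it afterwards.  do_bounded_io() is a hypothetical
 * helper that consumes some bytes, and @limit is assumed to be no
 * larger than the current count.
 *
 *	size_t old = iov_iter_count(iter);
 *
 *	iov_iter_truncate(iter, limit);
 *	ret = do_bounded_io(iter);
 *	iov_iter_reexpand(iter, old - (limit - iov_iter_count(iter)));
 */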

/*
 * Reexpand a previously truncated iterator; count must be no more than
 * how much we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

struct csum_state {
	__wsum csum;
	size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
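
/*
 * Example (a minimal sketch of a networking-style caller): copy a
 * payload out of the iterator while accumulating an Internet checksum
 * over the copied bytes; @csum is both seed and result.
 *
 *	__wsum csum = 0;
 *
 *	if (!csum_and_copy_from_iter_full(buf, len, &csum, iter))
 *		return -EFAULT;
 *	// csum now covers the copied data; fold/combine as needed
 */
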
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
			     struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		     unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		     struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		       unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		       struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
			struct iovec *iov, struct iov_iter *i);

#endif