/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page:	Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is no
 * mapping slot available the function blocks until a slot is released via
 * kunmap().
 */
static inline void *kmap(struct page *page);

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page:	Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);
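
/*
 * Example (illustrative sketch, not part of the kernel API): the
 * kmap()/kunmap() pattern for a mapping whose pointer is handed to
 * another context. The structure and helper names are hypothetical.
 *
 *	struct shared_buf {
 *		struct page *page;
 *		void *vaddr;
 *	};
 *
 *	static void shared_buf_map(struct shared_buf *buf)
 *	{
 *		buf->vaddr = kmap(buf->page);
 *	}
 *
 *	static void shared_buf_unmap(struct shared_buf *buf)
 *	{
 *		kunmap(buf->page);
 *		buf->vaddr = NULL;
 *	}
 *
 * shared_buf_map() may sleep and must run in preemptible task context;
 * the resulting buf->vaddr stays valid until shared_buf_unmap().
 */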

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr:	The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *		       remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page:	Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation:
 *
 * addr1 = kmap_local_page(page1);
 * addr2 = kmap_local_page(page2);
 * ...
 * kunmap_local(addr2);
 * kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity. Only use when really
 * necessary.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);
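
/*
 * Example (illustrative sketch): the common short-lived pattern, mapping
 * a page only around the access and unmapping immediately afterwards.
 * The helper name and parameters are hypothetical.
 *
 *	static void fill_page(struct page *page, const void *data, size_t len)
 *	{
 *		void *vaddr = kmap_local_page(page);
 *
 *		memcpy(vaddr, data, len);
 *		kunmap_local(vaddr);
 *	}
 */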

/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *   addr1 = kmap_local_folio(folio1, offset1);
 *   addr2 = kmap_local_folio(folio2, offset2);
 *   ...
 *   kunmap_local(addr2);
 *   kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity. Only use when really
 * necessary.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset);
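
/*
 * Example (illustrative sketch): zeroing from a byte offset to the end of
 * the page backing that offset in a large folio. The helper is
 * hypothetical.
 *
 *	static void folio_zero_to_page_end(struct folio *folio, size_t offset)
 *	{
 *		void *vaddr = kmap_local_folio(folio, offset);
 *
 *		memset(vaddr, 0, PAGE_SIZE - offset_in_page(offset));
 *		kunmap_local(vaddr);
 *	}
 */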

/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page:	Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Effectively a wrapper around kmap_local_page() which disables pagefaults
 * and preemption.
 *
 * Do not use in new code. Use kmap_local_page() instead.
 */
static inline void *kmap_atomic(struct page *page);
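
/*
 * Example (illustrative sketch): converting a legacy kmap_atomic() user.
 * Old pattern:
 *
 *	vaddr = kmap_atomic(page);
 *	...
 *	kunmap_atomic(vaddr);
 *
 * Preferred replacement; add explicit pagefault_disable()/preempt_disable()
 * pairs only if the code actually relied on those side effects:
 *
 *	vaddr = kmap_local_page(page);
 *	...
 *	kunmap_local(vaddr);
 */
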
147 
148 /**
149  * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
150  * @addr:	Virtual address to be unmapped
151  *
152  * Counterpart to kmap_atomic().
153  *
154  * Effectively a wrapper around kunmap_local() which additionally undoes
155  * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
156  * preemption.
157  */
158 
159 /* Highmem related interfaces for management code */
160 static inline unsigned int nr_free_highpages(void);
161 static inline unsigned long totalhigh_pages(void);
162 
163 #ifndef ARCH_HAS_FLUSH_ANON_PAGE
flush_anon_page(struct vm_area_struct * vma,struct page * page,unsigned long vmaddr)164 static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
165 {
166 }
167 #endif
168 
169 #ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
flush_kernel_vmap_range(void * vaddr,int size)170 static inline void flush_kernel_vmap_range(void *vaddr, int size)
171 {
172 }
invalidate_kernel_vmap_range(void * vaddr,int size)173 static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
174 {
175 }
176 #endif
177 
178 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
179 #ifndef clear_user_highpage
clear_user_highpage(struct page * page,unsigned long vaddr)180 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
181 {
182 	void *addr = kmap_local_page(page);
183 	clear_user_page(addr, vaddr, page);
184 	kunmap_local(addr);
185 }
186 #endif
187 
188 #ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
189 /**
190  * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
191  * @vma: The VMA the page is to be allocated for
192  * @vaddr: The virtual address the page will be inserted into
193  *
 * This function will allocate a page for a VMA that the caller knows can
 * be migrated in the future using move_pages() or reclaimed.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing its own
 * implementation.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
				   unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif
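
/*
 * Example (illustrative sketch): allocating a zeroed movable page from a
 * hypothetical fault handler.
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page;
 *
 *		page = alloc_zeroed_user_highpage_movable(vmf->vma,
 *							  vmf->address);
 *		if (!page)
 *			return VM_FAULT_OOM;
 *		vmf->page = page;
 *		return 0;
 *	}
 */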

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_local_page(page);
	clear_page(kaddr);
	kunmap_local(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_local(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
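
/*
 * Example (illustrative sketch): zeroing the tail of a partially written
 * page, e.g. the bytes past a short write; "bytes_written" is
 * hypothetical.
 *
 *	zero_user_segment(page, bytes_written, PAGE_SIZE);
 */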

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memmove_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memmove(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}
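
/*
 * Example (illustrative sketch): the helpers above replace open-coded
 * map/copy/unmap sequences; the buffer and offsets are hypothetical.
 *
 *	char hdr[64];
 *
 *	memcpy_from_page(hdr, page, 0, sizeof(hdr));
 *	memcpy_to_page(page, PAGE_SIZE - sizeof(hdr), hdr, sizeof(hdr));
 */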

static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);
	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}

/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
		size_t start1, size_t xend1, size_t start2, size_t xend2)
{
	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}

/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
		size_t start, size_t xend)
{
	zero_user_segments(&folio->page, start, xend, 0, 0);
}

/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
		size_t start, size_t length)
{
	zero_user_segments(&folio->page, start, start + length, 0, 0);
}
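
/*
 * Example (illustrative sketch): zeroing the part of a folio beyond a new
 * end-of-file, e.g. after truncation; "isize" is hypothetical.
 *
 *	size_t offset = offset_in_folio(folio, isize);
 *
 *	folio_zero_range(folio, offset, folio_size(folio) - offset);
 */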

#endif /* _LINUX_HIGHMEM_H */