/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
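
/*
 * Illustrative use (a sketch, not a definition from this file): drivers
 * commonly apply this before mapping device memory into userspace, e.g.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */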

/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>
#include <asm/pkru.h>
#include <asm/fpu/api.h>
#include <asm-generic/pgtable_uffd.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
				   bool user);
void ptdump_walk_pgd_level_checkwx(void);
void ptdump_walk_user_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()		ptdump_walk_pgd_level_checkwx()
#define debug_checkwx_user()	ptdump_walk_user_pgd_level_checkwx()
#else
#define debug_checkwx()		do { } while (0)
#define debug_checkwx_user()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
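
/*
 * Example (illustrative): anonymous read faults can map this page so that
 * untouched virtual memory reads back as zeroes without allocating anything:
 *
 *	struct page *zero = ZERO_PAGE(address);
 */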

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT_XXL */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			(pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do { } while (0)
#endif	/* CONFIG_PARAVIRT_XXL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}
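
/*
 * Illustrative sketch of the pte_present() precondition stated above
 * (assumed caller-side code, not defined in this file):
 *
 *	if (pte_present(pte) && pte_dirty(pte))
 *		writeback_needed = true;
 */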

/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);

static inline unsigned long pte_pfn(pte_t pte)
{
	phys_addr_t pfn = pte_val(pte);
	pfn ^= protnone_mask(pfn);
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	phys_addr_t pfn = pmd_val(pmd);
	pfn ^= protnone_mask(pfn);
	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	phys_addr_t pfn = pud_val(pud);
	pfn ^= protnone_mask(pfn);
	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
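
/*
 * Example (a sketch): recovering the physical address behind a present PTE.
 * The pfn is the physical page number, so shifting by PAGE_SHIFT yields the
 * page's physical base address:
 *
 *	phys_addr_t phys = (phys_addr_t)pte_pfn(pte) << PAGE_SHIFT;
 */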

#define p4d_leaf	p4d_large
static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* NOTE: when testing for a huge page, also consider pmd_devmap(), or use pmd_large() */
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_UFFD_WP;
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_UFFD_WP);
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}
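
/*
 * Example (illustrative): these helpers are pure value transforms, so they
 * compose; a fault-handling path might construct a writable, referenced and
 * dirtied entry as
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
 */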

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pmd_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_UFFD_WP;
}

static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_UFFD_WP);
}

static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pgprotval_t check_pgprot(pgprot_t pgprot)
{
	pgprotval_t massaged_val = massage_pgprot(pgprot);

	/* mmdebug.h cannot be included here because of dependencies */
#ifdef CONFIG_DEBUG_VM
	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
		  "attempted to set unsupported pgprot: %016llx "
		  "bits: %016llx supported: %016llx\n",
		  (u64)pgprot_val(pgprot),
		  (u64)pgprot_val(pgprot) ^ massaged_val,
		  (u64)__supported_pte_mask);
#endif

	return massaged_val;
}
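
/*
 * Example (a sketch): on a CPU without NX support, __supported_pte_mask has
 * _PAGE_NX cleared, so for a *present* pgprot the NX bit is silently dropped:
 *
 *	massage_pgprot(__pgprot(_PAGE_PRESENT | _PAGE_NX))
 *		== _PAGE_PRESENT;	// when NX is unsupported
 */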

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | check_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | check_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PUD_PAGE_MASK;
	return __pud(pfn | check_pgprot(pgprot));
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
		      __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
	return __pte(val);
}
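
/*
 * Illustrative use (a sketch of the mprotect()-style path, not code from
 * this file): keep the pfn and the bits in _PAGE_CHG_MASK, replace the
 * rest with the new protection:
 *
 *	pte = pte_modify(pte, vm_get_page_prot(vma->vm_flags));
 */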

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;

	val &= _HPAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
	return __pmd(val);
}

/*
 * mprotect needs to preserve PAT and encryption bits when updating
 * vm_page_prot
 */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	return canon_pgprot(prot);
}

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
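
/*
 * Example (illustrative): a write-combine request that PAT can only satisfy
 * with UC- is accepted, but one that would come back write-back is refused,
 * since WB caching on the range could break the caller's assumptions:
 *
 *	is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_WC,
 *			       _PAGE_CACHE_MODE_UC_MINUS);	// 1
 *	is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_WC,
 *			       _PAGE_CACHE_MODE_WB);		// 0
 */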

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);

/*
 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user and returns the resulting PGD that must be set in
 * the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgtbl(pgdp, pgd);
}
#else   /* CONFIG_PAGE_TABLE_ISOLATION */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif  /* CONFIG_PAGE_TABLE_ISOLATION */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/*
	 * Only check the low word on 32-bit platforms, since it might be
	 * out of sync with the upper half.
	 */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
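
/*
 * Worked example: with 4 KiB pages, PAGE_SHIFT is 12, so the shift is
 * 20 - 12 = 8 and one MiB holds 2^8 = 256 pages; pages_to_mb(512) == 2.
 */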

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 1;
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	unsigned long ignore_flags = _PAGE_USER;

	if (!pgtable_l5_enabled())
		return 0;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 0;
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);
void __init poking_init(void);
unsigned long init_memory_mapping(unsigned long start,
				  unsigned long end, pgprot_t prot);

#ifdef CONFIG_X86_64
extern pgd_t trampoline_pgd_entry;
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	if (IS_ENABLED(CONFIG_SMP)) {
		return xchg(pmdp, pmd);
	} else {
		pmd_t old = *pmdp;
		WRITE_ONCE(*pmdp, pmd);
		return old;
	}
}
#endif
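
/*
 * Illustrative use (a sketch, not code from this file): THP code replaces a
 * pmd in one shot and still learns what was there before, e.g. to decide
 * whether a TLB flush is owed:
 *
 *	old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
 */
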
/*
 * Page table pages are page-aligned.  The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
}

#define pgd_leaf	pgd_large
static inline int pgd_large(pgd_t pgd) { return 0; }

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
 * (8k-aligned and 8k in size).  The kernel one is at the beginning 4k and
 * the user one is in the last 4k.  To switch between them, you
 * just need to flip the 12th bit in their addresses.
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT

/*
 * This generates better code than the inline assembly in
 * __set_bit().
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}

static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}

static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}
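
/*
 * Worked example: with 4 KiB pages, PTI_PGTABLE_SWITCH_BIT is 12.  An
 * order-1 pgd allocation at 8k-aligned address X has bit 12 clear, so
 * kernel_to_user_pgdp() returns X | 0x1000, i.e. the user copy in the
 * upper 4k half, and user_to_kernel_pgdp() maps it back.
 */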
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;
	/* Clone the user space pgd as well */
	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
	       count * sizeof(pgd_t));
#endif
}
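
/*
 * Illustrative use (a sketch of the pgd-constructor pattern): copy the
 * kernel half of the reference page tables into a fresh pgd:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */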

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}

static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}

static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
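
/*
 * Worked example (x86-64, 4 KiB base pages): PTE_SHIFT is ilog2(512) = 9,
 * so page_level_shift() yields 12 for PG_LEVEL_4K, 21 for PG_LEVEL_2M and
 * 30 for PG_LEVEL_1G, giving page_level_size() of 4 KiB, 2 MiB and 1 GiB.
 */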

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_UFFD_WP);
}

static inline int pte_swp_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_UFFD_WP;
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_UFFD_WP);
}

static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_UFFD_WP);
}

static inline int pmd_swp_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_UFFD_WP;
}

static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}
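
/*
 * Worked example: the protection-key field occupies PTE bits 62:59, so for
 * a 64-bit entry carrying pkey 5 the flags hold 5UL << _PAGE_BIT_PKEY_BIT0
 * (bit 59), and pte_flags_pkey() shifts it back down to 5.
 */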

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);

static inline bool arch_has_pfn_modify_check(void)
{
	return boot_cpu_has_bug(X86_BUG_L1TF);
}

#define arch_faults_on_old_pte arch_faults_on_old_pte
static inline bool arch_faults_on_old_pte(void)
{
	return false;
}

#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */