/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
#include <asm/cpu-features.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_SHARED	vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED)

#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
				 __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
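/*
 * PAGE_KERNEL picks up the cache policy selected at runtime
 * (_page_cachable_default); the _NC and _UNCACHED variants force
 * noncoherent-cached and uncached attributes for kernel mappings.
 */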

/*
 * If _PAGE_NO_EXEC is not defined, we can't enforce execute permission,
 * so execute is treated the same as read. Also, write permission
 * implies read permission. This is the closest we can get by
 * reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c.
 * The real values will be generated at runtime.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

extern unsigned long _page_cachable_default;
extern void __update_cache(unsigned long address, pte_t pte);

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
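/*
 * empty_zero_page spans enough pages to cover every cache colour;
 * zero_page_mask selects the copy whose colour matches vaddr, so
 * virtually indexed caches always see a consistently coloured page.
 */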

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#ifndef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop()							\
do {									\
	unsigned long __flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(__flags);				\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(__flags);				\
	}								\
} while (0)

#define htw_start()							\
do {									\
	unsigned long __flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(__flags);				\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(__flags);				\
	}								\
} while (0)
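/*
 * htw_stop()/htw_start() nest: the hardware page table walker is
 * disabled when htw_seq goes 0 -> 1 and re-enabled only when the
 * matching start brings it back to 0. A typical caller brackets
 * software page table updates:
 *
 *	htw_stop();
 *	... modify PTEs ...
 *	htw_start();
 */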

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval);
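/*
 * Forward declaration: pte_clear() below uses set_pte_at() before its
 * definition further down in this file.
 */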

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
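/*
 * With 64-bit physical addresses on a 32-bit CPU a PTE is split into
 * pte_low/pte_high words, so the helpers in this block must keep both
 * halves consistent.
 */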

#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3K_TLB)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
		cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
		cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
	}
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3K_TLB)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif
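/*
 * set_pte_at() only needs to synchronise caches when a present PTE
 * starts mapping a different pfn; every other transition is a plain
 * set_pte().
 */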

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	if (!pte_present(pteval))
		goto cache_sync_done;

	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
		goto cache_sync_done;

	__update_cache(addr, pteval);
cache_sync_done:
	set_pte(ptep, pteval);
}

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
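/*
 * log2 of the entry sizes: lets generated code index page tables with
 * shifts instead of multiplies (e.g. in the TLB refill handlers).
 */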

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * Platform specific pte_special() and pte_mkspecial() definitions
 * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
 */
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_special(pte_t pte)
{
	return pte.pte_low & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte.pte_low |= _PAGE_SPECIAL;
	return pte;
}
#else
static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#endif
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low &= ~_PAGE_WRITE;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low &= ~_PAGE_MODIFIED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low &= ~_PAGE_ACCESSED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_READ;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (!(pte.pte_low & _PAGE_NO_READ)) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

#define pte_sw_mkyoung	pte_mkyoung
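/*
 * MIPS manages the accessed bit in software, so generic code may set
 * it eagerly through pte_sw_mkyoung() and avoid a later minor fault.
 */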

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_val(pmd) & _PAGE_HUGE)
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_SOFT_DIRTY);
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#endif

/*
 * Macro to mark a page protection value as "uncacheable". Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}
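/*
 * Typical driver usage (sketch) before mapping device memory into
 * userspace:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			       vma->vm_page_prot);
 */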

static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address)
{
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	if ((pte_val(pte) & _PAGE_ACCESSED) && !(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
#endif

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
}

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb	update_mmu_cache
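/*
 * update_mmu_cache() is called by generic mm code once a PTE has been
 * installed; here it just preloads the new translation into the TLB,
 * and update_mmu_tlb() reuses it for the same purpose.
 */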

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

/*
 * Allow physical addresses to be fixed up to help 36-bit peripherals.
 */
#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
		unsigned long pfn, unsigned long size, pgprot_t prot);
#define io_remap_pfn_range io_remap_pfn_range
#else
#define fixup_bigphys_addr(addr, size)	(addr)
#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */
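/*
 * On these platforms io_remap_pfn_range() is expected to run the
 * physical address through fixup_bigphys_addr() before establishing
 * the mapping.
 */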

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pmd_soft_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_SOFT_DIRTY);
	return pmd;
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version of pmdp_huge_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef _PAGE_HUGE
#define pmd_leaf(pmd)	((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)	((pud_val(pud) & _PAGE_HUGE) != 0)
#endif
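/*
 * A "leaf" pmd/pud maps memory directly (a huge page) instead of
 * pointing at a lower-level page table.
 */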

#define gup_fast_permitted(start, end)	(!cpu_has_dc_aliases)

/*
 * We provide our own get_unmapped_area() to cope with the virtual
 * aliasing constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_PGTABLE_H */