/linux/arch/arm64/mm/hugetlbpage.c
      66  case PUD_SIZE:   in arch_hugetlb_migration_supported()
     131  case PUD_SIZE:   in num_contig_ptes()
     274  if (sz == PUD_SIZE) {   in huge_pte_alloc()
     322  if (sz != PUD_SIZE && pud_none(pud))   in huge_pte_offset()
     354  } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {   in arch_make_huge_pte()
     511  case PUD_SIZE:   in arch_hugetlb_valid_size()
/linux/include/asm-generic/pgtable-nopud.h
      20  #define PUD_SIZE (1UL << PUD_SHIFT)
      21  #define PUD_MASK (~(PUD_SIZE-1))
/linux/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
      36  #define PUD_SIZE (1UL << PUD_SHIFT)
      37  #define PUD_MASK (~(PUD_SIZE-1))
/linux/arch/x86/include/asm/pgtable_64_types.h
     100  #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
     101  #define PUD_MASK (~(PUD_SIZE - 1))
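Each architecture above defines the same pair: PUD_SIZE is the span of virtual address space covered by a single PUD entry, and PUD_MASK clears the offset bits below that span. The snippet below is a minimal userspace sketch, not kernel code: it assumes the x86-64 4-level layout from pgtable_64_types.h above (PUD_SHIFT is 30, so one PUD entry covers 1 GiB) and defines local stand-ins for the kernel's IS_ALIGNED/ALIGN_DOWN/ALIGN helpers to show the alignment and rounding idioms that appear at the call sites listed on this page.

    /*
     * Hypothetical userspace sketch. PUD_SHIFT = 30 is the x86-64 4-level
     * value; IS_ALIGNED/ALIGN_DOWN/ALIGN are local stand-ins for the kernel
     * macros of the same names.
     */
    #include <stdio.h>

    #define PUD_SHIFT  30
    #define PUD_SIZE   (1UL << PUD_SHIFT)   /* 1 GiB covered per PUD entry */
    #define PUD_MASK   (~(PUD_SIZE - 1))    /* clears the low PUD_SHIFT bits */

    #define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)
    #define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))
    #define ALIGN(x, a)       (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long addr = 0x7f1234567000UL;

        printf("PUD_SIZE:    0x%lx (%lu MiB)\n", PUD_SIZE, PUD_SIZE >> 20);
        printf("aligned:     %d\n", IS_ALIGNED(addr, PUD_SIZE));
        /* Start of the PUD-sized region containing addr, as in
         * ALIGN_DOWN(hva, PUD_SIZE) in arch/arm64/kvm/mmu.c. */
        printf("round down:  0x%lx\n", ALIGN_DOWN(addr, PUD_SIZE));
        /* Next PUD boundary above addr; the same arithmetic as
         * (start + PUD_SIZE) & PUD_MASK in unmap_pud_range(). */
        printf("next bound:  0x%lx\n", (addr + PUD_SIZE) & PUD_MASK);
        /* End of the range rounded up to a PUD boundary, as in
         * ALIGN(hva, PUD_SIZE). */
        printf("round up:    0x%lx\n", ALIGN(addr, PUD_SIZE));
        return 0;
    }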
/linux/arch/powerpc/mm/book3s64/radix_pgtable.c
      89  if (map_page_size == PUD_SIZE) {   in early_map_kernel_page()
     155  if (map_page_size == PUD_SIZE) {   in __map_kernel_page()
     291  if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&   in create_physical_mapping()
     293  mapping_size = PUD_SIZE;   in create_physical_mapping()
     815  if (!IS_ALIGNED(addr, PUD_SIZE) ||   in remove_pud_table()
     816  !IS_ALIGNED(next, PUD_SIZE)) {   in remove_pud_table()
    1119  flush_tlb_kernel_range(addr, addr + PUD_SIZE);   in pud_free_pmd_page()
/linux/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
      38  if (end - start >= PUD_SIZE)   in radix__flush_hugetlb_tlb_range()
/linux/drivers/dax/device.c
     154  unsigned int fault_size = PUD_SIZE;   in __dev_dax_pud_fault()
     160  if (dev_dax->align > PUD_SIZE) {   in __dev_dax_pud_fault()
     173  (pud_addr + PUD_SIZE) > vmf->vma->vm_end)   in __dev_dax_pud_fault()
     177  phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);   in __dev_dax_pud_fault()
     220  fault_size = PUD_SIZE;   in dev_dax_huge_fault()
/linux/drivers/dax/dax-private.h
      94  if (align == PUD_SIZE && IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))   in dax_align_valid()
/linux/arch/s390/mm/vmem.c
     293  const unsigned long end = start + PUD_SIZE;   in try_free_pmd_table()
     330  if (IS_ALIGNED(addr, PUD_SIZE) &&   in modify_pud_table()
     331  IS_ALIGNED(next, PUD_SIZE)) {   in modify_pud_table()
     338  if (IS_ALIGNED(addr, PUD_SIZE) &&   in modify_pud_table()
     339  IS_ALIGNED(next, PUD_SIZE) &&   in modify_pud_table()
/linux/arch/s390/mm/hugetlbpage.c
     140  size = PUD_SIZE;   in clear_huge_pte_skeys()
     205  if (sz == PUD_SIZE)   in huge_pte_alloc()
     261  else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE)   in arch_hugetlb_valid_size()
/linux/arch/s390/mm/kasan_init.c
     150  IS_ALIGNED(address, PUD_SIZE) &&   in kasan_early_pgtable_populate()
     151  end - address >= PUD_SIZE) {   in kasan_early_pgtable_populate()
     154  address = (address + PUD_SIZE) & PUD_MASK;   in kasan_early_pgtable_populate()
/linux/arch/x86/mm/kasan_init_64.c
      83  ((end - addr) == PUD_SIZE) &&   in kasan_populate_pud()
      84  IS_ALIGNED(addr, PUD_SIZE)) {   in kasan_populate_pud()
      85  p = early_alloc(PUD_SIZE, nid, false);   in kasan_populate_pud()
      88  memblock_free(p, PUD_SIZE);   in kasan_populate_pud()
/linux/arch/x86/mm/init.c
     343  unsigned long start = round_down(mr[i].start, PUD_SIZE);   in adjust_range_page_size_mask()
     344  unsigned long end = round_up(mr[i].end, PUD_SIZE);   in adjust_range_page_size_mask()
     415  end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));   in split_mem_range()
     428  start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));   in split_mem_range()
     429  end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));   in split_mem_range()
/linux/arch/x86/mm/kaslr.c
     136  vaddr = round_up(vaddr + 1, PUD_SIZE);   in kernel_randomize_memory()
/linux/arch/x86/mm/mem_encrypt_identity.c
     274  entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;   in sme_pgtable_calc()
     285  tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;   in sme_pgtable_calc()
/linux/mm/kasan/init.c
     150  if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {   in zero_pud_populate()
     401  if (IS_ALIGNED(addr, PUD_SIZE) &&   in kasan_remove_pud_table()
     402  IS_ALIGNED(next, PUD_SIZE)) {   in kasan_remove_pud_table()
/linux/arch/arc/include/asm/pgtable-levels.h
      75  #define PUD_SIZE BIT(PUD_SHIFT)
      76  #define PUD_MASK (~(PUD_SIZE - 1))
/linux/arch/x86/mm/pat/set_memory.c
    1195  if (start & (PUD_SIZE - 1)) {   in unmap_pud_range()
    1196  unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;   in unmap_pud_range()
    1208  while (end - start >= PUD_SIZE) {   in unmap_pud_range()
    1213  unmap_pmd_range(pud, start, start + PUD_SIZE);   in unmap_pud_range()
    1215  start += PUD_SIZE;   in unmap_pud_range()
    1358  if (start & (PUD_SIZE - 1)) {   in populate_pud()
    1360  unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;   in populate_pud()
    1393  while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {   in populate_pud()
    1397  start += PUD_SIZE;   in populate_pud()
    1398  cpa->pfn += PUD_SIZE >> PAGE_SHIFT;   in populate_pud()
    [all …]
/linux/arch/arm64/kvm/mmu.c
     889  if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&   in get_vma_page_shift()
     890  ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start &&   in get_vma_page_shift()
     891  ALIGN(hva, PUD_SIZE) <= vma->vm_end)   in get_vma_page_shift()
    1002  if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))   in user_mem_abort()
    1024  if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)   in user_mem_abort()
    1351  WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);   in kvm_age_gfn()
/linux/arch/riscv/mm/hugetlbpage.c
      19  else if (IS_ENABLED(CONFIG_64BIT) && size == PUD_SIZE)   in arch_hugetlb_valid_size()
/linux/arch/mips/include/asm/pgtable-64.h
      60  #define PUD_SIZE (1UL << PUD_SHIFT)
      61  #define PUD_MASK (~(PUD_SIZE-1))
/linux/arch/sparc/mm/hugetlbpage.c
     293  if (sz >= PUD_SIZE)   in huge_pte_alloc()
     340  if (size >= PUD_SIZE)   in set_huge_pte_at()
     377  if (size >= PUD_SIZE)   in huge_ptep_get_and_clear()
/linux/arch/arm64/include/asm/pgtable-hwdef.h
      60  #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
      61  #define PUD_MASK (~(PUD_SIZE-1))
/linux/arch/x86/boot/compressed/kaslr.c
     236  if (memparse(p, &p) != PUD_SIZE) {   in parse_gb_huge_pages()
     523  pud_start = ALIGN(region->start, PUD_SIZE);   in process_gb_huge_pages()
     524  pud_end = ALIGN_DOWN(region->start + region->size, PUD_SIZE);   in process_gb_huge_pages()
/linux/arch/x86/xen/xen-head.S
      92  ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M, .quad (PUD_SIZE * PTRS_PER_PUD))