Home
last modified time | relevance | path

Searched refs:PMD_SIZE (Results 1 – 25 of 114) sorted by relevance

Pages: 1 2 3 4 5

/linux/arch/riscv/mm/
init.c:382 if (sz == PMD_SIZE) { in create_pmd_mapping()
447 if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1))) in best_map_size()
450 return PMD_SIZE; in best_map_size()
531 PMD_SIZE, PAGE_KERNEL_EXEC); in create_kernel_page_table()
538 PMD_SIZE, PAGE_KERNEL); in create_kernel_page_table()
549 PMD_SIZE, in create_kernel_page_table()
563 uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1); in create_fdt_early_page_table()
572 pa, PMD_SIZE, PAGE_KERNEL); in create_fdt_early_page_table()
574 pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL); in create_fdt_early_page_table()
617 BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0); in setup_vm()
[all …]
kasan_init.c:91 if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) { in kasan_populate_pmd()
92 phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE); in kasan_populate_pmd()
/linux/arch/s390/mm/
vmem.c:113 if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE))) in vmemmap_use_sub_pmd()
123 unsigned long page = ALIGN_DOWN(start, PMD_SIZE); in vmemmap_use_new_sub_pmd()
131 if (!IS_ALIGNED(start, PMD_SIZE)) in vmemmap_use_new_sub_pmd()
138 if (!IS_ALIGNED(end, PMD_SIZE)) in vmemmap_use_new_sub_pmd()
145 unsigned long page = ALIGN_DOWN(start, PMD_SIZE); in vmemmap_unuse_sub_pmd()
229 if (IS_ALIGNED(addr, PMD_SIZE) && in modify_pmd_table()
230 IS_ALIGNED(next, PMD_SIZE)) { in modify_pmd_table()
242 if (IS_ALIGNED(addr, PMD_SIZE) && in modify_pmd_table()
243 IS_ALIGNED(next, PMD_SIZE) && in modify_pmd_table()
262 if (!IS_ALIGNED(addr, PMD_SIZE) || in modify_pmd_table()
[all …]
kasan_init.c:163 if (IS_ALIGNED(address, PMD_SIZE) && in kasan_early_pgtable_populate()
164 end - address >= PMD_SIZE) { in kasan_early_pgtable_populate()
167 address = (address + PMD_SIZE) & PMD_MASK; in kasan_early_pgtable_populate()
179 address = (address + PMD_SIZE) & PMD_MASK; in kasan_early_pgtable_populate()
186 address = (address + PMD_SIZE) & PMD_MASK; in kasan_early_pgtable_populate()
/linux/arch/x86/mm/
init.c:331 unsigned long end = round_up(mr[i].end, PMD_SIZE); in adjust_range_page_size_mask()
397 end_pfn = PFN_DOWN(PMD_SIZE); in split_mem_range()
399 end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
401 end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
411 start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
438 start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
619 addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start, in memory_map_top_down()
621 memblock_phys_free(addr, PMD_SIZE); in memory_map_top_down()
622 real_end = addr + PMD_SIZE; in memory_map_top_down()
625 step_size = PMD_SIZE; in memory_map_top_down()
[all …]
init_64.c:373 for (; size; phys += PMD_SIZE, size -= PMD_SIZE) { in __init_extra_mapping()
436 for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { in cleanup_highmap()
887 if (likely(IS_ALIGNED(end, PMD_SIZE))) in vmemmap_use_sub_pmd()
916 if (!IS_ALIGNED(start, PMD_SIZE)) in vmemmap_use_new_sub_pmd()
925 if (!IS_ALIGNED(end, PMD_SIZE)) in vmemmap_use_new_sub_pmd()
1117 if (IS_ALIGNED(addr, PMD_SIZE) && in remove_pmd_table()
1118 IS_ALIGNED(next, PMD_SIZE)) { in remove_pmd_table()
1582 addr_end = addr + PMD_SIZE; in vmemmap_populate_hugepages()
1583 p_end = p + PMD_SIZE; in vmemmap_populate_hugepages()
1585 if (!IS_ALIGNED(addr, PMD_SIZE) || in vmemmap_populate_hugepages()
[all …]
mem_encrypt.c:134 vaddr += PMD_SIZE; in __sme_early_map_unmap_mem()
135 paddr += PMD_SIZE; in __sme_early_map_unmap_mem()
136 size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE; in __sme_early_map_unmap_mem()
/linux/arch/m68k/mm/
kmap.c:50 #define IO_SIZE PMD_SIZE
85 virtaddr += PMD_SIZE; in __free_io_area()
86 size -= PMD_SIZE; in __free_io_area()
248 if (!(virtaddr & (PMD_SIZE-1))) in __ioremap()
263 physaddr += PMD_SIZE; in __ioremap()
264 virtaddr += PMD_SIZE; in __ioremap()
265 size -= PMD_SIZE; in __ioremap()
370 virtaddr += PMD_SIZE; in kernel_set_cachemode()
371 size -= PMD_SIZE; in kernel_set_cachemode()
/linux/arch/parisc/kernel/
pci-dma.c:85 if (end > PMD_SIZE) in map_pte_uncached()
86 end = PMD_SIZE; in map_pte_uncached()
120 vaddr = (vaddr + PMD_SIZE) & PMD_MASK; in map_pmd_uncached()
121 orig_vaddr += PMD_SIZE; in map_pmd_uncached()
170 if (end > PMD_SIZE) in unmap_uncached_pte()
171 end = PMD_SIZE; in unmap_uncached_pte()
210 vaddr = (vaddr + PMD_SIZE) & PMD_MASK; in unmap_uncached_pmd()
211 orig_vaddr += PMD_SIZE; in unmap_uncached_pmd()
/linux/arch/sh/include/asm/
pgtable-3level.h:23 #define PMD_SIZE (1UL << PMD_SHIFT) macro
24 #define PMD_MASK (~(PMD_SIZE-1))
26 #define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE)
/linux/arch/arm64/mm/
hugetlbpage.c:69 case PMD_SIZE: in arch_hugetlb_migration_supported()
117 *pgsize = PMD_SIZE; in find_num_contig()
136 case PMD_SIZE: in num_contig_ptes()
140 *pgsize = PMD_SIZE; in num_contig_ptes()
290 } else if (sz == PMD_SIZE) { in huge_pte_alloc()
334 if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) && in huge_pte_offset()
354 } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) { in arch_make_huge_pte()
515 case PMD_SIZE: in arch_hugetlb_valid_size()
/linux/arch/arm64/kvm/hyp/
reserved_mem.c:95 hyp_mem_base = memblock_phys_alloc(ALIGN(hyp_mem_size, PMD_SIZE), in kvm_hyp_reserve()
96 PMD_SIZE); in kvm_hyp_reserve()
100 hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE); in kvm_hyp_reserve()
/linux/arch/x86/include/asm/
pgtable_32_types.h:12 # define PMD_SIZE (1UL << PMD_SHIFT) macro
13 # define PMD_MASK (~(PMD_SIZE - 1))
pgtable_64_types.h:98 #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) macro
99 #define PMD_MASK (~(PMD_SIZE - 1))
/linux/arch/nios2/mm/
ioremap.c:33 if (end > PMD_SIZE) in remap_area_pte()
34 end = PMD_SIZE; in remap_area_pte()
70 address = (address + PMD_SIZE) & PMD_MASK; in remap_area_pmd()
/linux/arch/x86/kernel/
vmlinux.lds.S:69 #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
70 #define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE);
81 . = ALIGN(PMD_SIZE); \
86 . = ALIGN(PMD_SIZE); \
/linux/include/asm-generic/
pgtable-nopmd.h:22 #define PMD_SIZE (1UL << PMD_SHIFT) macro
23 #define PMD_MASK (~(PMD_SIZE-1))
/linux/arch/arm/mm/
mmu.c:1085 next = (addr + PMD_SIZE - 1) & PMD_MASK; in fill_pmd_gaps()
1177 if (!IS_ALIGNED(block_start, PMD_SIZE)) { in adjust_lowmem_bounds()
1180 len = round_up(block_start, PMD_SIZE) - block_start; in adjust_lowmem_bounds()
1213 if (!IS_ALIGNED(block_start, PMD_SIZE)) in adjust_lowmem_bounds()
1215 else if (!IS_ALIGNED(block_end, PMD_SIZE)) in adjust_lowmem_bounds()
1234 memblock_limit = round_down(memblock_limit, PMD_SIZE); in adjust_lowmem_bounds()
1264 for (addr = 0; addr < KASAN_SHADOW_START; addr += PMD_SIZE) in prepare_page_table()
1275 for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE) in prepare_page_table()
1281 addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK; in prepare_page_table()
1283 for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE) in prepare_page_table()
[all …]
/linux/arch/riscv/include/asm/
pgtable-64.h:18 #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) macro
19 #define PMD_MASK (~(PMD_SIZE - 1))
/linux/arch/powerpc/include/asm/nohash/64/
pgtable-4k.h:31 #define PMD_SIZE (1UL << PMD_SHIFT) macro
32 #define PMD_MASK (~(PMD_SIZE-1))
/linux/arch/x86/boot/compressed/
ident_map_64.c:98 start = round_down(start, PMD_SIZE); in add_identity_map()
99 end = round_up(end, PMD_SIZE); in add_identity_map()
333 end = address + PMD_SIZE; in do_boot_page_fault()
/linux/arch/m68k/include/asm/
pgtable_mm.h:41 #define PMD_SIZE (1UL << PMD_SHIFT) macro
42 #define PMD_MASK (~(PMD_SIZE-1))
/linux/arch/powerpc/mm/book3s64/
radix_pgtable.c:99 if (map_page_size == PMD_SIZE) { in early_map_kernel_page()
162 if (map_page_size == PMD_SIZE) { in __map_kernel_page()
295 } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE && in create_physical_mapping()
297 mapping_size = PMD_SIZE; in create_physical_mapping()
785 if (!IS_ALIGNED(addr, PMD_SIZE) || in remove_pmd_table()
786 !IS_ALIGNED(next, PMD_SIZE)) { in remove_pmd_table()
1165 flush_tlb_kernel_range(addr, addr + PMD_SIZE); in pmd_free_pte_page()
/linux/drivers/dax/
device.c:113 unsigned int fault_size = PMD_SIZE; in __dev_dax_pmd_fault()
118 if (dev_dax->align > PMD_SIZE) { in __dev_dax_pmd_fault()
131 (pmd_addr + PMD_SIZE) > vmf->vma->vm_end) in __dev_dax_pmd_fault()
135 phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE); in __dev_dax_pmd_fault()
216 fault_size = PMD_SIZE; in dev_dax_huge_fault()
/linux/arch/sparc/mm/
hugetlbpage.c:298 if (sz >= PMD_SIZE) in huge_pte_alloc()
342 else if (size >= PMD_SIZE) in set_huge_pte_at()
379 else if (size >= PMD_SIZE) in huge_ptep_get_and_clear()
510 addr += PMD_SIZE; in hugetlb_free_pgd_range()
520 end -= PMD_SIZE; in hugetlb_free_pgd_range()

Completed in 38 milliseconds

Pages: 1 2 3 4 5