/linux/arch/x86/mm/
init.c
    306  if (start_pfn < end_pfn) {  in save_mr()
    403  if (end_pfn > limit_pfn)  in split_mem_range()
    404  end_pfn = limit_pfn;  in split_mem_range()
    405  if (start_pfn < end_pfn) {  in split_mem_range()
    407  pfn = end_pfn;  in split_mem_range()
    420  if (start_pfn < end_pfn) {  in split_mem_range()
    423  pfn = end_pfn;  in split_mem_range()
    434  pfn = end_pfn;  in split_mem_range()
    443  pfn = end_pfn;  in split_mem_range()
    449  end_pfn = limit_pfn;  in split_mem_range()
    [all …]
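Note: the split_mem_range() hits above all follow one clamp-record-advance pattern: cap the candidate end_pfn at limit_pfn, save the sub-range only when it is non-empty (start_pfn < end_pfn), then move pfn up to end_pfn. A minimal compilable sketch of that shape; struct range and save_range() are simplified stand-ins for the kernel's struct map_range and save_mr(), not the real API:

    #include <stdio.h>

    struct range { unsigned long start_pfn, end_pfn; };

    /* record only non-empty ranges, like save_mr() */
    static int save_range(struct range *mr, int nr,
                          unsigned long start_pfn, unsigned long end_pfn)
    {
        if (start_pfn < end_pfn) {
            mr[nr].start_pfn = start_pfn;
            mr[nr].end_pfn = end_pfn;
            nr++;
        }
        return nr;
    }

    int main(void)
    {
        struct range mr[4];
        unsigned long pfn = 0, limit_pfn = 0x1000;
        int nr = 0;

        /* first chunk ends at an alignment boundary, capped at the limit */
        unsigned long end_pfn = 0x200;
        if (end_pfn > limit_pfn)
            end_pfn = limit_pfn;
        nr = save_range(mr, nr, pfn, end_pfn);
        pfn = end_pfn;                  /* advance past what was recorded */

        /* tail chunk always ends exactly at limit_pfn */
        nr = save_range(mr, nr, pfn, limit_pfn);

        for (int i = 0; i < nr; i++)
            printf("range %d: %#lx-%#lx\n", i, mr[i].start_pfn, mr[i].end_pfn);
        return 0;
    }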
init_32.c
    261  unsigned long start_pfn, end_pfn;  in kernel_physical_mapping_init() local
    272  end_pfn = end >> PAGE_SHIFT;  in kernel_physical_mapping_init()
    301  if (pfn >= end_pfn)  in kernel_physical_mapping_init()
    309  for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;  in kernel_physical_mapping_init()
    349  for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;  in kernel_physical_mapping_init()
    404  unsigned long start_pfn, unsigned long end_pfn)  in add_highpages_with_active_regions() argument
    411  start_pfn, end_pfn);  in add_highpages_with_active_regions()
    413  start_pfn, end_pfn);  in add_highpages_with_active_regions()
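Note: kernel_physical_mapping_init() bounds each page-table walk on two conditions at once: the index inside the current table and the overall end_pfn, so the loop stops at whichever limit comes first. A toy sketch of that dual-bound loop; the PTRS_PER_* values here are made up, the real ones are arch-defined:

    #include <stdio.h>

    #define PTRS_PER_PMD 8   /* toy table sizes, not the x86 values */
    #define PTRS_PER_PTE 8

    int main(void)
    {
        unsigned long pfn = 0, end_pfn = 50;

        for (int pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < end_pfn; pmd_idx++)
            for (int pte_ofs = 0; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                 pte_ofs++, pfn++)
                ;  /* map one page; both bounds keep the walk inside the
                      current table and inside [0, end_pfn) */

        printf("mapped %lu pfns\n", pfn);
        return 0;
    }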
/linux/mm/
page_isolation.c
    194  BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));  in start_isolate_page_range()
    197  pfn < end_pfn;  in start_isolate_page_range()
    218  BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));  in undo_isolate_page_range()
    221  pfn < end_pfn;  in undo_isolate_page_range()
    242  while (pfn < end_pfn) {  in __test_page_isolated_in_pageblock()
    270  int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,  in test_pages_isolated() argument
    283  for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {  in test_pages_isolated()
    288  page = __first_valid_page(start_pfn, end_pfn - start_pfn);  in test_pages_isolated()
    289  if ((pfn < end_pfn) || !page) {  in test_pages_isolated()
    300  ret = pfn < end_pfn ? -EBUSY : 0;  in test_pages_isolated()
    [all …]
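Note: every function in this file walks [start_pfn, end_pfn) one pageblock at a time after asserting that both endpoints are pageblock-aligned (the BUG_ON() lines above). A standalone sketch of that walk; pageblock_nr_pages is hard-coded to an illustrative value, while in the kernel it derives from the pageblock order:

    #include <assert.h>
    #include <stdio.h>

    #define pageblock_nr_pages 512UL   /* illustrative, arch-dependent */
    #define IS_ALIGNED(x, a)   (((x) & ((a) - 1)) == 0)

    int main(void)
    {
        unsigned long start_pfn = 0x1000, end_pfn = 0x1800;

        /* both endpoints must be pageblock-aligned, as in the BUG_ON()s */
        assert(IS_ALIGNED(start_pfn, pageblock_nr_pages));
        assert(IS_ALIGNED(end_pfn, pageblock_nr_pages));

        for (unsigned long pfn = start_pfn; pfn < end_pfn;
             pfn += pageblock_nr_pages)
            printf("visit pageblock at pfn %#lx\n", pfn);
        return 0;
    }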
page_idle.c
    120  unsigned long pfn, end_pfn;  in page_idle_bitmap_read() local
    130  end_pfn = pfn + count * BITS_PER_BYTE;  in page_idle_bitmap_read()
    131  if (end_pfn > max_pfn)  in page_idle_bitmap_read()
    132  end_pfn = max_pfn;  in page_idle_bitmap_read()
    134  for (; pfn < end_pfn; pfn++) {  in page_idle_bitmap_read()
    165  unsigned long pfn, end_pfn;  in page_idle_bitmap_write() local
    175  end_pfn = pfn + count * BITS_PER_BYTE;  in page_idle_bitmap_write()
    176  if (end_pfn > max_pfn)  in page_idle_bitmap_write()
    177  end_pfn = max_pfn;  in page_idle_bitmap_write()
    179  for (; pfn < end_pfn; pfn++) {  in page_idle_bitmap_write()
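Note: both the read and write paths derive the last reachable pfn from the buffer size, one idle bit per page frame, and then clamp it to max_pfn, so an oversized request is truncated rather than rejected. The arithmetic in isolation, with all values fabricated:

    #include <stdio.h>

    #define BITS_PER_BYTE 8

    int main(void)
    {
        unsigned long max_pfn = 1000;   /* stand-in for the global max_pfn */
        unsigned long pfn = 900;        /* first pfn covered by the buffer */
        unsigned long count = 32;       /* bytes in the user buffer */

        unsigned long end_pfn = pfn + count * BITS_PER_BYTE;
        if (end_pfn > max_pfn)
            end_pfn = max_pfn;          /* clamp instead of failing */

        printf("will scan pfns [%lu, %lu)\n", pfn, end_pfn);
        return 0;
    }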
memory_hotplug.c
    335  cur_nr_pages = min(end_pfn - pfn,  in __add_pages()
    349  unsigned long end_pfn)  in find_smallest_section_pfn() argument
    370  unsigned long end_pfn)  in find_biggest_section_pfn() argument
    375  pfn = end_pfn - 1;  in find_biggest_section_pfn()
    393  unsigned long end_pfn)  in shrink_zone_span() argument
    446  node_end_pfn = end_pfn;  in update_pgdat_span()
    450  if (end_pfn > node_end_pfn)  in update_pgdat_span()
    451  node_end_pfn = end_pfn;  in update_pgdat_span()
    533  cur_nr_pages = min(end_pfn - pfn,  in __remove_pages()
    1597  unsigned long end_pfn)  in test_pages_in_a_zone() argument
    [all …]
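Note: __add_pages() and __remove_pages() consume the range in chunks of min(end_pfn - pfn, pages left in the current section), so no single step crosses a memory-section boundary. A sketch of that chunking with a toy section size; PAGES_PER_SECTION is config-dependent in the kernel:

    #include <stdio.h>

    #define PAGES_PER_SECTION 0x8000UL   /* toy, power-of-two section size */
    #define min(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned long pfn = 0x7000, end_pfn = 0x19000;

        while (pfn < end_pfn) {
            /* never step past the end of the current section */
            unsigned long section_end = (pfn | (PAGES_PER_SECTION - 1)) + 1;
            unsigned long cur_nr_pages = min(end_pfn - pfn, section_end - pfn);

            printf("chunk: pfn %#lx, %#lx pages\n", pfn, cur_nr_pages);
            pfn += cur_nr_pages;
        }
        return 0;
    }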
page_alloc.c
    424  prev_end_pfn = end_pfn;  in defer_init()
    1762  end_pfn--;  in __pageblock_pfn_to_page()
    2017  while (spfn < end_pfn) {  in deferred_init_memmap_chunk()
    6697  end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);  in memmap_init_zone_range()
    6708  *hole_pfn = end_pfn;  in memmap_init_zone_range()
    6742  end_pfn = round_up(end_pfn, PAGES_PER_SECTION);  in memmap_init()
    7051  *end_pfn = 0;  in get_pfn_range_for_nid()
    7055  *end_pfn = max(*end_pfn, this_end_pfn);  in get_pfn_range_for_nid()
    7170  end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);  in __absent_pages_in_range()
    7598  end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);  in free_area_init_node()
    [all …]
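Note: memmap_init_zone_range() and __absent_pages_in_range() intersect an arbitrary pfn range with zone or node bounds via clamp(), so a disjoint range collapses to an empty one with no special casing. A sketch with a local clamp macro standing in for the kernel's:

    #include <stdio.h>

    /* local stand-in for the kernel's clamp() */
    #define clamp(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

    int main(void)
    {
        unsigned long zone_start_pfn = 0x1000, zone_end_pfn = 0x2000;
        unsigned long start_pfn = 0x0800, end_pfn = 0x1800;

        start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
        end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);

        /* a range fully outside the zone ends up with start == end */
        printf("intersection: [%#lx, %#lx), %lu pages\n",
               start_pfn, end_pfn, end_pfn - start_pfn);
        return 0;
    }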
compaction.c
    552  unsigned long end_pfn,  in isolate_freepages_block() argument
    657  if (unlikely(blockpfn > end_pfn))  in isolate_freepages_block()
    658  blockpfn = end_pfn;  in isolate_freepages_block()
    671  if (strict && blockpfn < end_pfn)  in isolate_freepages_block()
    751  if (pfn < end_pfn) {  in isolate_freepages_range()
    902  low_pfn = end_pfn;  in isolate_migratepages_block()
    1136  if (unlikely(low_pfn > end_pfn))  in isolate_migratepages_block()
    1137  low_pfn = end_pfn;  in isolate_migratepages_block()
    1187  unsigned long end_pfn)  in isolate_migratepages_range() argument
    1352  if (start_pfn < end_pfn)  in fast_isolate_around()
    [all …]
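Note: isolate_freepages_block() can overshoot end_pfn when it skips across a high-order buddy, so it pulls the cursor back afterwards, and in strict mode a block it could not fully scan (blockpfn < end_pfn) discards everything isolated. The control shape reduced to a standalone sketch with a fabricated scan step:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long blockpfn = 0, end_pfn = 32;
        bool strict = true;
        long total_isolated = 0;

        while (blockpfn < end_pfn) {
            unsigned long step = 8;   /* pretend an order-3 buddy was taken */
            total_isolated += step;
            blockpfn += step;         /* skipping a buddy may overshoot */
        }

        if (blockpfn > end_pfn)       /* clamp the cursor back */
            blockpfn = end_pfn;

        if (strict && blockpfn < end_pfn)
            total_isolated = 0;       /* strict callers need the whole block */

        printf("isolated %ld pages, cursor at %lu\n", total_isolated, blockpfn);
        return 0;
    }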
sparse.c
    130  unsigned long *end_pfn)  in mminit_validate_memmodel_limits() argument
    141  *start_pfn, *end_pfn, max_sparsemem_pfn);  in mminit_validate_memmodel_limits()
    144  *end_pfn = max_sparsemem_pfn;  in mminit_validate_memmodel_limits()
    145  } else if (*end_pfn > max_sparsemem_pfn) {  in mminit_validate_memmodel_limits()
    148  *start_pfn, *end_pfn, max_sparsemem_pfn);  in mminit_validate_memmodel_limits()
    150  *end_pfn = max_sparsemem_pfn;  in mminit_validate_memmodel_limits()
    592  void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)  in online_mem_sections() argument
    596  for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {  in online_mem_sections()
    610  void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)  in offline_mem_sections() argument
    614  for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {  in offline_mem_sections()
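Note: mminit_validate_memmodel_limits() truncates, with a warning, any range extending past the highest pfn the sparse memory model can describe; online_mem_sections() and offline_mem_sections() then stride the validated range by PAGES_PER_SECTION. A sketch of the warn-and-truncate clamp; MAX_SPARSEMEM_PFN is a stand-in for the computed max_sparsemem_pfn:

    #include <stdio.h>

    #define MAX_SPARSEMEM_PFN 0x100000UL   /* toy limit */

    static void validate_limits(unsigned long *start_pfn, unsigned long *end_pfn)
    {
        if (*start_pfn > MAX_SPARSEMEM_PFN) {
            fprintf(stderr, "range %#lx-%#lx wholly past %#lx, emptied\n",
                    *start_pfn, *end_pfn, MAX_SPARSEMEM_PFN);
            *start_pfn = MAX_SPARSEMEM_PFN;
            *end_pfn = MAX_SPARSEMEM_PFN;   /* empty range */
        } else if (*end_pfn > MAX_SPARSEMEM_PFN) {
            fprintf(stderr, "range %#lx-%#lx ends past %#lx, truncated\n",
                    *start_pfn, *end_pfn, MAX_SPARSEMEM_PFN);
            *end_pfn = MAX_SPARSEMEM_PFN;
        }
    }

    int main(void)
    {
        unsigned long s = 0xfff00, e = 0x100f00;
        validate_limits(&s, &e);
        printf("validated: [%#lx, %#lx)\n", s, e);
        return 0;
    }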
internal.h
    226  unsigned long end_pfn, struct zone *zone);
    229  unsigned long end_pfn, struct zone *zone)  in pageblock_pfn_to_page() argument
    234  return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);  in pageblock_pfn_to_page()
    319  unsigned long start_pfn, unsigned long end_pfn);
    322  unsigned long low_pfn, unsigned long end_pfn);
    573  unsigned long *end_pfn);
    576  unsigned long *end_pfn)  in mminit_validate_memmodel_limits() argument
bootmem_info.c
    103  unsigned long i, pfn, end_pfn, nr_pages;  in register_page_bootmem_info_node() local
    114  end_pfn = pgdat_end_pfn(pgdat);  in register_page_bootmem_info_node()
    117  for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {  in register_page_bootmem_info_node()
/linux/include/trace/events/
page_isolation.h
    14  unsigned long end_pfn,
    17  TP_ARGS(start_pfn, end_pfn, fin_pfn),
    21  __field(unsigned long, end_pfn)
    27  __entry->end_pfn = end_pfn;
    32  __entry->start_pfn, __entry->end_pfn, __entry->fin_pfn,
    33  __entry->end_pfn <= __entry->fin_pfn ? "success" : "fail")
compaction.h
    18  unsigned long end_pfn,
    22  TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken),
    26  __field(unsigned long, end_pfn)
    33  __entry->end_pfn = end_pfn;
    40  __entry->end_pfn,
    49  unsigned long end_pfn,
    53  TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
    60  unsigned long end_pfn,
    64  TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
/linux/arch/sparc/mm/
init_32.c
    66  unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;  in calc_highpages() local
    68  if (end_pfn <= max_low_pfn)  in calc_highpages()
    74  nr += end_pfn - start_pfn;  in calc_highpages()
    236  static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)  in map_high_region() argument
    241  printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);  in map_high_region()
    244  for (tmp = start_pfn; tmp < end_pfn; tmp++)  in map_high_region()
    285  unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;  in mem_init() local
    287  if (end_pfn <= highstart_pfn)  in mem_init()
    293  map_high_region(start_pfn, end_pfn);  in mem_init()
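Note: calc_highpages() turns each bank's base address plus size into an end_pfn and counts only the pages above max_low_pfn: banks entirely below it are skipped, and a straddling bank is clipped at the boundary. A sketch with a fabricated two-bank layout:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct bank { unsigned long base_addr, num_bytes; };

    int main(void)
    {
        struct bank sp_banks[] = {
            { 0x00000000, 0x10000000 },   /* entirely below lowmem limit */
            { 0x30000000, 0x20000000 },   /* straddles the lowmem limit  */
        };
        unsigned long max_low_pfn = 0x38000000UL >> PAGE_SHIFT;
        unsigned long nr = 0;

        for (int i = 0; i < 2; i++) {
            unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
            unsigned long end_pfn =
                (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;

            if (end_pfn <= max_low_pfn)   /* nothing above lowmem */
                continue;
            if (start_pfn < max_low_pfn)  /* clip the low part */
                start_pfn = max_low_pfn;
            nr += end_pfn - start_pfn;
        }
        printf("%lu highmem pages\n", nr);
        return 0;
    }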
/linux/arch/sh/mm/
numa.c
    28  unsigned long start_pfn, end_pfn;  in setup_bootmem_node() local
    34  end_pfn = PFN_DOWN(end);  in setup_bootmem_node()
    41  __add_active_range(nid, start_pfn, end_pfn);  in setup_bootmem_node()
    52  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  in setup_bootmem_node()
init.c
    210  unsigned long start_pfn, end_pfn;  in allocate_pgdat() local
    212  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);  in allocate_pgdat()
    224  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  in allocate_pgdat()
    229  unsigned long start_pfn, end_pfn;  in do_init_bootmem() local
    233  for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)  in do_init_bootmem()
    234  __add_active_range(0, start_pfn, end_pfn);  in do_init_bootmem()
/linux/arch/x86/xen/
setup.c
    255  unsigned long end_pfn, unsigned long nr_pages)  in xen_set_identity_and_release_chunk() argument
    260  WARN_ON(start_pfn > end_pfn);  in xen_set_identity_and_release_chunk()
    263  end = min(end_pfn, nr_pages);  in xen_set_identity_and_release_chunk()
    282  set_phys_range_identity(start_pfn, end_pfn);  in xen_set_identity_and_release_chunk()
    389  unsigned long n = end_pfn - start_pfn;  in xen_set_identity_and_remap_chunk()
    431  for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)  in xen_set_identity_and_remap_chunk()
    446  return remap_pages + min(end_pfn, nr_pages) - start_pfn;  in xen_count_remap_pages()
    473  unsigned long end_pfn = PFN_UP(end);  in xen_foreach_remap_area() local
    476  end_pfn = PFN_UP(entry->addr);  in xen_foreach_remap_area()
    478  if (start_pfn < end_pfn)  in xen_foreach_remap_area()
    [all …]
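Note: xen_set_identity_and_release_chunk() and xen_count_remap_pages() both cap the range at nr_pages, the number of actually populated frames, so the tail of an E820 region lying beyond RAM contributes nothing. The capping on its own, with fabricated values (assumes start_pfn <= nr_pages):

    #include <stdio.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned long start_pfn = 0x90000, end_pfn = 0xa0000;
        unsigned long nr_pages = 0x98000;   /* populated RAM ends here */

        /* only the populated part of [start_pfn, end_pfn) can be remapped */
        unsigned long remappable = min(end_pfn, nr_pages) - start_pfn;
        printf("%#lx of %#lx pfns remappable\n",
               remappable, end_pfn - start_pfn);
        return 0;
    }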
/linux/arch/mips/loongson64/
numa.c
    88  unsigned long start_pfn, end_pfn;  in node_mem_init() local
    97  get_pfn_range_for_nid(node, &start_pfn, &end_pfn);  in node_mem_init()
    99  node, start_pfn, end_pfn);  in node_mem_init()
    112  NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;  in node_mem_init()
    122  max_low_pfn = end_pfn;  in node_mem_init()
/linux/include/linux/
node.h
    103  unsigned long end_pfn,
    107  unsigned long end_pfn,  in link_mem_sections() argument
    126  unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;  in register_one_node() local
    132  link_mem_sections(nid, start_pfn, end_pfn, MEMINIT_EARLY);  in register_one_node()
page-isolation.h
    46  start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
    54  undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
    60  int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
/linux/arch/x86/platform/efi/
efi_32.c
    38  u64 start_pfn, end_pfn, end;  in efi_map_region() local
    45  end_pfn = PFN_UP(end);  in efi_map_region()
    47  if (pfn_range_is_mapped(start_pfn, end_pfn)) {  in efi_map_region()
/linux/drivers/base/
arch_numa.c
    283  static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)  in setup_node_data() argument
    290  if (start_pfn >= end_pfn)  in setup_node_data()
    311  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  in setup_node_data()
    427  unsigned long start_pfn, end_pfn;  in numa_register_nodes() local
    429  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);  in numa_register_nodes()
    430  setup_node_data(nid, start_pfn, end_pfn);  in numa_register_nodes()
/linux/arch/sh/kernel/
setup.c
    198  unsigned long end_pfn)  in __add_active_range() argument
    206  end = end_pfn << PAGE_SHIFT;  in __add_active_range()
    215  start_pfn, end_pfn);  in __add_active_range()
    239  memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),  in __add_active_range()
swsusp.c
    22  unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;  in pfn_is_nosave() local
    24  return (pfn >= begin_pfn) && (pfn < end_pfn);  in pfn_is_nosave()
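Note: pfn_is_nosave() is the canonical half-open range test: a pfn belongs to the nosave region iff begin_pfn <= pfn < end_pfn, with end_pfn taken past the page-aligned end of the region. A standalone rendering; the two physical addresses are fabricated stand-ins for the kernel's __pa(&__nosave_begin) and __pa(&__nosave_end):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    static const unsigned long nosave_begin_pa = 0x201000;  /* stand-in */
    static const unsigned long nosave_end_pa   = 0x203800;  /* stand-in */

    static int pfn_is_nosave(unsigned long pfn)
    {
        unsigned long begin_pfn = nosave_begin_pa >> PAGE_SHIFT;
        unsigned long end_pfn = PAGE_ALIGN(nosave_end_pa) >> PAGE_SHIFT;

        return (pfn >= begin_pfn) && (pfn < end_pfn);  /* half-open range */
    }

    int main(void)
    {
        printf("%d %d %d\n", pfn_is_nosave(0x200),
               pfn_is_nosave(0x201), pfn_is_nosave(0x204));
        return 0;
    }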
/linux/arch/powerpc/mm/
numa.c
    90  static int __init fake_numa_create_new_node(unsigned long end_pfn,  in fake_numa_create_new_node() argument
    121  if ((end_pfn << PAGE_SHIFT) > mem) {  in fake_numa_create_new_node()
    1027  unsigned long start_pfn, end_pfn;  in setup_nonnuma() local
    1034  for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {  in setup_nonnuma()
    1035  fake_numa_create_new_node(end_pfn, &nid);  in setup_nonnuma()
    1037  PFN_PHYS(end_pfn - start_pfn),  in setup_nonnuma()
    1079  static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)  in setup_node_data() argument
    1081  u64 spanned_pages = end_pfn - start_pfn;  in setup_node_data()
    1212  unsigned long start_pfn, end_pfn;  in initmem_init() local
    1214  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);  in initmem_init()
    [all …]
/linux/drivers/hv/
hv_balloon.c
    438  unsigned long end_pfn;  member
    448  unsigned long end_pfn;  member
    587  if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))  in has_pfn_is_backed()
    609  (pfn < has->end_pfn) &&  in hv_page_offline_check()
    776  (pfn + (1UL << order) > has->end_pfn))  in hv_online_page()
    815  gap->end_pfn = start_pfn;  in pfn_covered()
    825  if ((start_pfn + pfn_cnt) > has->end_pfn) {  in pfn_covered()
    826  residual = (start_pfn + pfn_cnt - has->end_pfn);  in pfn_covered()
    834  has->end_pfn += new_inc;  in pfn_covered()
    904  size = (has->end_pfn - has->ha_end_pfn);  in handle_pg_range()
    [all …]
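Note: the balloon driver tracks each hot-add region by start/end pfn plus a list of punched-out gaps, and has_pfn_is_backed() reduces to "inside the region and not inside any gap". A trimmed sketch of that membership test; the structures are cut down to the fields used here and are not the driver's real types:

    #include <stdbool.h>
    #include <stdio.h>

    struct gap { unsigned long start_pfn, end_pfn; };

    struct region {                       /* cut-down hot-add region state */
        unsigned long start_pfn, end_pfn;
        struct gap *gaps;
        int nr_gaps;
    };

    static bool pfn_is_backed(const struct region *has, unsigned long pfn)
    {
        if (pfn < has->start_pfn || pfn >= has->end_pfn)
            return false;                         /* outside the region */
        for (int i = 0; i < has->nr_gaps; i++)    /* punched-out holes  */
            if (pfn >= has->gaps[i].start_pfn && pfn < has->gaps[i].end_pfn)
                return false;
        return true;
    }

    int main(void)
    {
        struct gap g = { 110, 120 };
        struct region has = { 100, 200, &g, 1 };
        printf("%d %d %d\n", pfn_is_backed(&has, 105),
               pfn_is_backed(&has, 115), pfn_is_backed(&has, 250));
        return 0;
    }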