Lines matching refs: start_pfn (all matches in mm/memory_hotplug.c)
348 unsigned long start_pfn, in find_smallest_section_pfn() argument
351 for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) { in find_smallest_section_pfn()
352 if (unlikely(!pfn_to_online_page(start_pfn))) in find_smallest_section_pfn()
355 if (unlikely(pfn_to_nid(start_pfn) != nid)) in find_smallest_section_pfn()
358 if (zone != page_zone(pfn_to_page(start_pfn))) in find_smallest_section_pfn()
361 return start_pfn; in find_smallest_section_pfn()
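The three checks shown above make up the whole body of this helper: it walks forward from start_pfn in PAGES_PER_SUBSECTION steps and returns the first pfn that is online, belongs to node nid, and still lies in the given zone. A reconstruction consistent with these fragments (the continue statements and the final "return 0" for "nothing found" are filled in from context, not from the matches):

    static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
                                                   unsigned long start_pfn,
                                                   unsigned long end_pfn)
    {
            for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
                    if (unlikely(!pfn_to_online_page(start_pfn)))
                            continue;        /* subsection is offline */
                    if (unlikely(pfn_to_nid(start_pfn) != nid))
                            continue;        /* page belongs to another node */
                    if (zone != page_zone(pfn_to_page(start_pfn)))
                            continue;        /* page belongs to another zone */
                    return start_pfn;        /* lowest pfn still in this zone */
            }
            return 0;                        /* no suitable pfn in the range */
    }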
369 unsigned long start_pfn, in find_biggest_section_pfn() argument
376 for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) { in find_biggest_section_pfn()
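Only the signature and loop header of the companion helper match, but its shape is the mirror image of find_smallest_section_pfn(): scan backward from the last pfn of the range, one subsection at a time, and return the highest pfn still owned by this node and zone. A sketch under that assumption:

    pfn = end_pfn - 1;                       /* start at the last pfn */
    for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
            if (unlikely(!pfn_to_online_page(pfn)))
                    continue;
            if (unlikely(pfn_to_nid(pfn) != nid))
                    continue;
            if (zone != page_zone(pfn_to_page(pfn)))
                    continue;
            return pfn;                      /* highest pfn still in the zone */
    }
    return 0;

Note that the backward loop relies on start_pfn being nonzero; with start_pfn == 0 the unsigned pfn would wrap instead of terminating the loop cleanly.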
392 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, in shrink_zone_span() argument
398 if (zone->zone_start_pfn == start_pfn) { in shrink_zone_span()
422 start_pfn); in shrink_zone_span()
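Only the start-of-zone branch shows up in the matches. Reconstructed from these fragments, it handles a hole carved out at the bottom of the zone: find the lowest pfn that survives past the hole and re-anchor the zone there, or mark the zone empty. A mirror branch (not matched here) uses find_biggest_section_pfn() when the hole is at the top of the zone.

    if (zone->zone_start_pfn == start_pfn) {
            /* hole at the bottom: find the first surviving pfn above it */
            pfn = find_smallest_section_pfn(nid, zone, end_pfn,
                                            zone_end_pfn(zone));
            if (pfn) {
                    /* compute the new span before moving the start, since
                     * zone_end_pfn() is derived from start + spanned */
                    zone->spanned_pages = zone_end_pfn(zone) - pfn;
                    zone->zone_start_pfn = pfn;
            } else {
                    /* nothing left: the zone becomes empty */
                    zone->zone_start_pfn = 0;
                    zone->spanned_pages = 0;
            }
    }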
461 unsigned long start_pfn, in remove_pfn_range_from_zone() argument
464 const unsigned long end_pfn = start_pfn + nr_pages; in remove_pfn_range_from_zone()
469 for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) { in remove_pfn_range_from_zone()
489 shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); in remove_pfn_range_from_zone()
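The loop walks the range in chunks that never cross a section boundary and poisons the corresponding memmap before the zone span is shrunk, so stale struct page contents cannot later be mistaken for valid state. A sketch of the chunking, assuming the step is clamped to the next section boundary as in the mainline helper:

    for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
            /* chunk ends at either the range end or the next section
             * boundary, whichever comes first */
            cur_nr_pages = min(end_pfn - pfn,
                               SECTION_ALIGN_UP(pfn + 1) - pfn);
            page_init_poison(pfn_to_page(pfn),
                             sizeof(struct page) * cur_nr_pages);
    }

After the loop, shrink_zone_span() (line 489 above) trims the zone's spanned range, and the containing pgdat's span is updated to match.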
591 static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages) in online_pages_range() argument
593 const unsigned long end_pfn = start_pfn + nr_pages; in online_pages_range()
605 for (pfn = start_pfn; pfn < end_pfn;) { in online_pages_range()
613 online_mem_sections(start_pfn, end_pfn); in online_pages_range()
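The onlining loop hands memory back to the page allocator in the largest naturally aligned blocks it can, then marks the covered sections online so pfn_to_online_page() starts succeeding for them. A sketch of the step computation, reconstructed from the mainline pattern:

    for (pfn = start_pfn; pfn < end_pfn;) {
            /* largest order for which pfn is naturally aligned, capped at
             * the buddy allocator's maximum; hotplugged ranges are section
             * aligned, so a block never overruns end_pfn */
            order = min(MAX_ORDER - 1UL, __ffs(pfn));
            (*online_page_callback)(pfn_to_page(pfn), order);
            pfn += 1UL << order;
    }
    online_mem_sections(start_pfn, end_pfn);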
640 static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn, in resize_zone_range() argument
645 if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn) in resize_zone_range()
646 zone->zone_start_pfn = start_pfn; in resize_zone_range()
648 zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn; in resize_zone_range()
651 static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn, in resize_pgdat_range() argument
656 if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn) in resize_pgdat_range()
657 pgdat->node_start_pfn = start_pfn; in resize_pgdat_range()
659 pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn; in resize_pgdat_range()
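resize_zone_range() and resize_pgdat_range() share one formula: pull the start down to the hot-added range if needed, then set the span to max(new end, old end) - new start. A standalone model of that arithmetic (the resize() helper and the values are hypothetical, not kernel code):

    #include <stdio.h>

    /* Models the shared resize logic. Assumes an empty zone's start was
     * already re-anchored, as init_currently_empty_zone() does before
     * resize_zone_range() runs in the kernel. */
    static void resize(unsigned long *start, unsigned long *spanned,
                       unsigned long start_pfn, unsigned long nr_pages)
    {
            unsigned long old_end = *start + *spanned;

            if (*spanned == 0 || start_pfn < *start)
                    *start = start_pfn;
            *spanned = (start_pfn + nr_pages > old_end ?
                        start_pfn + nr_pages : old_end) - *start;
    }

    int main(void)
    {
            unsigned long start = 0x10000, spanned = 0x8000;

            /* hot-add 0x4000 pages well above the current end (0x18000) */
            resize(&start, &spanned, 0x20000, 0x4000);
            printf("start=%#lx spanned=%#lx\n", start, spanned);
            /* prints: start=0x10000 spanned=0x14000 */
            return 0;
    }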
679 void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, in move_pfn_range_to_zone() argument
689 init_currently_empty_zone(zone, start_pfn, nr_pages); in move_pfn_range_to_zone()
690 resize_zone_range(zone, start_pfn, nr_pages); in move_pfn_range_to_zone()
691 resize_pgdat_range(pgdat, start_pfn, nr_pages); in move_pfn_range_to_zone()
700 if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION)) in move_pfn_range_to_zone()
701 section_taint_zone_device(start_pfn); in move_pfn_range_to_zone()
702 if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)) in move_pfn_range_to_zone()
703 section_taint_zone_device(start_pfn + nr_pages); in move_pfn_range_to_zone()
712 memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0, in move_pfn_range_to_zone()
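The two IS_ALIGNED() checks catch the case where the moved range covers only part of a boundary memory section: such a section mixes ZONE_DEVICE pages with neighbouring memory, so it is flagged with section_taint_zone_device() and pfn walkers must inspect each page instead of trusting the section as a whole. A standalone illustration (PAGES_PER_SECTION is set to the common x86-64 value here, an assumption):

    #include <stdio.h>

    #define PAGES_PER_SECTION (1UL << 15)   /* 128 MiB sections on x86-64 */
    #define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

    int main(void)
    {
            /* range starts 512 pages into a section and spans one
             * section's worth of pages, so both ends are mid-section */
            unsigned long start_pfn = PAGES_PER_SECTION + 512;
            unsigned long nr_pages  = PAGES_PER_SECTION;

            printf("taint first section: %d\n",
                   !IS_ALIGNED(start_pfn, PAGES_PER_SECTION));            /* 1 */
            printf("taint last section:  %d\n",
                   !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)); /* 1 */
            return 0;
    }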
826 static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn, in default_kernel_zone_for_pfn() argument
835 if (zone_intersects(zone, start_pfn, nr_pages)) in default_kernel_zone_for_pfn()
953 static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn, in default_zone_for_pfn() argument
956 struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn, in default_zone_for_pfn()
959 bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages); in default_zone_for_pfn()
960 bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages); in default_zone_for_pfn()
978 struct memory_group *group, unsigned long start_pfn, in zone_for_pfn_range() argument
982 return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages); in zone_for_pfn_range()
988 return auto_movable_zone_for_pfn(nid, group, start_pfn, nr_pages); in zone_for_pfn_range()
990 return default_zone_for_pfn(nid, start_pfn, nr_pages); in zone_for_pfn_range()
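Zone selection is a three-tier decision: an explicit request (MMOP_ONLINE_KERNEL or MMOP_ONLINE_MOVABLE) is honoured directly, the auto-movable policy gets the next say, and otherwise default_zone_for_pfn() compares the range against the default kernel zone and ZONE_MOVABLE. A reconstruction of that fallback, assuming the usual XOR test on the two intersection flags computed at lines 959-960 above:

    /* overlap with exactly one candidate: keep the range there so zone
     * spans stay contiguous; on a tie, the movable_node policy decides */
    if (in_kernel ^ in_movable)
            return in_kernel ? kernel_zone : movable_zone;
    return movable_node_enabled ? movable_zone : kernel_zone;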
1087 arg.start_pfn = pfn; in online_pages()
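The lone match in online_pages() is the notifier payload: before any pages are touched, the range is packed into a struct memory_notify and announced on the memory hotplug notifier chain. A sketch of the surrounding pattern (an assumption based on the usual notifier flow, not on the matches):

    struct memory_notify arg;

    arg.start_pfn = pfn;
    arg.nr_pages = nr_pages;
    ret = memory_notify(MEM_GOING_ONLINE, &arg);   /* veto point for listeners */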
1596 struct zone *test_pages_in_a_zone(unsigned long start_pfn, in test_pages_in_a_zone() argument
1603 for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1); in test_pages_in_a_zone()
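test_pages_in_a_zone() confirms the whole range lies in a single zone before offlining proceeds. The loop header shows the trick: the first iteration runs only to SECTION_ALIGN_UP(start_pfn + 1), realigning the walk to a section boundary, after which it advances one full section at a time. A condensed sketch of the walk:

    for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
         pfn < end_pfn;
         pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
            /* compare each online page's zone against the first zone seen;
             * a mismatch means the range straddles zones and the walk is
             * abandoned (condensed; holes in the range are skipped) */
    }
    return zone;    /* the single zone covering the range, or NULL on mismatch */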
1683 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) in do_migrate_range() argument
1692 for (pfn = start_pfn; pfn < end_pfn; pfn++) { in do_migrate_range()
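do_migrate_range() evacuates every used page in the range: a linear pfn scan isolates LRU and hugetlb pages onto a local list, which is then handed to the page migration core. A skeleton of the scan (isolation details elided; the migrate_pages() call at the end is the assumed destination of the list):

    LIST_HEAD(source);

    for (pfn = start_pfn; pfn < end_pfn; pfn++) {
            if (!pfn_valid(pfn))
                    continue;
            page = pfn_to_page(pfn);
            /* isolate the page (LRU, hugetlb, ...) onto 'source';
             * compound pages advance pfn past their tail pages */
    }
    if (!list_empty(&source)) {
            /* migrate_pages(&source, ...) moves everything off this
             * range, preferring memory on other nodes as the target */
    }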
1837 static int count_system_ram_pages_cb(unsigned long start_pfn, in count_system_ram_pages_cb() argument
1846 int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages, in offline_pages() argument
1849 const unsigned long end_pfn = start_pfn + nr_pages; in offline_pages()
1865 !IS_ALIGNED(start_pfn, pageblock_nr_pages) || in offline_pages()
1866 !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))) in offline_pages()
1879 walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages, in offline_pages()
1889 zone = test_pages_in_a_zone(start_pfn, end_pfn); in offline_pages()
1905 ret = start_isolate_page_range(start_pfn, end_pfn, in offline_pages()
1913 arg.start_pfn = start_pfn; in offline_pages()
1925 pfn = start_pfn; in offline_pages()
1955 ret = dissolve_free_huge_pages(start_pfn, end_pfn); in offline_pages()
1961 ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE); in offline_pages()
1966 __offline_isolated_pages(start_pfn, end_pfn); in offline_pages()
1982 adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages); in offline_pages()
1983 adjust_present_page_count(pfn_to_page(start_pfn), group, -nr_pages); in offline_pages()
2002 remove_pfn_range_from_zone(zone, start_pfn, nr_pages); in offline_pages()
2007 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); in offline_pages()
2014 (unsigned long long) start_pfn << PAGE_SHIFT, in offline_pages()
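Taken together, the offline_pages() matches spell out the offlining pipeline. A condensed outline stitched from them (retry loops, locking, and error unwinding are elided; argument lists not visible in the matches are assumptions):

    if (!IS_ALIGNED(start_pfn, pageblock_nr_pages) ||
        !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))
            return -EINVAL;                          /* range must be aligned */
    walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages,
                          count_system_ram_pages_cb);   /* all system RAM? */
    zone = test_pages_in_a_zone(start_pfn, end_pfn);    /* one zone only? */
    ret = start_isolate_page_range(start_pfn, end_pfn,
                                   MIGRATE_MOVABLE, ...);  /* block allocators */
    arg.start_pfn = start_pfn;
    memory_notify(MEM_GOING_OFFLINE, &arg);
    do {
            do_migrate_range(pfn, end_pfn);          /* evacuate used pages */
            ret = dissolve_free_huge_pages(start_pfn, end_pfn);
    } while (test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE));
    __offline_isolated_pages(start_pfn, end_pfn);    /* pull from the buddy */
    adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
    adjust_present_page_count(pfn_to_page(start_pfn), group, -nr_pages);
    remove_pfn_range_from_zone(zone, start_pfn, nr_pages);

If anything fails after isolation succeeds, undo_isolate_page_range() (line 2007 above) restores the pageblocks to MIGRATE_MOVABLE before the error is returned; the log message at line 2014 converts start_pfn back to a physical address with << PAGE_SHIFT.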