/linux/mm/

page_ext.c
    138   base = NODE_DATA(page_to_nid(page))->node_page_ext;               in lookup_page_ext()
    147   index = pfn - round_down(node_start_pfn(page_to_nid(page)),       in lookup_page_ext()

list_lru.c
    123   int nid = page_to_nid(virt_to_page(item));                        in list_lru_add()
    147   int nid = page_to_nid(virt_to_page(item));                        in list_lru_del()
hugetlb.c
    1113  int nid = page_to_nid(page);                                      in enqueue_huge_page()
    1422  int nid = page_to_nid(page);                                      in __remove_hugetlb_page()
    1489  int nid = page_to_nid(page);                                      in add_hugetlb_page()
    1663  int nid = page_to_nid(page);                                      in free_huge_page()
    1973  prep_new_huge_page(h, page, page_to_nid(page));                   in alloc_fresh_huge_page()
    2217  h->surplus_huge_pages_node[page_to_nid(page)]++;                  in alloc_surplus_huge_page()
    2704  int nid = page_to_nid(old_page);                                  in alloc_and_dissolve_huge_page()
    3025  prep_new_huge_page(h, page, page_to_nid(page));                   in gather_bootmem_prealloc()
    3408  int i, nid = page_to_nid(page);                                   in demote_free_huge_page()
    6807  int old_nid = page_to_nid(oldpage);                               in move_hugetlb_state()
    [all …]
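The hugetlb hits cluster around per-node bookkeeping: the hstate keeps a free list and counters per NUMA node, and page_to_nid() picks the slot. Roughly what the enqueue path listed above looks like (a sketch of code of this vintage, not an exact copy):

    /* Sketch: hugetlb keeps per-node free lists, so freeing a huge
     * page first asks which node the page belongs to. */
    static void enqueue_huge_page(struct hstate *h, struct page *page)
    {
            int nid = page_to_nid(page);

            lockdep_assert_held(&hugetlb_lock);
            list_move(&page->lru, &h->hugepage_freelists[nid]);
            h->free_huge_pages++;
            h->free_huge_pages_node[nid]++;
    }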
sparse.c
    46    int page_to_nid(const struct page *page)                          in page_to_nid() (definition)
    50    EXPORT_SYMBOL(page_to_nid);
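These two hits are the out-of-line definition: when the node number does not fit into page->flags (NODE_NOT_IN_PAGE_FLAGS), page_to_nid() becomes a real exported function that resolves the node through a per-section table. A sketch of that definition, matching the lines listed above:

    /* With NODE_NOT_IN_PAGE_FLAGS there is no room for the node id
     * in page->flags, so the node is recorded per memory section. */
    int page_to_nid(const struct page *page)
    {
            return section_to_node_table[page_to_section(page)];
    }
    EXPORT_SYMBOL(page_to_nid);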
slub.c
    1372  struct kmem_cache_node *n = get_node(s, page_to_nid(page));       in free_debug_processing()
    1981  inc_slabs_node(s, page_to_nid(page), page->objects);              in allocate_slab()
    2038  dec_slabs_node(s, page_to_nid(page), page->objects);              in discard_slab()
    2485  n2 = get_node(s, page_to_nid(page));                              in __unfreeze_partials()
    2754  if (node != NUMA_NO_NODE && page_to_nid(page) != node)            in node_match()
    3350  n = get_node(s, page_to_nid(page));                               in __slab_free()
    3934  if (page_to_nid(page) != node) {                                  in early_kmem_cache_node_alloc()
    5157  node_set(page_to_nid(virt_to_page(track)), l->nodes);             in add_location()
    5188  node_set(page_to_nid(virt_to_page(track)), l->nodes);             in add_location()
    5249  node = page_to_nid(page);                                         in show_slab_objects()
    [all …]
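The hit at line 2754 is SLUB's allocation fast-path check: a per-cpu slab is reused only if it sits on the requested node. A sketch consistent with the line shown above:

    /* SLUB fast path: a cpu slab only "matches" if the caller did
     * not ask for a specific node, or the slab's page is on it. */
    static inline int node_match(struct page *page, int node)
    {
    #ifdef CONFIG_NUMA
            if (node != NUMA_NO_NODE && page_to_nid(page) != node)
                    return 0;
    #endif
            return 1;
    }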
mempolicy.c
    433   int nid = page_to_nid(page);                                      in queue_pages_required()
    911   err = page_to_nid(p);                                             in lookup_node()
    2040  if (page && page_to_nid(page) == nid) {                           in alloc_page_interleave()
    2474  int curnid = page_to_nid(page);                                   in mpol_misplaced()

sparse-vmemmap.c
    334   int nid = page_to_nid((struct page *)start);                      in alloc_vmemmap_page_list()

slab.c
    559   page_node = page_to_nid(page);                                    in cache_free_pfmemalloc()
    796   int page_node = page_to_nid(virt_to_page(objp));                  in cache_free_alien()
    2597  page_node = page_to_nid(page);                                    in cache_grow_begin()
    2652  n = get_node(cachep, page_to_nid(page));                          in cache_grow_end()
    3143  nid = page_to_nid(page);                                          in fallback_alloc()

huge_memory.c
    505   struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));         in get_deferred_split_queue()
    515   struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));         in get_deferred_split_queue()
    1452  page_nid = page_to_nid(page);                                     in do_huge_pmd_numa_page()
    2784  set_shrinker_bit(memcg, page_to_nid(page),                        in deferred_split_huge_page()

mprotect.c
    112   if (target_node == page_to_nid(page))                             in change_pte_range()

slob.c
    326   if (node != NUMA_NO_NODE && page_to_nid(sp) != node)              in slob_alloc()
migrate.c
    1611  nid = page_to_nid(page);                                          in alloc_migration_target()
    1706  if (page_to_nid(page) == node)                                    in add_page_for_migration()
    1885  err = page ? page_to_nid(page) : -ENOENT;                         in do_pages_stat_array()
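The do_pages_stat_array() hit is the kernel side of move_pages(2) in query mode: with a NULL nodes array the syscall only reports, via page_to_nid(), which node each page currently sits on. A minimal userspace sketch (assumes libnuma's <numaif.h>; link with -lnuma):

    #include <numaif.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            void *pages[1];
            int status[1];

            pages[0] = malloc(4096);
            *(volatile char *)pages[0] = 0;  /* fault the page in */

            /* nodes == NULL: query only; status[0] becomes the node
             * id, or a negative errno such as -ENOENT (unmapped). */
            if (move_pages(0 /* self */, 1, pages, NULL, status, 0) == 0)
                    printf("page is on node %d\n", status[0]);
            return 0;
    }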
khugepaged.c
    1312  node = page_to_nid(page);                                         in khugepaged_scan_pmd()
    2021  node = page_to_nid(page);                                         in khugepaged_scan_file()

kmemleak.c
    1458  if (page_to_nid(page) != i)                                       in kmemleak_scan()

vmscan.c
    2613  nid = page_to_nid(page);                                          in reclaim_pages()
    2617  if (nid == page_to_nid(page)) {                                   in reclaim_pages()
/linux/include/linux/

mm.h
    1380  extern int page_to_nid(const struct page *page);
    1382  static inline int page_to_nid(const struct page *page)            in page_to_nid() (definition)
    1392  return page_to_nid(&folio->page);                                 in folio_nid()
    1462  return page_to_nid(page); /* XXX */                               in page_cpupid_xchg_last()
    1467  return page_to_nid(page); /* XXX */                               in page_cpupid_last()
    1554  return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];  in page_zone()
    1559  return NODE_DATA(page_to_nid(page));                              in page_pgdat()
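mm.h carries both declarations seen at lines 1380 and 1382: the extern prototype for the sparse.c version, and the common inline that pulls the node id straight out of page->flags. A simplified sketch of the inline (the real definition additionally wraps the page in a poison check):

    /* Common case: the node id lives in the upper bits of
     * page->flags, so the lookup is a shift and a mask. */
    static inline int page_to_nid(const struct page *page)
    {
            return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
    }

page_zone() and page_pgdat() at lines 1554 and 1559 are thin wrappers over this, mapping a page to its zone and to its node's pglist_data.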
mmzone.h
    1559  page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
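The temporary __pfn_to_nid_pfn marks this hit as the body of the pfn_to_nid() macro; roughly:

    /* Statement-expression macro: evaluate the pfn once, then map
     * pfn -> struct page -> node id. */
    #define pfn_to_nid(pfn)                                         \
    ({                                                              \
            unsigned long __pfn_to_nid_pfn = (pfn);                 \
            page_to_nid(pfn_to_page(__pfn_to_nid_pfn));             \
    })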
/linux/kernel/dma/

contiguous.c
    360   if (cma_release(dma_contiguous_pernuma_area[page_to_nid(page)],   in dma_free_contiguous()
/linux/drivers/net/ethernet/cavium/liquidio/

octeon_network.h
    342   unlikely(page_to_nid(pg_info->page) != numa_node_id())) {         in recv_buffer_recycle()
/linux/fs/proc/

task_mmu.c
    1741  md->node[page_to_nid(page)] += nr_pages;                          in gather_stats()
    1760  nid = page_to_nid(page);                                          in can_gather_numa_stats()
    1785  nid = page_to_nid(page);                                          in can_gather_numa_stats_pmd()
/linux/net/core/

page_pool.c
    142   if (likely(page_to_nid(page) == pref_nid)) {                      in page_pool_refill_alloc_cache()
/linux/drivers/virt/nitro_enclaves/

ne_misc_dev.c
    816   if (ne_enclave->numa_node != page_to_nid(mem_region_page)) {      in ne_sanity_check_user_mem_region_page()
/linux/drivers/net/ethernet/hisilicon/hns/

hns_enet.c
    436   if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))       in hns_nic_reuse_page()
    580   if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))         in hns_nic_poll_rx_skb()
/linux/drivers/net/ethernet/mellanox/mlx4/

en_rx.c
    499   page_to_nid(page) != numa_mem_id();                               in mlx4_en_complete_rx_desc()
/linux/drivers/net/ethernet/pensando/ionic/

ionic_txrx.c
    102   if (page_to_nid(buf_info->page) != numa_mem_id())                 in ionic_rx_buf_recycle()
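The liquidio, hns, mlx4, and ionic hits all share one idiom: an RX page is recycled back into the ring only while it is local to the CPU's memory node; otherwise it is released and refilled from the local node. A hedged sketch of that shared pattern (rx_buf_is_reusable() is a hypothetical helper, not any one driver's code):

    /* Shared driver idiom (hypothetical helper): keep an RX page
     * only if it is on this CPU's local memory node; a remote page
     * would pay cross-node traffic on every packet it serves. */
    static bool rx_buf_is_reusable(struct page *page)
    {
            if (page_to_nid(page) != numa_mem_id())
                    return false;           /* free it, refill locally */

            /* ... driver-specific checks: refcount, offset, size ... */
            return true;
    }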
|