/linux/mm/

folio-compat.c
      13  return folio_mapping(page_folio(page));  in page_mapping()
      19  return folio_unlock(page_folio(page));  in unlock_page()
      25  return folio_end_writeback(page_folio(page));  in end_page_writeback()
      37  return folio_wait_stable(page_folio(page));  in wait_for_stable_page()
      43  return folio_mapped(page_folio(page));  in page_mapped()
      49  folio_mark_accessed(page_folio(page));  in mark_page_accessed()
      58  page_folio(page), extra_count);  in migrate_page_move_mapping()
      64  folio_migrate_flags(page_folio(newpage), page_folio(page));  in migrate_page_states()
      70  folio_migrate_copy(page_folio(newpage), page_folio(page));  in migrate_page_copy()
      83  return folio_mark_dirty(page_folio(page));  in set_page_dirty()
      [all …]

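Every mm/folio-compat.c hit above has the same shape: the legacy page-based entry point shrinks to a single line that resolves the containing folio and forwards to the folio API. A minimal sketch of that pattern, reconstructed from the matches listed here; the exact include list and the EXPORT_SYMBOL lines of the real file are omitted and assumed:

    #include <linux/pagemap.h>
    #include <linux/swap.h>

    /* Compatibility wrappers: each legacy page call converts the page to its
     * containing folio and forwards to the folio-based implementation. */
    struct address_space *page_mapping(struct page *page)
    {
            return folio_mapping(page_folio(page));
    }

    void unlock_page(struct page *page)
    {
            return folio_unlock(page_folio(page));
    }

    void end_page_writeback(struct page *page)
    {
            return folio_end_writeback(page_folio(page));
    }

    void mark_page_accessed(struct page *page)
    {
            folio_mark_accessed(page_folio(page));
    }
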
swap.c
      83  struct folio *folio = page_folio(page);  in __page_cache_release()
      98  mem_cgroup_uncharge(page_folio(page));  in __put_single_page()
     202  struct folio *folio = page_folio(page);  in pagevec_lru_move_fn()
     221  struct folio *folio = page_folio(page);  in pagevec_move_tail_fn()
     331  return __folio_activate(page_folio(page), lruvec);  in __activate_page()
     912  struct folio *folio = page_folio(page);  in release_pages()
    1068  struct folio *folio = page_folio(pvec->pages[i]);  in __pagevec_lru_add()

util.c
     660  return folio_raw_mapping(page_folio(page));  in page_rmapping()
     691  struct folio *folio = page_folio(page);  in page_anon_vma()

filemap.c
     852  struct folio *fold = page_folio(old);  in replace_page_cache_page()
     853  struct folio *fnew = page_folio(new);  in replace_page_cache_page()
     987  return __filemap_add_folio(mapping, page_folio(page), offset,  in add_to_page_cache_locked()
    1456  return folio_wait_bit_common(page_folio(page), PG_locked, state,  in put_and_wait_on_page_locked()
    2434  struct folio *folio = page_folio(page);  in filemap_update_page()
    2916  struct folio *folio = page_folio(page);  in lock_page_maybe_drop_mmap()

khugepaged.c
    1093  if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {  in collapse_huge_page()
    1217  mem_cgroup_uncharge(page_folio(*hpage));  in collapse_huge_page()
    1664  if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {  in collapse_file()
    1986  mem_cgroup_uncharge(page_folio(*hpage));  in collapse_file()

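The khugepaged hits (and the userfaultfd, migrate and memremap ones further down) show the other common usage: callers pass page_folio() straight into a folio-taking API rather than going through a compat wrapper. A hedged sketch of that call pattern, assuming the folio-based mem_cgroup_charge()/mem_cgroup_uncharge() prototypes of this kernel series; charge_new_page() and give_back_page() are made-up helper names and the error handling is illustrative only:

    #include <linux/memcontrol.h>
    #include <linux/mm.h>

    /* Charge a freshly allocated page to the mm's memcg by handing its folio
     * to the folio-based charge API. */
    static int charge_new_page(struct page *new_page, struct mm_struct *mm,
                               gfp_t gfp)
    {
            if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp)))
                    return -ENOMEM;
            return 0;
    }

    /* Undo the charge on a later failure path. */
    static void give_back_page(struct page *new_page)
    {
            mem_cgroup_uncharge(page_folio(new_page));
    }
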
memremap.c
     508  mem_cgroup_uncharge(page_folio(page));  in free_devmap_managed_page()

mlock.c
     274  struct folio *folio = page_folio(page);  in __munlock_pagevec()

userfaultfd.c
     166  if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL))  in mcopy_atomic_pte()

migrate.c
     632  struct folio *newfolio = page_folio(newpage);  in migrate_page()
     633  struct folio *folio = page_folio(page);  in migrate_page()
    2753  if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))  in migrate_vma_insert_page()

memcontrol.c
    2046  folio_memcg_lock(page_folio(page));  in lock_page_memcg()
    2078  folio_memcg_unlock(page_folio(page));  in unlock_page_memcg()
    3050  struct folio *folio = page_folio(page);  in __memcg_kmem_uncharge_page()
    3293  struct folio *folio = page_folio(head);  in split_page_memcg()
    5553  struct folio *folio = page_folio(page);  in mem_cgroup_move_account()
    6708  struct folio *folio = page_folio(page);  in mem_cgroup_swapin_charge_page()

swap_state.c
     501  workingset_refault(page_folio(page), shadow);  in __read_swap_cache_async()

/linux/include/linux/

page_idle.h
     112  return folio_test_young(page_folio(page));  in page_is_young()
     117  folio_set_young(page_folio(page));  in set_page_young()
     122  return folio_test_clear_young(page_folio(page));  in test_and_clear_page_young()
     127  return folio_test_idle(page_folio(page));  in page_is_idle()
     132  folio_set_idle(page_folio(page));  in set_page_idle()
     137  folio_clear_idle(page_folio(page));  in clear_page_idle()

mm_inline.h
      30  return folio_is_file_lru(page_folio(page));  in page_is_file_lru()
      67  __folio_clear_lru_flags(page_folio(page));  in __clear_page_lru_flags()
     106  lruvec_add_folio(lruvec, page_folio(page));  in add_page_to_lru_list()
     122  lruvec_add_folio_tail(lruvec, page_folio(page));  in add_page_to_lru_list_tail()
     136  lruvec_del_folio(lruvec, page_folio(page));  in del_page_from_lru_list()

pagemap.h
     257  return folio_file_mapping(page_folio(page));  in page_file_mapping()
     265  struct folio *folio = page_folio(page);  in page_mapping_file()
     354  folio_attach_private(page_folio(page), data);  in attach_page_private()
     359  return folio_detach_private(page_folio(page));  in detach_page_private()
     775  return folio_trylock(page_folio(page));  in trylock_page()
     793  folio = page_folio(page);  in lock_page()
     813  return folio_lock_killable(page_folio(page));  in lock_page_killable()
     829  folio = page_folio(page);  in lock_page_or_retry()
     862  folio_wait_locked(page_folio(page));  in wait_on_page_locked()
     900  folio_cancel_dirty(page_folio(page));  in cancel_dirty_page()
     [all …]

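In the headers the same forwarding happens through static inlines. The trylock_page()/lock_page() hits in pagemap.h are the clearest case: trylock forwards directly, while lock_page() resolves the folio once, tries the lock, and falls back to the sleeping folio lock. A sketch of those two inlines as a pagemap.h fragment, reconstructed from the matches above; the might_sleep() call and the __folio_lock() fallback are assumptions about the surrounding code:

    static inline bool trylock_page(struct page *page)
    {
            return folio_trylock(page_folio(page));
    }

    static inline void lock_page(struct page *page)
    {
            struct folio *folio;

            might_sleep();
            folio = page_folio(page);
            if (!folio_trylock(folio))
                    __folio_lock(folio);
    }
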
netfs.h
      91  folio_start_fscache(page_folio(page));  in set_page_fscache()
      96  folio_end_private_2(page_folio(page));  in end_page_fscache()
     101  folio_wait_private_2(page_folio(page));  in wait_on_page_fscache()
     106  return folio_wait_private_2_killable(page_folio(page));  in wait_on_page_fscache_killable()

page-flags.h
     220  #define page_folio(p) (_Generic((p), \  (macro definition)
     493  return folio_test_swapcache(page_folio(page));  in PageSwapCache()
     586  return folio_test_anon(page_folio(page));  in PageAnon()
     610  return folio_test_ksm(page_folio(page));  in PageKsm()
     637  return folio_test_uptodate(page_folio(page));  in PageUptodate()
     679  folio_start_writeback_keepwrite(page_folio(page));  in set_page_writeback_keepwrite()

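The page-flags.h hit at line 220 is the definition everything else in this listing leans on: page_folio() uses _Generic() so that a const struct page pointer yields a const struct folio pointer and a mutable one yields a mutable one, in both cases by resolving the compound head. A sketch of the full macro as a page-flags.h fragment, reconstructed from memory of this kernel series rather than copied from the tree:

    /* Resolve a page to the folio that contains it.  _Generic() preserves
     * constness: a const page pointer produces a const folio pointer. */
    #define page_folio(p)           (_Generic((p),                          \
            const struct page *:    (const struct folio *)_compound_head(p), \
            struct page *:          (struct folio *)_compound_head(p)))
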
rmap.h
     306  return folio_mkclean(page_folio(page));  in page_mkclean()

page_ref.h
      94  return folio_ref_count(page_folio(page));  in page_count()

writeback.h
     396  folio_account_redirty(page_folio(page));  in account_page_redirty()

memcontrol.h
     450  return folio_memcg(page_folio(page));  in page_memcg()
     598  return folio_memcg_kmem(page_folio(page));  in PageMemcgKmem()

/linux/fs/9p/

vfs_addr.c
     111  struct folio *folio = page_folio(page);  in v9fs_vfs_readpage()
     135  struct folio *folio = page_folio(page);  in v9fs_release_page()
     159  struct folio *folio = page_folio(page);  in v9fs_invalidate_page()
     194  struct folio *folio = page_folio(page);  in v9fs_vfs_writepage()
     223  struct folio *folio = page_folio(page);  in v9fs_launder_page()
     304  struct folio *folio = page_folio(subpage);  in v9fs_write_end()

vfs_file.c
     531  struct folio *folio = page_folio(vmf->page);  in v9fs_vm_page_mkwrite()

/linux/fs/afs/

write.c
     112  struct folio *folio = page_folio(subpage);  in afs_write_end()
     508  folio = page_folio(pvec.pages[i]);  in afs_extend_writeback()
     646  struct folio *folio = page_folio(subpage);  in afs_writepage()
     686  folio = page_folio(head_page);  in afs_writepages_region()
     852  struct folio *folio = page_folio(vmf->page);  in afs_page_mkwrite()
     940  struct folio *folio = page_folio(subpage);  in afs_launder_page()

file.c
     327  struct folio *folio = page_folio(page);  in afs_symlink_readpage()
     391  struct folio *folio = page_folio(page);  in afs_readpage()
     466  struct folio *folio = page_folio(page);  in afs_invalidatepage()
     485  struct folio *folio = page_folio(page);  in afs_releasepage()

/linux/fs/cachefiles/

rdwr.c
     110  folio_add_wait_queue(page_folio(backpage), &monitor->monitor);  in cachefiles_read_reissue()
     297  folio_add_wait_queue(page_folio(backpage), &monitor->monitor);  in cachefiles_read_backing_file_one()
     551  folio_add_wait_queue(page_folio(backpage), &monitor->monitor);  in cachefiles_read_backing_file()

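On the filesystem side (9p, afs and cachefiles above), the address_space operations still receive a struct page from the VFS, so the first thing each callback does is resolve the folio and work with it for the rest of the function. A hypothetical example of that pattern, not taken from any of the filesystems listed; myfs_writepage() is a made-up name and the body only stands in for real I/O submission:

    #include <linux/pagemap.h>
    #include <linux/writeback.h>

    /* Hypothetical ->writepage: convert the VFS-supplied page to its folio up
     * front and use only folio calls afterwards, mirroring v9fs_vfs_writepage()
     * and afs_writepage() in the listing above. */
    static int myfs_writepage(struct page *page, struct writeback_control *wbc)
    {
            struct folio *folio = page_folio(page);

            folio_start_writeback(folio);
            folio_unlock(folio);

            /* ... write folio_size(folio) bytes starting at folio_pos(folio) ... */

            folio_end_writeback(folio);
            return 0;
    }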