Lines matching references to vma in mm/rmap.c (the Linux kernel's reverse-mapping code)
145 static void anon_vma_chain_link(struct vm_area_struct *vma, in anon_vma_chain_link() argument
149 avc->vma = vma; in anon_vma_chain_link()
151 list_add(&avc->same_vma, &vma->anon_vma_chain); in anon_vma_chain_link()
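These three hits are the body of anon_vma_chain_link(): an anon_vma_chain (AVC) is the join record between one VMA and one anon_vma. For context, a hedged reconstruction of the whole helper as it looks around v5.14 (the two lines not shown in the listing, the anon_vma back-pointer and the interval-tree insert, are filled in from that era and may differ in other releases):

static void anon_vma_chain_link(struct vm_area_struct *vma,
                                struct anon_vma_chain *avc,
                                struct anon_vma *anon_vma)
{
        avc->vma = vma;                                  /* AVC -> VMA back-pointer      */
        avc->anon_vma = anon_vma;                        /* AVC -> anon_vma back-pointer */
        list_add(&avc->same_vma, &vma->anon_vma_chain);  /* join the VMA's AVC list      */
        anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); /* join the anon_vma's tree */
}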
183 int __anon_vma_prepare(struct vm_area_struct *vma) in __anon_vma_prepare() argument
185 struct mm_struct *mm = vma->vm_mm; in __anon_vma_prepare()
195 anon_vma = find_mergeable_anon_vma(vma); in __anon_vma_prepare()
207 if (likely(!vma->anon_vma)) { in __anon_vma_prepare()
208 vma->anon_vma = anon_vma; in __anon_vma_prepare()
209 anon_vma_chain_link(vma, avc, anon_vma); in __anon_vma_prepare()
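__anon_vma_prepare() is the slow path that attaches an anon_vma to a VMA on its first anonymous fault, reusing a neighbour's anon_vma when find_mergeable_anon_vma() finds one. Callers go through the inline fast path in include/linux/rmap.h, which around v5.14 is simply:

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
        if (likely(vma->anon_vma))      /* already set up: nothing to do */
                return 0;

        return __anon_vma_prepare(vma); /* first anonymous fault in this VMA */
}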
329 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) in anon_vma_fork() argument
340 vma->anon_vma = NULL; in anon_vma_fork()
346 error = anon_vma_clone(vma, pvma); in anon_vma_fork()
351 if (vma->anon_vma) in anon_vma_fork()
375 vma->anon_vma = anon_vma; in anon_vma_fork()
377 anon_vma_chain_link(vma, avc, anon_vma); in anon_vma_fork()
386 unlink_anon_vmas(vma); in anon_vma_fork()
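anon_vma_fork() runs at fork time for each parent VMA that has anonymous memory: it first clones the parent's AVC chain (so rmap can still reach pages the child shares via COW) and then, unless an existing anon_vma can be reused, gives the child its own anon_vma for pages faulted after the fork; on failure it unwinds with unlink_anon_vmas(). A simplified sketch of the caller in dup_mmap() (kernel/fork.c, v5.14-era; policy, userfaultfd and VM_WIPEONFORK handling omitted):

tmp = vm_area_dup(mpnt);                /* copy the parent's VMA 'mpnt'          */
if (!tmp)
        goto fail_nomem;
tmp->vm_mm = mm;                        /* the child's mm                        */
if (anon_vma_fork(tmp, mpnt))           /* clone parent AVCs, get own anon_vma   */
        goto fail_nomem_anon_vma_fork;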
390 void unlink_anon_vmas(struct vm_area_struct *vma) in unlink_anon_vmas() argument
399 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
417 if (vma->anon_vma) { in unlink_anon_vmas()
418 vma->anon_vma->degree--; in unlink_anon_vmas()
424 vma->anon_vma = NULL; in unlink_anon_vmas()
433 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
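Both loops over vma->anon_vma_chain use list_for_each_entry_safe() because entries are removed while walking; unlink_anon_vmas() needs two passes so that emptied anon_vmas can be dropped only after the root lock is released. The deletion-safe iteration idiom in isolation:

struct anon_vma_chain *avc, *next;

/* 'next' caches the successor before the body runs, so unlinking and
 * freeing 'avc' inside the loop is safe. */
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
        list_del(&avc->same_vma);
        anon_vma_chain_free(avc);
}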
709 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) in page_address_in_vma() argument
717 if (!vma->anon_vma || !page__anon_vma || in page_address_in_vma()
718 vma->anon_vma->root != page__anon_vma->root) in page_address_in_vma()
720 } else if (!vma->vm_file) { in page_address_in_vma()
722 } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) { in page_address_in_vma()
726 return vma_address(page, vma); in page_address_in_vma()
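page_address_in_vma() maps a page back to the user virtual address it occupies in a particular VMA, or -EFAULT if the page cannot belong to that VMA (mismatched anon_vma root, or a different file mapping). A minimal caller sketch:

unsigned long address = page_address_in_vma(page, vma);

if (address == -EFAULT)
        return;         /* this vma does not map the page */
/* 'address' is where 'page' appears (or would appear) in vma's address space */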
772 static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, in page_referenced_one() argument
778 .vma = vma, in page_referenced_one()
786 if (vma->vm_flags & VM_LOCKED) { in page_referenced_one()
793 if (ptep_clear_flush_young_notify(vma, address, in page_referenced_one()
803 if (likely(!(vma->vm_flags & VM_SEQ_READ))) in page_referenced_one()
807 if (pmdp_clear_flush_young_notify(vma, address, in page_referenced_one()
825 pra->vm_flags |= vma->vm_flags; in page_referenced_one()
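page_referenced_one(), like most *_one callbacks in this file, initializes a struct page_vma_mapped_walk with the page, the VMA and the start address, then loops over every pte or pmd that maps the page in this VMA, clearing the young bit and accumulating vm_flags. The shared walk idiom, sketched against the v5.14 API:

struct page_vma_mapped_walk pvmw = {
        .page    = page,
        .vma     = vma,
        .address = address,
};

while (page_vma_mapped_walk(&pvmw)) {
        if (pvmw.pte) {
                /* the page is mapped by a normal pte at pvmw.address */
        } else {
                /* pvmw.pmd is set: the page is mapped by a huge pmd  */
        }
        /* to stop early, call page_vma_mapped_walk_done(&pvmw) and break */
}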
834 static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) in invalid_page_referenced_vma() argument
839 if (!mm_match_cgroup(vma->vm_mm, memcg)) in invalid_page_referenced_vma()
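invalid_page_referenced_vma() is the invalid_vma filter: when reclaim works on behalf of a memcg, VMAs whose mm belongs to another cgroup are skipped. Both callbacks reach rmap_walk() through a rmap_walk_control, roughly as page_referenced() wires them up in v5.14:

struct rmap_walk_control rwc = {
        .rmap_one  = page_referenced_one,       /* run for each mapping VMA        */
        .arg       = (void *)&pra,              /* page_referenced_arg accumulator */
        .anon_lock = page_lock_anon_vma_read,   /* how to lock the anon_vma        */
};

if (memcg)                                      /* only filter when memcg-driven   */
        rwc.invalid_vma = invalid_page_referenced_vma;

rmap_walk(page, &rwc);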
902 static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, in page_mkclean_one() argument
907 .vma = vma, in page_mkclean_one()
919 0, vma, vma->vm_mm, address, in page_mkclean_one()
920 vma_address_end(page, vma)); in page_mkclean_one()
934 flush_cache_page(vma, address, pte_pfn(*pte)); in page_mkclean_one()
935 entry = ptep_clear_flush(vma, address, pte); in page_mkclean_one()
938 set_pte_at(vma->vm_mm, address, pte, entry); in page_mkclean_one()
948 flush_cache_page(vma, address, page_to_pfn(page)); in page_mkclean_one()
949 entry = pmdp_invalidate(vma, address, pmd); in page_mkclean_one()
952 set_pmd_at(vma->vm_mm, address, pmd, entry); in page_mkclean_one()
976 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) in invalid_mkclean_vma() argument
978 if (vma->vm_flags & VM_SHARED) in invalid_mkclean_vma()
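page_mkclean_one() visits only shared file mappings (invalid_mkclean_vma() filters out everything without VM_SHARED) and, for each mapping pte, write-protects and cleans it so the next store faults and re-dirties the page. The pte-level step, with the two lines the listing omits filled in from the v5.14 code:

flush_cache_page(vma, address, pte_pfn(*pte));
entry = ptep_clear_flush(vma, address, pte);     /* fetch and clear atomically */
entry = pte_wrprotect(entry);                    /* drop write permission      */
entry = pte_mkclean(entry);                      /* drop the dirty bit         */
set_pte_at(vma->vm_mm, address, pte, entry);     /* reinstall the clean pte    */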
1019 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) in page_move_anon_rmap() argument
1021 struct anon_vma *anon_vma = vma->anon_vma; in page_move_anon_rmap()
1026 VM_BUG_ON_VMA(!anon_vma, vma); in page_move_anon_rmap()
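page_move_anon_rmap() re-points an exclusively owned anonymous page at the faulting VMA's anon_vma (the COW-reuse case). Anonymous pages keep their anon_vma in page->mapping with the low PAGE_MAPPING_ANON bit set; the v5.14 body does essentially this:

anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;        /* tag as an anon mapping */
/* Write the pointer and the tag together so concurrent PageAnon() readers
 * never see one without the other. */
WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);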
1045 struct vm_area_struct *vma, unsigned long address, int exclusive) in __page_set_anon_rmap() argument
1047 struct anon_vma *anon_vma = vma->anon_vma; in __page_set_anon_rmap()
1070 page->index = linear_page_index(vma, address); in __page_set_anon_rmap()
1080 struct vm_area_struct *vma, unsigned long address) in __page_check_anon_rmap() argument
1093 VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page); in __page_check_anon_rmap()
1094 VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), in __page_check_anon_rmap()
1111 struct vm_area_struct *vma, unsigned long address, bool compound) in page_add_anon_rmap() argument
1113 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0); in page_add_anon_rmap()
1122 struct vm_area_struct *vma, unsigned long address, int flags) in do_page_add_anon_rmap() argument
1162 __page_set_anon_rmap(page, vma, address, in do_page_add_anon_rmap()
1165 __page_check_anon_rmap(page, vma, address); in do_page_add_anon_rmap()
1180 struct vm_area_struct *vma, unsigned long address, bool compound) in page_add_new_anon_rmap() argument
1184 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); in page_add_new_anon_rmap()
1201 __page_set_anon_rmap(page, vma, address, 1); in page_add_new_anon_rmap()
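page_add_new_anon_rmap() is for pages that are brand new and therefore exclusive to this VMA, while page_add_anon_rmap()/do_page_add_anon_rmap() handle pages that may already be mapped elsewhere (fork, swap-in); both end up in __page_set_anon_rmap(), which records the anon_vma and the linear index. A hedged sketch of the common anonymous-fault pattern, in the style of v5.14's do_anonymous_page():

page = alloc_zeroed_user_highpage_movable(vma, address);
if (!page)
        return VM_FAULT_OOM;
entry = mk_pte(page, vma->vm_page_prot);
if (vma->vm_flags & VM_WRITE)
        entry = pte_mkwrite(pte_mkdirty(entry));

page_add_new_anon_rmap(page, vma, address, false);       /* order-0, not a THP */
lru_cache_add_inactive_or_unevictable(page, vma);
set_pte_at(vma->vm_mm, address, pte, entry);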
1393 static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, in try_to_unmap_one() argument
1396 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one()
1399 .vma = vma, in try_to_unmap_one()
1418 split_huge_pmd_address(vma, address, false, page); in try_to_unmap_one()
1429 address + PAGE_SIZE : vma_address_end(page, vma); in try_to_unmap_one()
1430 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in try_to_unmap_one()
1437 adjust_range_if_pmd_sharing_possible(vma, &range.start, in try_to_unmap_one()
1447 (vma->vm_flags & VM_LOCKED)) { in try_to_unmap_one()
1475 if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) { in try_to_unmap_one()
1483 flush_cache_range(vma, range.start, range.end); in try_to_unmap_one()
1484 flush_tlb_range(vma, range.start, range.end); in try_to_unmap_one()
1503 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); in try_to_unmap_one()
1517 pteval = ptep_clear_flush(vma, address, pvmw.pte); in try_to_unmap_one()
1533 vma_mmu_pagesize(vma)); in try_to_unmap_one()
1539 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { in try_to_unmap_one()
1598 if (arch_unmap_one(mm, vma, address, pteval) < 0) { in try_to_unmap_one()
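try_to_unmap_one() is driven by try_to_unmap(), which packages it into a rmap_walk_control together with a completion check; the v5.14-era wrapper is roughly:

void try_to_unmap(struct page *page, enum ttu_flags flags)
{
        struct rmap_walk_control rwc = {
                .rmap_one  = try_to_unmap_one,
                .arg       = (void *)flags,
                .done      = page_not_mapped,          /* stop once fully unmapped */
                .anon_lock = page_lock_anon_vma_read,
        };

        if (flags & TTU_RMAP_LOCKED)
                rmap_walk_locked(page, &rwc);          /* caller already holds the lock */
        else
                rmap_walk(page, &rwc);
}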
1651 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) in invalid_migration_vma() argument
1653 return vma_is_temporary_stack(vma); in invalid_migration_vma()
1693 static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, in try_to_migrate_one() argument
1696 struct mm_struct *mm = vma->vm_mm; in try_to_migrate_one()
1699 .vma = vma, in try_to_migrate_one()
1722 split_huge_pmd_address(vma, address, true, page); in try_to_migrate_one()
1733 address + PAGE_SIZE : vma_address_end(page, vma); in try_to_migrate_one()
1734 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in try_to_migrate_one()
1741 adjust_range_if_pmd_sharing_possible(vma, &range.start, in try_to_migrate_one()
1771 if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) { in try_to_migrate_one()
1779 flush_cache_range(vma, range.start, range.end); in try_to_migrate_one()
1780 flush_tlb_range(vma, range.start, range.end); in try_to_migrate_one()
1799 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); in try_to_migrate_one()
1800 pteval = ptep_clear_flush(vma, address, pvmw.pte); in try_to_migrate_one()
1853 vma_mmu_pagesize(vma)); in try_to_migrate_one()
1859 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { in try_to_migrate_one()
1878 if (arch_unmap_one(mm, vma, address, pteval) < 0) { in try_to_migrate_one()
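try_to_migrate_one(), filtered by invalid_migration_vma() which skips temporary stack VMAs, does not just drop the mapping: each pte is replaced by a migration swap entry that preserves the pfn and write permission, so later faults wait until migration completes. The core conversion, sketched with v5.14 names and without the soft-dirty/uffd-wp bookkeeping:

pteval = ptep_clear_flush(vma, address, pvmw.pte);

if (pte_write(pteval))
        entry = make_writable_migration_entry(page_to_pfn(subpage));
else
        entry = make_readable_migration_entry(page_to_pfn(subpage));

set_pte_at(mm, address, pvmw.pte, swp_entry_to_pte(entry));
page_remove_rmap(subpage, PageHuge(page));       /* drop the rmap for this mapping */
put_page(page);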
1974 static bool page_mlock_one(struct page *page, struct vm_area_struct *vma, in page_mlock_one() argument
1979 .vma = vma, in page_mlock_one()
1984 if (!(vma->vm_flags & VM_LOCKED)) in page_mlock_one()
1993 if (vma->vm_flags & VM_LOCKED) { in page_mlock_one()
2049 struct vm_area_struct *vma, unsigned long address, void *priv) in page_make_device_exclusive_one() argument
2051 struct mm_struct *mm = vma->vm_mm; in page_make_device_exclusive_one()
2054 .vma = vma, in page_make_device_exclusive_one()
2065 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma, in page_make_device_exclusive_one()
2066 vma->vm_mm, address, min(vma->vm_end, in page_make_device_exclusive_one()
2084 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); in page_make_device_exclusive_one()
2085 pteval = ptep_clear_flush(vma, address, pvmw.pte); in page_make_device_exclusive_one()
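page_make_device_exclusive_one() is the callback behind make_device_exclusive_range(): under an MMU_NOTIFY_EXCLUSIVE notifier range it swaps each mapping pte for a device-exclusive swap entry, so the next CPU access faults and restores normal ownership. The pte rewrite, sketched with v5.14 names (soft-dirty/uffd-wp handling omitted):

pteval = ptep_clear_flush(vma, address, pvmw.pte);

if (pte_write(pteval))
        entry = make_writable_device_exclusive_entry(page_to_pfn(subpage));
else
        entry = make_readable_device_exclusive_entry(page_to_pfn(subpage));

set_pte_at(mm, address, pvmw.pte, swp_entry_to_pte(entry));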
2292 struct vm_area_struct *vma = avc->vma; in rmap_walk_anon() local
2293 unsigned long address = vma_address(page, vma); in rmap_walk_anon()
2295 VM_BUG_ON_VMA(address == -EFAULT, vma); in rmap_walk_anon()
2298 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_anon()
2301 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_anon()
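rmap_walk_anon() looks up the page's anon_vma and walks the anon_vma interval tree over the page's pgoff range, so only AVCs whose VMAs could contain the page are visited. The surrounding loop, as it appears around v5.14:

pgoff_start = page_to_pgoff(page);
pgoff_end = pgoff_start + thp_nr_pages(page) - 1;

anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
                               pgoff_start, pgoff_end) {
        struct vm_area_struct *vma = avc->vma;
        unsigned long address = vma_address(page, vma);

        cond_resched();

        if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
                continue;                        /* caller asked to skip this VMA */
        if (!rwc->rmap_one(page, vma, address, rwc->arg))
                break;                           /* callback aborted the walk     */
        if (rwc->done && rwc->done(page))
                break;                           /* nothing left to do            */
}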
2329 struct vm_area_struct *vma; in rmap_walk_file() local
2346 vma_interval_tree_foreach(vma, &mapping->i_mmap, in rmap_walk_file()
2348 unsigned long address = vma_address(page, vma); in rmap_walk_file()
2350 VM_BUG_ON_VMA(address == -EFAULT, vma); in rmap_walk_file()
2353 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_file()
2356 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_file()
2395 struct vm_area_struct *vma, unsigned long address) in hugepage_add_anon_rmap() argument
2397 struct anon_vma *anon_vma = vma->anon_vma; in hugepage_add_anon_rmap()
2405 __page_set_anon_rmap(page, vma, address, 0); in hugepage_add_anon_rmap()
2409 struct vm_area_struct *vma, unsigned long address) in hugepage_add_new_anon_rmap() argument
2411 BUG_ON(address < vma->vm_start || address >= vma->vm_end); in hugepage_add_new_anon_rmap()
2416 __page_set_anon_rmap(page, vma, address, 1); in hugepage_add_new_anon_rmap()
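hugepage_add_anon_rmap() and hugepage_add_new_anon_rmap() are the hugetlb counterparts of the anon rmap add functions. A heavily simplified, hedged sketch of the hugetlb COW pattern in mm/hugetlb.c around v5.14, where the freshly copied hugepage is rmapped as new and exclusive just before the huge pte is switched over:

copy_user_huge_page(new_page, old_page, address, vma, pages_per_huge_page(h));

huge_ptep_clear_flush(vma, haddr, ptep);            /* unmap the old hugepage  */
hugepage_add_new_anon_rmap(new_page, vma, haddr);   /* exclusive anon rmap     */
set_huge_pte_at(mm, haddr, ptep, make_huge_pte(vma, new_page, 1));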