Lines Matching refs:vma

67 static inline bool file_thp_enabled(struct vm_area_struct *vma)  in file_thp_enabled()  argument
69 return transhuge_vma_enabled(vma, vma->vm_flags) && vma->vm_file && in file_thp_enabled()
70 !inode_is_open_for_write(vma->vm_file->f_inode) && in file_thp_enabled()
71 (vma->vm_flags & VM_EXEC); in file_thp_enabled()
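The listed lines cover the whole body of this helper; a reassembled sketch (braces and indentation are assumed, everything else is taken from the listed lines):

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	/* Only read-only, executable file mappings qualify for file-backed THP. */
	return transhuge_vma_enabled(vma, vma->vm_flags) && vma->vm_file &&
	       !inode_is_open_for_write(vma->vm_file->f_inode) &&
	       (vma->vm_flags & VM_EXEC);
}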
74 bool transparent_hugepage_active(struct vm_area_struct *vma) in transparent_hugepage_active() argument
77 unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE; in transparent_hugepage_active()
79 if (!transhuge_vma_suitable(vma, addr)) in transparent_hugepage_active()
81 if (vma_is_anonymous(vma)) in transparent_hugepage_active()
82 return __transparent_hugepage_enabled(vma); in transparent_hugepage_active()
83 if (vma_is_shmem(vma)) in transparent_hugepage_active()
84 return shmem_huge_enabled(vma); in transparent_hugepage_active()
86 return file_thp_enabled(vma); in transparent_hugepage_active()
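Piecing the transparent_hugepage_active() matches together gives roughly the shape below; the early return-false path and the CONFIG_READ_ONLY_THP_FOR_FS guard in front of file_thp_enabled() are assumptions filled in for the unlisted lines:

bool transparent_hugepage_active(struct vm_area_struct *vma)
{
	/* Probe the last PMD-aligned address to check that the vma is big enough. */
	unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;

	if (!transhuge_vma_suitable(vma, addr))
		return false;
	if (vma_is_anonymous(vma))
		return __transparent_hugepage_enabled(vma);
	if (vma_is_shmem(vma))
		return shmem_huge_enabled(vma);
	/* Assumed: file-backed THP is only considered with CONFIG_READ_ONLY_THP_FOR_FS. */
	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
		return file_thp_enabled(vma);

	return false;
}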
494 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument
496 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pmd_mkwrite()
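Only the VM_WRITE test is listed; a minimal sketch of the rest of maybe_pmd_mkwrite(), assuming the conventional pmd_mkwrite()/return shape:

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	/* Grant write permission on the PMD only if the vma itself allows writes. */
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

The listing shows it being used right where the anonymous huge PMD entry is built in __do_huge_pmd_anonymous_page().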
599 struct vm_area_struct *vma = vmf->vma; in __do_huge_pmd_anonymous_page() local
606 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, gfp)) { in __do_huge_pmd_anonymous_page()
614 pgtable = pte_alloc_one(vma->vm_mm); in __do_huge_pmd_anonymous_page()
628 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
634 ret = check_stable_address_space(vma->vm_mm); in __do_huge_pmd_anonymous_page()
639 if (userfaultfd_missing(vma)) { in __do_huge_pmd_anonymous_page()
642 pte_free(vma->vm_mm, pgtable); in __do_huge_pmd_anonymous_page()
648 entry = mk_huge_pmd(page, vma->vm_page_prot); in __do_huge_pmd_anonymous_page()
649 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in __do_huge_pmd_anonymous_page()
650 page_add_new_anon_rmap(page, vma, haddr, true); in __do_huge_pmd_anonymous_page()
651 lru_cache_add_inactive_or_unevictable(page, vma); in __do_huge_pmd_anonymous_page()
652 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
653 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
654 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in __do_huge_pmd_anonymous_page()
655 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
656 mm_inc_nr_ptes(vma->vm_mm); in __do_huge_pmd_anonymous_page()
659 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC); in __do_huge_pmd_anonymous_page()
667 pte_free(vma->vm_mm, pgtable); in __do_huge_pmd_anonymous_page()
682 gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma) in vma_thp_gfp_mask() argument
684 const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE); in vma_thp_gfp_mask()
710 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_page() argument
716 entry = mk_pmd(zero_page, vma->vm_page_prot); in set_huge_zero_page()
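Only a signature fragment and the mk_pmd() line of set_huge_zero_page() are listed; the sketch below fills in the remaining lines from memory of this kernel era and should be read as an assumption, not as the listed source:

static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;

	if (!pmd_none(*pmd))
		return;
	/* Map the shared huge zero page read-only at haddr. */
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}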
726 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_anonymous_page() local
731 if (!transhuge_vma_suitable(vma, haddr)) in do_huge_pmd_anonymous_page()
733 if (unlikely(anon_vma_prepare(vma))) in do_huge_pmd_anonymous_page()
735 if (unlikely(khugepaged_enter(vma, vma->vm_flags))) in do_huge_pmd_anonymous_page()
738 !mm_forbids_zeropage(vma->vm_mm) && in do_huge_pmd_anonymous_page()
743 pgtable = pte_alloc_one(vma->vm_mm); in do_huge_pmd_anonymous_page()
746 zero_page = mm_get_huge_zero_page(vma->vm_mm); in do_huge_pmd_anonymous_page()
748 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
752 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_anonymous_page()
755 ret = check_stable_address_space(vma->vm_mm); in do_huge_pmd_anonymous_page()
758 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
759 } else if (userfaultfd_missing(vma)) { in do_huge_pmd_anonymous_page()
761 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
765 set_huge_zero_page(pgtable, vma->vm_mm, vma, in do_huge_pmd_anonymous_page()
767 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_anonymous_page()
772 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
776 gfp = vma_thp_gfp_mask(vma); in do_huge_pmd_anonymous_page()
777 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); in do_huge_pmd_anonymous_page()
786 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, in insert_pfn_pmd() argument
790 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pmd()
802 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in insert_pfn_pmd()
803 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) in insert_pfn_pmd()
804 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
815 entry = maybe_pmd_mkwrite(entry, vma); in insert_pfn_pmd()
825 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
850 struct vm_area_struct *vma = vmf->vma; in vmf_insert_pfn_pmd_prot() local
858 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in vmf_insert_pfn_pmd_prot()
860 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_pmd_prot()
862 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_pmd_prot()
864 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_pmd_prot()
868 pgtable = pte_alloc_one(vma->vm_mm); in vmf_insert_pfn_pmd_prot()
873 track_pfn_insert(vma, &pgprot, pfn); in vmf_insert_pfn_pmd_prot()
875 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); in vmf_insert_pfn_pmd_prot()
881 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) in maybe_pud_mkwrite() argument
883 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pud_mkwrite()
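The PUD-sized twin of maybe_pmd_mkwrite(); only the flag test is listed, the rest is the assumed conventional shape:

static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}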
888 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, in insert_pfn_pud() argument
891 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pud()
903 entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma); in insert_pfn_pud()
904 if (pudp_set_access_flags(vma, addr, pud, entry, 1)) in insert_pfn_pud()
905 update_mmu_cache_pud(vma, addr, pud); in insert_pfn_pud()
915 entry = maybe_pud_mkwrite(entry, vma); in insert_pfn_pud()
918 update_mmu_cache_pud(vma, addr, pud); in insert_pfn_pud()
941 struct vm_area_struct *vma = vmf->vma; in vmf_insert_pfn_pud_prot() local
948 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in vmf_insert_pfn_pud_prot()
950 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_pud_prot()
952 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_pud_prot()
954 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_pud_prot()
957 track_pfn_insert(vma, &pgprot, pfn); in vmf_insert_pfn_pud_prot()
959 insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write); in vmf_insert_pfn_pud_prot()
965 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, in touch_pmd() argument
973 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, in touch_pmd()
975 update_mmu_cache_pmd(vma, addr, pmd); in touch_pmd()
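touch_pmd() marks a followed PMD young (and dirty for writes) so it is not aged out early; the listing shows the access-flags update, while the _pmd preparation lines are assumed:

static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (flags & FOLL_WRITE)
		_pmd = pmd_mkdirty(_pmd);
	/* Listed part: write back the updated flags and refresh the MMU cache. */
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				  pmd, _pmd, flags & FOLL_WRITE))
		update_mmu_cache_pmd(vma, addr, pmd);
}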
978 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, in follow_devmap_pmd() argument
982 struct mm_struct *mm = vma->vm_mm; in follow_devmap_pmd()
1007 touch_pmd(vma, addr, pmd, flags); in follow_devmap_pmd()
1136 static void touch_pud(struct vm_area_struct *vma, unsigned long addr, in touch_pud() argument
1144 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, in touch_pud()
1146 update_mmu_cache_pud(vma, addr, pud); in touch_pud()
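touch_pud() is the PUD-level counterpart, sketched the same way around the listed access-flags lines:

static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags)
{
	pud_t _pud;

	_pud = pud_mkyoung(*pud);
	if (flags & FOLL_WRITE)
		_pud = pud_mkdirty(_pud);
	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
				  pud, _pud, flags & FOLL_WRITE))
		update_mmu_cache_pud(vma, addr, pud);
}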
1149 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, in follow_devmap_pud() argument
1153 struct mm_struct *mm = vma->vm_mm; in follow_devmap_pud()
1172 touch_pud(vma, addr, pud, flags); in follow_devmap_pud()
1196 struct vm_area_struct *vma) in copy_huge_pud() argument
1221 if (unlikely(page_needs_cow_for_dma(vma, pud_page(pud)))) { in copy_huge_pud()
1224 __split_huge_pud(vma, src_pud, addr); in copy_huge_pud()
1245 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); in huge_pud_set_accessed()
1253 if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write)) in huge_pud_set_accessed()
1254 update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud); in huge_pud_set_accessed()
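Only the lock, access-flags and MMU-cache lines of huge_pud_set_accessed() are listed; a sketch of the whole accessed-bit fault handler, with the signature, locals and the pud_same() recheck assumed:

void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
	pud_t entry;
	unsigned long haddr;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
	/* Recheck under the lock that the entry did not change underneath us. */
	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
		goto unlock;

	entry = pud_mkyoung(orig_pud);
	if (write)
		entry = pud_mkdirty(entry);
	haddr = vmf->address & HPAGE_PUD_MASK;
	if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
		update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);

unlock:
	spin_unlock(vmf->ptl);
}

The huge_pmd_set_accessed() matches that follow take the same shape at the PMD level.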
1268 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in huge_pmd_set_accessed()
1276 if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write)) in huge_pmd_set_accessed()
1277 update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); in huge_pmd_set_accessed()
1285 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_wp_page() local
1290 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page()
1291 VM_BUG_ON_VMA(!vma->anon_vma, vma); in do_huge_pmd_wp_page()
1328 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_huge_pmd_wp_page()
1329 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) in do_huge_pmd_wp_page()
1330 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
1339 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); in do_huge_pmd_wp_page()
1353 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, in follow_trans_huge_pmd() argument
1358 struct mm_struct *mm = vma->vm_mm; in follow_trans_huge_pmd()
1381 touch_pmd(vma, addr, pmd, flags); in follow_trans_huge_pmd()
1383 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { in follow_trans_huge_pmd()
1426 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_numa_page() local
1437 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1443 pmd = pmd_modify(oldpmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1444 page = vm_normal_page_pmd(vma, haddr, pmd); in do_huge_pmd_numa_page()
1454 target_nid = numa_migrate_prep(page, vma, haddr, page_nid, in do_huge_pmd_numa_page()
1464 migrated = migrate_misplaced_page(page, vma, target_nid); in do_huge_pmd_numa_page()
1470 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1487 pmd = pmd_modify(oldpmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1491 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); in do_huge_pmd_numa_page()
1492 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_numa_page()
1501 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in madvise_free_huge_pmd() argument
1512 ptl = pmd_trans_huge_lock(pmd, vma); in madvise_free_huge_pmd()
1555 pmdp_invalidate(vma, addr, pmd); in madvise_free_huge_pmd()
1580 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in zap_huge_pmd() argument
1588 ptl = __pmd_trans_huge_lock(pmd, vma); in zap_huge_pmd()
1597 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, in zap_huge_pmd()
1600 if (vma_is_special_huge(vma)) { in zap_huge_pmd()
1645 struct vm_area_struct *vma) in pmd_move_must_withdraw() argument
1653 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); in pmd_move_must_withdraw()
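The listed return line is the whole decision; the two ptl parameters in the signature are assumed from the lock comparison it performs:

static bool pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
				   spinlock_t *old_pmd_ptl,
				   struct vm_area_struct *vma)
{
	/*
	 * With split PMD ptlocks the deposited page table has to follow the
	 * PMD to its new lock; file-backed vmas never deposit one.
	 */
	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
}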
1668 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, in move_huge_pmd() argument
1673 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd()
1689 old_ptl = __pmd_trans_huge_lock(old_pmd, vma); in move_huge_pmd()
1699 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { in move_huge_pmd()
1707 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); in move_huge_pmd()
1723 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in change_huge_pmd() argument
1726 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd()
1738 ptl = __pmd_trans_huge_lock(pmd, vma); in change_huge_pmd()
1801 entry = pmdp_invalidate(vma, addr, pmd); in change_huge_pmd()
1819 BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); in change_huge_pmd()
1831 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) in __pmd_trans_huge_lock() argument
1834 ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
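A sketch of the locking helper around the listed pmd_lock() call; the huge/swap/devmap recheck and the unlock-on-failure path are assumptions about the unlisted lines:

spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	ptl = pmd_lock(vma->vm_mm, pmd);
	/* Keep the lock only if the entry is still a huge (or swap/devmap) PMD. */
	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
		   pmd_devmap(*pmd)))
		return ptl;
	spin_unlock(ptl);
	return NULL;
}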
1848 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) in __pud_trans_huge_lock() argument
1852 ptl = pud_lock(vma->vm_mm, pud); in __pud_trans_huge_lock()
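The PUD counterpart, sketched the same way around the listed pud_lock() call:

spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	ptl = pud_lock(vma->vm_mm, pud);
	if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
		return ptl;
	spin_unlock(ptl);
	return NULL;
}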
1860 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, in zap_huge_pud() argument
1865 ptl = __pud_trans_huge_lock(pud, vma); in zap_huge_pud()
1876 if (vma_is_special_huge(vma)) { in zap_huge_pud()
1886 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, in __split_huge_pud_locked() argument
1890 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); in __split_huge_pud_locked()
1891 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); in __split_huge_pud_locked()
1896 pudp_huge_clear_flush_notify(vma, haddr, pud); in __split_huge_pud_locked()
1899 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, in __split_huge_pud() argument
1905 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in __split_huge_pud()
1909 ptl = pud_lock(vma->vm_mm, pud); in __split_huge_pud()
1912 __split_huge_pud_locked(vma, pud, range.start); in __split_huge_pud()
1924 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, in __split_huge_zero_page_pmd() argument
1927 struct mm_struct *mm = vma->vm_mm; in __split_huge_zero_page_pmd()
1940 pmdp_huge_clear_flush(vma, haddr, pmd); in __split_huge_zero_page_pmd()
1947 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); in __split_huge_zero_page_pmd()
1958 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd_locked() argument
1961 struct mm_struct *mm = vma->vm_mm; in __split_huge_pmd_locked()
1970 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); in __split_huge_pmd_locked()
1971 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); in __split_huge_pmd_locked()
1977 if (!vma_is_anonymous(vma)) { in __split_huge_pmd_locked()
1978 old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); in __split_huge_pmd_locked()
1985 if (vma_is_special_huge(vma)) in __split_huge_pmd_locked()
2015 return __split_huge_zero_page_pmd(vma, haddr, pmd); in __split_huge_pmd_locked()
2038 old_pmd = pmdp_invalidate(vma, haddr, pmd); in __split_huge_pmd_locked()
2090 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); in __split_huge_pmd_locked()
2091 entry = maybe_mkwrite(entry, vma); in __split_huge_pmd_locked()
2145 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd() argument
2153 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in __split_huge_pmd()
2157 ptl = pmd_lock(vma->vm_mm, pmd); in __split_huge_pmd()
2203 __split_huge_pmd_locked(vma, pmd, range.start, freeze); in __split_huge_pmd()
2224 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, in split_huge_pmd_address() argument
2232 pgd = pgd_offset(vma->vm_mm, address); in split_huge_pmd_address()
2246 __split_huge_pmd(vma, pmd, address, freeze, page); in split_huge_pmd_address()
2249 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address) in split_huge_pmd_if_needed() argument
2256 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE), in split_huge_pmd_if_needed()
2258 split_huge_pmd_address(vma, address, false, NULL); in split_huge_pmd_if_needed()
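Between the listed range_in_vma() check and the split_huge_pmd_address() call sits an alignment test; the sketch below assumes the usual IS_ALIGNED() guard for the unlisted line:

static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
{
	/*
	 * Split only if the boundary is not PMD-aligned and the PMD-sized
	 * range around it still lies inside this vma.
	 */
	if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
	    range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
			 ALIGN(address, HPAGE_PMD_SIZE)))
		split_huge_pmd_address(vma, address, false, NULL);
}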
2261 void vma_adjust_trans_huge(struct vm_area_struct *vma, in vma_adjust_trans_huge() argument
2267 split_huge_pmd_if_needed(vma, start); in vma_adjust_trans_huge()
2270 split_huge_pmd_if_needed(vma, end); in vma_adjust_trans_huge()
2277 struct vm_area_struct *next = vma->vm_next; in vma_adjust_trans_huge()
2908 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma) in vma_not_suitable_for_thp_split() argument
2910 return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) || in vma_not_suitable_for_thp_split()
2911 is_vm_hugetlb_page(vma); in vma_not_suitable_for_thp_split()
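The two listed lines are the full predicate; reassembled:

static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
{
	/* Special huge mappings, VM_IO and hugetlbfs vmas cannot be THP-split. */
	return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
	       is_vm_hugetlb_page(vma);
}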
2955 struct vm_area_struct *vma = find_vma(mm, addr); in split_huge_pages_pid() local
2959 if (!vma || addr < vma->vm_start) in split_huge_pages_pid()
2963 if (vma_not_suitable_for_thp_split(vma)) { in split_huge_pages_pid()
2964 addr = vma->vm_end; in split_huge_pages_pid()
2970 page = follow_page(vma, addr, follflags); in split_huge_pages_pid()
3152 struct vm_area_struct *vma = pvmw->vma; in set_pmd_migration_entry() local
3153 struct mm_struct *mm = vma->vm_mm; in set_pmd_migration_entry()
3162 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); in set_pmd_migration_entry()
3163 pmdval = pmdp_invalidate(vma, address, pvmw->pmd); in set_pmd_migration_entry()
3180 struct vm_area_struct *vma = pvmw->vma; in remove_migration_pmd() local
3181 struct mm_struct *mm = vma->vm_mm; in remove_migration_pmd()
3192 pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot)); in remove_migration_pmd()
3196 pmde = maybe_pmd_mkwrite(pmde, vma); in remove_migration_pmd()
3200 flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); in remove_migration_pmd()
3202 page_add_anon_rmap(new, vma, mmun_start, true); in remove_migration_pmd()
3206 if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new)) in remove_migration_pmd()
3208 update_mmu_cache_pmd(vma, address, pvmw->pmd); in remove_migration_pmd()