Lines Matching refs:pmd (cross-reference hits for the identifier pmd; the functions below all live in the kernel's transparent huge page code, mm/huge_memory.c)
494 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument
497 pmd = pmd_mkwrite(pmd); in maybe_pmd_mkwrite()
498 return pmd; in maybe_pmd_mkwrite()
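Only lines 494, 497, and 498 of maybe_pmd_mkwrite() match, so the condition guarding pmd_mkwrite() is elided. A minimal reconstruction, assuming the elided lines gate the write bit on the VMA itself being writable:

    pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
    {
            /* Assumed gate (elided from the listing): grant the write
             * bit only when the mapping permits writes at all. */
            if (likely(vma->vm_flags & VM_WRITE))
                    pmd = pmd_mkwrite(pmd);
            return pmd;
    }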
628 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
629 if (unlikely(!pmd_none(*vmf->pmd))) { in __do_huge_pmd_anonymous_page()
652 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
653 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
654 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in __do_huge_pmd_anonymous_page()
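The five hits in __do_huge_pmd_anonymous_page() trace the canonical sequence for installing a freshly allocated anonymous huge page: take the PMD spinlock, re-check that the slot is still empty, deposit a spare PTE table, then publish the entry. A condensed sketch; the wrapper name and the unlock paths are illustrative, not the kernel's:

    static void install_anon_huge_pmd(struct vm_fault *vmf,
                                      struct vm_area_struct *vma,
                                      unsigned long haddr, pmd_t entry,
                                      pgtable_t pgtable)
    {
            vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
            if (unlikely(!pmd_none(*vmf->pmd))) {
                    spin_unlock(vmf->ptl);
                    return;         /* raced: slot already populated */
            }
            /* Deposit a preallocated PTE page so a later split of
             * this huge PMD cannot fail for lack of memory. */
            pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
            set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
            update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
            spin_unlock(vmf->ptl);
    }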
710 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_page() argument
714 if (!pmd_none(*pmd)) in set_huge_zero_page()
719 pgtable_trans_huge_deposit(mm, pmd, pgtable); in set_huge_zero_page()
720 set_pmd_at(mm, haddr, pmd, entry); in set_huge_zero_page()
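set_huge_zero_page() is almost fully visible; only the construction of the entry is elided. A hedged reconstruction, where the mk_pmd()/pmd_mkhuge() pair and the mm_inc_nr_ptes() accounting are assumptions from context:

    static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                    struct vm_area_struct *vma, unsigned long haddr,
                    pmd_t *pmd, struct page *zero_page)
    {
            pmd_t entry;

            if (!pmd_none(*pmd))
                    return false;
            /* Assumed: a read-only huge entry pointing at the shared
             * zero page; the write bit is never set here. */
            entry = mk_pmd(zero_page, vma->vm_page_prot);
            entry = pmd_mkhuge(entry);
            if (pgtable)
                    pgtable_trans_huge_deposit(mm, pmd, pgtable);
            set_pmd_at(mm, haddr, pmd, entry);
            mm_inc_nr_ptes(mm);
            return true;
    }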
752 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_anonymous_page()
754 if (pmd_none(*vmf->pmd)) { in do_huge_pmd_anonymous_page()
766 haddr, vmf->pmd, zero_page); in do_huge_pmd_anonymous_page()
767 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_anonymous_page()
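do_huge_pmd_anonymous_page() uses that helper on read faults: under the PMD lock it re-checks pmd_none() and, if the slot is still empty, maps the shared huge zero page rather than allocating memory. Condensed from the lines above, with the unlock path assumed:

    vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
    if (pmd_none(*vmf->pmd)) {
            /* zero_page lookup and pgtable preallocation elided */
            set_huge_zero_page(pgtable, vma->vm_mm, vma,
                               haddr, vmf->pmd, zero_page);
            update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
    }
    spin_unlock(vmf->ptl);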
787 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, in insert_pfn_pmd() argument
794 ptl = pmd_lock(mm, pmd); in insert_pfn_pmd()
795 if (!pmd_none(*pmd)) { in insert_pfn_pmd()
797 if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) { in insert_pfn_pmd()
798 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); in insert_pfn_pmd()
801 entry = pmd_mkyoung(*pmd); in insert_pfn_pmd()
803 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) in insert_pfn_pmd()
804 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
819 pgtable_trans_huge_deposit(mm, pmd, pgtable); in insert_pfn_pmd()
824 set_pmd_at(mm, addr, pmd, entry); in insert_pfn_pmd()
825 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
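insert_pfn_pmd() shows the idempotent re-insert pattern used for DAX pfn mappings: an already-present entry must map the same pfn (a leftover huge zero page is the only tolerated mismatch), and a repeated write fault merely upgrades the entry in place via pmdp_set_access_flags(). Condensed, with the bail-out label assumed:

    ptl = pmd_lock(mm, pmd);
    if (!pmd_none(*pmd)) {
            if (write) {
                    if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
                            WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
                            goto out_unlock;        /* assumed label */
                    }
                    /* Re-fault on a present entry: just refresh
                     * young/dirty/write in place. */
                    entry = pmd_mkyoung(*pmd);
                    entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                    if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
                            update_mmu_cache_pmd(vma, addr, pmd);
            }
            goto out_unlock;                        /* assumed label */
    }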
875 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); in vmf_insert_pfn_pmd_prot()
966 pmd_t *pmd, int flags) in touch_pmd() argument
970 _pmd = pmd_mkyoung(*pmd); in touch_pmd()
974 pmd, _pmd, flags & FOLL_WRITE)) in touch_pmd()
975 update_mmu_cache_pmd(vma, addr, pmd); in touch_pmd()
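touch_pmd() is nearly complete in the listing; only the write-conditional dirtying is elided. A reconstruction in which the pmd_mkdirty() step is an assumption:

    static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
                    pmd_t *pmd, int flags)
    {
            pmd_t _pmd;

            _pmd = pmd_mkyoung(*pmd);
            if (flags & FOLL_WRITE)
                    _pmd = pmd_mkdirty(_pmd);       /* assumed */
            if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
                            pmd, _pmd, flags & FOLL_WRITE))
                    update_mmu_cache_pmd(vma, addr, pmd);
    }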
979 pmd_t *pmd, int flags, struct dev_pagemap **pgmap) in follow_devmap_pmd() argument
981 unsigned long pfn = pmd_pfn(*pmd); in follow_devmap_pmd()
985 assert_spin_locked(pmd_lockptr(mm, pmd)); in follow_devmap_pmd()
998 if (flags & FOLL_WRITE && !pmd_write(*pmd)) in follow_devmap_pmd()
1001 if (pmd_present(*pmd) && pmd_devmap(*pmd)) in follow_devmap_pmd()
1007 touch_pmd(vma, addr, pmd, flags); in follow_devmap_pmd()
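follow_devmap_pmd() must be entered with the PMD lock held (hence the assert on line 985). The visible checks reduce to the gist below; the return values and the dev_pagemap/pfn-to-page plumbing are elided in the listing and only hinted at here:

    /* Gist only; the pfn-to-struct-page conversion via *pgmap is elided. */
    assert_spin_locked(pmd_lockptr(mm, pmd));
    if (flags & FOLL_WRITE && !pmd_write(*pmd))
            return NULL;            /* caller wants write, entry is RO */
    if (!(pmd_present(*pmd) && pmd_devmap(*pmd)))
            return NULL;            /* not a device-backed huge PMD */
    if (flags & FOLL_TOUCH)         /* assumed FOLL_TOUCH gate */
            touch_pmd(vma, addr, pmd, flags);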
1033 pmd_t pmd; in copy_huge_pmd() local
1050 pmd = *src_pmd; in copy_huge_pmd()
1053 if (unlikely(is_swap_pmd(pmd))) { in copy_huge_pmd()
1054 swp_entry_t entry = pmd_to_swp_entry(pmd); in copy_huge_pmd()
1056 VM_BUG_ON(!is_pmd_migration_entry(pmd)); in copy_huge_pmd()
1060 pmd = swp_entry_to_pmd(entry); in copy_huge_pmd()
1062 pmd = pmd_swp_mksoft_dirty(pmd); in copy_huge_pmd()
1064 pmd = pmd_swp_mkuffd_wp(pmd); in copy_huge_pmd()
1065 set_pmd_at(src_mm, addr, src_pmd, pmd); in copy_huge_pmd()
1071 pmd = pmd_swp_clear_uffd_wp(pmd); in copy_huge_pmd()
1072 set_pmd_at(dst_mm, addr, dst_pmd, pmd); in copy_huge_pmd()
1078 if (unlikely(!pmd_trans_huge(pmd))) { in copy_huge_pmd()
1087 if (is_huge_zero_pmd(pmd)) { in copy_huge_pmd()
1097 src_page = pmd_page(pmd); in copy_huge_pmd()
1123 pmd = pmd_clear_uffd_wp(pmd); in copy_huge_pmd()
1124 pmd = pmd_mkold(pmd_wrprotect(pmd)); in copy_huge_pmd()
1125 set_pmd_at(dst_mm, addr, dst_pmd, pmd); in copy_huge_pmd()
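copy_huge_pmd() copies a huge PMD at fork time and has two branches visible above: a swap branch, where a writable migration entry is downgraded to read-only and the soft-dirty/uffd-wp bits are mirrored into the swap-format entry, and a present branch, which ends in the classic copy-on-write arming step. That step, condensed (the write-protect of the source is an assumption from context):

    /* Assumed: write-protect the parent's entry first ... */
    pmdp_set_wrprotect(src_mm, addr, src_pmd);
    /* ... then install an aged, read-only copy for the child, so the
     * first write by either side takes a fault and copies the page. */
    pmd = pmd_mkold(pmd_wrprotect(pmd));
    set_pmd_at(dst_mm, addr, dst_pmd, pmd);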
1268 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in huge_pmd_set_accessed()
1269 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) in huge_pmd_set_accessed()
1276 if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write)) in huge_pmd_set_accessed()
1277 update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); in huge_pmd_set_accessed()
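huge_pmd_set_accessed() is short enough to restore in full; the pmd_mkyoung()/pmd_mkdirty() lines are assumptions consistent with the access-flags update on line 1276:

    void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
    {
            pmd_t entry;
            bool write = vmf->flags & FAULT_FLAG_WRITE;     /* assumed */
            unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

            vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
            if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
                    goto unlock;            /* entry changed under us */

            entry = pmd_mkyoung(orig_pmd);                  /* assumed */
            if (write)
                    entry = pmd_mkdirty(entry);             /* assumed */
            if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
                    update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
    unlock:
            spin_unlock(vmf->ptl);
    }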
1290 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page()
1298 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
1312 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
1329 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) in do_huge_pmd_wp_page()
1330 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
1339 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); in do_huge_pmd_wp_page()
1347 static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) in can_follow_write_pmd() argument
1349 return pmd_write(pmd) || in can_follow_write_pmd()
1350 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); in can_follow_write_pmd()
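can_follow_write_pmd() is fully visible apart from its braces. Restored:

    static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
    {
            /* FOLL_FORCE may write through a read-only mapping, but only
             * after COW has broken sharing: FOLL_COW plus a dirty PMD
             * is taken as proof of that. */
            return pmd_write(pmd) ||
                   ((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
                    pmd_dirty(pmd));
    }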
1355 pmd_t *pmd, in follow_trans_huge_pmd() argument
1361 assert_spin_locked(pmd_lockptr(mm, pmd)); in follow_trans_huge_pmd()
1363 if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags)) in follow_trans_huge_pmd()
1367 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) in follow_trans_huge_pmd()
1371 if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) in follow_trans_huge_pmd()
1374 page = pmd_page(*pmd); in follow_trans_huge_pmd()
1381 touch_pmd(vma, addr, pmd, flags); in follow_trans_huge_pmd()
1428 pmd_t pmd; in do_huge_pmd_numa_page() local
1437 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1438 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { in do_huge_pmd_numa_page()
1443 pmd = pmd_modify(oldpmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1444 page = vm_normal_page_pmd(vma, haddr, pmd); in do_huge_pmd_numa_page()
1470 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1471 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { in do_huge_pmd_numa_page()
1487 pmd = pmd_modify(oldpmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1488 pmd = pmd_mkyoung(pmd); in do_huge_pmd_numa_page()
1490 pmd = pmd_mkwrite(pmd); in do_huge_pmd_numa_page()
1491 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); in do_huge_pmd_numa_page()
1492 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_numa_page()
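Once the NUMA hint fault has been handled, do_huge_pmd_numa_page() rebuilds a normal entry from the saved oldpmd, undoing the PROT_NONE trap. Condensed from the lines above; the name of the writability flag is an assumption:

    /* Restore the pre-PROT_NONE protections and re-age the entry. */
    pmd = pmd_modify(oldpmd, vma->vm_page_prot);
    pmd = pmd_mkyoung(pmd);
    if (writable)                   /* assumed flag name */
            pmd = pmd_mkwrite(pmd);
    set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
    update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);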
1502 pmd_t *pmd, unsigned long addr, unsigned long next) in madvise_free_huge_pmd() argument
1512 ptl = pmd_trans_huge_lock(pmd, vma); in madvise_free_huge_pmd()
1516 orig_pmd = *pmd; in madvise_free_huge_pmd()
1555 pmdp_invalidate(vma, addr, pmd); in madvise_free_huge_pmd()
1559 set_pmd_at(mm, addr, pmd, orig_pmd); in madvise_free_huge_pmd()
1560 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in madvise_free_huge_pmd()
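MADV_FREE on a huge PMD does not unmap anything; it drops the dirty and young bits so reclaim may later discard the page unless it is touched again. The invalidate/set/flush trio above fits that pattern; the gate and the mkold/mkclean steps are assumptions:

    if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {       /* assumed */
            pmdp_invalidate(vma, addr, pmd);
            orig_pmd = pmd_mkold(orig_pmd);                 /* assumed */
            orig_pmd = pmd_mkclean(orig_pmd);               /* assumed */
            set_pmd_at(mm, addr, pmd, orig_pmd);
            tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
    }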
1571 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) in zap_deposited_table() argument
1575 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in zap_deposited_table()
1581 pmd_t *pmd, unsigned long addr) in zap_huge_pmd() argument
1588 ptl = __pmd_trans_huge_lock(pmd, vma); in zap_huge_pmd()
1597 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, in zap_huge_pmd()
1599 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in zap_huge_pmd()
1602 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
1605 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
1627 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
1631 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
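zap_huge_pmd() clears the entry, queues a TLB flush, and frees the deposited PTE table; the four zap_deposited_table() hits correspond to the different entry kinds taken apart there (present anonymous, special/DAX, migration, and so on). The common spine, condensed with an assumed early return:

    ptl = __pmd_trans_huge_lock(pmd, vma);
    if (!ptl)
            return 0;               /* assumed: PMD was not huge */
    orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
                                            tlb->fullmm);
    tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
    /* Anonymous huge PMDs also carry a deposited PTE table to free. */
    zap_deposited_table(tlb->mm, pmd);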
1657 static pmd_t move_soft_dirty_pmd(pmd_t pmd) in move_soft_dirty_pmd() argument
1660 if (unlikely(is_pmd_migration_entry(pmd))) in move_soft_dirty_pmd()
1661 pmd = pmd_swp_mksoft_dirty(pmd); in move_soft_dirty_pmd()
1662 else if (pmd_present(pmd)) in move_soft_dirty_pmd()
1663 pmd = pmd_mksoft_dirty(pmd); in move_soft_dirty_pmd()
1665 return pmd; in move_soft_dirty_pmd()
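move_soft_dirty_pmd() is visible in full except for its config guard; migration entries need the swap-PMD variant of the soft-dirty setter. A reconstruction where the exact CONFIG_* symbol is an assumption:

    static pmd_t move_soft_dirty_pmd(pmd_t pmd)
    {
    #ifdef CONFIG_MEM_SOFT_DIRTY            /* assumed guard */
            if (unlikely(is_pmd_migration_entry(pmd)))
                    pmd = pmd_swp_mksoft_dirty(pmd);
            else if (pmd_present(pmd))
                    pmd = pmd_mksoft_dirty(pmd);
    #endif
            return pmd;
    }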
1672 pmd_t pmd; in move_huge_pmd() local
1694 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); in move_huge_pmd()
1695 if (pmd_present(pmd)) in move_huge_pmd()
1704 pmd = move_soft_dirty_pmd(pmd); in move_huge_pmd()
1705 set_pmd_at(mm, new_addr, new_pmd, pmd); in move_huge_pmd()
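move_huge_pmd() then makes mremap() of a huge PMD a clear-and-reinstall, with soft-dirty carried across by the helper above; the flush between the two steps is elided in the listing:

    pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
    /* (TLB flush if the old entry was present; elided here) */
    pmd = move_soft_dirty_pmd(pmd);
    set_pmd_at(mm, new_addr, new_pmd, pmd);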
1723 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in change_huge_pmd() argument
1738 ptl = __pmd_trans_huge_lock(pmd, vma); in change_huge_pmd()
1742 preserve_write = prot_numa && pmd_write(*pmd); in change_huge_pmd()
1746 if (is_swap_pmd(*pmd)) { in change_huge_pmd()
1747 swp_entry_t entry = pmd_to_swp_entry(*pmd); in change_huge_pmd()
1749 VM_BUG_ON(!is_pmd_migration_entry(*pmd)); in change_huge_pmd()
1759 if (pmd_swp_soft_dirty(*pmd)) in change_huge_pmd()
1761 if (pmd_swp_uffd_wp(*pmd)) in change_huge_pmd()
1763 set_pmd_at(mm, addr, pmd, newpmd); in change_huge_pmd()
1774 if (prot_numa && is_huge_zero_pmd(*pmd)) in change_huge_pmd()
1777 if (prot_numa && pmd_protnone(*pmd)) in change_huge_pmd()
1801 entry = pmdp_invalidate(vma, addr, pmd); in change_huge_pmd()
1818 set_pmd_at(mm, addr, pmd, entry); in change_huge_pmd()
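For a present huge PMD, change_huge_pmd() follows the invalidate-modify-reinstall discipline: pmdp_invalidate() atomically fetches the old entry while making it non-present, so the hardware cannot set young/dirty bits behind the update. Condensed; pmd_modify() and the savedwrite step are assumptions from the usual mprotect() pattern:

    entry = pmdp_invalidate(vma, addr, pmd);
    entry = pmd_modify(entry, newprot);             /* assumed */
    if (preserve_write)
            entry = pmd_mk_savedwrite(entry);       /* assumed */
    set_pmd_at(mm, addr, pmd, entry);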
1831 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) in __pmd_trans_huge_lock() argument
1834 ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
1835 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || in __pmd_trans_huge_lock()
1836 pmd_devmap(*pmd))) in __pmd_trans_huge_lock()
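__pmd_trans_huge_lock() is almost complete above; only the failure path is elided. Restored, with the unlock-and-return-NULL tail assumed from the helper's contract (callers get NULL when the PMD is not huge, devmap, or a swap entry):

    spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
    {
            spinlock_t *ptl;

            ptl = pmd_lock(vma->vm_mm, pmd);
            if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
                            pmd_devmap(*pmd)))
                    return ptl;             /* caller now holds the lock */
            spin_unlock(ptl);               /* assumed failure path */
            return NULL;
    }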
1925 unsigned long haddr, pmd_t *pmd) in __split_huge_zero_page_pmd() argument
1940 pmdp_huge_clear_flush(vma, haddr, pmd); in __split_huge_zero_page_pmd()
1942 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_zero_page_pmd()
1955 pmd_populate(mm, pmd, pgtable); in __split_huge_zero_page_pmd()
1958 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd_locked() argument
1972 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) in __split_huge_pmd_locked()
1973 && !pmd_devmap(*pmd)); in __split_huge_pmd_locked()
1978 old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); in __split_huge_pmd_locked()
1984 zap_deposited_table(mm, pmd); in __split_huge_pmd_locked()
2005 if (is_huge_zero_pmd(*pmd)) { in __split_huge_pmd_locked()
2015 return __split_huge_zero_page_pmd(vma, haddr, pmd); in __split_huge_pmd_locked()
2038 old_pmd = pmdp_invalidate(vma, haddr, pmd); in __split_huge_pmd_locked()
2066 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_pmd_locked()
2135 pmd_populate(mm, pmd, pgtable); in __split_huge_pmd_locked()
2145 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd() argument
2157 ptl = pmd_lock(vma->vm_mm, pmd); in __split_huge_pmd()
2166 if (page != pmd_page(*pmd)) in __split_huge_pmd()
2171 if (pmd_trans_huge(*pmd)) { in __split_huge_pmd()
2173 page = pmd_page(*pmd); in __split_huge_pmd()
2184 _pmd = *pmd; in __split_huge_pmd()
2188 if (unlikely(!pmd_same(*pmd, _pmd))) { in __split_huge_pmd()
2201 } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) in __split_huge_pmd()
2203 __split_huge_pmd_locked(vma, pmd, range.start, freeze); in __split_huge_pmd()
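__split_huge_pmd() takes the PMD lock, revalidates (the hits at lines 2166, 2171, and 2188 show the page-identity and pmd_same() checks), and only then calls the locked worker, which withdraws the deposited PTE table and repopulates the PMD with ordinary PTEs (the pgtable_trans_huge_withdraw()/pmd_populate() pair at lines 2066 and 2135). The decision spine, condensed with the revalidation elided:

    ptl = pmd_lock(vma->vm_mm, pmd);
    if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
        is_pmd_migration_entry(*pmd))
            __split_huge_pmd_locked(vma, pmd, range.start, freeze);
    spin_unlock(ptl);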
2230 pmd_t *pmd; in split_huge_pmd_address() local
2244 pmd = pmd_offset(pud, address); in split_huge_pmd_address()
2246 __split_huge_pmd(vma, pmd, address, freeze, page); in split_huge_pmd_address()
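split_huge_pmd_address() is a plain top-down walk to the PMD; the intermediate-level presence checks are elided in the listing but follow the standard pattern. A reconstruction (the bail-out checks are assumptions):

    void split_huge_pmd_address(struct vm_area_struct *vma,
                    unsigned long address, bool freeze, struct page *page)
    {
            pgd_t *pgd;
            p4d_t *p4d;
            pud_t *pud;
            pmd_t *pmd;

            pgd = pgd_offset(vma->vm_mm, address);
            if (!pgd_present(*pgd))
                    return;
            p4d = p4d_offset(pgd, address);
            if (!p4d_present(*p4d))
                    return;
            pud = pud_offset(p4d, address);
            if (!pud_present(*pud))
                    return;
            pmd = pmd_offset(pud, address);
            __split_huge_pmd(vma, pmd, address, freeze, page);
    }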
3159 if (!(pvmw->pmd && !pvmw->pte)) in set_pmd_migration_entry()
3163 pmdval = pmdp_invalidate(vma, address, pvmw->pmd); in set_pmd_migration_entry()
3173 set_pmd_at(mm, address, pvmw->pmd, pmdswp); in set_pmd_migration_entry()
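set_pmd_migration_entry() freezes a mapping for migration by swapping the present huge PMD for a swap-format migration entry; the guard on line 3159 skips callers that matched a PTE rather than a PMD. The gist, with the entry construction assumed:

    pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
    entry = make_pmd_migration_entry(page, pmd_write(pmdval));  /* assumed */
    pmdswp = swp_entry_to_pmd(entry);                           /* assumed */
    if (pmd_soft_dirty(pmdval))                                 /* assumed */
            pmdswp = pmd_swp_mksoft_dirty(pmdswp);
    set_pmd_at(mm, address, pvmw->pmd, pmdswp);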
3187 if (!(pvmw->pmd && !pvmw->pte)) in remove_migration_pmd()
3190 entry = pmd_to_swp_entry(*pvmw->pmd); in remove_migration_pmd()
3193 if (pmd_swp_soft_dirty(*pvmw->pmd)) in remove_migration_pmd()
3197 if (pmd_swp_uffd_wp(*pvmw->pmd)) in remove_migration_pmd()
3205 set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); in remove_migration_pmd()
3208 update_mmu_cache_pmd(vma, address, pvmw->pmd); in remove_migration_pmd()
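remove_migration_pmd() is the inverse: it rebuilds a present huge PMD for the migrated page, restoring soft-dirty, writability, and uffd-wp from the swap-format entry. The gist, with the assumption-labeled steps reconstructing what the listing elides:

    entry = pmd_to_swp_entry(*pvmw->pmd);
    pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));      /* assumed */
    if (pmd_swp_soft_dirty(*pvmw->pmd))
            pmde = pmd_mksoft_dirty(pmde);                      /* assumed */
    if (is_writable_migration_entry(entry))                     /* assumed */
            pmde = maybe_pmd_mkwrite(pmde, vma);
    if (pmd_swp_uffd_wp(*pvmw->pmd))
            pmde = pmd_mkuffd_wp(pmde);                         /* assumed */
    set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
    update_mmu_cache_pmd(vma, address, pvmw->pmd);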