Searched refs:pvmw (Results 1 – 10 of 10) sorted by relevance
/linux/mm/
page_vma_mapped.c
     18  pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);       in map_pte()
     51  pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);       in map_pte()
    124  pvmw->address = (pvmw->address + size) & ~(size - 1);       in step_forward()
    164  if (pvmw->pmd && !pvmw->pte)                                in page_vma_mapped_walk()
    193  if (pvmw->pte)                                              in page_vma_mapped_walk()
    213  pvmw->pmd = pmd_offset(pud, pvmw->address);                 in page_vma_mapped_walk()
    222  pvmw->ptl = pmd_lock(mm, pvmw->pmd);                        in page_vma_mapped_walk()
    281  pvmw->pte++;                                                in page_vma_mapped_walk()
    282  if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {              in page_vma_mapped_walk()
    283  pvmw->ptl = pte_lockptr(mm, pvmw->pmd);                     in page_vma_mapped_walk()
    [all …]
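These hits are the walker's internals: map_pte() maps and locks the PTE the walk is positioned at, step_forward() rounds the address up to the next boundary, and page_vma_mapped_walk() is the iterator itself. The caller's side of the contract is small. A minimal sketch of the calling convention against the pre-folio v5.x API these results come from (the inspect_one() wrapper is hypothetical):

    #include <linux/mm.h>
    #include <linux/rmap.h>

    /* Hypothetical wrapper: visit each place 'page' is mapped in 'vma'. */
    static void inspect_one(struct page *page, struct vm_area_struct *vma,
                            unsigned long address)
    {
        struct page_vma_mapped_walk pvmw = {
            .page = page,           /* page we are looking for */
            .vma = vma,             /* restrict the walk to this VMA */
            .address = address,     /* address the mapping may start at */
        };

        while (page_vma_mapped_walk(&pvmw)) {
            /*
             * Each iteration holds pvmw.ptl. Exactly one cursor is
             * valid: pvmw.pte for a PTE-level mapping, or pvmw.pmd
             * (with pvmw.pte == NULL) for a PMD-mapped THP.
             */
            if (pvmw.pte)
                pr_debug("pte mapping at %#lx\n", pvmw.address);
            else
                pr_debug("pmd-mapped THP at %#lx\n", pvmw.address);
        }
    }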
rmap.c
    784  address = pvmw.address;                                     in page_referenced_one()
    792  if (pvmw.pte) {                                             in page_referenced_one()
    794  pvmw.pte)) {                                                in page_referenced_one()
    808  pvmw.pmd))                                                  in page_referenced_one()
    926  address = pvmw.address;                                     in page_mkclean_one()
    927  if (pvmw.pte) {                                             in page_mkclean_one()
    929  pte_t *pte = pvmw.pte;                                      in page_mkclean_one()
    942  pmd_t *pmd = pvmw.pmd;                                      in page_mkclean_one()
   1415  pvmw.flags = PVMW_SYNC;                                     in try_to_unmap_one()
   1749  if (!pvmw.pte) {                                            in try_to_migrate_one()
    [all …]
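rmap's callbacks all share one dispatch: a set pvmw.pte means a PTE-level mapping, otherwise pvmw.pmd points at a PMD-mapped THP (and try_to_unmap_one() additionally sets PVMW_SYNC, forcing the walker to take the PTE lock on every step). A condensed sketch of page_referenced_one()'s loop under that convention, with the accounting and early-exit paths trimmed:

    while (page_vma_mapped_walk(&pvmw)) {
        unsigned long address = pvmw.address;

        if (pvmw.pte) {
            /* PTE-mapped: test-and-clear the accessed bit, with TLB flush */
            if (ptep_clear_flush_young_notify(vma, address, pvmw.pte))
                referenced++;
        } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
            /* PMD-mapped THP: the same test one level up */
            if (pmdp_clear_flush_young_notify(vma, address, pvmw.pmd))
                referenced++;
        }
    }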
page_idle.c
     51  struct page_vma_mapped_walk pvmw = {                        in page_idle_clear_pte_refs_one() [local]
     58  while (page_vma_mapped_walk(&pvmw)) {                       in page_idle_clear_pte_refs_one()
     59  addr = pvmw.address;                                        in page_idle_clear_pte_refs_one()
     60  if (pvmw.pte) {                                             in page_idle_clear_pte_refs_one()
     65  if (ptep_clear_young_notify(vma, addr, pvmw.pte))           in page_idle_clear_pte_refs_one()
     68  if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))           in page_idle_clear_pte_refs_one()
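Note the deliberate contrast with rmap.c above: page_idle uses the non-flushing *_clear_young_notify() variants. A stale TLB entry can at worst delay the next accessed bit, which is acceptable for idle tracking and avoids a TLB shootdown per page. The loop these hits come from, reconstructed:

    while (page_vma_mapped_walk(&pvmw)) {
        addr = pvmw.address;
        if (pvmw.pte) {
            /* Clear young without a TLB flush; staleness is harmless here */
            if (ptep_clear_young_notify(vma, addr, pvmw.pte))
                referenced = true;
        } else if (pmdp_clear_young_notify(vma, addr, pvmw.pmd)) {
            referenced = true;
        }
    }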
ksm.c
   1045  if (pvmw.address == -EFAULT)                                in write_protect_page()
   1051  pvmw.address,                                               in write_protect_page()
   1052  pvmw.address + PAGE_SIZE);                                  in write_protect_page()
   1055  if (!page_vma_mapped_walk(&pvmw))                           in write_protect_page()
   1060  if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||         in write_protect_page()
   1061  (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||   in write_protect_page()
   1081  entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);      in write_protect_page()
   1087  set_pte_at(mm, pvmw.address, pvmw.pte, entry);              in write_protect_page()
   1097  set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);       in write_protect_page()
   1099  *orig_pte = *pvmw.pte;                                      in write_protect_page()
    [all …]
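KSM's write_protect_page() is a single-address walk: before a page may be merged, its PTE must be downgraded to read-only, and a race with a concurrent writer must be caught. A trimmed sketch of the sequence behind the hits above (the mmu-notifier range setup, savedwrite handling, and the GUP race check via page_count() are all omitted):

    if (!page_vma_mapped_walk(&pvmw))
        goto out;                   /* no longer mapped at this address */

    if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte)) {
        pte_t entry;

        /* Clear the PTE and flush so a racing writer takes a fault */
        flush_cache_page(vma, pvmw.address, page_to_pfn(page));
        entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
        if (pte_dirty(entry))
            set_page_dirty(page);
        /* Reinstall read-only; the notify variant updates secondary MMUs */
        entry = pte_mkclean(pte_wrprotect(entry));
        set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
    }
    *orig_pte = *pvmw.pte;          /* snapshot so later PTE changes show */
    page_vma_mapped_walk_done(&pvmw);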
migrate.c
    178  struct page_vma_mapped_walk pvmw = {                        in remove_migration_pte() [local]
    189  while (page_vma_mapped_walk(&pvmw)) {                       in remove_migration_pte()
    193  new = page - pvmw.page->index +                             in remove_migration_pte()
    198  if (!pvmw.pte) {                                            in remove_migration_pte()
    200  remove_migration_pmd(&pvmw, new);                           in remove_migration_pte()
    207  if (pte_swp_soft_dirty(*pvmw.pte))                          in remove_migration_pte()
    227  if (pte_swp_soft_dirty(*pvmw.pte))                          in remove_migration_pte()
    229  if (pte_swp_uffd_wp(*pvmw.pte))                             in remove_migration_pte()
    239  set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);   in remove_migration_pte()
    247  set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);        in remove_migration_pte()
    [all …]
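remove_migration_pte() runs once migration finishes: each migration swap entry is converted back into a present PTE pointing at the new page, carrying the soft-dirty and uffd-wp software bits across. Roughly, with the hugetlb and device-private branches dropped ('new' is the target page, computed per subpage at line 193 above):

    while (page_vma_mapped_walk(&pvmw)) {
        pte_t pte;
        swp_entry_t entry;

        if (!pvmw.pte) {
            /* PMD-mapped THP migration entry: restore one level up */
            remove_migration_pmd(&pvmw, new);
            continue;
        }

        /* Rebuild a present PTE for the new page */
        pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
        if (pte_swp_soft_dirty(*pvmw.pte))
            pte = pte_mksoft_dirty(pte);
        if (pte_swp_uffd_wp(*pvmw.pte))
            pte = pte_mkuffd_wp(pte);

        entry = pte_to_swp_entry(*pvmw.pte);
        if (is_writable_migration_entry(entry))
            pte = maybe_mkwrite(pte, vma);

        set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
        update_mmu_cache(vma, pvmw.address, pvmw.pte);
    }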
huge_memory.c
   3152  struct vm_area_struct *vma = pvmw->vma;                     in set_pmd_migration_entry()
   3154  unsigned long address = pvmw->address;                      in set_pmd_migration_entry()
   3159  if (!(pvmw->pmd && !pvmw->pte))                             in set_pmd_migration_entry()
   3173  set_pmd_at(mm, address, pvmw->pmd, pmdswp);                 in set_pmd_migration_entry()
   3180  struct vm_area_struct *vma = pvmw->vma;                     in remove_migration_pmd()
   3182  unsigned long address = pvmw->address;                      in remove_migration_pmd()
   3187  if (!(pvmw->pmd && !pvmw->pte))                             in remove_migration_pmd()
   3190  entry = pmd_to_swp_entry(*pvmw->pmd);                       in remove_migration_pmd()
   3193  if (pmd_swp_soft_dirty(*pvmw->pmd))                         in remove_migration_pmd()
   3197  if (pmd_swp_uffd_wp(*pvmw->pmd))                            in remove_migration_pmd()
    [all …]
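Both helpers open with the same guard: pvmw->pmd set while pvmw->pte is NULL is the walker's signal that the page is mapped by a huge PMD, the only case these functions handle. A rough sketch of what set_pmd_migration_entry() does past that guard, against this era's two-argument page_remove_rmap() (dirty accounting and uffd-wp propagation elided):

    struct vm_area_struct *vma = pvmw->vma;
    struct mm_struct *mm = vma->vm_mm;
    unsigned long address = pvmw->address;
    pmd_t pmdval, pmdswp;
    swp_entry_t entry;

    if (!(pvmw->pmd && !pvmw->pte))
        return;                 /* not a PMD-mapped THP: nothing to do */

    flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
    pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
    /* Encode pfn and writability into a migration swap entry */
    if (pmd_write(pmdval))
        entry = make_writable_migration_entry(page_to_pfn(page));
    else
        entry = make_readable_migration_entry(page_to_pfn(page));
    pmdswp = swp_entry_to_pmd(entry);
    if (pmd_soft_dirty(pmdval))
        pmdswp = pmd_swp_mksoft_dirty(pmdswp);
    set_pmd_at(mm, address, pvmw->pmd, pmdswp);
    page_remove_rmap(page, true);
    put_page(page);

remove_migration_pmd() is the inverse: pmd_to_swp_entry() recovers the pfn and flags (hits 3190-3197), and a present huge PMD is rebuilt for the new page.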
/linux/mm/damon/
paddr.c
     22  struct page_vma_mapped_walk pvmw = {                        in __damon_pa_mkold() [local]
     28  while (page_vma_mapped_walk(&pvmw)) {                       in __damon_pa_mkold()
     29  addr = pvmw.address;                                        in __damon_pa_mkold()
     30  if (pvmw.pte)                                               in __damon_pa_mkold()
     31  damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);               in __damon_pa_mkold()
     33  damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);               in __damon_pa_mkold()
     96  struct page_vma_mapped_walk pvmw = {                        in __damon_pa_young() [local]
    104  while (page_vma_mapped_walk(&pvmw)) {                       in __damon_pa_young()
    105  addr = pvmw.address;                                        in __damon_pa_young()
    106  if (pvmw.pte) {                                             in __damon_pa_young()
    [all …]
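DAMON's physical-address mode starts from a page frame rather than a VMA, so __damon_pa_mkold() is not called directly: it is plugged into rmap_walk() as the .rmap_one callback and runs once per VMA that maps the page. A sketch of that wiring under the pre-folio rmap_walk_control API, mirroring what this file does (damon_get_page() is DAMON's pfn-to-page helper):

    static void damon_pa_mkold(unsigned long paddr)
    {
        struct page *page = damon_get_page(PHYS_PFN(paddr));
        struct rmap_walk_control rwc = {
            .rmap_one = __damon_pa_mkold,   /* the walk loop shown above */
            .anon_lock = page_lock_anon_vma_read,
        };
        bool need_lock;

        if (!page)
            return;

        /* File and KSM pages must be locked before walking their rmap */
        need_lock = !PageAnon(page) || PageKsm(page);
        if (need_lock && !trylock_page(page))
            goto out;

        rmap_walk(page, &rwc);

        if (need_lock)
            unlock_page(page);
    out:
        put_page(page);
    }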
/linux/include/linux/
rmap.h
    216  static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)  [argument]
    219  if (pvmw->pte && !PageHuge(pvmw->page))                     in page_vma_mapped_walk_done()
    220  pte_unmap(pvmw->pte);                                       in page_vma_mapped_walk_done()
    221  if (pvmw->ptl)                                              in page_vma_mapped_walk_done()
    222  spin_unlock(pvmw->ptl);                                     in page_vma_mapped_walk_done()
    225  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
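The header also documents the cleanup contract: on each successful iteration the walker returns with the PTE mapped and pvmw.ptl held, and only a walk that runs to completion releases them itself. A caller that breaks out early must call page_vma_mapped_walk_done(), as in this find-first sketch (the found flag is ours):

    bool found = false;

    while (page_vma_mapped_walk(&pvmw)) {
        if (pvmw.pte && pte_present(*pvmw.pte)) {
            found = true;
            /* Early exit: release pvmw.ptl and unmap pvmw.pte */
            page_vma_mapped_walk_done(&pvmw);
            break;
        }
    }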
swapops.h
    277  extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
    280  extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
    310  static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,    in set_pmd_migration_entry() [argument]
    316  static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,       in remove_migration_pmd() [argument]
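The extern declarations at 277 and 280 exist only under CONFIG_ARCH_ENABLE_THP_MIGRATION; the static inlines at 310 and 316 are the stubs for kernels built without it. The stub bodies, elided by the search output, are BUILD_BUG() calls in this era's tree, so any caller that is not itself compiled out breaks the build instead of failing at link time. The shape of the pattern:

    #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
    extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                                        struct page *page);
    extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
                                     struct page *new);
    #else
    static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                                               struct page *page)
    {
        BUILD_BUG();    /* callers must be compiled out on this config */
    }
    static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
                                            struct page *new)
    {
        BUILD_BUG();
    }
    #endif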
/linux/kernel/events/
uprobes.c
    158  struct page_vma_mapped_walk pvmw = {                        in __replace_page() [local]
    181  if (!page_vma_mapped_walk(&pvmw))                           in __replace_page()
    183  VM_BUG_ON_PAGE(addr != pvmw.address, old_page);             in __replace_page()
    198  flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));            in __replace_page()
    199  ptep_clear_flush_notify(vma, addr, pvmw.pte);               in __replace_page()
    201  set_pte_at_notify(mm, addr, pvmw.pte,                       in __replace_page()
    207  page_vma_mapped_walk_done(&pvmw);                           in __replace_page()
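__replace_page() is the one-shot form: uprobes knows the exact address of the page it patched, so a single page_vma_mapped_walk() call either positions on that mapping or the replacement fails and the caller retries. The swap itself, trimmed of rmap and refcount bookkeeping:

    if (!page_vma_mapped_walk(&pvmw))
        return -EAGAIN;         /* mapping gone; the caller retries */

    VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

    /* Swap the PTE from old_page to new_page under pvmw.ptl */
    flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
    ptep_clear_flush_notify(vma, addr, pvmw.pte);
    set_pte_at_notify(mm, addr, pvmw.pte,
                      mk_pte(new_page, vma->vm_page_prot));

    page_vma_mapped_walk_done(&pvmw);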
Completed in 152 milliseconds