Lines matching refs:vma in mm/madvise.c (Linux kernel)

69 static long madvise_behavior(struct vm_area_struct *vma,  in madvise_behavior()  argument
73 struct mm_struct *mm = vma->vm_mm; in madvise_behavior()
76 unsigned long new_flags = vma->vm_flags; in madvise_behavior()
92 if (vma->vm_flags & VM_IO) { in madvise_behavior()
100 if (vma->vm_file || vma->vm_flags & VM_SHARED) { in madvise_behavior()
113 if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) { in madvise_behavior()
121 error = ksm_madvise(vma, start, end, behavior, &new_flags); in madvise_behavior()
127 error = hugepage_madvise(vma, &new_flags, behavior); in madvise_behavior()
133 if (new_flags == vma->vm_flags) { in madvise_behavior()
134 *prev = vma; in madvise_behavior()
138 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in madvise_behavior()
139 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, in madvise_behavior()
140 vma->vm_file, pgoff, vma_policy(vma), in madvise_behavior()
141 vma->vm_userfaultfd_ctx); in madvise_behavior()
143 vma = *prev; in madvise_behavior()
147 *prev = vma; in madvise_behavior()
149 if (start != vma->vm_start) { in madvise_behavior()
154 error = __split_vma(mm, vma, start, 1); in madvise_behavior()
159 if (end != vma->vm_end) { in madvise_behavior()
164 error = __split_vma(mm, vma, end, 0); in madvise_behavior()
173 vma->vm_flags = new_flags; in madvise_behavior()
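
The lines above show madvise_behavior() applying a flags-changing advice: after the behavior-specific validation (lines 92-127) it bails out if nothing would change (line 133), tries to vma_merge() with the neighbours under the new flags (lines 138-141), and otherwise __split_vma()s at start and end (lines 154 and 164) so the assignment at line 173 touches only the requested range. A minimal userspace sketch, not part of the listing, that exercises this path with MADV_DONTFORK (which sets VM_DONTCOPY through exactly this function):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        /* Three pages in a single anonymous VMA. */
        char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }

        /* Advising only the middle page forces madvise_behavior() to
         * __split_vma() at both boundaries before updating vm_flags;
         * /proc/self/maps will show three VMAs afterwards. */
        if (madvise(p + page, page, MADV_DONTFORK))
            perror("madvise");

        munmap(p, 3 * page);
        return 0;
    }
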
191 struct vm_area_struct *vma = walk->private; in swapin_walk_pmd_entry() local
203 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in swapin_walk_pmd_entry()
214 vma, index, false); in swapin_walk_pmd_entry()
226 static void force_shm_swapin_readahead(struct vm_area_struct *vma, in force_shm_swapin_readahead() argument
230 XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start)); in force_shm_swapin_readahead()
231 pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1); in force_shm_swapin_readahead()
260 static long madvise_willneed(struct vm_area_struct *vma, in madvise_willneed() argument
264 struct mm_struct *mm = vma->vm_mm; in madvise_willneed()
265 struct file *file = vma->vm_file; in madvise_willneed()
268 *prev = vma; in madvise_willneed()
271 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma); in madvise_willneed()
277 force_shm_swapin_readahead(vma, start, end, in madvise_willneed()
299 offset = (loff_t)(start - vma->vm_start) in madvise_willneed()
300 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in madvise_willneed()
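
For MADV_WILLNEED the listing shows three cases: anonymous memory is swapped in page by page through swapin_walk_pmd_entry() (lines 191-214), shmem mappings get force_shm_swapin_readahead() (lines 226-231, called at line 277), and other file mappings fall through to the fadvise path using the file offset computed at lines 299-300. A hedged userspace sketch (the file path is illustrative, any readable file works):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        int fd = open("/etc/os-release", O_RDONLY);
        if (fd < 0) { perror("open"); return 1; }

        char *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }

        /* Starts readahead asynchronously; later faults on this range
         * are then more likely to hit the page cache. */
        return madvise(p, 4096, MADV_WILLNEED);
    }
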
316 struct vm_area_struct *vma = walk->vma; in madvise_cold_or_pageout_pte_range() local
331 ptl = pmd_trans_huge_lock(pmd, vma); in madvise_cold_or_pageout_pte_range()
366 pmdp_invalidate(vma, addr, pmd); in madvise_cold_or_pageout_pte_range()
396 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in madvise_cold_or_pageout_pte_range()
408 page = vm_normal_page(vma, addr, ptent); in madvise_cold_or_pageout_pte_range()
486 struct vm_area_struct *vma, in madvise_cold_page_range() argument
494 tlb_start_vma(tlb, vma); in madvise_cold_page_range()
495 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); in madvise_cold_page_range()
496 tlb_end_vma(tlb, vma); in madvise_cold_page_range()
499 static long madvise_cold(struct vm_area_struct *vma, in madvise_cold() argument
503 struct mm_struct *mm = vma->vm_mm; in madvise_cold()
506 *prev = vma; in madvise_cold()
507 if (!can_madv_lru_vma(vma)) in madvise_cold()
512 madvise_cold_page_range(&tlb, vma, start_addr, end_addr); in madvise_cold()
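
madvise_cold() first checks can_madv_lru_vma() (line 507), then runs the cold/pageout page-table walker (lines 316-408, including a THP path at lines 331-366) over the range, deactivating the pages without discarding their contents. A minimal sketch; the #define is a fallback for older libc headers and the uapi value is an assumption stated in the comment:

    #include <string.h>
    #include <sys/mman.h>

    #ifndef MADV_COLD
    #define MADV_COLD 20                /* uapi value; Linux 5.4+ */
    #endif

    int main(void)
    {
        size_t len = 1 << 20;           /* 1 MiB of anonymous memory */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;
        memset(p, 1, len);              /* fault everything in */

        /* Move the pages to the inactive LRU so reclaim prefers them;
         * their contents remain valid until actual memory pressure. */
        return madvise(p, len, MADV_COLD);
    }
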
519 struct vm_area_struct *vma, in madvise_pageout_page_range() argument
527 tlb_start_vma(tlb, vma); in madvise_pageout_page_range()
528 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); in madvise_pageout_page_range()
529 tlb_end_vma(tlb, vma); in madvise_pageout_page_range()
532 static inline bool can_do_pageout(struct vm_area_struct *vma) in can_do_pageout() argument
534 if (vma_is_anonymous(vma)) in can_do_pageout()
536 if (!vma->vm_file) in can_do_pageout()
545 file_inode(vma->vm_file)) || in can_do_pageout()
546 file_permission(vma->vm_file, MAY_WRITE) == 0; in can_do_pageout()
549 static long madvise_pageout(struct vm_area_struct *vma, in madvise_pageout() argument
553 struct mm_struct *mm = vma->vm_mm; in madvise_pageout()
556 *prev = vma; in madvise_pageout()
557 if (!can_madv_lru_vma(vma)) in madvise_pageout()
560 if (!can_do_pageout(vma)) in madvise_pageout()
565 madvise_pageout_page_range(&tlb, vma, start_addr, end_addr); in madvise_pageout()
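
MADV_PAGEOUT reuses the same walker (line 528) but reclaims the pages immediately rather than merely deactivating them. The extra gate is can_do_pageout() (lines 532-546): always permitted on anonymous VMAs, and on file-backed ones only when the caller owns the inode or could open the file for writing, which keeps unprivileged processes from using pageout as a page-cache residency probe against other users' files. Usage mirrors the MADV_COLD sketch above with MADV_PAGEOUT (also Linux 5.4+, uapi value 21) substituted.
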
577 struct vm_area_struct *vma = walk->vma; in madvise_free_pte_range() local
586 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next)) in madvise_free_pte_range()
618 page = vm_normal_page(vma, addr, ptent); in madvise_free_pte_range()
708 static int madvise_free_single_vma(struct vm_area_struct *vma, in madvise_free_single_vma() argument
711 struct mm_struct *mm = vma->vm_mm; in madvise_free_single_vma()
716 if (!vma_is_anonymous(vma)) in madvise_free_single_vma()
719 range.start = max(vma->vm_start, start_addr); in madvise_free_single_vma()
720 if (range.start >= vma->vm_end) in madvise_free_single_vma()
722 range.end = min(vma->vm_end, end_addr); in madvise_free_single_vma()
723 if (range.end <= vma->vm_start) in madvise_free_single_vma()
725 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, in madvise_free_single_vma()
733 tlb_start_vma(&tlb, vma); in madvise_free_single_vma()
734 walk_page_range(vma->vm_mm, range.start, range.end, in madvise_free_single_vma()
736 tlb_end_vma(&tlb, vma); in madvise_free_single_vma()
762 static long madvise_dontneed_single_vma(struct vm_area_struct *vma, in madvise_dontneed_single_vma() argument
765 zap_page_range(vma, start, end - start); in madvise_dontneed_single_vma()
769 static long madvise_dontneed_free(struct vm_area_struct *vma, in madvise_dontneed_free() argument
774 struct mm_struct *mm = vma->vm_mm; in madvise_dontneed_free()
776 *prev = vma; in madvise_dontneed_free()
777 if (!can_madv_lru_vma(vma)) in madvise_dontneed_free()
780 if (!userfaultfd_remove(vma, start, end)) { in madvise_dontneed_free()
784 vma = find_vma(mm, start); in madvise_dontneed_free()
785 if (!vma) in madvise_dontneed_free()
787 if (start < vma->vm_start) { in madvise_dontneed_free()
799 if (!can_madv_lru_vma(vma)) in madvise_dontneed_free()
801 if (end > vma->vm_end) { in madvise_dontneed_free()
814 end = vma->vm_end; in madvise_dontneed_free()
820 return madvise_dontneed_single_vma(vma, start, end); in madvise_dontneed_free()
822 return madvise_free_single_vma(vma, start, end); in madvise_dontneed_free()
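
Lines 769-822 show the shared entry point for the two discard behaviors: after revalidating the VMA around userfaultfd_remove() (lines 780-814), it dispatches either to madvise_dontneed_single_vma(), which zaps the range immediately (line 765), or to madvise_free_single_vma(), which accepts only anonymous VMAs (line 716) and merely marks their pages so reclaim can drop them lazily. A sketch contrasting the two guarantees:

    #include <assert.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4096;
        char *p = mmap(NULL, 2 * len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;
        memset(p, 7, 2 * len);

        /* MADV_DONTNEED: the next read is guaranteed to see zeroes. */
        madvise(p, len, MADV_DONTNEED);
        assert(p[0] == 0);

        /* MADV_FREE: contents survive until reclaim actually runs, so
         * p[len] may still read back 7 at this point. */
        madvise(p + len, len, MADV_FREE);
        return 0;
    }
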
827 static long madvise_populate(struct vm_area_struct *vma, in madvise_populate() argument
833 struct mm_struct *mm = vma->vm_mm; in madvise_populate()
838 *prev = vma; in madvise_populate()
845 if (!vma || start >= vma->vm_end) { in madvise_populate()
846 vma = find_vma(mm, start); in madvise_populate()
847 if (!vma || start < vma->vm_start) in madvise_populate()
851 tmp_end = min_t(unsigned long, end, vma->vm_end); in madvise_populate()
853 pages = faultin_vma_page_range(vma, start, tmp_end, write, in madvise_populate()
859 vma = NULL; in madvise_populate()
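
madvise_populate() (lines 827-859) loops over the range with faultin_vma_page_range() (line 853), clamping each step to the current VMA (line 851) and re-looking-up the VMA whenever the fault path dropped mmap_lock (lines 845-847, 859). From userspace this is MADV_POPULATE_READ / MADV_POPULATE_WRITE (Linux 5.14+), which prefault a range without needing MAP_POPULATE at mmap time. A sketch; the fallback #define carries the uapi value:

    #include <sys/mman.h>

    #ifndef MADV_POPULATE_WRITE
    #define MADV_POPULATE_WRITE 23      /* uapi value; Linux 5.14+ */
    #endif

    int main(void)
    {
        size_t len = 1 << 20;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        /* Prefault every page writable up front, avoiding a minor
         * fault on the first store to each page later. */
        return madvise(p, len, MADV_POPULATE_WRITE);
    }
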
888 static long madvise_remove(struct vm_area_struct *vma, in madvise_remove() argument
895 struct mm_struct *mm = vma->vm_mm; in madvise_remove()
899 if (vma->vm_flags & VM_LOCKED) in madvise_remove()
902 f = vma->vm_file; in madvise_remove()
908 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) in madvise_remove()
911 offset = (loff_t)(start - vma->vm_start) in madvise_remove()
912 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in madvise_remove()
921 if (userfaultfd_remove(vma, start, end)) { in madvise_remove()
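
madvise_remove() refuses VM_LOCKED mappings (line 899) and anything that is not a shared, writable file mapping (line 908), then punches a hole at the file offset computed at lines 911-912, coordinating with userfaultfd (line 921). Because the backing store itself is truncated, the effect is visible through every mapping of that file range. A sketch using a memfd as the illustrative backing file:

    #define _GNU_SOURCE
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t len = 4096;
        int fd = memfd_create("demo", 0);   /* Linux 3.17+ */
        if (fd < 0 || ftruncate(fd, len))
            return 1;

        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
            return 1;
        memset(p, 7, len);

        /* Drops the pages *and* their backing store: any mapping of
         * this file range reads back zeroes afterwards. */
        return madvise(p, len, MADV_REMOVE);
    }
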
982 madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, in madvise_vma() argument
987 return madvise_remove(vma, prev, start, end); in madvise_vma()
989 return madvise_willneed(vma, prev, start, end); in madvise_vma()
991 return madvise_cold(vma, prev, start, end); in madvise_vma()
993 return madvise_pageout(vma, prev, start, end); in madvise_vma()
996 return madvise_dontneed_free(vma, prev, start, end, behavior); in madvise_vma()
999 return madvise_populate(vma, prev, start, end, behavior); in madvise_vma()
1001 return madvise_behavior(vma, prev, start, end, behavior); in madvise_vma()
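
The returns at lines 987-1001 are the arms of a switch on behavior; the case labels are elided by the reference listing. A hedged reconstruction, matching the upstream source of the kernel version this listing appears to come from (the v5.14/v5.15 era):

    switch (behavior) {
    case MADV_REMOVE:
            return madvise_remove(vma, prev, start, end);
    case MADV_WILLNEED:
            return madvise_willneed(vma, prev, start, end);
    case MADV_COLD:
            return madvise_cold(vma, prev, start, end);
    case MADV_PAGEOUT:
            return madvise_pageout(vma, prev, start, end);
    case MADV_FREE:
    case MADV_DONTNEED:
            return madvise_dontneed_free(vma, prev, start, end, behavior);
    case MADV_POPULATE_READ:
    case MADV_POPULATE_WRITE:
            return madvise_populate(vma, prev, start, end, behavior);
    default:
            return madvise_behavior(vma, prev, start, end, behavior);
    }
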
1131 struct vm_area_struct *vma, *prev; in do_madvise() local
1177 vma = find_vma_prev(mm, start, &prev); in do_madvise()
1178 if (vma && start > vma->vm_start) in do_madvise()
1179 prev = vma; in do_madvise()
1185 if (!vma) in do_madvise()
1189 if (start < vma->vm_start) { in do_madvise()
1191 start = vma->vm_start; in do_madvise()
1197 tmp = vma->vm_end; in do_madvise()
1202 error = madvise_vma(vma, &prev, start, tmp, behavior); in do_madvise()
1212 vma = prev->vm_next; in do_madvise()
1214 vma = find_vma(mm, start); in do_madvise()
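
do_madvise() walks the range one VMA at a time: find_vma_prev() locates the first VMA (line 1177), unmapped gaps advance start to the next VMA while recording an error (lines 1189-1191), each madvise_vma() call is clamped to the VMA end (lines 1197, 1202), and the loop resumes from prev->vm_next or a fresh find_vma() (lines 1212-1214). One practical consequence, shown in this sketch: a single madvise() call spanning several VMAs is simply applied piecewise:

    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;
        memset(p, 1, 2 * page);

        /* Different protections force two separate VMAs... */
        mprotect(p, page, PROT_READ);

        /* ...yet one call covers both: do_madvise() loops, invoking
         * madvise_vma() once per VMA inside [start, end). */
        return madvise(p, 2 * page, MADV_DONTNEED);
    }
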