Lines Matching refs:vma (mm/mprotect.c)

These are the references to vma in mm/mprotect.c, grouped by function, from the mprotect(2) syscall body down to the per-PTE walk.

38 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,  in change_pte_range()  argument
65 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in change_pte_range()
68 if (prot_numa && !(vma->vm_flags & VM_SHARED) && in change_pte_range()
69 atomic_read(&vma->vm_mm->mm_users) == 1) in change_pte_range()
72 flush_tlb_batched_pending(vma->vm_mm); in change_pte_range()
91 page = vm_normal_page(vma, addr, oldpte); in change_pte_range()
96 if (is_cow_mapping(vma->vm_flags) && in change_pte_range()
116 oldpte = ptep_modify_prot_start(vma, addr, pte); in change_pte_range()
137 !(vma->vm_flags & VM_SOFTDIRTY))) { in change_pte_range()
140 ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent); in change_pte_range()
186 set_pte_at(vma->vm_mm, addr, pte, newpte); in change_pte_range()
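
Note: change_pte_range() is the leaf of the walk. It takes the PTE lock (line 65), skips the NUMA-hinting work for shared mappings and multi-user mms (lines 68-69), and rewrites each entry through the ptep_modify_prot_start()/ptep_modify_prot_commit() pair (lines 116, 140) so that no half-updated PTE is ever visible to a hardware walker. The following is a minimal userspace sketch (a hypothetical demo, not kernel code) of what the PTE rewrite enforces: after mprotect(PROT_READ) the write bit is gone and a store faults until the handler restores it.

    #define _GNU_SOURCE
    #include <signal.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static char *page;
    static long pagesz;

    static void on_segv(int sig)
    {
            (void)sig;
            /* mprotect() is a plain syscall; re-enable writes so the
               faulting store is retried and succeeds on return. */
            mprotect(page, pagesz, PROT_READ | PROT_WRITE);
            write(1, "fault: write bit restored\n", 26);
    }

    int main(void)
    {
            pagesz = sysconf(_SC_PAGESIZE);
            page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (page == MAP_FAILED) { perror("mmap"); return 1; }

            signal(SIGSEGV, on_segv);
            mprotect(page, pagesz, PROT_READ); /* clears the PTE write bit */
            page[0] = 'x';                     /* faults once, then retries */
            puts("store completed after the handler re-enabled writes");
            return 0;
    }
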
222 static inline unsigned long change_pmd_range(struct vm_area_struct *vma, in change_pmd_range() argument
256 vma, vma->vm_mm, addr, end); in change_pmd_range()
262 __split_huge_pmd(vma, pmd, addr, false, NULL); in change_pmd_range()
264 int nr_ptes = change_huge_pmd(vma, pmd, addr, in change_pmd_range()
279 this_pages = change_pte_range(vma, pmd, addr, next, newprot, in change_pmd_range()
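
Note: change_pmd_range() handles the huge-PMD cases: a protection change that covers only part of a huge page forces __split_huge_pmd() (line 262), an aligned change goes through change_huge_pmd() in place (line 264), and everything else falls through to change_pte_range() (line 279). The hedged demo below may exercise the split path, assuming transparent huge pages are enabled, a THP actually backs the touched offset, and base pages are 4 KiB; none of that is guaranteed by the code itself.

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 4UL << 20;         /* room for at least one 2 MiB THP */
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED) { perror("mmap"); return 1; }

            madvise(p, len, MADV_HUGEPAGE); /* request THPs for the range */
            memset(p, 0, len);              /* fault the range in */

            /* A huge PMD cannot hold mixed permissions, so protecting one
               4 KiB page inside it forces the kernel to split it first. */
            if (mprotect(p + (2UL << 20), 4096, PROT_READ))
                    perror("mprotect");
            return 0;
    }
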
294 static inline unsigned long change_pud_range(struct vm_area_struct *vma, in change_pud_range() argument
307 pages += change_pmd_range(vma, pud, addr, next, newprot, in change_pud_range()
314 static inline unsigned long change_p4d_range(struct vm_area_struct *vma, in change_p4d_range() argument
327 pages += change_pud_range(vma, p4d, addr, next, newprot, in change_p4d_range()
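
Note: change_pud_range() and change_p4d_range() are the same pattern one level apart: iterate the entries of the current table, clamp next to the end of each entry's span with the corresponding *_addr_end() helper, and recurse into the level below. The following is a self-contained sketch of that clamping idiom; the 2 MiB SPAN constant is illustrative, standing in for the real per-level sizes.

    #include <stdio.h>

    #define SPAN (1UL << 21)   /* pretend one table entry maps 2 MiB */

    static unsigned long span_addr_end(unsigned long addr, unsigned long end)
    {
            /* Same shape as pmd_addr_end(): round up to the next
               entry boundary, but never past the overall end. */
            unsigned long next = (addr + SPAN) & ~(SPAN - 1);
            return next < end ? next : end;
    }

    int main(void)
    {
            unsigned long addr = 0x1ff000UL, end = 0x600000UL, next;

            do {
                    next = span_addr_end(addr, end);
                    printf("process [%#lx, %#lx)\n", addr, next);
                    addr = next;
            } while (addr != end);
            return 0;
    }
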
334 static unsigned long change_protection_range(struct vm_area_struct *vma, in change_protection_range() argument
338 struct mm_struct *mm = vma->vm_mm; in change_protection_range()
346 flush_cache_range(vma, addr, end); in change_protection_range()
352 pages += change_p4d_range(vma, pgd, addr, next, newprot, in change_protection_range()
358 flush_tlb_range(vma, start, end); in change_protection_range()
364 unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, in change_protection() argument
372 if (is_vm_hugetlb_page(vma)) in change_protection()
373 pages = hugetlb_change_protection(vma, start, end, newprot); in change_protection()
375 pages = change_protection_range(vma, start, end, newprot, in change_protection()
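
Note: change_protection_range() brackets the whole walk with flush_cache_range() before (line 346) and flush_tlb_range() after (line 358), and change_protection() is the dispatcher above it: hugetlb VMAs go to hugetlb_change_protection() (line 373), everything else to change_protection_range() (line 375). A hedged demo of the two paths follows; MAP_HUGETLB fails unless huge pages are reserved on the system, in which case the fallback takes the generic path.

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 2UL << 20;         /* one 2 MiB hugetlb page */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
            if (p != MAP_FAILED) {
                    puts("hugetlb mapping: hugetlb_change_protection() path");
            } else {
                    p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                    if (p == MAP_FAILED) { perror("mmap"); return 1; }
                    puts("regular mapping: change_protection_range() path");
            }
            if (mprotect(p, len, PROT_READ))
                    perror("mprotect");
            return 0;
    }
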
409 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, in mprotect_fixup() argument
412 struct mm_struct *mm = vma->vm_mm; in mprotect_fixup()
413 unsigned long oldflags = vma->vm_flags; in mprotect_fixup()
421 *pprev = vma; in mprotect_fixup()
431 (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in mprotect_fixup()
464 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in mprotect_fixup()
466 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), in mprotect_fixup()
467 vma->vm_userfaultfd_ctx); in mprotect_fixup()
469 vma = *pprev; in mprotect_fixup()
470 VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY); in mprotect_fixup()
474 *pprev = vma; in mprotect_fixup()
476 if (start != vma->vm_start) { in mprotect_fixup()
477 error = split_vma(mm, vma, start, 1); in mprotect_fixup()
482 if (end != vma->vm_end) { in mprotect_fixup()
483 error = split_vma(mm, vma, end, 0); in mprotect_fixup()
493 vma->vm_flags = newflags; in mprotect_fixup()
494 dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot); in mprotect_fixup()
495 vma_set_page_prot(vma); in mprotect_fixup()
497 change_protection(vma, start, end, vma->vm_page_prot, in mprotect_fixup()
506 populate_vma_page_range(vma, start, end, NULL); in mprotect_fixup()
511 perf_event_mmap(vma); in mprotect_fixup()
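
Note: mprotect_fixup() first tries to merge the changed span with a neighbouring VMA (lines 464-467) and otherwise splits the VMA at an unaligned start or end (lines 477, 483) before updating vm_flags and calling change_protection() (lines 493-497). The splits are visible from userspace: protecting only the middle page of a three-page mapping turns one VMA into three. The small demo below prints /proc/self/maps so the r-- middle VMA can be seen.

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long pg = sysconf(_SC_PAGESIZE);
            char *p = mmap(NULL, 3 * pg, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED) { perror("mmap"); return 1; }

            /* Change only the middle page: start != vm_start and
               end != vm_end, so split_vma() runs twice and one rw
               anonymous VMA becomes rw / r-- / rw. */
            if (mprotect(p + pg, pg, PROT_READ)) { perror("mprotect"); return 1; }

            printf("mapping at %p; the r-- middle VMA is visible below:\n",
                   (void *)p);
            system("cat /proc/self/maps");
            return 0;
    }
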
526 struct vm_area_struct *vma, *prev; in do_mprotect_pkey() local
562 vma = find_vma(current->mm, start); in do_mprotect_pkey()
564 if (!vma) in do_mprotect_pkey()
568 if (vma->vm_start >= end) in do_mprotect_pkey()
570 start = vma->vm_start; in do_mprotect_pkey()
572 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_mprotect_pkey()
575 if (vma->vm_start > start) in do_mprotect_pkey()
578 end = vma->vm_end; in do_mprotect_pkey()
580 if (!(vma->vm_flags & VM_GROWSUP)) in do_mprotect_pkey()
585 if (start > vma->vm_start) in do_mprotect_pkey()
586 prev = vma; in do_mprotect_pkey()
588 prev = vma->vm_prev; in do_mprotect_pkey()
598 if (rier && (vma->vm_flags & VM_MAYEXEC)) in do_mprotect_pkey()
609 new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey); in do_mprotect_pkey()
611 newflags |= (vma->vm_flags & ~mask_off_old_flags); in do_mprotect_pkey()
625 error = security_file_mprotect(vma, reqprot, prot); in do_mprotect_pkey()
629 tmp = vma->vm_end; in do_mprotect_pkey()
633 if (vma->vm_ops && vma->vm_ops->mprotect) { in do_mprotect_pkey()
634 error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags); in do_mprotect_pkey()
639 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); in do_mprotect_pkey()
650 vma = prev->vm_next; in do_mprotect_pkey()
651 if (!vma || vma->vm_start != nstart) { in do_mprotect_pkey()
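
Note: do_mprotect_pkey() is the syscall body shared by mprotect(2) and pkey_mprotect(2): it finds the first VMA (line 562), folds the pkey into the new flags (lines 609-611), and loops mprotect_fixup() over every VMA in the range via prev->vm_next (line 650). A hedged example of the pkey path follows; pkey_alloc()/pkey_mprotect() need Linux >= 4.9, glibc >= 2.27, and hardware protection-key support, so the calls may simply fail on many machines.

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED) { perror("mmap"); return 1; }

            int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
            if (pkey < 0) { perror("pkey_alloc (no pkey support?)"); return 1; }

            /* Same kernel path as mprotect(2), but with a real pkey
               instead of -1, so the page is tagged with the key. */
            if (pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey))
                    perror("pkey_mprotect");
            else
                    puts("page tagged with a write-disabled protection key");

            pkey_free(pkey);
            return 0;
    }
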