Lines matching references to `old_len` (cross-reference listing, mm/mremap.c)

567 		unsigned long old_addr, unsigned long old_len,  in move_vma()  argument
572 long to_account = new_len - old_len; in move_vma()
597 if (!err && vma->vm_end != old_addr + old_len) in move_vma()
598 err = vma->vm_ops->may_split(vma, old_addr + old_len); in move_vma()
610 err = ksm_madvise(vma, old_addr, old_addr + old_len, in move_vma()
629 moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, in move_vma()
631 if (moved_len < old_len) { in move_vma()
646 old_len = new_len; in move_vma()
660 excess = vma->vm_end - vma->vm_start - old_len; in move_vma()
662 old_addr + old_len < vma->vm_end) in move_vma()
691 vma->vm_end == (old_addr + old_len)) in move_vma()
698 if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) { in move_vma()
701 vm_acct_memory(old_len >> PAGE_SHIFT); in move_vma()
723 unsigned long old_len, unsigned long new_len, unsigned long flags) in vma_to_resize() argument
741 if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) { in vma_to_resize()
751 if (old_len > vma->vm_end - addr) in vma_to_resize()
754 if (new_len == old_len) in vma_to_resize()
770 locked += new_len - old_len; in vma_to_resize()
776 (new_len - old_len) >> PAGE_SHIFT)) in vma_to_resize()
782 static unsigned long mremap_to(unsigned long addr, unsigned long old_len, in mremap_to() argument
800 if (addr + old_len > new_addr && new_addr + new_len > addr) in mremap_to()
826 if (old_len >= new_len) { in mremap_to()
827 ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap); in mremap_to()
828 if (ret && old_len != new_len) in mremap_to()
830 old_len = new_len; in mremap_to()
833 vma = vma_to_resize(addr, old_len, new_len, flags); in mremap_to()
841 !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) { in mremap_to()
862 ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf, in mremap_to()
889 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, in SYSCALL_DEFINE5() argument
925 (!(flags & MREMAP_MAYMOVE) || old_len != new_len)) in SYSCALL_DEFINE5()
932 old_len = PAGE_ALIGN(old_len); in SYSCALL_DEFINE5()
954 old_len = ALIGN(old_len, huge_page_size(h)); in SYSCALL_DEFINE5()
967 if (new_len > old_len) in SYSCALL_DEFINE5()
972 ret = mremap_to(addr, old_len, new_addr, new_len, in SYSCALL_DEFINE5()
984 if (old_len >= new_len) { in SYSCALL_DEFINE5()
987 retval = __do_munmap(mm, addr+new_len, old_len - new_len, in SYSCALL_DEFINE5()
989 if (retval < 0 && old_len != new_len) { in SYSCALL_DEFINE5()
1002 vma = vma_to_resize(addr, old_len, new_len, flags); in SYSCALL_DEFINE5()
1010 if (old_len == vma->vm_end - addr) { in SYSCALL_DEFINE5()
1012 if (vma_expandable(vma, new_len - old_len)) { in SYSCALL_DEFINE5()
1013 long pages = (new_len - old_len) >> PAGE_SHIFT; in SYSCALL_DEFINE5()
1059 ret = move_vma(vma, addr, old_len, new_len, new_addr, in SYSCALL_DEFINE5()
1069 if (locked && new_len > old_len) in SYSCALL_DEFINE5()
1070 mm_populate(new_addr + old_len, new_len - old_len); in SYSCALL_DEFINE5()
1072 mremap_userfaultfd_complete(&uf, addr, ret, old_len); in SYSCALL_DEFINE5()