Lines matching refs: old_addr (Linux kernel, mm/mremap.c)
135 unsigned long old_addr, unsigned long old_end, in move_ptes() argument
143 unsigned long len = old_end - old_addr; in move_ptes()
170 old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl); in move_ptes()
178 for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE, in move_ptes()
183 pte = ptep_get_and_clear(mm, old_addr, old_pte); in move_ptes()
197 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); in move_ptes()
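The move_ptes() matches above show the per-page copy loop: the old range is walked one PAGE_SIZE step at a time, each old PTE is cleared with ptep_get_and_clear() and, after move_pte()'s adjustments, reinstalled at the new address (the install via set_pte_at() is not part of the old_addr matches, so it is inferred here). A minimal user-space sketch of that loop shape, with plain arrays standing in for page tables; the PTE lock, TLB flushing and the arch fixups are omitted, and PAGE_SIZE is an assumed 4 KiB:

/*
 * Sketch of the copy loop shape in move_ptes(): step through the old
 * range one PAGE_SIZE at a time, clear the old slot and install its
 * value at the matching new slot.  Arrays stand in for page tables.
 */
#include <assert.h>

#define PAGE_SIZE 4096UL        /* assumption: 4 KiB pages */
#define NPTES     8

int main(void)
{
    unsigned long old_tbl[NPTES] = { 11, 22, 33, 44 };   /* fake "PTEs" */
    unsigned long new_tbl[NPTES] = { 0 };
    unsigned long old_addr = 0, new_addr = 0;
    unsigned long old_end  = old_addr + 4 * PAGE_SIZE;

    for (; old_addr < old_end; old_addr += PAGE_SIZE, new_addr += PAGE_SIZE) {
        /* ptep_get_and_clear() analogue: read and zero the old entry */
        unsigned long pte = old_tbl[old_addr / PAGE_SIZE];
        old_tbl[old_addr / PAGE_SIZE] = 0;
        /* set_pte_at() analogue: install it for the new address */
        new_tbl[new_addr / PAGE_SIZE] = pte;
    }

    assert(old_tbl[0] == 0 && new_tbl[0] == 11 && new_tbl[3] == 44);
    return 0;
}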
223 static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, in move_normal_pmd() argument
274 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); in move_normal_pmd()
283 unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd, in move_normal_pmd() argument
291 static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, in move_normal_pud() argument
323 flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE); in move_normal_pud()
332 unsigned long old_addr, unsigned long new_addr, pud_t *old_pud, in move_normal_pud() argument
340 static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, in move_huge_pud() argument
372 flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE); in move_huge_pud()
380 static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, in move_huge_pud() argument
402 unsigned long old_addr, unsigned long old_end, in get_extent() argument
423 next = (old_addr + size) & mask; in get_extent()
425 extent = next - old_addr; in get_extent()
426 if (extent > old_end - old_addr) in get_extent()
427 extent = old_end - old_addr; in get_extent()
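The get_extent() matches above are the clamping arithmetic that sizes each step of the copy loop: advance from old_addr to the next PMD/PUD boundary, but never past old_end. The real helper also clamps the result against the equivalent boundary computed from new_addr, which is outside these old_addr matches. A runnable sketch of just the old_addr side, with PMD_SIZE assumed to be 2 MiB:

/*
 * Sketch of the extent clamping in get_extent(): the step ends at the
 * next PMD boundary or at old_end, whichever comes first.  PMD_SIZE is
 * an assumption; the kernel selects PMD_SIZE/PMD_MASK or
 * PUD_SIZE/PUD_MASK based on the entry type.
 */
#include <stdio.h>

#define PMD_SIZE (1UL << 21)
#define PMD_MASK (~(PMD_SIZE - 1))

static unsigned long extent_sketch(unsigned long old_addr, unsigned long old_end)
{
    unsigned long next   = (old_addr + PMD_SIZE) & PMD_MASK;
    unsigned long extent = next - old_addr;

    if (extent > old_end - old_addr)
        extent = old_end - old_addr;
    return extent;
}

int main(void)
{
    /* 4 KiB past a 2 MiB boundary: the step stops at the next boundary */
    printf("%#lx\n", extent_sketch(0x201000, 0x800000));   /* 0x1ff000 */
    /* aligned start but a short range: the step is the remainder */
    printf("%#lx\n", extent_sketch(0x400000, 0x410000));   /* 0x10000  */
    return 0;
}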
439 unsigned long old_addr, unsigned long new_addr, in move_pgt_entry() argument
450 moved = move_normal_pmd(vma, old_addr, new_addr, old_entry, in move_pgt_entry()
454 moved = move_normal_pud(vma, old_addr, new_addr, old_entry, in move_pgt_entry()
459 move_huge_pmd(vma, old_addr, new_addr, old_entry, in move_pgt_entry()
464 move_huge_pud(vma, old_addr, new_addr, old_entry, in move_pgt_entry()
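The move_pgt_entry() matches above are a dispatcher: the same (old_addr, new_addr) pair is handed to a different mover depending on the page-table level and on whether the entry is a huge mapping. A rough sketch of that switch, with placeholder stubs standing in for the real movers (the *_stub names and bodies are inventions for illustration, not kernel code):

#include <stdbool.h>
#include <stdio.h>

enum pgt_entry { NORMAL_PMD, NORMAL_PUD, HPAGE_PMD, HPAGE_PUD };

/* Placeholders for move_normal_pmd(), move_normal_pud(),
 * move_huge_pmd() and move_huge_pud(). */
static bool normal_pmd_stub(unsigned long o, unsigned long n) { (void)o; (void)n; return true; }
static bool normal_pud_stub(unsigned long o, unsigned long n) { (void)o; (void)n; return true; }
static bool huge_pmd_stub(unsigned long o, unsigned long n)   { (void)o; (void)n; return true; }
static bool huge_pud_stub(unsigned long o, unsigned long n)   { (void)o; (void)n; return true; }

static bool move_pgt_entry_sketch(enum pgt_entry entry,
                                  unsigned long old_addr, unsigned long new_addr)
{
    switch (entry) {
    case NORMAL_PMD: return normal_pmd_stub(old_addr, new_addr);
    case NORMAL_PUD: return normal_pud_stub(old_addr, new_addr);
    case HPAGE_PMD:  return huge_pmd_stub(old_addr, new_addr);
    case HPAGE_PUD:  return huge_pud_stub(old_addr, new_addr);
    }
    return false;
}

int main(void)
{
    printf("%d\n", move_pgt_entry_sketch(NORMAL_PMD, 0x200000, 0x600000));
    return 0;
}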
480 unsigned long old_addr, struct vm_area_struct *new_vma, in move_page_tables() argument
489 old_end = old_addr + len; in move_page_tables()
490 flush_cache_range(vma, old_addr, old_end); in move_page_tables()
493 return move_hugetlb_page_tables(vma, new_vma, old_addr, in move_page_tables()
497 old_addr, old_end); in move_page_tables()
500 for (; old_addr < old_end; old_addr += extent, new_addr += extent) { in move_page_tables()
506 extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr); in move_page_tables()
508 old_pud = get_old_pud(vma->vm_mm, old_addr); in move_page_tables()
516 move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr, in move_page_tables()
523 if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr, in move_page_tables()
528 extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr); in move_page_tables()
529 old_pmd = get_old_pmd(vma->vm_mm, old_addr); in move_page_tables()
538 move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr, in move_page_tables()
541 split_huge_pmd(vma, old_pmd, old_addr); in move_page_tables()
550 if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr, in move_page_tables()
557 move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, in move_page_tables()
563 return len + old_addr - old_end; /* how much done */ in move_page_tables()
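The last move_page_tables() match above is the "how much done" accounting: old_addr is advanced by every successfully moved extent, so len + old_addr - old_end is the number of bytes actually moved, and it equals len only when the loop reached old_end. A tiny check of that arithmetic with made-up values:

/*
 * The return value of move_page_tables(): bytes actually moved,
 * expressed as len + old_addr - old_end.  Values are illustrative.
 */
#include <assert.h>

int main(void)
{
    unsigned long len      = 0x800000;              /* requested length   */
    unsigned long old_addr = 0x7f0000000000UL;      /* start of old range */
    unsigned long old_end  = old_addr + len;

    /* Case 1: the loop covered everything, old_addr reached old_end. */
    old_addr = old_end;
    assert(len + old_addr - old_end == len);

    /* Case 2: the loop stopped 0x200000 bytes short of old_end. */
    old_addr = old_end - 0x200000;
    assert(len + old_addr - old_end == 0x600000);

    return 0;
}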
567 unsigned long old_addr, unsigned long old_len, in move_vma() argument
595 if (vma->vm_start != old_addr) in move_vma()
596 err = vma->vm_ops->may_split(vma, old_addr); in move_vma()
597 if (!err && vma->vm_end != old_addr + old_len) in move_vma()
598 err = vma->vm_ops->may_split(vma, old_addr + old_len); in move_vma()
610 err = ksm_madvise(vma, old_addr, old_addr + old_len, in move_vma()
620 new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); in move_vma()
629 moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, in move_vma()
643 move_page_tables(new_vma, new_addr, vma, old_addr, moved_len, in move_vma()
647 old_addr = new_addr; in move_vma()
661 if (old_addr > vma->vm_start && in move_vma()
662 old_addr + old_len < vma->vm_end) in move_vma()
690 if (new_vma != vma && vma->vm_start == old_addr && in move_vma()
691 vma->vm_end == (old_addr + old_len)) in move_vma()
698 if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) { in move_vma()
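In the move_vma() matches above, the new mapping's page offset (new_pgoff) is the VMA's existing vm_pgoff plus the whole pages between vm_start and old_addr, so a move that starts partway into the VMA keeps the new range backed by the same file pages. A small check of that arithmetic with illustrative addresses and an assumed 4 KiB PAGE_SHIFT:

/*
 * Sketch of the new_pgoff computation in move_vma():
 * new_pgoff = vm_pgoff + ((old_addr - vm_start) >> PAGE_SHIFT).
 * All values below are made up for illustration.
 */
#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
    unsigned long vm_start = 0x7f0000000000UL;
    unsigned long vm_pgoff = 16;                       /* file page backing vm_start */
    unsigned long old_addr = vm_start + 8 * PAGE_SIZE; /* move begins 8 pages in     */

    unsigned long new_pgoff = vm_pgoff + ((old_addr - vm_start) >> PAGE_SHIFT);

    assert(new_pgoff == 24);
    return 0;
}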