/linux/arch/powerpc/mm/book3s64/

radix_hugetlbpage.c
    16  radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);  in radix__flush_hugetlb_page()
    25  radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);  in radix__local_flush_hugetlb_page()
    39  radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize);  in radix__flush_hugetlb_tlb_range()
    41  radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);  in radix__flush_hugetlb_tlb_range()
   103  struct mm_struct *mm = vma->vm_mm;  in radix__huge_ptep_modify_prot_commit()
   113  set_huge_pte_at(vma->vm_mm, addr, ptep, pte);  in radix__huge_ptep_modify_prot_commit()
pgtable.c
    44  assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));  in pmdp_set_access_flags()
    61  return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);  in pmdp_test_and_clear_young()
   118  old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);  in pmdp_invalidate()
   130  pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);  in pmdp_huge_get_and_clear_full()
   447  pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);  in ptep_modify_prot_start()
   459  set_pte_at(vma->vm_mm, addr, ptep, pte);  in ptep_modify_prot_commit()
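The last two pgtable.c hits are the two halves of the protection-change protocol: ptep_modify_prot_start() transparently invalidates the PTE, ptep_modify_prot_commit() installs the new one. A minimal sketch of a caller, following the generic helpers in include/linux/pgtable.h; the wrapper name change_prot_one_pte() is hypothetical:

    #include <linux/mm.h>
    #include <linux/pgtable.h>

    /* Hypothetical wrapper: change the protection of one mapped PTE. */
    static void change_prot_one_pte(struct vm_area_struct *vma,
                                    unsigned long addr, pte_t *ptep,
                                    pgprot_t newprot)
    {
            /* Atomically fetch the old PTE and mark it invalid in vma->vm_mm. */
            pte_t oldpte = ptep_modify_prot_start(vma, addr, ptep);
            pte_t newpte = pte_modify(oldpte, newprot);

            /* Install the new PTE; arch code may flush or log the change. */
            ptep_modify_prot_commit(vma, addr, ptep, oldpte, newpte);
    }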
/linux/arch/mips/mm/

tlb-r3k.c
    73  struct mm_struct *mm = vma->vm_mm;  in local_flush_tlb_range()
   154  if (cpu_context(cpu, vma->vm_mm) != 0) {  in local_flush_tlb_page()
   159  printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);  in local_flush_tlb_page()
   161  newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;  in local_flush_tlb_page()
   190  if (current->active_mm != vma->vm_mm)  in __update_tlb()
   196  if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {  in __update_tlb()
   198  (cpu_context(cpu, vma->vm_mm)), pid);  in __update_tlb()
tlb-r4k.c
   109  struct mm_struct *mm = vma->vm_mm;  in local_flush_tlb_range()
   215  if (cpu_context(cpu, vma->vm_mm) != 0) {  in local_flush_tlb_page()
   227  write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));  in local_flush_tlb_page()
   229  write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));  in local_flush_tlb_page()
   306  if (current->active_mm != vma->vm_mm)  in __update_tlb()
   319  pgdp = pgd_offset(vma->vm_mm, address);  in __update_tlb()
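Both MIPS files gate their per-page flush on cpu_context(): if vma->vm_mm never received an ASID on this CPU, the TLB cannot hold stale entries for it and the flush is skipped. A condensed sketch of that shape; tlb_probe_and_erase() is a hypothetical stand-in for the revision-specific probe/write sequence:

    #include <linux/mm.h>
    #include <linux/smp.h>

    /* Hypothetical helper: probe the TLB for (vaddr | asid) and write an
     * invalid entry over any match. */
    void tlb_probe_and_erase(unsigned long vaddr, unsigned long asid);

    void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
    {
            int cpu = smp_processor_id();

            /* cpu_context() == 0: this mm never ran here, nothing cached. */
            if (cpu_context(cpu, vma->vm_mm) != 0) {
                    unsigned long flags;
                    unsigned long newpid = cpu_asid(cpu, vma->vm_mm);

                    local_irq_save(flags);
                    tlb_probe_and_erase(page & PAGE_MASK, newpid);
                    local_irq_restore(flags);
            }
    }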
/linux/Documentation/translations/zh_CN/core-api/

cachetlb.rst  (zh_CN translation of Documentation/core-api/cachetlb.rst; snippets rendered here in English)
    57  This interface must make sure that any previous page table modifications for the address space 'vma->vm_mm' in the range 'start' to 'end-1' ...
    70  ... the backing structure used to track a process's mmap regions; the address space is available via vma->vm_mm. Also, ...
    75  ... page table modifications for 'vma->vm_mm' are visible to the cpu. That is, after running, there will be no
    76  entries in the TLB for 'vma->vm_mm' for the virtual address 'addr'.
    84  ... in the software page tables there now exists a translation at virtual address 'address' of address space 'vma->vm_mm' ...
   140  ... afterwards, there will be no entries in the cache for 'vma->vm_mm' for virtual addresses in the range 'start' to 'end-1' ...
   153  ... available via vma->vm_mm. Also, one may test (vma->vm_flags & ...
   160  ... after running, there will be no entries in the cache for 'vma->vm_mm' for the virtual address 'addr' ...
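The guarantee those lines describe for flush_tlb_page() is clearest in an SMP implementation that broadcasts only to CPUs that may cache vma->vm_mm. A sketch modelled on arch/arm/kernel/smp_tlb.c (listed further down); the A15-erratum hook and the non-broadcast fast path are omitted:

    #include <linux/mm.h>
    #include <linux/smp.h>

    struct tlb_args {
            struct vm_area_struct *ta_vma;
            unsigned long ta_start;
    };

    static void ipi_flush_tlb_page(void *arg)
    {
            struct tlb_args *ta = arg;

            local_flush_tlb_page(ta->ta_vma, ta->ta_start);
    }

    void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
    {
            struct tlb_args ta = { .ta_vma = vma, .ta_start = uaddr };

            /* Only CPUs in mm_cpumask(vma->vm_mm) can hold stale entries. */
            on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
    }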
/linux/arch/sh/mm/

tlbflush_32.c
    19  if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {  in local_flush_tlb_page()
    24  asid = cpu_asid(cpu, vma->vm_mm);  in local_flush_tlb_page()
    28  if (vma->vm_mm != current->mm) {  in local_flush_tlb_page()
    42  struct mm_struct *mm = vma->vm_mm;  in local_flush_tlb_range()
cache-sh4.c
   221  if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)  in sh4_flush_cache_page()
   224  pmd = pmd_off(vma->vm_mm, address);  in sh4_flush_cache_page()
   231  if ((vma->vm_mm == current->active_mm))  in sh4_flush_cache_page()
   282  if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)  in sh4_flush_cache_range()
/linux/mm/

memory.c
  1636  tlb_gather_mmu(&tlb, vma->vm_mm);  in zap_page_range()
  1637  update_hiwater_rss(vma->vm_mm);  in zap_page_range()
  1663  tlb_gather_mmu(&tlb, vma->vm_mm);  in zap_page_range_single()
  1664  update_hiwater_rss(vma->vm_mm);  in zap_page_range_single()
  1756  struct mm_struct *mm = vma->vm_mm;  in insert_page()
  2020  struct mm_struct *mm = vma->vm_mm;  in insert_pfn()
  2366  struct mm_struct *mm = vma->vm_mm;  in remap_pfn_range_notrack()
  2737  struct mm_struct *mm = vma->vm_mm;  in cow_user_page()
  2973  struct mm_struct *mm = vma->vm_mm;  in wp_page_copy()
  3880  mm_inc_nr_ptes(vma->vm_mm);  in deposit_prealloc_pte()
  [all …]
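The zap_page_range() hits at 1636-1637 bracket the standard unmap sequence: gather, account, unmap, flush. A condensed sketch of that function; the mmu-notifier bracketing and lru_add_drain() call are omitted, and unmap_single_vma() is file-local to memory.c:

    #include <linux/mm.h>
    #include <asm/tlb.h>

    void zap_page_range(struct vm_area_struct *vma, unsigned long start,
                        unsigned long size)
    {
            struct mmu_gather tlb;

            tlb_gather_mmu(&tlb, vma->vm_mm);   /* start batching flushes */
            update_hiwater_rss(vma->vm_mm);     /* record peak RSS first */
            unmap_single_vma(&tlb, vma, start, start + size, NULL);
            tlb_finish_mmu(&tlb);               /* one TLB flush + page free */
    }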
pgtable-generic.c
    70  set_pte_at(vma->vm_mm, address, ptep, entry);  in ptep_set_access_flags()
    93  struct mm_struct *mm = (vma)->vm_mm;  in ptep_clear_flush()
   112  set_pmd_at(vma->vm_mm, address, pmdp, entry);  in pmdp_set_access_flags()
   140  pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);  in pmdp_huge_clear_flush()
   153  pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);  in pudp_huge_clear_flush()
   215  pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);  in pmdp_collapse_flush()
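The hit at 93 sits in the generic ptep_clear_flush(), which is short enough to quote almost whole: the mm supplies the page table, but the flush is per-VMA, which is why the function needs both. Comments added:

    #include <linux/mm.h>
    #include <linux/pgtable.h>

    pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
                           pte_t *ptep)
    {
            struct mm_struct *mm = (vma)->vm_mm;
            pte_t pte;

            pte = ptep_get_and_clear(mm, address, ptep);
            /* Only a PTE the hardware could have cached needs a flush. */
            if (pte_accessible(mm, pte))
                    flush_tlb_page(vma, address);
            return pte;
    }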
huge_memory.c
   642  pte_free(vma->vm_mm, pgtable);  in __do_huge_pmd_anonymous_page()
   656  mm_inc_nr_ptes(vma->vm_mm);  in __do_huge_pmd_anonymous_page()
   667  pte_free(vma->vm_mm, pgtable);  in __do_huge_pmd_anonymous_page()
   748  pte_free(vma->vm_mm, pgtable);  in do_huge_pmd_anonymous_page()
   758  pte_free(vma->vm_mm, pgtable);  in do_huge_pmd_anonymous_page()
   761  pte_free(vma->vm_mm, pgtable);  in do_huge_pmd_anonymous_page()
   772  pte_free(vma->vm_mm, pgtable);  in do_huge_pmd_anonymous_page()
  1834  ptl = pmd_lock(vma->vm_mm, pmd);  in __pmd_trans_huge_lock()
  1852  ptl = pud_lock(vma->vm_mm, pud);  in __pud_trans_huge_lock()
  1909  ptl = pud_lock(vma->vm_mm, pud);  in __split_huge_pud()
  [all …]
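The pmd_lock() hit at 1834 is the heart of __pmd_trans_huge_lock(): take the split page-table lock for this pmd in vma->vm_mm and hand it to the caller only if the entry really is huge. Close to the upstream code, comments added:

    #include <linux/mm.h>
    #include <linux/huge_mm.h>

    spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
    {
            spinlock_t *ptl;

            ptl = pmd_lock(vma->vm_mm, pmd);
            if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
                       pmd_devmap(*pmd)))
                    return ptl;     /* caller now owns the lock */
            spin_unlock(ptl);       /* not huge: caller gets NULL */
            return NULL;
    }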
mremap.c
   139  struct mm_struct *mm = vma->vm_mm;  in move_ptes()
   175  flush_tlb_batched_pending(vma->vm_mm);  in move_ptes()
   227  struct mm_struct *mm = vma->vm_mm;  in move_normal_pmd()
   262  old_ptl = pmd_lock(vma->vm_mm, old_pmd);  in move_normal_pmd()
   295  struct mm_struct *mm = vma->vm_mm;  in move_normal_pud()
   311  old_ptl = pud_lock(vma->vm_mm, old_pud);  in move_normal_pud()
   344  struct mm_struct *mm = vma->vm_mm;  in move_huge_pud()
   358  old_ptl = pud_lock(vma->vm_mm, old_pud);  in move_huge_pud()
   508  old_pud = get_old_pud(vma->vm_mm, old_addr);  in move_page_tables()
   555  if (pte_alloc(new_vma->vm_mm, new_pmd))  in move_page_tables()
  [all …]
madvise.c
    73  struct mm_struct *mm = vma->vm_mm;  in madvise_behavior()
   203  orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);  in swapin_walk_pmd_entry()
   264  struct mm_struct *mm = vma->vm_mm;  in madvise_willneed()
   271  walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);  in madvise_willneed()
   503  struct mm_struct *mm = vma->vm_mm;  in madvise_cold()
   553  struct mm_struct *mm = vma->vm_mm;  in madvise_pageout()
   711  struct mm_struct *mm = vma->vm_mm;  in madvise_free_single_vma()
   734  walk_page_range(vma->vm_mm, range.start, range.end,  in madvise_free_single_vma()
   774  struct mm_struct *mm = vma->vm_mm;  in madvise_dontneed_free()
   833  struct mm_struct *mm = vma->vm_mm;  in madvise_populate()
  [all …]
/linux/arch/arc/mm/

tlb.c
   220  local_flush_tlb_mm(vma->vm_mm);  in local_flush_tlb_range()
   233  if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {  in local_flush_tlb_range()
   235  tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));  in local_flush_tlb_range()
   286  if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {  in local_flush_tlb_page()
   287  tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));  in local_flush_tlb_page()
   349  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);  in flush_tlb_page()
   361  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);  in flush_tlb_range()
   374  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);  in flush_pmd_tlb_range()
   423  if (current->active_mm != vma->vm_mm)  in create_tlb()
   547  if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {  in local_flush_pmd_tlb_range()
  [all …]
/linux/arch/csky/kernel/

vdso.c
   101  if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))  in arch_vma_name()
   103  if (vma->vm_mm && (vma->vm_start ==  in arch_vma_name()
   104  (long)vma->vm_mm->context.vdso + PAGE_SIZE))  in arch_vma_name()
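Reconstructed from those three hits, csky's arch_vma_name() identifies the vDSO mappings by comparing vma->vm_start against the address stashed in the mm's context. The returned labels below are assumptions based on the usual convention, not quoted from the file:

    #include <linux/mm.h>

    const char *arch_vma_name(struct vm_area_struct *vma)
    {
            /* Kernel threads have no mm; guard before dereferencing. */
            if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                    return "[vdso]";
            if (vma->vm_mm && vma->vm_start ==
                (long)vma->vm_mm->context.vdso + PAGE_SIZE)
                    return "[vdso_data]";
            return NULL;
    }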
/linux/arch/arm/mm/

fault-armv.c
    57  set_pte_at(vma->vm_mm, address, ptep, entry);  in do_adjust_pte()
    99  pgd = pgd_offset(vma->vm_mm, address);  in adjust_pte()
   120  ptl = pte_lockptr(vma->vm_mm, pmd);  in adjust_pte()
   136  struct mm_struct *mm = vma->vm_mm;  in make_coherent()
   156  if (mpnt->vm_mm != mm || mpnt == vma)  in make_coherent()
/linux/include/linux/

khugepaged.h
    59  if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))  in khugepaged_enter()
    64  !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))  in khugepaged_enter()
    65  if (__khugepaged_enter(vma->vm_mm))  in khugepaged_enter()
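Those three lines are the skeleton of khugepaged_enter(): register vma->vm_mm with khugepaged once, unless THP is disabled for it. A trimmed sketch; the middle policy test is reduced here to the VM_HUGEPAGE case, while the real header also honors shmem mappings and the global 'always' mode:

    #include <linux/mm.h>
    #include <linux/khugepaged.h>

    static inline int khugepaged_enter(struct vm_area_struct *vma,
                                       unsigned long vm_flags)
    {
            /* Already registered? MMF_VM_HUGEPAGE is set once per mm. */
            if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
                    if ((vm_flags & VM_HUGEPAGE) &&
                        !(vm_flags & VM_NOHUGEPAGE) &&
                        !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
                            if (__khugepaged_enter(vma->vm_mm))
                                    return -ENOMEM;
            return 0;
    }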
mmu_notifier.h
   546  __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
   559  __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
   572  __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
   583  __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
   591  struct mm_struct *___mm = (__vma)->vm_mm; \
   604  struct mm_struct *___mm = (__vma)->vm_mm; \
   617  struct mm_struct *___mm = (__vma)->vm_mm; \
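Each of these hits sits inside a *_notify() wrapper macro that pairs a primary-MMU operation with the matching notifier call on the same vma->vm_mm, so secondary MMUs (KVM and friends) stay coherent. The wrapper behind the hit at 546, reconstructed with comments added:

    #define ptep_clear_flush_young_notify(__vma, __address, __ptep)        \
    ({                                                                     \
            int __young;                                                   \
            struct vm_area_struct *___vma = __vma;                         \
            unsigned long ___address = __address;                          \
            /* Age the hardware PTE first... */                            \
            __young = ptep_clear_flush_young(___vma, ___address, __ptep);  \
            /* ...then let secondary MMUs age their copy as well. */       \
            __young |= mmu_notifier_clear_flush_young(___vma->vm_mm,       \
                                                      ___address,          \
                                                      ___address +         \
                                                              PAGE_SIZE);  \
            __young;                                                       \
    })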
/linux/arch/riscv/mm/

tlbflush.c
    79  __sbi_tlb_flush_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);  in flush_tlb_page()
    85  __sbi_tlb_flush_range(vma->vm_mm, start, end - start, PAGE_SIZE);  in flush_tlb_range()
    91  __sbi_tlb_flush_range(vma->vm_mm, start, end - start, PMD_SIZE);  in flush_pmd_tlb_range()
/linux/arch/arm/kernel/

smp_tlb.c
   202  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,  in flush_tlb_page()
   206  broadcast_tlb_mm_a15_erratum(vma->vm_mm);  in flush_tlb_page()
   228  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,  in flush_tlb_range()
   232  broadcast_tlb_mm_a15_erratum(vma->vm_mm);  in flush_tlb_range()
/linux/arch/m68k/include/asm/

tlbflush.h
    86  if (vma->vm_mm == current->active_mm)  in flush_tlb_page()
    93  if (vma->vm_mm == current->active_mm)  in flush_tlb_range()
   173  sun3_put_context(vma->vm_mm->context);  in flush_tlb_page()
   190  struct mm_struct *mm = vma->vm_mm;  in flush_tlb_range()
/linux/arch/s390/include/asm/

hugetlb.h
    56  huge_ptep_get_and_clear(vma->vm_mm, address, ptep);  in huge_ptep_clear_flush()
    65  huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);  in huge_ptep_set_access_flags()
    66  set_huge_pte_at(vma->vm_mm, addr, ptep, pte);  in huge_ptep_set_access_flags()
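Hits 65-66 show the s390 idiom for updating a huge PTE's access flags: with no in-place update available, the entry is cleared and then re-set in vma->vm_mm. A sketch close to the upstream inline helper:

    #include <linux/mm.h>
    #include <linux/hugetlb.h>

    static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                                 unsigned long addr, pte_t *ptep,
                                                 pte_t pte, int dirty)
    {
            int changed = !pte_same(huge_ptep_get(ptep), pte);

            if (changed) {
                    /* Invalidate first so concurrent walkers never see a
                     * half-updated entry, then install the new PTE. */
                    huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
                    set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
            }
            return changed;
    }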
/linux/mm/damon/

paddr.c
    31  damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);  in __damon_pa_mkold()
    33  damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);  in __damon_pa_mkold()
   109  mmu_notifier_test_young(vma->vm_mm, addr);  in __damon_pa_young()
   114  mmu_notifier_test_young(vma->vm_mm, addr);  in __damon_pa_young()
/linux/arch/mips/kernel/

smp.c
   558  struct mm_struct *mm = vma->vm_mm;  in flush_tlb_range()
   638  write_c0_memorymapid(cpu_asid(0, vma->vm_mm));  in flush_tlb_page()
   645  } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||  in flush_tlb_page()
   646  (current->mm != vma->vm_mm)) {  in flush_tlb_page()
   664  if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))  in flush_tlb_page()
   665  set_cpu_context(cpu, vma->vm_mm, 1);  in flush_tlb_page()
/linux/arch/parisc/include/asm/

tlbflush.h
    20  __flush_tlb_range((vma)->vm_mm->context, start, end)
    67  purge_tlb_entries(vma->vm_mm, addr);  in flush_tlb_page()
/linux/arch/hexagon/mm/

vm_tlb.c
    28  struct mm_struct *mm = vma->vm_mm;  in flush_tlb_range()
    68  struct mm_struct *mm = vma->vm_mm;  in flush_tlb_page()