Cross-reference search results for the identifier "vma" in the Linux source tree. Each entry lists a file, then one hit per line: source line number, the matching line, and the enclosing function; "(local)" and "(argument)" mark hits on local-variable declarations and macro or function parameters, and "[all …]" marks a truncated hit list.

/linux/drivers/gpu/drm/i915/

i915_vma.c
    63  vma->node.start, vma->node.size, reason);  in vma_print_allocator()
    69  vma->node.start, vma->node.size, reason, buf);  in vma_print_allocator()
   209  list_add(&vma->obj_link, &obj->vma.list);  in vma_create()
   299  struct i915_vma *vma = vw->vma;  in __vma_bind() (local)
   377  GEM_BUG_ON(vma->size > vma->node.size);  in i915_vma_bind()
   403  work->vma = vma;  in i915_vma_bind()
   431  vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);  in i915_vma_bind()
   814  err = vma->ops->set_pages(vma);  in vma_get_pages()
   836  vma->ops->clear_pages(vma);  in __vma_put_pages()
   983  vma->obj ? vma->obj->cache_level : 0,  in i915_vma_pin_ww()
  [all …]
i915_vma.h
   133  GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));  in i915_ggtt_offset()
   145  return vma;  in i915_vma_get()
   151  return vma;  in i915_vma_tryget()
   175  cmp = vma->ggtt_view.type;  in i915_vma_compare()
   226  return vma;  in __i915_vma_get()
   237  #define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)  (argument)
   289  atomic_inc(&vma->flags);  in __i915_vma_pin()
   296  atomic_dec(&vma->flags);  in __i915_vma_unpin()
   302  __i915_vma_unpin(vma);  in i915_vma_unpin()
   344  GEM_BUG_ON(!vma->pages);  in i915_vma_first_page()
  [all …]
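The __i915_vma_pin()/__i915_vma_unpin() hits show how i915 pin accounting works: a pin count kept in the atomic vma->flags, which the eviction paths below test via i915_vma_is_pinned() before touching a VMA. A minimal standalone model of that pattern, using C11 atomics in place of the kernel's atomic_t (names are illustrative, and the real field packs other flag bits above the count):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct pinned_vma {
    atomic_int flags;   /* stands in for vma->flags; here it only holds the pin count */
};

static void vma_pin(struct pinned_vma *vma)
{
    atomic_fetch_add(&vma->flags, 1);   /* mirrors atomic_inc(&vma->flags) */
}

static void vma_unpin(struct pinned_vma *vma)
{
    int old = atomic_fetch_sub(&vma->flags, 1);

    assert(old > 0);    /* an unbalanced unpin is a bug */
}

static bool vma_is_pinned(struct pinned_vma *vma)
{
    return atomic_load(&vma->flags) > 0;    /* nonzero count: do not evict */
}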
i915_gem_evict.c
    54  struct i915_vma *vma,  in mark_free() (argument)
    58  if (i915_vma_is_pinned(vma))  in mark_free()
    67  if (i915_vma_is_active(vma))  in defer_evict()
   167  active = vma;  in i915_gem_evict_something()
   226  __i915_vma_pin(vma);  in i915_gem_evict_something()
   234  __i915_vma_unpin(vma);  in i915_gem_evict_something()
   306  vma = container_of(node, typeof(*vma), node);  in i915_gem_evict_for_node()
   345  __i915_vma_pin(vma);  in i915_gem_evict_for_node()
   350  __i915_vma_unpin(vma);  in i915_gem_evict_for_node()
   396  __i915_vma_pin(vma);  in i915_gem_evict_vm()
  [all …]
/linux/mm/

mmap.c
    184  vma->vm_ops->close(vma);  in remove_vma()
    411  vma = vma->vm_next;  in validate_mm()
    623  for (vma = vma->vm_next; vma; vma = vma->vm_next) {  in count_vma_pages_range()
    842  uprobe_munmap(vma, vma->vm_start, vma->vm_end);  in __vma_adjust()
   2628  vma = remove_vma(vma);  in remove_vma_list()
   2672  vma = vma->vm_next;  in detach_vmas_to_be_unmapped()
   3181  vma = remove_vma(vma);  in exit_mmap()
   3590  for (vma = mm->mmap; vma; vma = vma->vm_next) {  in mm_take_all_locks()
   3598  for (vma = mm->mmap; vma; vma = vma->vm_next) {  in mm_take_all_locks()
   3606  for (vma = mm->mmap; vma; vma = vma->vm_next) {  in mm_take_all_locks()
  [all …]
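Almost every mmap.c hit walks the per-mm VMA chain the same way: start at mm->mmap and follow vm_next until NULL. A toy restatement of that walk; the struct fields are cut down to what the loop needs and match the v5.x kernels indexed here (v6.1 replaced the linked list with a maple tree):

/* minimal stand-ins for the kernel structures */
struct vm_area_struct {
    unsigned long vm_start, vm_end;     /* [vm_start, vm_end) in bytes */
    struct vm_area_struct *vm_next;     /* next VMA, sorted by address */
};

struct mm_struct {
    struct vm_area_struct *mmap;        /* head of the VMA list */
};

/* sum the mapped bytes, in the style of count_vma_pages_range() */
static unsigned long mapped_bytes(const struct mm_struct *mm)
{
    unsigned long bytes = 0;
    const struct vm_area_struct *vma;

    for (vma = mm->mmap; vma; vma = vma->vm_next)
        bytes += vma->vm_end - vma->vm_start;
    return bytes;
}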
nommu.c
    102  if (vma)  in kobjsize()
    103  return vma->vm_end - vma->vm_start;  in kobjsize()
    654  if (vma->vm_ops && vma->vm_ops->close)  in delete_vma()
    655  vma->vm_ops->close(vma);  in delete_vma()
    677  for (vma = mm->mmap; vma; vma = vma->vm_next) {  in find_vma()
    726  for (vma = mm->mmap; vma; vma = vma->vm_next) {  in find_vma_exact()
    944  ret = call_mmap(vma->vm_file, vma);  in do_mmap_shared_file()
    975  ret = call_mmap(vma->vm_file, vma);  in do_mmap_private()
   1472  vma = vma->vm_next;  in do_munmap()
   1583  vma->vm_end = vma->vm_start + new_len;  in do_mremap()
  [all …]
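On !MMU kernels there is no tree-backed lookup structure, so the find_vma() hits above are a plain linear scan over the same list. A sketch reusing the toy types from the previous block (an illustration, not the kernel function itself):

#include <stddef.h>

/* linear-scan lookup, as in nommu.c's find_vma() */
static struct vm_area_struct *find_vma_model(struct mm_struct *mm,
                                             unsigned long addr)
{
    struct vm_area_struct *vma;

    for (vma = mm->mmap; vma; vma = vma->vm_next) {
        if (vma->vm_start <= addr && addr < vma->vm_end)
            return vma;
    }
    return NULL;    /* addr is not mapped */
}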
mremap.c
    511  new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);  in move_page_tables()
    532  new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);  in move_page_tables()
    594  if (vma->vm_ops && vma->vm_ops->may_split) {  in move_vma()
    596  err = vma->vm_ops->may_split(vma, old_addr);  in move_vma()
    633  } else if (vma->vm_ops && vma->vm_ops->mremap) {  in move_vma()
    660  excess = vma->vm_end - vma->vm_start - old_len;  in move_vma()
    690  if (new_vma != vma && vma->vm_start == old_addr &&  in move_vma()
    730  if (!vma)  in vma_to_resize()
    779  return vma;  in vma_to_resize()
    946  if (!vma || vma->vm_start > addr) {  in SYSCALL_DEFINE5()
  [all …]
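vma_to_resize() and move_vma() are the kernel half of mremap(2). From userspace the whole path is driven by one call; a small runnable Linux demo that grows an anonymous mapping and lets the kernel relocate it if it cannot expand in place:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t old_len = 4096, new_len = 8 * 4096;
    unsigned char *p, *q;

    p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }
    memset(p, 0x5a, old_len);

    /* MREMAP_MAYMOVE permits the move_vma() path seen above */
    q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
    if (q == MAP_FAILED) { perror("mremap"); return 1; }

    printf("old=%p new=%p contents preserved=%d\n",
           (void *)p, (void *)q, q[100] == 0x5a);
    munmap(q, new_len);
    return 0;
}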
madvise.c
    100  if (vma->vm_file || vma->vm_flags & VM_SHARED) {  in madvise_behavior()
    134  *prev = vma;  in madvise_behavior()
    140  vma->vm_file, pgoff, vma_policy(vma),  in madvise_behavior()
    143  vma = *prev;  in madvise_behavior()
    147  *prev = vma;  in madvise_behavior()
    316  struct vm_area_struct *vma = walk->vma;  in madvise_cold_or_pageout_pte_range() (local)
    577  struct vm_area_struct *vma = walk->vma;  in madvise_free_pte_range() (local)
    785  if (!vma)  in madvise_dontneed_free()
    845  if (!vma || start >= vma->vm_end) {  in madvise_populate()
    847  if (!vma || start < vma->vm_start)  in madvise_populate()
  [all …]
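madvise_dontneed_free() above implements the MADV_DONTNEED case of madvise(2): the pages backing the range are dropped, and anonymous memory reads back as zeroes afterwards. A runnable Linux demo:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;
    unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }

    memset(p, 0xff, len);
    if (madvise(p, len, MADV_DONTNEED)) { perror("madvise"); return 1; }

    /* the old contents are gone; the next fault maps a zero page */
    printf("p[0] after MADV_DONTNEED: %u\n", p[0]);
    munmap(p, len);
    return 0;
}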
mprotect.c
    256  vma, vma->vm_mm, addr, end);  in change_pmd_range()
    421  *pprev = vma;  in mprotect_fixup()
    466  vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),  in mprotect_fixup()
    469  vma = *pprev;  in mprotect_fixup()
    474  *pprev = vma;  in mprotect_fixup()
    497  change_protection(vma, start, end, vma->vm_page_prot,  in mprotect_fixup()
    564  if (!vma)  in do_mprotect_pkey()
    586  prev = vma;  in do_mprotect_pkey()
    633  if (vma->vm_ops && vma->vm_ops->mprotect) {  in do_mprotect_pkey()
    634  error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);  in do_mprotect_pkey()
  [all …]
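do_mprotect_pkey() and mprotect_fixup() above are what mprotect(2) runs in the kernel: split or merge VMAs as needed, then change_protection() rewrites the page table entries. The userspace side, runnable on Linux:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }

    p[0] = 'x';
    if (mprotect(p, len, PROT_READ)) { perror("mprotect"); return 1; }
    /* a write to p[0] here would raise SIGSEGV */
    if (mprotect(p, len, PROT_READ | PROT_WRITE)) { perror("mprotect"); return 1; }
    p[0] = 'y';

    printf("p[0] = %c\n", p[0]);
    munmap(p, len);
    return 0;
}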
memory.c
   1614  for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)  in unmap_vmas()
   1639  for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)  in zap_page_range()
   2481  vm_len = vma->vm_end - vma->vm_start;  in vm_iomap_memory()
   2736  struct vm_area_struct *vma = vmf->vma;  in cow_user_page() (local)
   2880  struct vm_area_struct *vma = vmf->vma;  in fault_dirty_shared_page() (local)
   2934  struct vm_area_struct *vma = vmf->vma;  in wp_page_reuse() (local)
   2972  struct vm_area_struct *vma = vmf->vma;  in wp_page_copy() (local)
   4604  .vma = vma,  in __handle_mm_fault()
   5219  if (vma && vma->vm_file) {  in print_vma_addr()
   5230  vma->vm_end - vma->vm_start);  in print_vma_addr()
  [all …]
mlock.c
    508  vma_is_dax(vma) || vma_is_secretmem(vma))  in mlock_fixup()
    514  vma->vm_file, pgoff, vma_policy(vma),  in mlock_fixup()
    517  vma = *prev;  in mlock_fixup()
    556  *prev = vma;  in mlock_fixup()
    575  if (!vma || vma->vm_start > start)  in apply_vma_lock_flags()
    580  prev = vma;  in apply_vma_lock_flags()
    601  if (!vma || vma->vm_start != nstart) {  in apply_vma_lock_flags()
    629  for (; vma ; vma = vma->vm_next) {  in count_mm_mlocked_page_nr()
    641  count += vma->vm_end - vma->vm_start;  in count_mm_mlocked_page_nr()
    762  for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {  in apply_mlockall_flags()
  [all …]
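apply_vma_lock_flags() above backs mlock(2)/munlock(2), and the count_mm_mlocked_page_nr() hit shows locked ranges being accounted as plain vm_end - vm_start sums. A userspace demo (may need a raised RLIMIT_MEMLOCK):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }

    if (mlock(p, len)) { perror("mlock"); return 1; }
    /* pages are now resident and exempt from swap until munlock() */
    if (munlock(p, len)) { perror("munlock"); return 1; }

    munmap(p, len);
    puts("locked and unlocked one page");
    return 0;
}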
rmap.c
    149  avc->vma = vma;  in anon_vma_chain_link()
    778  .vma = vma,  in page_referenced_one()
    907  .vma = vma,  in page_mkclean_one()
    919  0, vma, vma->vm_mm, address,  in page_mkclean_one()
   1184  VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);  in page_add_new_anon_rmap()
   1399  .vma = vma,  in try_to_unmap_one()
   1699  .vma = vma,  in try_to_migrate_one()
   1979  .vma = vma,  in page_mlock_one()
   2054  .vma = vma,  in page_make_device_exclusive_one()
   2066  vma->vm_mm, address, min(vma->vm_end,  in page_make_device_exclusive_one()
  [all …]
huge_memory.c
     69  return transhuge_vma_enabled(vma, vma->vm_flags) && vma->vm_file &&  in file_thp_enabled()
    599  struct vm_area_struct *vma = vmf->vma;  in __do_huge_pmd_anonymous_page() (local)
    726  struct vm_area_struct *vma = vmf->vma;  in do_huge_pmd_anonymous_page() (local)
    850  struct vm_area_struct *vma = vmf->vma;  in vmf_insert_pfn_pmd_prot() (local)
    941  struct vm_area_struct *vma = vmf->vma;  in vmf_insert_pfn_pud_prot() (local)
   1285  struct vm_area_struct *vma = vmf->vma;  in do_huge_pmd_wp_page() (local)
   1291  VM_BUG_ON_VMA(!vma->anon_vma, vma);  in do_huge_pmd_wp_page()
   1426  struct vm_area_struct *vma = vmf->vma;  in do_huge_pmd_numa_page() (local)
   2959  if (!vma || addr < vma->vm_start)  in split_huge_pages_pid()
   3152  struct vm_area_struct *vma = pvmw->vma;  in set_pmd_migration_entry() (local)
  [all …]
/linux/drivers/gpu/drm/

drm_vm.c
    112  struct vm_area_struct *vma = vmf->vma;  in drm_vm_fault() (local)
    201  struct vm_area_struct *vma = vmf->vma;  in drm_vm_shm_fault() (local)
    240  vma->vm_start, vma->vm_end - vma->vm_start);  in drm_vm_shm_close()
    248  if (pt->vma == vma) {  in drm_vm_shm_close()
    395  vma->vm_start, vma->vm_end - vma->vm_start);  in drm_vm_open_locked()
    399  vma_entry->vma = vma;  in drm_vm_open_locked()
    421  vma->vm_start, vma->vm_end - vma->vm_start);  in drm_vm_close_locked()
    424  if (pt->vma == vma) {  in drm_vm_close_locked()
    470  vma->vm_start, vma->vm_end, vma->vm_pgoff);  in drm_mmap_dma()
    532  vma->vm_start, vma->vm_end, vma->vm_pgoff);  in drm_mmap_locked()
  [all …]
/linux/drivers/gpu/drm/i915/display/

intel_fb_pin.c
     28  struct i915_vma *vma;  in intel_pin_fb_obj_dpt() (local)
     41  vma = ERR_PTR(ret);  in intel_pin_fb_obj_dpt()
     46  if (IS_ERR(vma))  in intel_pin_fb_obj_dpt()
     59  vma = ERR_PTR(ret);  in intel_pin_fb_obj_dpt()
     67  i915_vma_get(vma);  in intel_pin_fb_obj_dpt()
     71  return vma;  in intel_pin_fb_obj_dpt()
    179  if (vma->fence)  in intel_pin_and_fence_fb_obj()
    199  return vma;  in intel_pin_and_fence_fb_obj()
    261  if (vma)  in intel_plane_unpin_fb()
    267  if (vma)  in intel_plane_unpin_fb()
  [all …]
/linux/drivers/gpu/drm/nouveau/

nouveau_vmm.c
     31  if (vma->mem) {  in nouveau_vma_unmap()
     32  nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);  in nouveau_vma_unmap()
     33  vma->mem = NULL;  in nouveau_vma_unmap()
     44  vma->mem = mem;  in nouveau_vma_map()
     55  return vma;  in nouveau_vma_find()
     65  if (vma && --vma->refs <= 0) {  in nouveau_vma_del()
     86  vma->refs++;  in nouveau_vma_new()
     90  if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL)))  in nouveau_vma_new()
     92  vma->vmm = vmm;  in nouveau_vma_new()
     93  vma->refs = 1;  in nouveau_vma_new()
  [all …]
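nouveau_vma_new()/nouveau_vma_del() manage VMA lifetime with a plain integer refcount: creation sets refs = 1, existing mappings found by nouveau_vma_find() get refs++, and the "--vma->refs <= 0" test tears the mapping down on the last put. A toy restatement (illustrative names; the real teardown also unmaps and releases the GPU address range):

#include <stdlib.h>

struct nv_vma_model {
    int refs;           /* 1 on creation, ++ per additional user */
    unsigned long addr; /* GPU virtual address (unused here) */
};

static void nv_vma_del(struct nv_vma_model **pvma)
{
    struct nv_vma_model *vma = *pvma;

    if (vma && --vma->refs <= 0) {
        /* last reference: the driver would unmap and free the range here */
        free(vma);
    }
    *pvma = NULL;
}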
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/

vmm.c
    870  if (addr >= vma->addr + vma->size)  in nvkm_vmm_node_search()
    931  if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))  in nvkm_vmm_node_split()
    954  vma->addr, (u64)vma->size,  in nvkm_vma_dump()
   1154  if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {  in nvkm_vmm_pfn_split_merge()
   1182  if (!vma->mapped || vma->memory)  in nvkm_vmm_pfn_unmap()
   1196  } while ((vma = node(vma, next)) && (start = vma->addr) < limit);  in nvkm_vmm_pfn_unmap()
   1254  if (!vma->mapref || vma->memory) {  in nvkm_vmm_pfn_map()
   1310  vma = node(vma, next);  in nvkm_vmm_pfn_map()
   1544  if (vma->mapref || !vma->sparse) {  in nvkm_vmm_put_locked()
   1585  if (vma->sparse && !vma->mapref) {  in nvkm_vmm_put_locked()
  [all …]
uvmm.c
    118  if (ret = -ENOENT, !vma || vma->addr != addr) {  in nvkm_uvmm_mthd_unmap()
    120  addr, vma ? vma->addr : ~0ULL);  in nvkm_uvmm_mthd_unmap()
    179  if (ret = -EINVAL, vma->mapped && !vma->memory) {  in nvkm_uvmm_mthd_map()
    185  if (addr + size > vma->addr + vma->size || vma->memory ||  in nvkm_uvmm_mthd_map()
    186  (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {  in nvkm_uvmm_mthd_map()
    189  !!vma->memory, vma->refd, vma->mapref,  in nvkm_uvmm_mthd_map()
    190  addr, size, vma->addr, (u64)vma->size);  in nvkm_uvmm_mthd_map()
    194  vma = nvkm_vmm_node_split(vmm, vma, addr, size);  in nvkm_uvmm_mthd_map()
    195  if (!vma) {  in nvkm_uvmm_mthd_map()
    237  if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {  in nvkm_uvmm_mthd_put()
  [all …]
/linux/include/linux/

userfaultfd_k.h
     78  return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;  in is_mergeable_vm_userfaultfd_ctx()
     94  return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);  in uffd_disable_huge_pmd_share()
     99  return vma->vm_flags & VM_UFFD_MISSING;  in userfaultfd_missing()
    102  static inline bool userfaultfd_wp(struct vm_area_struct *vma)  in userfaultfd_wp() (argument)
    104  return vma->vm_flags & VM_UFFD_WP;  in userfaultfd_wp()
    109  return vma->vm_flags & VM_UFFD_MINOR;  in userfaultfd_minor()
    115  return userfaultfd_wp(vma) && pte_uffd_wp(pte);  in userfaultfd_pte_wp()
    121  return userfaultfd_wp(vma) && pmd_uffd_wp(pmd);  in userfaultfd_huge_pmd_wp()
    126  return vma->vm_flags & __VM_UFFD_FLAGS;  in userfaultfd_armed()
    138  extern bool userfaultfd_remove(struct vm_area_struct *vma,
  [all …]
huge_mm.h
     17  struct vm_area_struct *vma);
    122  if (!vma_is_anonymous(vma)) {  in transhuge_vma_suitable()
    123  if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,  in transhuge_vma_suitable()
    128  if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)  in transhuge_vma_suitable()
    156  if (!transhuge_vma_enabled(vma, vma->vm_flags))  in __transparent_hugepage_enabled()
    159  if (vma_is_temporary_stack(vma))  in __transparent_hugepage_enabled()
    165  if (vma_is_dax(vma))  in __transparent_hugepage_enabled()
    237  struct vm_area_struct *vma)  in pmd_trans_huge_lock() (argument)
    245  struct vm_area_struct *vma)  in pud_trans_huge_lock() (argument)
    419  struct vm_area_struct *vma)  in pmd_trans_huge_lock() (argument)
  [all …]
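The transhuge_vma_suitable() hits spell out when a fault address may be served by a PMD-sized huge page: the PMD-aligned block around the address must lie wholly inside the VMA, and a file-backed VMA must additionally keep its file offset congruent with its start address per huge-page block. A standalone restatement with assumed x86-64 constants:

#include <stdbool.h>

#define PAGE_SHIFT      12
#define HPAGE_PMD_SIZE  (1UL << 21)                     /* 2 MiB */
#define HPAGE_PMD_NR    (HPAGE_PMD_SIZE >> PAGE_SHIFT)  /* 512 base pages */

struct thp_vma {
    unsigned long vm_start, vm_end; /* byte range of the mapping */
    unsigned long vm_pgoff;         /* file offset, in pages */
    bool anonymous;
};

static bool thp_suitable(const struct thp_vma *vma, unsigned long addr)
{
    unsigned long haddr = addr & ~(HPAGE_PMD_SIZE - 1);

    /* file-backed: start and file offset must line up per PMD block */
    if (!vma->anonymous &&
        ((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff) % HPAGE_PMD_NR)
        return false;

    /* the whole aligned block must fit inside the VMA */
    return haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end;
}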
/linux/drivers/gpu/drm/i915/selftests/

i915_gem_gtt.c
    463  __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),  in fill_hole()
    536  __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),  in fill_hole()
    566  __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),  in fill_hole()
   1284  list_add_tail(&vma->vm_link, &vma->vm->bound_list);  in track_vma_bind()
   1398  vma->node.start, vma->node.size,  in igt_gtt_reserve()
   1450  vma->node.start, vma->node.size,  in igt_gtt_reserve()
   1497  vma->node.start, vma->node.size,  in igt_gtt_reserve()
   1956  err = vma->ops->set_pages(vma);  in igt_cs_tlb()
   1997  vma->ops->clear_pages(vma);  in igt_cs_tlb()
   2012  err = vma->ops->set_pages(vma);  in igt_cs_tlb()
  [all …]
i915_vma.c
     72  return vma;  in checked_vma_instance()
     92  if (i915_vma_compare(vma, vma->vm,  in checked_vma_instance()
     93  i915_vma_is_ggtt(vma) ? &vma->ggtt_view : NULL)) {  in checked_vma_instance()
    103  return vma;  in checked_vma_instance()
    655  if (vma->node.size < vma->size) {  in igt_vma_rotate_remap()
    657  vma->size, vma->node.size);  in igt_vma_rotate_remap()
    751  if (vma->node.size < vma->size) {  in assert_pin()
    753  name, vma->size, vma->node.size);  in assert_pin()
    764  if (vma->pages == vma->obj->mm.pages) {  in assert_pin()
    776  if (vma->pages != vma->obj->mm.pages) {  in assert_pin()
  [all …]
/linux/drivers/gpu/drm/msm/

msm_gem_vma.c
     51  if (!vma->mapped)  in msm_gem_purge_vma()
     57  vma->mapped = false;  in msm_gem_purge_vma()
     65  vma->inuse--;  in msm_gem_unmap_vma()
     80  vma->inuse++;  in msm_gem_map_vma()
     82  if (vma->mapped)  in msm_gem_map_vma()
     85  vma->mapped = true;  in msm_gem_map_vma()
     93  vma->inuse--;  in msm_gem_map_vma()
    103  if (WARN_ON(vma->inuse > 0 || vma->mapped))  in msm_gem_close_vma()
    107  if (vma->iova)  in msm_gem_close_vma()
    111  vma->iova = 0;  in msm_gem_close_vma()
  [all …]
/linux/drivers/pci/

mmap.c
     23  struct vm_area_struct *vma,  in pci_mmap_page_range() (argument)
     31  vma->vm_pgoff -= start >> PAGE_SHIFT;  in pci_mmap_page_range()
     44  struct vm_area_struct *vma,  in pci_mmap_resource_range() (argument)
     51  if (vma->vm_pgoff + vma_pages(vma) > size)  in pci_mmap_resource_range()
     55  vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);  in pci_mmap_resource_range()
     57  vma->vm_page_prot = pgprot_device(vma->vm_page_prot);  in pci_mmap_resource_range()
     66  vma->vm_ops = &pci_phys_vm_ops;  in pci_mmap_resource_range()
     68  return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,  in pci_mmap_resource_range()
     69  vma->vm_end - vma->vm_start,  in pci_mmap_resource_range()
     70  vma->vm_page_prot);  in pci_mmap_resource_range()
  [all …]
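pci_mmap_resource_range() shows the canonical driver mmap pattern: bounds-check the user's requested window against the BAR, choose a device or write-combined page protection, and map the whole range in one io_remap_pfn_range() call. A condensed kernel-style sketch of the same steps (bar_pfn/bar_pages are stand-ins for values the real code derives from the pci_dev):

static int bar_mmap_sketch(struct vm_area_struct *vma,
                           unsigned long bar_pfn, unsigned long bar_pages,
                           bool write_combine)
{
    /* the requested window must fit inside the BAR */
    if (vma->vm_pgoff + vma_pages(vma) > bar_pages)
        return -EINVAL;

    vma->vm_page_prot = write_combine ?
        pgprot_writecombine(vma->vm_page_prot) :
        pgprot_device(vma->vm_page_prot);

    /* map the BAR's pages at the user-requested offset */
    return io_remap_pfn_range(vma, vma->vm_start,
                              bar_pfn + vma->vm_pgoff,
                              vma->vm_end - vma->vm_start,
                              vma->vm_page_prot);
}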
/linux/fs/proc/

task_mmu.c
    247  vma->vm_end >= vma->vm_mm->start_stack;  in is_stack()
    303  if (vma->vm_ops && vma->vm_ops->name) {  in show_map_vma()
    304  name = vma->vm_ops->name(vma);  in show_map_vma()
    480  struct vm_area_struct *vma = walk->vma;  in smaps_pte_hole() (local)
    506  struct vm_area_struct *vma = walk->vma;  in smaps_pte_entry() (local)
    546  struct vm_area_struct *vma = walk->vma;  in smaps_pmd_entry() (local)
    581  struct vm_area_struct *vma = walk->vma;  in smaps_pte_range() (local)
    693  struct vm_area_struct *vma = walk->vma;  in smaps_hugetlb_range() (local)
    870  for (vma = priv->mm->mmap; vma;) {  in show_smaps_rollup()
    936  vma = vma->vm_next;  in show_smaps_rollup()
  [all …]
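show_map_vma() above formats the per-VMA lines of /proc/<pid>/maps; the quickest way to see the VMA layout from userspace is simply to read that file back:

#include <stdio.h>

int main(void)
{
    char line[512];
    FILE *f = fopen("/proc/self/maps", "r");

    if (!f) { perror("fopen"); return 1; }
    /* each line: start-end perms offset dev inode [path] */
    while (fgets(line, sizeof(line), f))
        fputs(line, stdout);
    fclose(f);
    return 0;
}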
/linux/arch/x86/entry/vdso/

vma.c
    129  struct vm_area_struct *vma;  in vdso_join_timens() (local)
    133  for (vma = mm->mmap; vma; vma = vma->vm_next) {  in vdso_join_timens()
    134  unsigned long size = vma->vm_end - vma->vm_start;  in vdso_join_timens()
    137  zap_page_range(vma, vma->vm_start, size);  in vdso_join_timens()
    249  struct vm_area_struct *vma;  in map_vdso() (local)
    275  if (IS_ERR(vma)) {  in map_vdso()
    276  ret = PTR_ERR(vma);  in map_vdso()
    287  if (IS_ERR(vma)) {  in map_vdso()
    288  ret = PTR_ERR(vma);  in map_vdso()
    356  struct vm_area_struct *vma;  in map_vdso_once()
  [all …]