Lines matching refs:vma — references to the identifier "vma" in drivers/gpu/drm/i915/i915_vma.c; each entry gives the source line number, the matching line, and the enclosing function.

48 void i915_vma_free(struct i915_vma *vma)  in i915_vma_free()  argument
50 return kmem_cache_free(slab_vmas, vma); in i915_vma_free()
57 static void vma_print_allocator(struct i915_vma *vma, const char *reason) in vma_print_allocator() argument
61 if (!vma->node.stack) { in vma_print_allocator()
63 vma->node.start, vma->node.size, reason); in vma_print_allocator()
67 stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0); in vma_print_allocator()
69 vma->node.start, vma->node.size, reason, buf); in vma_print_allocator()
74 static void vma_print_allocator(struct i915_vma *vma, const char *reason) in vma_print_allocator() argument
101 struct i915_vma *vma; in vma_create() local
107 vma = i915_vma_alloc(); in vma_create()
108 if (vma == NULL) in vma_create()
111 kref_init(&vma->ref); in vma_create()
112 mutex_init(&vma->pages_mutex); in vma_create()
113 vma->vm = i915_vm_get(vm); in vma_create()
114 vma->ops = &vm->vma_ops; in vma_create()
115 vma->obj = obj; in vma_create()
116 vma->resv = obj->base.resv; in vma_create()
117 vma->size = obj->base.size; in vma_create()
118 vma->display_alignment = I915_GTT_MIN_ALIGNMENT; in vma_create()
120 i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0); in vma_create()
125 might_lock(&vma->active.mutex); in vma_create()
129 INIT_LIST_HEAD(&vma->closed_link); in vma_create()
132 vma->ggtt_view = *view; in vma_create()
138 vma->size = view->partial.size; in vma_create()
139 vma->size <<= PAGE_SHIFT; in vma_create()
140 GEM_BUG_ON(vma->size > obj->base.size); in vma_create()
142 vma->size = intel_rotation_info_size(&view->rotated); in vma_create()
143 vma->size <<= PAGE_SHIFT; in vma_create()
145 vma->size = intel_remapped_info_size(&view->remapped); in vma_create()
146 vma->size <<= PAGE_SHIFT; in vma_create()
150 if (unlikely(vma->size > vm->total)) in vma_create()
153 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE)); in vma_create()
155 spin_lock(&obj->vma.lock); in vma_create()
158 if (unlikely(overflows_type(vma->size, u32))) in vma_create()
161 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size, in vma_create()
164 if (unlikely(vma->fence_size < vma->size || /* overflow */ in vma_create()
165 vma->fence_size > vm->total)) in vma_create()
168 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT)); in vma_create()
170 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size, in vma_create()
173 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment)); in vma_create()
175 __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma)); in vma_create()
179 p = &obj->vma.tree.rb_node; in vma_create()
199 rb_link_node(&vma->obj_node, rb, p); in vma_create()
200 rb_insert_color(&vma->obj_node, &obj->vma.tree); in vma_create()
202 if (i915_vma_is_ggtt(vma)) in vma_create()
209 list_add(&vma->obj_link, &obj->vma.list); in vma_create()
211 list_add_tail(&vma->obj_link, &obj->vma.list); in vma_create()
213 spin_unlock(&obj->vma.lock); in vma_create()
215 return vma; in vma_create()
218 spin_unlock(&obj->vma.lock); in vma_create()
221 i915_vma_free(vma); in vma_create()
232 rb = obj->vma.tree.rb_node; in i915_vma_lookup()
234 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node); in i915_vma_lookup() local
237 cmp = i915_vma_compare(vma, vm, view); in i915_vma_lookup()
239 return vma; in i915_vma_lookup()
268 struct i915_vma *vma; in i915_vma_instance() local
273 spin_lock(&obj->vma.lock); in i915_vma_instance()
274 vma = i915_vma_lookup(obj, vm, view); in i915_vma_instance()
275 spin_unlock(&obj->vma.lock); in i915_vma_instance()
278 if (unlikely(!vma)) in i915_vma_instance()
279 vma = vma_create(obj, vm, view); in i915_vma_instance()
281 GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view)); in i915_vma_instance()
282 return vma; in i915_vma_instance()
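
The fragments from i915_vma_lookup() and i915_vma_instance() above show the intended entry point: callers never build a struct i915_vma by hand, they ask for the (obj, vm, view) instance and receive either the existing VMA or a freshly created one (or an ERR_PTR). A minimal caller sketch, assuming obj is a GEM object and ggtt is the device's struct i915_ggtt (both hypothetical names here):

    struct i915_vma *vma;
    int err;

    /* NULL view == default (whole-object) mapping */
    vma = i915_vma_instance(obj, &ggtt->vm, NULL);
    if (IS_ERR(vma))
            return PTR_ERR(vma);

    err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);  /* bind into the GGTT */
    if (err)
            return err;

    /* ... vma->node.start is the GGTT offset while pinned ... */
    i915_vma_unpin(vma);
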
289 struct i915_vma *vma; member
299 struct i915_vma *vma = vw->vma; in __vma_bind() local
301 vma->ops->bind_vma(vw->vm, &vw->stash, in __vma_bind()
302 vma, vw->cache_level, vw->flags); in __vma_bind()
338 int i915_vma_wait_for_bind(struct i915_vma *vma) in i915_vma_wait_for_bind() argument
342 if (rcu_access_pointer(vma->active.excl.fence)) { in i915_vma_wait_for_bind()
346 fence = dma_fence_get_rcu_safe(&vma->active.excl.fence); in i915_vma_wait_for_bind()
368 int i915_vma_bind(struct i915_vma *vma, in i915_vma_bind() argument
376 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); in i915_vma_bind()
377 GEM_BUG_ON(vma->size > vma->node.size); in i915_vma_bind()
379 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start, in i915_vma_bind()
380 vma->node.size, in i915_vma_bind()
381 vma->vm->total))) in i915_vma_bind()
390 vma_flags = atomic_read(&vma->flags); in i915_vma_bind()
397 GEM_BUG_ON(!vma->pages); in i915_vma_bind()
399 trace_i915_vma_bind(vma, bind_flags); in i915_vma_bind()
400 if (work && bind_flags & vma->vm->bind_async_flags) { in i915_vma_bind()
403 work->vma = vma; in i915_vma_bind()
416 prev = i915_active_set_exclusive(&vma->active, &work->base.dma); in i915_vma_bind()
426 if (vma->obj) { in i915_vma_bind()
427 __i915_gem_object_pin_pages(vma->obj); in i915_vma_bind()
428 work->pinned = i915_gem_object_get(vma->obj); in i915_vma_bind()
431 vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags); in i915_vma_bind()
434 atomic_or(bind_flags, &vma->flags); in i915_vma_bind()
438 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) in i915_vma_pin_iomap() argument
443 if (!i915_gem_object_is_lmem(vma->obj)) { in i915_vma_pin_iomap()
444 if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) { in i915_vma_pin_iomap()
450 GEM_BUG_ON(!i915_vma_is_ggtt(vma)); in i915_vma_pin_iomap()
451 GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)); in i915_vma_pin_iomap()
453 ptr = READ_ONCE(vma->iomap); in i915_vma_pin_iomap()
461 if (i915_gem_object_is_lmem(vma->obj)) in i915_vma_pin_iomap()
462 ptr = i915_gem_object_lmem_io_map(vma->obj, 0, in i915_vma_pin_iomap()
463 vma->obj->base.size); in i915_vma_pin_iomap()
465 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap, in i915_vma_pin_iomap()
466 vma->node.start, in i915_vma_pin_iomap()
467 vma->node.size); in i915_vma_pin_iomap()
473 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) { in i915_vma_pin_iomap()
475 ptr = vma->iomap; in i915_vma_pin_iomap()
479 __i915_vma_pin(vma); in i915_vma_pin_iomap()
481 err = i915_vma_pin_fence(vma); in i915_vma_pin_iomap()
485 i915_vma_set_ggtt_write(vma); in i915_vma_pin_iomap()
491 __i915_vma_unpin(vma); in i915_vma_pin_iomap()
496 void i915_vma_flush_writes(struct i915_vma *vma) in i915_vma_flush_writes() argument
498 if (i915_vma_unset_ggtt_write(vma)) in i915_vma_flush_writes()
499 intel_gt_flush_ggtt_writes(vma->vm->gt); in i915_vma_flush_writes()
502 void i915_vma_unpin_iomap(struct i915_vma *vma) in i915_vma_unpin_iomap() argument
504 GEM_BUG_ON(vma->iomap == NULL); in i915_vma_unpin_iomap()
506 i915_vma_flush_writes(vma); in i915_vma_unpin_iomap()
508 i915_vma_unpin_fence(vma); in i915_vma_unpin_iomap()
509 i915_vma_unpin(vma); in i915_vma_unpin_iomap()
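
Taken together, i915_vma_pin_iomap(), i915_vma_flush_writes() and i915_vma_unpin_iomap() above form the CPU-access bracket for a GGTT-bound (or lmem) buffer: pin_iomap() takes its own pin plus a fence, and unpin_iomap() flushes pending GGTT writes before dropping them again. A hedged usage sketch; the vma must already be bound with PIN_GLOBAL, and offset/value are placeholders:

    void __iomem *vaddr;

    vaddr = i915_vma_pin_iomap(vma);
    if (IS_ERR(vaddr))
            return PTR_ERR(vaddr);

    writel(value, vaddr + offset);      /* CPU writes go through the mapping */

    i915_vma_unpin_iomap(vma);          /* flushes GGTT writes, drops fence + pin */
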
514 struct i915_vma *vma; in i915_vma_unpin_and_release() local
517 vma = fetch_and_zero(p_vma); in i915_vma_unpin_and_release()
518 if (!vma) in i915_vma_unpin_and_release()
521 obj = vma->obj; in i915_vma_unpin_and_release()
524 i915_vma_unpin(vma); in i915_vma_unpin_and_release()
532 bool i915_vma_misplaced(const struct i915_vma *vma, in i915_vma_misplaced() argument
535 if (!drm_mm_node_allocated(&vma->node)) in i915_vma_misplaced()
538 if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma))) in i915_vma_misplaced()
541 if (vma->node.size < size) in i915_vma_misplaced()
545 if (alignment && !IS_ALIGNED(vma->node.start, alignment)) in i915_vma_misplaced()
548 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma)) in i915_vma_misplaced()
552 vma->node.start < (flags & PIN_OFFSET_MASK)) in i915_vma_misplaced()
556 vma->node.start != (flags & PIN_OFFSET_MASK)) in i915_vma_misplaced()
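
i915_vma_misplaced() above is the predicate callers use to decide whether an existing binding can be reused under new placement constraints. A sketch of the usual fix-up pattern for a GGTT pin (illustrative only; size, alignment, flags and ww come from the caller):

    if (i915_vma_misplaced(vma, size, alignment, flags)) {
            err = i915_vma_unbind(vma);         /* drop the stale binding */
            if (err)
                    return err;
    }

    err = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
    if (err)
            return err;
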
562 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) in __i915_vma_set_map_and_fenceable() argument
566 GEM_BUG_ON(!i915_vma_is_ggtt(vma)); in __i915_vma_set_map_and_fenceable()
567 GEM_BUG_ON(!vma->fence_size); in __i915_vma_set_map_and_fenceable()
569 fenceable = (vma->node.size >= vma->fence_size && in __i915_vma_set_map_and_fenceable()
570 IS_ALIGNED(vma->node.start, vma->fence_alignment)); in __i915_vma_set_map_and_fenceable()
572 mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end; in __i915_vma_set_map_and_fenceable()
575 set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); in __i915_vma_set_map_and_fenceable()
577 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); in __i915_vma_set_map_and_fenceable()
580 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color) in i915_gem_valid_gtt_space() argument
582 struct drm_mm_node *node = &vma->node; in i915_gem_valid_gtt_space()
592 if (!i915_vm_has_cache_coloring(vma->vm)) in i915_gem_valid_gtt_space()
627 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) in i915_vma_insert() argument
633 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); in i915_vma_insert()
634 GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); in i915_vma_insert()
636 size = max(size, vma->size); in i915_vma_insert()
637 alignment = max(alignment, vma->display_alignment); in i915_vma_insert()
639 size = max_t(typeof(size), size, vma->fence_size); in i915_vma_insert()
641 alignment, vma->fence_alignment); in i915_vma_insert()
651 end = vma->vm->total; in i915_vma_insert()
653 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end); in i915_vma_insert()
670 if (vma->obj && i915_vm_has_cache_coloring(vma->vm)) in i915_vma_insert()
671 color = vma->obj->cache_level; in i915_vma_insert()
679 ret = i915_gem_gtt_reserve(vma->vm, &vma->node, in i915_vma_insert()
694 vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { in i915_vma_insert()
702 rounddown_pow_of_two(vma->page_sizes.sg | in i915_vma_insert()
710 GEM_BUG_ON(i915_vma_is_ggtt(vma)); in i915_vma_insert()
714 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) in i915_vma_insert()
718 ret = i915_gem_gtt_insert(vma->vm, &vma->node, in i915_vma_insert()
724 GEM_BUG_ON(vma->node.start < start); in i915_vma_insert()
725 GEM_BUG_ON(vma->node.start + vma->node.size > end); in i915_vma_insert()
727 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); in i915_vma_insert()
728 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color)); in i915_vma_insert()
730 list_add_tail(&vma->vm_link, &vma->vm->bound_list); in i915_vma_insert()
736 i915_vma_detach(struct i915_vma *vma) in i915_vma_detach() argument
738 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); in i915_vma_detach()
739 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); in i915_vma_detach()
746 list_del(&vma->vm_link); in i915_vma_detach()
749 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags) in try_qad_pin() argument
754 bound = atomic_read(&vma->flags); in try_qad_pin()
766 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); in try_qad_pin()
775 mutex_lock(&vma->vm->mutex); in try_qad_pin()
786 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); in try_qad_pin()
787 mutex_unlock(&vma->vm->mutex); in try_qad_pin()
792 static int vma_get_pages(struct i915_vma *vma) in vma_get_pages() argument
797 if (atomic_add_unless(&vma->pages_count, 1, 0)) in vma_get_pages()
800 if (vma->obj) { in vma_get_pages()
801 err = i915_gem_object_pin_pages(vma->obj); in vma_get_pages()
808 if (mutex_lock_interruptible(&vma->pages_mutex)) { in vma_get_pages()
813 if (!atomic_read(&vma->pages_count)) { in vma_get_pages()
814 err = vma->ops->set_pages(vma); in vma_get_pages()
819 atomic_inc(&vma->pages_count); in vma_get_pages()
822 mutex_unlock(&vma->pages_mutex); in vma_get_pages()
825 __i915_gem_object_unpin_pages(vma->obj); in vma_get_pages()
830 static void __vma_put_pages(struct i915_vma *vma, unsigned int count) in __vma_put_pages() argument
833 mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING); in __vma_put_pages()
834 GEM_BUG_ON(atomic_read(&vma->pages_count) < count); in __vma_put_pages()
835 if (atomic_sub_return(count, &vma->pages_count) == 0) { in __vma_put_pages()
836 vma->ops->clear_pages(vma); in __vma_put_pages()
837 GEM_BUG_ON(vma->pages); in __vma_put_pages()
838 if (vma->obj) in __vma_put_pages()
839 i915_gem_object_unpin_pages(vma->obj); in __vma_put_pages()
841 mutex_unlock(&vma->pages_mutex); in __vma_put_pages()
844 static void vma_put_pages(struct i915_vma *vma) in vma_put_pages() argument
846 if (atomic_add_unless(&vma->pages_count, -1, 1)) in vma_put_pages()
849 __vma_put_pages(vma, 1); in vma_put_pages()
852 static void vma_unbind_pages(struct i915_vma *vma) in vma_unbind_pages() argument
856 lockdep_assert_held(&vma->vm->mutex); in vma_unbind_pages()
859 count = atomic_read(&vma->pages_count); in vma_unbind_pages()
863 __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS); in vma_unbind_pages()
866 int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, in i915_vma_pin_ww() argument
875 if (debug_locks && !WARN_ON(!ww) && vma->resv) in i915_vma_pin_ww()
876 assert_vma_held(vma); in i915_vma_pin_ww()
885 if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK)) in i915_vma_pin_ww()
888 err = vma_get_pages(vma); in i915_vma_pin_ww()
893 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); in i915_vma_pin_ww()
895 if (flags & vma->vm->bind_async_flags) { in i915_vma_pin_ww()
897 err = i915_vm_lock_objects(vma->vm, ww); in i915_vma_pin_ww()
907 work->vm = i915_vm_get(vma->vm); in i915_vma_pin_ww()
910 if (vma->vm->allocate_va_range) { in i915_vma_pin_ww()
911 err = i915_vm_alloc_pt_stash(vma->vm, in i915_vma_pin_ww()
913 vma->size); in i915_vma_pin_ww()
917 err = i915_vm_map_pt_stash(vma->vm, &work->stash); in i915_vma_pin_ww()
940 err = mutex_lock_interruptible_nested(&vma->vm->mutex, in i915_vma_pin_ww()
947 if (unlikely(i915_vma_is_closed(vma))) { in i915_vma_pin_ww()
952 bound = atomic_read(&vma->flags); in i915_vma_pin_ww()
964 __i915_vma_pin(vma); in i915_vma_pin_ww()
968 err = i915_active_acquire(&vma->active); in i915_vma_pin_ww()
973 err = i915_vma_insert(vma, size, alignment, flags); in i915_vma_pin_ww()
977 if (i915_is_ggtt(vma->vm)) in i915_vma_pin_ww()
978 __i915_vma_set_map_and_fenceable(vma); in i915_vma_pin_ww()
981 GEM_BUG_ON(!vma->pages); in i915_vma_pin_ww()
982 err = i915_vma_bind(vma, in i915_vma_pin_ww()
983 vma->obj ? vma->obj->cache_level : 0, in i915_vma_pin_ww()
990 atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count); in i915_vma_pin_ww()
991 list_move_tail(&vma->vm_link, &vma->vm->bound_list); in i915_vma_pin_ww()
993 __i915_vma_pin(vma); in i915_vma_pin_ww()
994 GEM_BUG_ON(!i915_vma_is_pinned(vma)); in i915_vma_pin_ww()
995 GEM_BUG_ON(!i915_vma_is_bound(vma, flags)); in i915_vma_pin_ww()
996 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); in i915_vma_pin_ww()
999 if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) { in i915_vma_pin_ww()
1000 i915_vma_detach(vma); in i915_vma_pin_ww()
1001 drm_mm_remove_node(&vma->node); in i915_vma_pin_ww()
1004 i915_active_release(&vma->active); in i915_vma_pin_ww()
1006 mutex_unlock(&vma->vm->mutex); in i915_vma_pin_ww()
1012 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); in i915_vma_pin_ww()
1013 vma_put_pages(vma); in i915_vma_pin_ww()
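
The i915_vma_pin_ww() fragments above expect the object to be locked under a ww acquire context whenever one is supplied (note the assert_vma_held() check near the top). A minimal sketch of the surrounding ww loop, assuming obj and vma already exist; the driver also has a convenience macro for this retry pattern:

    struct i915_gem_ww_ctx ww;
    int err;

    i915_gem_ww_ctx_init(&ww, true);            /* interruptible */
    retry:
    err = i915_gem_object_lock(obj, &ww);
    if (!err)
            err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
    if (err == -EDEADLK) {
            err = i915_gem_ww_ctx_backoff(&ww);
            if (!err)
                    goto retry;
    }
    i915_gem_ww_ctx_fini(&ww);
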
1028 int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, in i915_ggtt_pin() argument
1031 struct i915_address_space *vm = vma->vm; in i915_ggtt_pin()
1034 GEM_BUG_ON(!i915_vma_is_ggtt(vma)); in i915_ggtt_pin()
1037 WARN_ON(!ww && vma->resv && dma_resv_held(vma->resv)); in i915_ggtt_pin()
1042 err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL); in i915_ggtt_pin()
1044 err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL); in i915_ggtt_pin()
1047 err = i915_vma_wait_for_bind(vma); in i915_ggtt_pin()
1049 i915_vma_unpin(vma); in i915_ggtt_pin()
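
For GGTT-only users, i915_ggtt_pin() above is the convenience wrapper: it adds PIN_GLOBAL and waits for any asynchronous bind via i915_vma_wait_for_bind() before returning. A hedged sketch (ww may be NULL when the caller holds no ww context, as the fragment shows; PIN_HIGH is just an example flag):

    err = i915_ggtt_pin(vma, ww, 0, PIN_HIGH);  /* align = 0 */
    if (err)
            return err;

    /* ... use the binding ... */
    i915_vma_unpin(vma);
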
1063 static void __vma_close(struct i915_vma *vma, struct intel_gt *gt) in __vma_close() argument
1077 GEM_BUG_ON(i915_vma_is_closed(vma)); in __vma_close()
1078 list_add(&vma->closed_link, &gt->closed_vma); in __vma_close()
1081 void i915_vma_close(struct i915_vma *vma) in i915_vma_close() argument
1083 struct intel_gt *gt = vma->vm->gt; in i915_vma_close()
1086 if (i915_vma_is_ggtt(vma)) in i915_vma_close()
1089 GEM_BUG_ON(!atomic_read(&vma->open_count)); in i915_vma_close()
1090 if (atomic_dec_and_lock_irqsave(&vma->open_count, in i915_vma_close()
1093 __vma_close(vma, gt); in i915_vma_close()
1098 static void __i915_vma_remove_closed(struct i915_vma *vma) in __i915_vma_remove_closed() argument
1100 struct intel_gt *gt = vma->vm->gt; in __i915_vma_remove_closed()
1103 list_del_init(&vma->closed_link); in __i915_vma_remove_closed()
1107 void i915_vma_reopen(struct i915_vma *vma) in i915_vma_reopen() argument
1109 if (i915_vma_is_closed(vma)) in i915_vma_reopen()
1110 __i915_vma_remove_closed(vma); in i915_vma_reopen()
1115 struct i915_vma *vma = container_of(ref, typeof(*vma), ref); in i915_vma_release() local
1117 if (drm_mm_node_allocated(&vma->node)) { in i915_vma_release()
1118 mutex_lock(&vma->vm->mutex); in i915_vma_release()
1119 atomic_and(~I915_VMA_PIN_MASK, &vma->flags); in i915_vma_release()
1120 WARN_ON(__i915_vma_unbind(vma)); in i915_vma_release()
1121 mutex_unlock(&vma->vm->mutex); in i915_vma_release()
1122 GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); in i915_vma_release()
1124 GEM_BUG_ON(i915_vma_is_active(vma)); in i915_vma_release()
1126 if (vma->obj) { in i915_vma_release()
1127 struct drm_i915_gem_object *obj = vma->obj; in i915_vma_release()
1129 spin_lock(&obj->vma.lock); in i915_vma_release()
1130 list_del(&vma->obj_link); in i915_vma_release()
1131 if (!RB_EMPTY_NODE(&vma->obj_node)) in i915_vma_release()
1132 rb_erase(&vma->obj_node, &obj->vma.tree); in i915_vma_release()
1133 spin_unlock(&obj->vma.lock); in i915_vma_release()
1136 __i915_vma_remove_closed(vma); in i915_vma_release()
1137 i915_vm_put(vma->vm); in i915_vma_release()
1139 i915_active_fini(&vma->active); in i915_vma_release()
1140 i915_vma_free(vma); in i915_vma_release()
1145 struct i915_vma *vma, *next; in i915_vma_parked() local
1149 list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) { in i915_vma_parked()
1150 struct drm_i915_gem_object *obj = vma->obj; in i915_vma_parked()
1151 struct i915_address_space *vm = vma->vm; in i915_vma_parked()
1163 list_move(&vma->closed_link, &closed); in i915_vma_parked()
1168 list_for_each_entry_safe(vma, next, &closed, closed_link) { in i915_vma_parked()
1169 struct drm_i915_gem_object *obj = vma->obj; in i915_vma_parked()
1170 struct i915_address_space *vm = vma->vm; in i915_vma_parked()
1172 INIT_LIST_HEAD(&vma->closed_link); in i915_vma_parked()
1173 __i915_vma_put(vma); in i915_vma_parked()
1180 static void __i915_vma_iounmap(struct i915_vma *vma) in __i915_vma_iounmap() argument
1182 GEM_BUG_ON(i915_vma_is_pinned(vma)); in __i915_vma_iounmap()
1184 if (vma->iomap == NULL) in __i915_vma_iounmap()
1187 io_mapping_unmap(vma->iomap); in __i915_vma_iounmap()
1188 vma->iomap = NULL; in __i915_vma_iounmap()
1191 void i915_vma_revoke_mmap(struct i915_vma *vma) in i915_vma_revoke_mmap() argument
1196 if (!i915_vma_has_userfault(vma)) in i915_vma_revoke_mmap()
1199 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); in i915_vma_revoke_mmap()
1200 GEM_BUG_ON(!vma->obj->userfault_count); in i915_vma_revoke_mmap()
1202 node = &vma->mmo->vma_node; in i915_vma_revoke_mmap()
1203 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; in i915_vma_revoke_mmap()
1204 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping, in i915_vma_revoke_mmap()
1206 vma->size, in i915_vma_revoke_mmap()
1209 i915_vma_unset_userfault(vma); in i915_vma_revoke_mmap()
1210 if (!--vma->obj->userfault_count) in i915_vma_revoke_mmap()
1211 list_del(&vma->obj->userfault_link); in i915_vma_revoke_mmap()
1215 __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma) in __i915_request_await_bind() argument
1217 return __i915_request_await_exclusive(rq, &vma->active); in __i915_request_await_bind()
1220 int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) in __i915_vma_move_to_active() argument
1224 GEM_BUG_ON(!i915_vma_is_pinned(vma)); in __i915_vma_move_to_active()
1227 err = __i915_request_await_bind(rq, vma); in __i915_vma_move_to_active()
1231 return i915_active_add_request(&vma->active, rq); in __i915_vma_move_to_active()
1234 int _i915_vma_move_to_active(struct i915_vma *vma, in _i915_vma_move_to_active() argument
1239 struct drm_i915_gem_object *obj = vma->obj; in _i915_vma_move_to_active()
1244 err = __i915_vma_move_to_active(vma, rq); in _i915_vma_move_to_active()
1259 dma_resv_add_excl_fence(vma->resv, fence); in _i915_vma_move_to_active()
1265 err = dma_resv_reserve_shared(vma->resv, 1); in _i915_vma_move_to_active()
1271 dma_resv_add_shared_fence(vma->resv, fence); in _i915_vma_move_to_active()
1276 if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence) in _i915_vma_move_to_active()
1277 i915_active_add_request(&vma->fence->active, rq); in _i915_vma_move_to_active()
1282 GEM_BUG_ON(!i915_vma_is_active(vma)); in _i915_vma_move_to_active()
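
__i915_vma_move_to_active() and _i915_vma_move_to_active() above are how a request records its use of a vma: the bind is awaited, the vma's active tracker picks up the request, and the fence is added to the object's reservation (exclusive for writes, shared otherwise). Callers normally go through the inline i915_vma_move_to_active() wrapper in i915_vma.h (not part of this listing), with the vma pinned; a hedged sketch:

    err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);  /* or 0 for a read */
    if (err)
            return err;
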
1286 void __i915_vma_evict(struct i915_vma *vma) in __i915_vma_evict() argument
1288 GEM_BUG_ON(i915_vma_is_pinned(vma)); in __i915_vma_evict()
1290 if (i915_vma_is_map_and_fenceable(vma)) { in __i915_vma_evict()
1292 i915_vma_revoke_mmap(vma); in __i915_vma_evict()
1307 i915_vma_flush_writes(vma); in __i915_vma_evict()
1310 i915_vma_revoke_fence(vma); in __i915_vma_evict()
1312 __i915_vma_iounmap(vma); in __i915_vma_evict()
1313 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); in __i915_vma_evict()
1315 GEM_BUG_ON(vma->fence); in __i915_vma_evict()
1316 GEM_BUG_ON(i915_vma_has_userfault(vma)); in __i915_vma_evict()
1318 if (likely(atomic_read(&vma->vm->open))) { in __i915_vma_evict()
1319 trace_i915_vma_unbind(vma); in __i915_vma_evict()
1320 vma->ops->unbind_vma(vma->vm, vma); in __i915_vma_evict()
1323 &vma->flags); in __i915_vma_evict()
1325 i915_vma_detach(vma); in __i915_vma_evict()
1326 vma_unbind_pages(vma); in __i915_vma_evict()
1329 int __i915_vma_unbind(struct i915_vma *vma) in __i915_vma_unbind() argument
1333 lockdep_assert_held(&vma->vm->mutex); in __i915_vma_unbind()
1335 if (!drm_mm_node_allocated(&vma->node)) in __i915_vma_unbind()
1338 if (i915_vma_is_pinned(vma)) { in __i915_vma_unbind()
1339 vma_print_allocator(vma, "is pinned"); in __i915_vma_unbind()
1348 ret = i915_vma_sync(vma); in __i915_vma_unbind()
1352 GEM_BUG_ON(i915_vma_is_active(vma)); in __i915_vma_unbind()
1353 __i915_vma_evict(vma); in __i915_vma_unbind()
1355 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */ in __i915_vma_unbind()
1359 int i915_vma_unbind(struct i915_vma *vma) in i915_vma_unbind() argument
1361 struct i915_address_space *vm = vma->vm; in i915_vma_unbind()
1366 err = i915_vma_sync(vma); in i915_vma_unbind()
1370 if (!drm_mm_node_allocated(&vma->node)) in i915_vma_unbind()
1373 if (i915_vma_is_pinned(vma)) { in i915_vma_unbind()
1374 vma_print_allocator(vma, "is pinned"); in i915_vma_unbind()
1378 if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) in i915_vma_unbind()
1382 err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref); in i915_vma_unbind()
1386 err = __i915_vma_unbind(vma); in i915_vma_unbind()
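
The two unbind entry points above differ only in locking: __i915_vma_unbind() asserts that the caller already holds vm->mutex (the eviction paths call it that way), while i915_vma_unbind() syncs against activity, takes the mutex itself and, for GGTT-bound vmas, holds a runtime-pm wakeref across the unbind. A hedged sketch of the standalone form:

    err = i915_vma_unbind(vma);         /* may sleep; refuses a still-pinned vma */
    if (err)
            return err;
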
1395 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma) in i915_vma_make_unshrinkable() argument
1397 i915_gem_object_make_unshrinkable(vma->obj); in i915_vma_make_unshrinkable()
1398 return vma; in i915_vma_make_unshrinkable()
1401 void i915_vma_make_shrinkable(struct i915_vma *vma) in i915_vma_make_shrinkable() argument
1403 i915_gem_object_make_shrinkable(vma->obj); in i915_vma_make_shrinkable()
1406 void i915_vma_make_purgeable(struct i915_vma *vma) in i915_vma_make_purgeable() argument
1408 i915_gem_object_make_purgeable(vma->obj); in i915_vma_make_purgeable()
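
Finally, the make_unshrinkable/shrinkable/purgeable helpers above just forward to the corresponding i915_gem_object_make_*() calls on vma->obj; make_unshrinkable() returns the vma so the call can be chained. A hedged sketch (hypothetical caller):

    vma = i915_vma_make_unshrinkable(vma);  /* keep the backing pages off the shrinker */
    /* ... long-lived use of the vma ... */
    i915_vma_make_shrinkable(vma);          /* allow reclaim again when idle */
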