Lines Matching refs:bdev (drivers/gpu/drm/ttm/ttm_bo.c, inferred from the function names)

67 		man = ttm_manager_type(bo->bdev, mem_type);  in ttm_bo_mem_space_debug()
74 struct ttm_device *bdev = bo->bdev; in ttm_bo_move_to_pinned() local
76 list_move_tail(&bo->lru, &bdev->pinned); in ttm_bo_move_to_pinned()
78 if (bdev->funcs->del_from_lru_notify) in ttm_bo_move_to_pinned()
79 bdev->funcs->del_from_lru_notify(bo); in ttm_bo_move_to_pinned()
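
Lines 74-79 above are the entire body of ttm_bo_move_to_pinned(); filling in only the signature and braces gives this sketch (reconstructed from the listing, not checked against the tree verbatim):

	static void ttm_bo_move_to_pinned(struct ttm_buffer_object *bo)
	{
		struct ttm_device *bdev = bo->bdev;

		/* Pinned BOs leave the LRU and park on the per-device list. */
		list_move_tail(&bo->lru, &bdev->pinned);

		if (bdev->funcs->del_from_lru_notify)
			bdev->funcs->del_from_lru_notify(bo);
	}
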
84 struct ttm_device *bdev = bo->bdev; in ttm_bo_del_from_lru() local
88 if (bdev->funcs->del_from_lru_notify) in ttm_bo_del_from_lru()
89 bdev->funcs->del_from_lru_notify(bo); in ttm_bo_del_from_lru()
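
ttm_bo_del_from_lru() has the same shape; the one line the listing skips (between 84 and 88) is assumed here to be the usual list_del_init() on bo->lru:

	static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
	{
		struct ttm_device *bdev = bo->bdev;

		/* Assumed: the unlisted middle line detaches the BO from its LRU. */
		list_del_init(&bo->lru);

		if (bdev->funcs->del_from_lru_notify)
			bdev->funcs->del_from_lru_notify(bo);
	}
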
104 struct ttm_device *bdev = bo->bdev; in ttm_bo_move_to_lru_tail() local
118 man = ttm_manager_type(bdev, mem->mem_type); in ttm_bo_move_to_lru_tail()
121 if (bdev->funcs->del_from_lru_notify) in ttm_bo_move_to_lru_tail()
122 bdev->funcs->del_from_lru_notify(bo); in ttm_bo_move_to_lru_tail()
152 man = ttm_manager_type(pos->first->bdev, TTM_PL_TT); in ttm_bo_bulk_move_lru_tail()
167 man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM); in ttm_bo_bulk_move_lru_tail()
180 struct ttm_device *bdev = bo->bdev; in ttm_bo_handle_move_mem() local
183 old_man = ttm_manager_type(bdev, bo->resource->mem_type); in ttm_bo_handle_move_mem()
184 new_man = ttm_manager_type(bdev, mem->mem_type); in ttm_bo_handle_move_mem()
201 ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx); in ttm_bo_handle_move_mem()
207 ret = bdev->funcs->move(bo, evict, ctx, mem, hop); in ttm_bo_handle_move_mem()
218 new_man = ttm_manager_type(bdev, bo->resource->mem_type); in ttm_bo_handle_move_mem()
235 if (bo->bdev->funcs->delete_mem_notify) in ttm_bo_cleanup_memtype_use()
236 bo->bdev->funcs->delete_mem_notify(bo); in ttm_bo_cleanup_memtype_use()
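
A sketch of ttm_bo_cleanup_memtype_use() around lines 235-236; the two tail calls are assumptions about the unlisted lines (they match the usual TTM teardown order), not part of this listing:

	static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
	{
		if (bo->bdev->funcs->delete_mem_notify)
			bo->bdev->funcs->delete_mem_notify(bo);

		/* Assumed tail: tear down the TT object, release the resource. */
		ttm_bo_tt_destroy(bo);
		ttm_resource_free(bo, &bo->resource);
	}
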
261 spin_lock(&bo->bdev->lru_lock); in ttm_bo_individualize_resv()
263 spin_unlock(&bo->bdev->lru_lock); in ttm_bo_individualize_resv()
314 spin_unlock(&bo->bdev->lru_lock); in ttm_bo_cleanup_refs()
324 spin_lock(&bo->bdev->lru_lock); in ttm_bo_cleanup_refs()
334 spin_unlock(&bo->bdev->lru_lock); in ttm_bo_cleanup_refs()
343 spin_unlock(&bo->bdev->lru_lock); in ttm_bo_cleanup_refs()
349 spin_unlock(&bo->bdev->lru_lock); in ttm_bo_cleanup_refs()
364 bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all) in ttm_bo_delayed_delete() argument
371 spin_lock(&bdev->lru_lock); in ttm_bo_delayed_delete()
372 while (!list_empty(&bdev->ddestroy)) { in ttm_bo_delayed_delete()
375 bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object, in ttm_bo_delayed_delete()
382 spin_unlock(&bdev->lru_lock); in ttm_bo_delayed_delete()
385 spin_lock(&bdev->lru_lock); in ttm_bo_delayed_delete()
391 spin_unlock(&bdev->lru_lock); in ttm_bo_delayed_delete()
395 spin_lock(&bdev->lru_lock); in ttm_bo_delayed_delete()
397 list_splice_tail(&removed, &bdev->ddestroy); in ttm_bo_delayed_delete()
398 empty = list_empty(&bdev->ddestroy); in ttm_bo_delayed_delete()
399 spin_unlock(&bdev->lru_lock); in ttm_bo_delayed_delete()
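
Lines 364-399 outline ttm_bo_delayed_delete() and its locking discipline: lru_lock is dropped around anything that can sleep and retaken before ddestroy is touched again. A condensed sketch, with the per-BO reservation handling elided and marked as such:

	bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
	{
		struct list_head removed;
		bool empty;

		INIT_LIST_HEAD(&removed);

		spin_lock(&bdev->lru_lock);
		while (!list_empty(&bdev->ddestroy)) {
			struct ttm_buffer_object *bo;

			bo = list_first_entry(&bdev->ddestroy,
					      struct ttm_buffer_object, ddestroy);
			/* Implied by the splice at line 397. */
			list_move_tail(&bo->ddestroy, &removed);

			/*
			 * Elided: take a reference, drop lru_lock (line 382),
			 * reserve and clean up the BO, retake lru_lock
			 * (line 385), drop it to put the reference (line 391),
			 * retake it for the next iteration (line 395).
			 */
		}
		list_splice_tail(&removed, &bdev->ddestroy);
		empty = list_empty(&bdev->ddestroy);
		spin_unlock(&bdev->lru_lock);

		return empty;
	}
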
408 struct ttm_device *bdev = bo->bdev; in ttm_bo_release() local
423 if (bo->bdev->funcs->release_notify) in ttm_bo_release()
424 bo->bdev->funcs->release_notify(bo); in ttm_bo_release()
426 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node); in ttm_bo_release()
427 ttm_mem_io_free(bdev, bo->resource); in ttm_bo_release()
436 spin_lock(&bo->bdev->lru_lock); in ttm_bo_release()
452 list_add_tail(&bo->ddestroy, &bdev->ddestroy); in ttm_bo_release()
453 spin_unlock(&bo->bdev->lru_lock); in ttm_bo_release()
455 schedule_delayed_work(&bdev->wq, in ttm_bo_release()
460 spin_lock(&bo->bdev->lru_lock); in ttm_bo_release()
463 spin_unlock(&bo->bdev->lru_lock); in ttm_bo_release()
479 int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev) in ttm_bo_lock_delayed_workqueue() argument
481 return cancel_delayed_work_sync(&bdev->wq); in ttm_bo_lock_delayed_workqueue()
485 void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched) in ttm_bo_unlock_delayed_workqueue() argument
488 schedule_delayed_work(&bdev->wq, in ttm_bo_unlock_delayed_workqueue()
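
Both delayed-workqueue helpers are effectively one-liners; reconstructed from lines 479-488, with the resched guard and the HZ/100 timeout assumed (the timeout argument at line 488 is cut off in this listing):

	int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
	{
		/* Non-zero return means delayed-delete work was pending. */
		return cancel_delayed_work_sync(&bdev->wq);
	}

	void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
	{
		if (resched)	/* assumed guard; only line 488 is listed */
			schedule_delayed_work(&bdev->wq,
					      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
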
521 struct ttm_device *bdev = bo->bdev; in ttm_bo_evict() local
533 bdev->funcs->evict_flags(bo, &placement); in ttm_bo_evict()
623 !bo->bdev->funcs->eviction_valuable(bo, place))) { in ttm_bo_evict_swapout_allowable()
669 int ttm_mem_evict_first(struct ttm_device *bdev, in ttm_mem_evict_first() argument
680 spin_lock(&bdev->lru_lock); in ttm_mem_evict_first()
711 spin_unlock(&bdev->lru_lock); in ttm_mem_evict_first()
725 spin_unlock(&bdev->lru_lock); in ttm_mem_evict_first()
783 struct ttm_device *bdev = bo->bdev; in ttm_bo_mem_force_space() local
788 man = ttm_manager_type(bdev, place->mem_type); in ttm_bo_mem_force_space()
796 ret = ttm_mem_evict_first(bdev, man, place, ctx, in ttm_bo_mem_force_space()
818 struct ttm_device *bdev = bo->bdev; in ttm_bo_mem_space() local
830 man = ttm_manager_type(bdev, place->mem_type); in ttm_bo_mem_space()
856 man = ttm_manager_type(bdev, place->mem_type); in ttm_bo_mem_space()
954 int ttm_bo_init_reserved(struct ttm_device *bdev, in ttm_bo_init_reserved() argument
974 bo->bdev = bdev; in ttm_bo_init_reserved()
1000 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node, in ttm_bo_init_reserved()
1028 int ttm_bo_init(struct ttm_device *bdev, in ttm_bo_init() argument
1042 ret = ttm_bo_init_reserved(bdev, bo, size, type, placement, in ttm_bo_init()
1060 struct ttm_device *bdev = bo->bdev; in ttm_bo_unmap_virtual() local
1062 drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); in ttm_bo_unmap_virtual()
1063 ttm_mem_io_free(bdev, bo->resource); in ttm_bo_unmap_virtual()
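
Lines 1060-1063 are all of ttm_bo_unmap_virtual(); a sketch with only the signature and braces added:

	void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
	{
		struct ttm_device *bdev = bo->bdev;

		/* Kill userspace CPU mappings, then release the io mapping. */
		drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
		ttm_mem_io_free(bdev, bo->resource);
	}
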
1127 spin_unlock(&bo->bdev->lru_lock); in ttm_bo_swapout()
1163 if (bo->bdev->funcs->swap_notify) in ttm_bo_swapout()
1164 bo->bdev->funcs->swap_notify(bo); in ttm_bo_swapout()
1167 ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags); in ttm_bo_swapout()
1185 ttm_tt_unpopulate(bo->bdev, bo->ttm); in ttm_bo_tt_destroy()
1186 ttm_tt_destroy(bo->bdev, bo->ttm); in ttm_bo_tt_destroy()
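
And ttm_bo_tt_destroy(), whose two listed calls (1185-1186) sit between an assumed NULL check and an assumed reset of bo->ttm:

	void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
	{
		if (!bo->ttm)
			return;	/* assumed NULL guard */

		ttm_tt_unpopulate(bo->bdev, bo->ttm);
		ttm_tt_destroy(bo->bdev, bo->ttm);
		bo->ttm = NULL;	/* assumed */
	}
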