Lines matching references to bo (struct ttm_buffer_object) in the TTM VM fault/mmap helpers, drivers/gpu/drm/ttm/ttm_bo_vm.c

46 static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,  in ttm_bo_vm_fault_idle()  argument
52 if (likely(!bo->moving)) in ttm_bo_vm_fault_idle()
58 if (dma_fence_is_signaled(bo->moving)) in ttm_bo_vm_fault_idle()
71 ttm_bo_get(bo); in ttm_bo_vm_fault_idle()
73 (void) dma_fence_wait(bo->moving, true); in ttm_bo_vm_fault_idle()
74 dma_resv_unlock(bo->base.resv); in ttm_bo_vm_fault_idle()
75 ttm_bo_put(bo); in ttm_bo_vm_fault_idle()
82 err = dma_fence_wait(bo->moving, true); in ttm_bo_vm_fault_idle()
90 dma_fence_put(bo->moving); in ttm_bo_vm_fault_idle()
91 bo->moving = NULL; in ttm_bo_vm_fault_idle()
97 static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo, in ttm_bo_io_mem_pfn() argument
100 struct ttm_device *bdev = bo->bdev; in ttm_bo_io_mem_pfn()
103 return bdev->funcs->io_mem_pfn(bo, page_offset); in ttm_bo_io_mem_pfn()
105 return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset; in ttm_bo_io_mem_pfn()
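The two lines above capture the branch inside ttm_bo_io_mem_pfn(): a driver-supplied io_mem_pfn hook in struct ttm_device_funcs takes precedence, otherwise the PFN is derived from the bus offset cached in the resource. A minimal sketch of such a hook, assuming the signature used here (the mydrv_ name is hypothetical):

	#include <drm/ttm/ttm_bo_api.h>
	#include <drm/ttm/ttm_device.h>
	#include <drm/ttm/ttm_resource.h>

	/*
	 * Hypothetical driver hook: translate a page offset inside the BO into
	 * a page frame number, mirroring the default case shown above.
	 */
	static unsigned long mydrv_io_mem_pfn(struct ttm_buffer_object *bo,
					      unsigned long page_offset)
	{
		/* bus.offset was filled in by the driver's io_mem_reserve() hook. */
		return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
	}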
129 vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, in ttm_bo_vm_reserve() argument
138 if (unlikely(!dma_resv_trylock(bo->base.resv))) { in ttm_bo_vm_reserve()
146 ttm_bo_get(bo); in ttm_bo_vm_reserve()
148 if (!dma_resv_lock_interruptible(bo->base.resv, in ttm_bo_vm_reserve()
150 dma_resv_unlock(bo->base.resv); in ttm_bo_vm_reserve()
151 ttm_bo_put(bo); in ttm_bo_vm_reserve()
157 if (dma_resv_lock_interruptible(bo->base.resv, NULL)) in ttm_bo_vm_reserve()
165 if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) { in ttm_bo_vm_reserve()
166 if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) { in ttm_bo_vm_reserve()
167 dma_resv_unlock(bo->base.resv); in ttm_bo_vm_reserve()
199 struct ttm_buffer_object *bo = vma->vm_private_data; in ttm_bo_vm_fault_reserved() local
200 struct ttm_device *bdev = bo->bdev; in ttm_bo_vm_fault_reserved()
215 ret = ttm_bo_vm_fault_idle(bo, vmf); in ttm_bo_vm_fault_reserved()
219 err = ttm_mem_io_reserve(bdev, bo->resource); in ttm_bo_vm_fault_reserved()
224 vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node); in ttm_bo_vm_fault_reserved()
226 drm_vma_node_start(&bo->base.vma_node); in ttm_bo_vm_fault_reserved()
228 if (unlikely(page_offset >= bo->resource->num_pages)) in ttm_bo_vm_fault_reserved()
231 prot = ttm_io_prot(bo, bo->resource, prot); in ttm_bo_vm_fault_reserved()
232 if (!bo->resource->bus.is_iomem) { in ttm_bo_vm_fault_reserved()
239 ttm = bo->ttm; in ttm_bo_vm_fault_reserved()
240 if (ttm_tt_populate(bdev, bo->ttm, &ctx)) in ttm_bo_vm_fault_reserved()
252 if (bo->resource->bus.is_iomem) { in ttm_bo_vm_fault_reserved()
253 pfn = ttm_bo_io_mem_pfn(bo, page_offset); in ttm_bo_vm_fault_reserved()
300 struct ttm_buffer_object *bo = vma->vm_private_data; in ttm_bo_vm_dummy_page() local
301 struct drm_device *ddev = bo->base.dev; in ttm_bo_vm_dummy_page()
331 struct ttm_buffer_object *bo = vma->vm_private_data; in ttm_bo_vm_fault() local
332 struct drm_device *ddev = bo->base.dev; in ttm_bo_vm_fault()
336 ret = ttm_bo_vm_reserve(bo, vmf); in ttm_bo_vm_fault()
350 dma_resv_unlock(bo->base.resv); in ttm_bo_vm_fault()
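The ttm_bo_vm_fault() fragments above show the usual shape of a TTM fault handler: reserve the BO, run the reserved fault path, and drop the reservation unless the core asked for a retry (the generic handler additionally falls back to ttm_bo_vm_dummy_page() while the device is unplugged). A driver that needs its own .fault callback can follow the same pattern; a minimal sketch, assuming the exported helper signatures referenced in this listing (mydrv_ttm_fault is hypothetical):

	#include <linux/mm.h>
	#include <linux/dma-resv.h>
	#include <drm/ttm/ttm_bo_api.h>

	/* Hypothetical per-driver fault handler built from the exported helpers. */
	static vm_fault_t mydrv_ttm_fault(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;
		struct ttm_buffer_object *bo = vma->vm_private_data;
		vm_fault_t ret;

		/* Take bo->base.resv, or ask the core to retry the fault. */
		ret = ttm_bo_vm_reserve(bo, vmf);
		if (ret)
			return ret;

		/* Driver-specific work (e.g. migrating the BO) would go here. */

		ret = ttm_bo_vm_fault_reserved(vmf, vma->vm_page_prot,
					       TTM_BO_VM_NUM_PREFAULT);
		if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
			return ret;

		dma_resv_unlock(bo->base.resv);
		return ret;
	}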
358 struct ttm_buffer_object *bo = vma->vm_private_data; in ttm_bo_vm_open() local
360 WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping); in ttm_bo_vm_open()
362 ttm_bo_get(bo); in ttm_bo_vm_open()
368 struct ttm_buffer_object *bo = vma->vm_private_data; in ttm_bo_vm_close() local
370 ttm_bo_put(bo); in ttm_bo_vm_close()
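ttm_bo_vm_open() and ttm_bo_vm_close() only adjust the BO reference count around VMA duplication and teardown; together with ttm_bo_vm_fault() and the ttm_bo_vm_access() helper listed further down, they form the table drivers install as vma->vm_ops. A sketch of such a table, assuming the exported helpers referenced in this listing (the mydrv_ name is hypothetical):

	#include <linux/mm.h>
	#include <drm/ttm/ttm_bo_api.h>

	/* Hypothetical vm_operations_struct wiring up the exported TTM helpers. */
	static const struct vm_operations_struct mydrv_ttm_vm_ops = {
		.fault = ttm_bo_vm_fault,
		.open = ttm_bo_vm_open,
		.close = ttm_bo_vm_close,
		.access = ttm_bo_vm_access,
	};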
375 static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, in ttm_bo_vm_access_kmap() argument
393 ret = ttm_bo_kmap(bo, page, 1, &map); in ttm_bo_vm_access_kmap()
417 struct ttm_buffer_object *bo = vma->vm_private_data; in ttm_bo_vm_access() local
419 ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node)) in ttm_bo_vm_access()
423 if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages) in ttm_bo_vm_access()
426 ret = ttm_bo_reserve(bo, true, false, NULL); in ttm_bo_vm_access()
430 switch (bo->resource->mem_type) { in ttm_bo_vm_access()
434 ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write); in ttm_bo_vm_access()
437 if (bo->bdev->funcs->access_memory) in ttm_bo_vm_access()
438 ret = bo->bdev->funcs->access_memory( in ttm_bo_vm_access()
439 bo, offset, buf, len, write); in ttm_bo_vm_access()
444 ttm_bo_unreserve(bo); in ttm_bo_vm_access()
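For placements the kmap path cannot handle (the default branch of the switch above), ttm_bo_vm_access() defers to the driver's access_memory hook in struct ttm_device_funcs. A minimal sketch of such a hook for a linearly mapped VRAM aperture, assuming that signature; struct mydrv_device, vram_mmio, and the aperture layout are hypothetical:

	#include <linux/io.h>
	#include <linux/container_of.h>
	#include <drm/ttm/ttm_bo_api.h>
	#include <drm/ttm/ttm_device.h>

	/* Hypothetical driver wrapper around the TTM device. */
	struct mydrv_device {
		struct ttm_device bdev;
		void __iomem *vram_mmio;	/* CPU mapping of the VRAM aperture */
	};

	/*
	 * Hypothetical access_memory hook: copy between the caller's buffer and
	 * VRAM through the aperture mapping, returning the number of bytes copied.
	 */
	static int mydrv_access_memory(struct ttm_buffer_object *bo,
				       unsigned long offset, void *buf,
				       int len, int write)
	{
		struct mydrv_device *mydrv =
			container_of(bo->bdev, struct mydrv_device, bdev);
		void __iomem *vram = mydrv->vram_mmio +
			(bo->resource->start << PAGE_SHIFT) + offset;

		if (write)
			memcpy_toio(vram, buf, len);
		else
			memcpy_fromio(buf, vram, len);

		return len;
	}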
457 int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo) in ttm_bo_mmap_obj() argument
463 ttm_bo_get(bo); in ttm_bo_mmap_obj()
477 vma->vm_private_data = bo; in ttm_bo_mmap_obj()
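ttm_bo_mmap_obj() takes its own reference on the BO and stores it in vma->vm_private_data, which is where every handler above retrieves it from. A GEM-based driver typically reaches it from its object's mmap callback (in-tree, the drm_gem_ttm_mmap() helper wraps this call); a minimal sketch, assuming the GEM object is embedded in the BO as bo->base (mydrv_gem_mmap is hypothetical):

	#include <drm/drm_gem.h>
	#include <drm/ttm/ttm_bo_api.h>

	/*
	 * Hypothetical drm_gem_object_funcs.mmap callback: the GEM object is
	 * embedded in the TTM BO as bo->base, so hand the VMA straight to TTM.
	 * The in-tree drm_gem_ttm_mmap() helper additionally drops the GEM
	 * reference afterwards, since ttm_bo_mmap_obj() took a BO reference.
	 */
	static int mydrv_gem_mmap(struct drm_gem_object *obj,
				  struct vm_area_struct *vma)
	{
		struct ttm_buffer_object *bo =
			container_of(obj, struct ttm_buffer_object, base);

		return ttm_bo_mmap_obj(vma, bo);
	}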