/linux/drivers/gpu/drm/ttm/
ttm_tt.c
   66  if (bo->ttm)  in ttm_tt_create()
   99  ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),  in ttm_tt_alloc_page_directory()
  101  if (!ttm->pages)  in ttm_tt_alloc_page_directory()
  108  ttm->pages = kvmalloc_array(ttm->num_pages,  in ttm_dma_tt_alloc_page_directory()
  112  if (!ttm->pages)  in ttm_dma_tt_alloc_page_directory()
  115  ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);  in ttm_dma_tt_alloc_page_directory()
  121  ttm->dma_address = kvmalloc_array(ttm->num_pages,  in ttm_sg_tt_alloc_page_directory()
  144  ttm->sg = bo->sg;  in ttm_tt_init_fields()
  169  if (ttm->pages)  in ttm_tt_fini()
  173  ttm->pages = NULL;  in ttm_tt_fini()
  [all …]
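The page-directory helpers above build the struct page array, and in the DMA variant also the dma_addr_t array, out of a single kvmalloc_array() allocation. A minimal sketch of that combined layout, assuming only the ttm_tt fields visible in these snippets (the function name is made up):

    #include <linux/slab.h>
    #include <drm/ttm/ttm_tt.h>

    /* Sketch: one allocation backs both ttm->pages and ttm->dma_address,
     * mirroring ttm_dma_tt_alloc_page_directory() above. */
    static int example_dma_tt_alloc(struct ttm_tt *ttm)
    {
            ttm->pages = kvmalloc_array(ttm->num_pages,
                                        sizeof(*ttm->pages) +
                                        sizeof(*ttm->dma_address),
                                        GFP_KERNEL | __GFP_ZERO);
            if (!ttm->pages)
                    return -ENOMEM;

            /* The dma_addr_t slots start right behind the page pointers. */
            ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
            return 0;
    }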
ttm_agp_backend.c
   45  struct ttm_tt ttm;  member
   52  struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);  in ttm_agp_bind()
   66  for (i = 0; i < ttm->num_pages; i++) {  in ttm_agp_bind()
   67  struct page *page = ttm->pages[i];  in ttm_agp_bind()
   89  struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);  in ttm_agp_unbind()
  104  struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);  in ttm_agp_is_bound()
  106  if (!ttm)  in ttm_agp_is_bound()
  115  struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);  in ttm_agp_destroy()
  118  ttm_agp_unbind(ttm);  in ttm_agp_destroy()
  119  ttm_tt_fini(ttm);  in ttm_agp_destroy()
  [all …]
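ttm_agp_backend.c shows the usual backend pattern: the driver embeds struct ttm_tt in its own wrapper and recovers the wrapper with container_of() in every callback. A hedged sketch of the same shape with a hypothetical wrapper name (the real one here is struct ttm_agp_backend):

    #include <linux/slab.h>
    #include <drm/ttm/ttm_tt.h>

    struct example_backend {
            struct ttm_tt ttm;      /* embedded, so container_of() can recover us */
            void *driver_priv;      /* hypothetical driver-private state */
    };

    static void example_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
    {
            struct example_backend *be = container_of(ttm, struct example_backend, ttm);

            ttm_tt_fini(&be->ttm);
            kfree(be);
    }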
ttm_bo_util.c
  138  struct ttm_tt *ttm = bo->ttm;  in ttm_bo_move_memcpy() local
  150  if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||  in ttm_bo_move_memcpy()
  304  struct ttm_tt *ttm = bo->ttm;  in ttm_bo_kmap_ttm() local
  308  BUG_ON(!ttm);  in ttm_bo_kmap_ttm()
  423  struct ttm_tt *ttm = bo->ttm;  in ttm_bo_vmap() local
  436  vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);  in ttm_bo_vmap()
  511  bo->ttm = NULL;  in ttm_bo_move_to_ghost()
  621  ttm = bo->ttm;  in ttm_bo_pipeline_gutting()
  622  bo->ttm = NULL;  in ttm_bo_pipeline_gutting()
  624  swap(bo->ttm, ttm);  in ttm_bo_pipeline_gutting()
  [all …]
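ttm_bo_vmap() above maps a populated TTM's page array into kernel address space with vmap(). A reduced sketch of just that step, assuming the pages are already populated and cached (the real code derives the pgprot from the caching mode and resource type):

    #include <linux/vmalloc.h>
    #include <drm/ttm/ttm_tt.h>

    static void *example_map_tt(struct ttm_tt *ttm)
    {
            /* PAGE_KERNEL is only correct for ttm_cached memory;
             * the caller is responsible for vunmap() later. */
            return vmap(ttm->pages, ttm->num_pages, 0, PAGE_KERNEL);
    }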
Makefile
    5  ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
    8  ttm-$(CONFIG_AGP) += ttm_agp_backend.o
   10  obj-$(CONFIG_DRM_TTM) += ttm.o
ttm_bo_vm.c
  165  if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {  in ttm_bo_vm_reserve()
  166  if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) {  in ttm_bo_vm_reserve()
  204  struct ttm_tt *ttm = NULL;  in ttm_bo_vm_fault_reserved() local
  239  ttm = bo->ttm;  in ttm_bo_vm_fault_reserved()
  240  if (ttm_tt_populate(bdev, bo->ttm, &ctx))  in ttm_bo_vm_fault_reserved()
  255  page = ttm->pages[page_offset];  in ttm_bo_vm_fault_reserved()
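The fault path above populates the TTM on demand and then indexes ttm->pages[] with the faulting page offset. A condensed, hedged sketch of that lookup with the helper name invented and all error handling collapsed to -ENOMEM:

    /* Sketch: fetch the backing page for a faulting offset, populating the
     * ttm_tt on demand as ttm_bo_vm_fault_reserved() does above. */
    static int example_fault_page(struct ttm_buffer_object *bo,
                                  struct ttm_operation_ctx *ctx,
                                  pgoff_t page_offset,
                                  struct page **pagep)
    {
            struct ttm_tt *ttm = bo->ttm;

            if (ttm_tt_populate(bo->bdev, ttm, ctx))
                    return -ENOMEM;

            *pagep = ttm->pages[page_offset];
            return 0;
    }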
ttm_device.c
  276  if (bo->ttm)  in ttm_device_clear_dma_mappings()
  277  ttm_tt_unpopulate(bo->bdev, bo->ttm);  in ttm_device_clear_dma_mappings()
  296  if (bo->ttm)  in ttm_device_clear_dma_mappings()
  297  ttm_tt_unpopulate(bo->bdev, bo->ttm);  in ttm_device_clear_dma_mappings()
ttm_bo.c
  201  ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);  in ttm_bo_handle_move_mem()
 1110  if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||  in ttm_bo_swapout()
 1111  bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||  in ttm_bo_swapout()
 1112  bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||  in ttm_bo_swapout()
 1166  if (ttm_tt_is_populated(bo->ttm))  in ttm_bo_swapout()
 1167  ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);  in ttm_bo_swapout()
 1182  if (bo->ttm == NULL)  in ttm_bo_tt_destroy()
 1185  ttm_tt_unpopulate(bo->bdev, bo->ttm);  in ttm_bo_tt_destroy()
 1186  ttm_tt_destroy(bo->bdev, bo->ttm);  in ttm_bo_tt_destroy()
 1187  bo->ttm = NULL;  in ttm_bo_tt_destroy()
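The ttm_bo_swapout() checks above amount to an eligibility test; restated as a predicate for readability (a sketch only — the real function checks more than what these snippets show, such as reference and pin state):

    static bool example_tt_swappable(struct ttm_buffer_object *bo)
    {
            return bo->ttm &&
                   ttm_tt_is_populated(bo->ttm) &&
                   !(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) &&
                   !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED);
    }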
/linux/drivers/gpu/drm/radeon/
radeon_ttm.c
  376  r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,  in radeon_ttm_tt_pin_userptr()
  386  drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,  in radeon_ttm_tt_pin_userptr()
  392  kfree(ttm->sg);  in radeon_ttm_tt_pin_userptr()
  410  if (!ttm->sg || !ttm->sg->sgl)  in radeon_ttm_tt_unpin_userptr()
  456  ttm->num_pages, bo_mem, ttm);  in radeon_ttm_backend_bind()
  461  ttm->pages, gtt->ttm.dma_address, flags);  in radeon_ttm_backend_bind()
  537  if (!ttm)  in radeon_ttm_tt_to_gtt()
  539  return container_of(ttm, struct radeon_ttm_tt, ttm);  in radeon_ttm_tt_to_gtt()
  552  if (!ttm->sg)  in radeon_ttm_tt_populate()
  560  drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,  in radeon_ttm_tt_populate()
  [all …]
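radeon_ttm_tt_pin_userptr() shows the common userptr flow: wrap the pinned pages in an sg_table, DMA-map it, then scatter the addresses back into the per-page DMA address array. A sketch of that sequence, assuming ttm->sg is already allocated; error unwinding is elided and DMA_BIDIRECTIONAL stands in for the direction the real code derives from the access flags:

    #include <linux/scatterlist.h>
    #include <linux/dma-mapping.h>
    #include <drm/drm_prime.h>
    #include <drm/ttm/ttm_tt.h>

    static int example_pin_userptr(struct device *dev, struct ttm_tt *ttm,
                                   dma_addr_t *dma_address)
    {
            int r;

            r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
                                          (u64)ttm->num_pages << PAGE_SHIFT,
                                          GFP_KERNEL);
            if (r)
                    return r;

            r = dma_map_sgtable(dev, ttm->sg, DMA_BIDIRECTIONAL, 0);
            if (r)
                    return r;    /* caller is assumed to free ttm->sg */

            /* Spread the mapped addresses into the per-page array. */
            drm_prime_sg_to_dma_addr_array(ttm->sg, dma_address, ttm->num_pages);
            return 0;
    }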
radeon_prime.c
   39  return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages,  in radeon_gem_prime_get_sg_table()
   40  bo->tbo.ttm->num_pages);  in radeon_gem_prime_get_sg_table()
  117  if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))  in radeon_gem_prime_export()
/linux/drivers/gpu/drm/i915/gem/
i915_gem_ttm.c
   46  struct ttm_tt ttm;  member
  213  struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);  in i915_ttm_tt_unpopulate()
  227  struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);  in i915_ttm_tt_destroy()
  229  ttm_tt_fini(ttm);  in i915_ttm_tt_destroy()
  327  bo->ttm);  in i915_ttm_adjust_gem_after_move()
  406  ttm->pages, ttm->num_pages,  in i915_ttm_tt_get_st()
  546  struct ttm_tt *ttm = bo->ttm;  in i915_ttm_move() local
  578  if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))  in i915_ttm_move()
  680  if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {  in __i915_ttm_get_pages()
  691  st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st;  in __i915_ttm_get_pages()
  [all …]
i915_gem_ttm_pm.c
   23  if (obj->ttm.backup) {  in i915_ttm_backup_free()
   24  i915_gem_object_put(obj->ttm.backup);  in i915_ttm_backup_free()
   25  obj->ttm.backup = NULL;  in i915_ttm_backup_free()
   54  if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)  in i915_ttm_backup()
   76  err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);  in i915_ttm_backup()
   83  obj->ttm.backup = backup;  in i915_ttm_backup()
  151  struct drm_i915_gem_object *backup = obj->ttm.backup;  in i915_ttm_restore()
  167  err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);  in i915_ttm_restore()
  173  obj->ttm.backup = NULL;  in i915_ttm_restore()
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_ttm.c
  663  struct ttm_tt *ttm = bo->tbo.ttm;  in amdgpu_ttm_tt_get_user_pages() local
  774  r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,  in amdgpu_ttm_tt_pin_userptr()
  786  drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,  in amdgpu_ttm_tt_pin_userptr()
  792  kfree(ttm->sg);  in amdgpu_ttm_tt_pin_userptr()
  793  ttm->sg = NULL;  in amdgpu_ttm_tt_pin_userptr()
  810  if (!ttm->sg || !ttm->sg->sgl)  in amdgpu_ttm_tt_unpin_userptr()
  837  struct ttm_tt *ttm = tbo->ttm;  in amdgpu_ttm_gart_bind() local
  922  ttm->num_pages, bo_mem, ttm);  in amdgpu_ttm_backend_bind()
 1023  if (!tbo->ttm)  in amdgpu_ttm_recover_gart()
 1134  if (!ttm->sg)  in amdgpu_ttm_tt_populate()
  [all …]
amdgpu_ttm.h
  166  bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
  173  static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)  in amdgpu_ttm_tt_get_user_pages_done() argument
  179  void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
  182  bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
  183  struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
  184  bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
  186  bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
  188  bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
  189  bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
  190  uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem);
  [all …]
amdgpu_amdkfd_gpuvm.c
  478  struct ttm_tt *ttm = bo->tbo.ttm;  in kfd_mem_dmamap_userptr() local
  481  ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);  in kfd_mem_dmamap_userptr()
  482  if (unlikely(!ttm->sg))  in kfd_mem_dmamap_userptr()
  490  ttm->num_pages, 0,  in kfd_mem_dmamap_userptr()
  500  drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,  in kfd_mem_dmamap_userptr()
  514  sg_free_table(ttm->sg);  in kfd_mem_dmamap_userptr()
  516  kfree(ttm->sg);  in kfd_mem_dmamap_userptr()
  517  ttm->sg = NULL;  in kfd_mem_dmamap_userptr()
  558  struct ttm_tt *ttm = bo->tbo.ttm;  in kfd_mem_dmaunmap_userptr() local
  568  kfree(ttm->sg);  in kfd_mem_dmaunmap_userptr()
  [all …]
/linux/include/drm/ttm/
ttm_tt.h
  149  int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
  161  void ttm_tt_fini(struct ttm_tt *ttm);
  171  void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
  180  int ttm_tt_swapin(struct ttm_tt *ttm);
  181  int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
  193  int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm,
  214  static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)  in ttm_tt_mark_for_clear() argument
  216  ttm->page_flags |= TTM_TT_FLAG_ZERO_ALLOC;  in ttm_tt_mark_for_clear()
  243  void ttm_agp_unbind(struct ttm_tt *ttm);
  244  void ttm_agp_destroy(struct ttm_tt *ttm);
  [all …]
ttm_device.h
   88  struct ttm_tt *ttm,
   99  struct ttm_tt *ttm);
  110  void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm);
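ttm_device.h declares the per-driver ttm_tt hooks in struct ttm_device_funcs; only ttm_tt_destroy is fully visible above, the rest are truncated. A hedged sketch of a minimal backend wiring the create/destroy pair (callback names invented; the ttm_tt_init() call follows the four-argument form visible in the qxl_ttm.c entry further down, which differs in newer kernels). When the populate/unpopulate hooks are left NULL, core TTM falls back to its built-in page pool:

    #include <linux/slab.h>
    #include <drm/ttm/ttm_tt.h>
    #include <drm/ttm/ttm_device.h>

    static struct ttm_tt *example_tt_create(struct ttm_buffer_object *bo,
                                            uint32_t page_flags)
    {
            struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

            if (!tt)
                    return NULL;
            if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
                    kfree(tt);
                    return NULL;
            }
            return tt;
    }

    static void example_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
    {
            ttm_tt_fini(ttm);
            kfree(ttm);
    }

    static struct ttm_device_funcs example_funcs = {
            .ttm_tt_create  = example_tt_create,
            .ttm_tt_destroy = example_tt_destroy,
    };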
/linux/drivers/gpu/drm/nouveau/
nouveau_sgdma.c
   14  struct ttm_tt ttm;  member
   19  nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)  in nouveau_sgdma_destroy() argument
   21  struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;  in nouveau_sgdma_destroy()
   23  if (ttm) {  in nouveau_sgdma_destroy()
   24  ttm_tt_fini(&nvbe->ttm);  in nouveau_sgdma_destroy()
   32  struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;  in nouveau_sgdma_bind()
   40  ret = nouveau_mem_host(reg, &nvbe->ttm);  in nouveau_sgdma_bind()
   57  nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)  in nouveau_sgdma_unbind() argument
   59  struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;  in nouveau_sgdma_unbind()
   85  if (ttm_sg_tt_init(&nvbe->ttm, bo, page_flags, caching)) {  in nouveau_sgdma_create_ttm()
  [all …]
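nouveau_sgdma.c uses a plain cast instead of container_of(); that only works because the ttm member is the first field of struct nouveau_sgdma_be. A sketch of the same idiom with hypothetical names:

    #include <linux/slab.h>
    #include <drm/ttm/ttm_tt.h>

    struct example_sgdma_be {
            struct ttm_tt ttm;      /* must stay the first member for the cast below */
            void *mem;              /* hypothetical driver-private state */
    };

    static void example_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
    {
            struct example_sgdma_be *nvbe = (struct example_sgdma_be *)ttm;

            if (ttm) {
                    ttm_tt_fini(&nvbe->ttm);
                    kfree(nvbe);
            }
    }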
nouveau_ttm.c
  145  drm->ttm.type_host[!!kind] = typei;  in nouveau_ttm_init_host()
  151  drm->ttm.type_ncoh[!!kind] = typei;  in nouveau_ttm_init_host()
  189  ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM);  in nouveau_ttm_fini_vram()
  226  ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT);  in nouveau_ttm_fini_gtt()
  266  drm->ttm.type_vram = typei;  in nouveau_ttm_init()
  268  drm->ttm.type_vram = -1;  in nouveau_ttm_init()
  319  mutex_init(&drm->ttm.io_reserve_mutex);  in nouveau_ttm_init()
  320  INIT_LIST_HEAD(&drm->ttm.io_reserve_lru);  in nouveau_ttm_init()
  335  ttm_device_fini(&drm->ttm.bdev);  in nouveau_ttm_fini()
  337  arch_phys_wc_del(drm->ttm.mtrr);  in nouveau_ttm_fini()
  [all …]
nouveau_bo.c
  735  ttm_agp_unbind(ttm);  in nouveau_ttm_tt_unbind()
  886  &drm->ttm.copy);  in nouveau_bo_move_init()
  895  drm->ttm.chan = chan;  in nouveau_bo_move_init()
 1022  if (drm->ttm.move) {  in nouveau_bo_move()
 1257  if (slave && ttm->sg) {  in nouveau_ttm_tt_populate()
 1259  ttm->num_pages);  in nouveau_ttm_tt_populate()
 1265  return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);  in nouveau_ttm_tt_populate()
 1270  struct ttm_tt *ttm)  in nouveau_ttm_tt_unpopulate() argument
 1282  return ttm_pool_free(&drm->ttm.bdev.pool, ttm);  in nouveau_ttm_tt_unpopulate()
 1287  struct ttm_tt *ttm)  in nouveau_ttm_tt_destroy() argument
  [all …]
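The nouveau populate/unpopulate pair above skips page allocation for imported (sg-backed) objects and otherwise defers to the device's ttm_pool. A sketch of that shape — the TTM_TT_FLAG_EXTERNAL test stands in for nouveau's "slave" check, and the pool is taken via the ttm_device argument rather than nouveau's drm->ttm.bdev path. These two could also fill the populate/unpopulate slots left NULL in the ttm_device_funcs sketch under ttm_device.h above:

    #include <drm/ttm/ttm_tt.h>
    #include <drm/ttm/ttm_pool.h>
    #include <drm/ttm/ttm_device.h>

    static int example_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm,
                                   struct ttm_operation_ctx *ctx)
    {
            if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
                    return 0;       /* pages come from the importer's sg table */

            return ttm_pool_alloc(&bdev->pool, ttm, ctx);
    }

    static void example_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
    {
            if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
                    return;

            ttm_pool_free(&bdev->pool, ttm);
    }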
nouveau_ttm.h
    8  return container_of(bd, struct nouveau_drm, ttm.bdev);  in nouveau_bdev()
   24  int nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg);
   25  void nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
   26  void nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
nouveau_mem.c
   99  type = drm->ttm.type_ncoh[!!mem->kind];  in nouveau_mem_host()
  101  type = drm->ttm.type_host[0];  in nouveau_mem_host()
  138  drm->ttm.type_vram, page, size,  in nouveau_mem_vram()
  146  drm->ttm.type_vram, page, size,  in nouveau_mem_vram()
/linux/drivers/gpu/drm/vmwgfx/
vmwgfx_ttm_buffer.c
  478  vmw_mob_create(ttm->num_pages);  in vmw_ttm_bind()
  484  &vmw_be->vsgt, ttm->num_pages,  in vmw_ttm_bind()
  495  struct ttm_tt *ttm)  in vmw_ttm_unbind() argument
  526  ttm_tt_fini(ttm);  in vmw_ttm_destroy()
  541  if (ttm_tt_is_populated(ttm))  in vmw_ttm_populate()
  560  ttm_pool_free(&bdev->pool, ttm);  in vmw_ttm_populate()
  565  struct ttm_tt *ttm)  in vmw_ttm_unpopulate() argument
  571  vmw_ttm_unbind(bdev, ttm);  in vmw_ttm_unpopulate()
  580  for (i = 0; i < ttm->num_pages; ++i)  in vmw_ttm_unpopulate()
  584  ttm_pool_free(&bdev->pool, ttm);  in vmw_ttm_unpopulate()
  [all …]
vmwgfx_blit.c
  468  if (!ttm_tt_is_populated(dst->ttm)) {  in vmw_bo_cpu_blit()
  469  ret = dst->bdev->funcs->ttm_tt_populate(dst->bdev, dst->ttm, &ctx);  in vmw_bo_cpu_blit()
  474  if (!ttm_tt_is_populated(src->ttm)) {  in vmw_bo_cpu_blit()
  475  ret = src->bdev->funcs->ttm_tt_populate(src->bdev, src->ttm, &ctx);  in vmw_bo_cpu_blit()
  484  d.dst_pages = dst->ttm->pages;  in vmw_bo_cpu_blit()
  485  d.src_pages = src->ttm->pages;  in vmw_bo_cpu_blit()
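vmw_bo_cpu_blit() makes sure both source and destination TTMs are populated before touching their page arrays, calling the driver hook through bdev->funcs rather than the ttm_tt_populate() wrapper. A sketch of that guard with an invented helper name:

    static int example_prepare_cpu_blit(struct ttm_buffer_object *dst,
                                        struct ttm_buffer_object *src,
                                        struct ttm_operation_ctx *ctx)
    {
            int ret;

            if (!ttm_tt_is_populated(dst->ttm)) {
                    ret = dst->bdev->funcs->ttm_tt_populate(dst->bdev, dst->ttm, ctx);
                    if (ret)
                            return ret;
            }

            if (!ttm_tt_is_populated(src->ttm)) {
                    ret = src->bdev->funcs->ttm_tt_populate(src->bdev, src->ttm, ctx);
                    if (ret)
                            return ret;
            }

            return 0;
    }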
/linux/drivers/gpu/drm/qxl/
qxl_ttm.c
  102  static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)  in qxl_ttm_backend_destroy() argument
  104  ttm_tt_fini(ttm);  in qxl_ttm_backend_destroy()
  105  kfree(ttm);  in qxl_ttm_backend_destroy()
  111  struct ttm_tt *ttm;  in qxl_ttm_tt_create() local
  113  ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);  in qxl_ttm_tt_create()
  114  if (ttm == NULL)  in qxl_ttm_tt_create()
  116  if (ttm_tt_init(ttm, bo, page_flags, ttm_cached)) {  in qxl_ttm_tt_create()
  117  kfree(ttm);  in qxl_ttm_tt_create()
  120  return ttm;  in qxl_ttm_tt_create()
  152  if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {  in qxl_bo_move()
/linux/arch/powerpc/perf/
ppc970-pmu.c
  264  unsigned int ttm, grp;  in p970_compute_mmcr() local
  321  ttm = unitmap[i];  in p970_compute_mmcr()
  322  ++ttmuse[(ttm >> 2) & 1];  in p970_compute_mmcr()
  323  mmcr1 |= (unsigned long)(ttm & ~4) << MMCR1_TTM1SEL_SH;  in p970_compute_mmcr()
  335  ttm = (unitmap[unit] >> 2) & 1;  in p970_compute_mmcr()
  337  ttm = 2;  in p970_compute_mmcr()
  339  ttm = 3;  in p970_compute_mmcr()
  343  mmcr1 |= (unsigned long)ttm  in p970_compute_mmcr()