Lines Matching refs:drm (drivers/gpu/drm/nouveau/nouveau_bo.c)

58 struct nouveau_drm *drm = nouveau_drm(dev); in nv10_bo_update_tile_region() local
59 int i = reg - drm->tile.reg; in nv10_bo_update_tile_region()
60 struct nvkm_fb *fb = nvxx_fb(&drm->client.device); in nv10_bo_update_tile_region()
77 struct nouveau_drm *drm = nouveau_drm(dev); in nv10_bo_get_tile_region() local
78 struct nouveau_drm_tile *tile = &drm->tile.reg[i]; in nv10_bo_get_tile_region()
80 spin_lock(&drm->tile.lock); in nv10_bo_get_tile_region()
88 spin_unlock(&drm->tile.lock); in nv10_bo_get_tile_region()
96 struct nouveau_drm *drm = nouveau_drm(dev); in nv10_bo_put_tile_region() local
99 spin_lock(&drm->tile.lock); in nv10_bo_put_tile_region()
102 spin_unlock(&drm->tile.lock); in nv10_bo_put_tile_region()
110 struct nouveau_drm *drm = nouveau_drm(dev); in nv10_bo_set_tiling() local
111 struct nvkm_fb *fb = nvxx_fb(&drm->client.device); in nv10_bo_set_tiling()
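
Note: all four nv10_bo_* helpers above serialize access to the fixed drm->tile.reg[]
array through the drm->tile.lock spinlock; the entries themselves are never freed,
only claimed and released. A minimal sketch of that pattern, using the field names
visible in the listing (the claim logic is illustrative, not the driver's exact code):

	#include <linux/spinlock.h>

	static struct nouveau_drm_tile *
	tile_slot_get(struct nouveau_drm *drm, int i)
	{
		struct nouveau_drm_tile *tile = &drm->tile.reg[i];

		spin_lock(&drm->tile.lock);
		/* claim the slot (or decide to return NULL) while locked */
		spin_unlock(&drm->tile.lock);
		return tile;
	}
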
138 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_del_ttm() local
139 struct drm_device *dev = drm->dev; in nouveau_bo_del_ttm()
169 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_fixup_align() local
170 struct nvif_device *device = &drm->client.device; in nouveau_bo_fixup_align()
203 struct nouveau_drm *drm = cli->drm; in nouveau_bo_alloc() local
210 NV_WARN(drm, "skipped size %016llx\n", *size); in nouveau_bo_alloc()
220 nvbo->bo.bdev = &drm->ttm.bdev; in nouveau_bo_alloc()
230 if (!nouveau_drm_use_coherent_gpu_mapping(drm)) in nouveau_bo_alloc()
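
Note: nearly every function below recovers the per-device nouveau_drm from TTM's
pointers via nouveau_bdev(bo->bdev). Line 220 shows why that works: the BO's bdev
points at the ttm.bdev member embedded in nouveau_drm, so the accessor can simply
invert the embedding. A sketch (assuming the member really is ttm.bdev, as line
220 suggests):

	#include <linux/kernel.h>	/* container_of() */

	static inline struct nouveau_drm *
	nouveau_bdev(struct ttm_device *bd)
	{
		/* inverse of "nvbo->bo.bdev = &drm->ttm.bdev" at line 220 */
		return container_of(bd, struct nouveau_drm, ttm.bdev);
	}
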
371 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in set_placement_range() local
372 u64 vram_size = drm->client.device.info.ram_size; in set_placement_range()
375 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && in set_placement_range()
421 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_pin() local
430 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && in nouveau_bo_pin()
454 NV_ERROR(drm, "bo %p pinned elsewhere: " in nouveau_bo_pin()
479 drm->gem.vram_available -= bo->base.size; in nouveau_bo_pin()
482 drm->gem.gart_available -= bo->base.size; in nouveau_bo_pin()
498 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_unpin() local
510 drm->gem.vram_available += bo->base.size; in nouveau_bo_unpin()
513 drm->gem.gart_available += bo->base.size; in nouveau_bo_unpin()
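
Note: nouveau_bo_pin() and nouveau_bo_unpin() keep gem.vram_available and
gem.gart_available strictly symmetric: pinning subtracts bo->base.size from the
counter matching the placement, unpinning adds the same amount back. A hedged
sketch of that bookkeeping folded into one helper (the helper and its bool
parameters are mine, not the driver's):

	static void
	pin_account(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		    bool vram, bool pin)
	{
		s64 delta = pin ? -(s64)bo->base.size : (s64)bo->base.size;

		if (vram)
			drm->gem.vram_available += delta;
		else
			drm->gem.gart_available += delta;
	}
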
551 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_sync_for_device() local
558 NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma); in nouveau_bo_sync_for_device()
577 dma_sync_single_for_device(drm->dev->dev, in nouveau_bo_sync_for_device()
587 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_sync_for_cpu() local
594 NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma); in nouveau_bo_sync_for_cpu()
614 dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i], in nouveau_bo_sync_for_cpu()
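
Note: the two sync helpers bracket CPU access to pages the GPU also sees:
dma_sync_single_for_device() before handing pages to the GPU,
dma_sync_single_for_cpu() before touching them from the CPU. The per-page loops
implied by lines 577 and 614 look roughly like this (the direction flags are an
assumption; the driver may pass others):

	#include <linux/dma-mapping.h>

	/* nouveau_bo_sync_for_device(): flush CPU writes toward the device */
	for (i = 0; i < ttm_dma->num_pages; i++)
		dma_sync_single_for_device(drm->dev->dev,
					   ttm_dma->dma_address[i],
					   PAGE_SIZE, DMA_TO_DEVICE);

	/* nouveau_bo_sync_for_cpu(): invalidate before CPU reads */
	for (i = 0; i < ttm_dma->num_pages; i++)
		dma_sync_single_for_cpu(drm->dev->dev,
					ttm_dma->dma_address[i],
					PAGE_SIZE, DMA_FROM_DEVICE);
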
622 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_add_io_reserve_lru() local
625 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_bo_add_io_reserve_lru()
626 list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru); in nouveau_bo_add_io_reserve_lru()
627 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_bo_add_io_reserve_lru()
632 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_del_io_reserve_lru() local
635 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_bo_del_io_reserve_lru()
637 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_bo_del_io_reserve_lru()
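
Note: lines 625-637 implement the LRU discipline for BOs holding a reserved I/O
mapping: every add re-queues the entry at the tail under drm->ttm.io_reserve_mutex,
so the list head is always the least recently used eviction candidate. The matching
removal is presumably list_del_init(), which keeps a later re-add legal:

	#include <linux/list.h>
	#include <linux/mutex.h>

	/* add / touch: most recently used entries live at the tail */
	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);

	/* remove: the _init variant leaves the node safe to move_tail again */
	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_del_init(&nvbo->io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
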
702 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_ttm_tt_create() local
704 if (drm->agp.bridge) { in nouveau_ttm_tt_create()
705 return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags); in nouveau_ttm_tt_create()
717 struct nouveau_drm *drm = nouveau_bdev(bdev); in nouveau_ttm_tt_bind() local
722 if (drm->agp.bridge) in nouveau_ttm_tt_bind()
732 struct nouveau_drm *drm = nouveau_bdev(bdev); in nouveau_ttm_tt_unbind() local
734 if (drm->agp.bridge) { in nouveau_ttm_tt_unbind()
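
Note: nouveau_ttm_tt_create/bind/unbind all test drm->agp.bridge first; on AGP
systems TTM's generic AGP backend owns the TT object, otherwise the driver's own
DMA-backed path runs. A sketch of the create-side dispatch (the non-AGP fallback
name is an assumption):

	static struct ttm_tt *
	tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
	{
		struct nouveau_drm *drm = nouveau_bdev(bo->bdev);

		if (drm->agp.bridge)	/* AGP: TTM's AGP backend */
			return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
		/* assumed fallback: the driver's SG/DMA ttm_tt constructor */
		return nouveau_sgdma_create_ttm(bo, page_flags);
	}
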
761 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, in nouveau_bo_move_prep() argument
766 struct nvif_vmm *vmm = &drm->client.vmm.vmm; in nouveau_bo_move_prep()
797 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_move_m2mf() local
798 struct nouveau_channel *chan = drm->ttm.chan; in nouveau_bo_move_m2mf()
807 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move_m2mf()
808 ret = nouveau_bo_move_prep(drm, bo, new_reg); in nouveau_bo_move_m2mf()
813 if (drm_drv_uses_atomic_modeset(drm->dev)) in nouveau_bo_move_m2mf()
819 ret = drm->ttm.move(chan, bo, bo->resource, new_reg); in nouveau_bo_move_m2mf()
836 nouveau_bo_move_init(struct nouveau_drm *drm) in nouveau_bo_move_init() argument
877 chan = drm->cechan; in nouveau_bo_move_init()
879 chan = drm->channel; in nouveau_bo_move_init()
886 &drm->ttm.copy); in nouveau_bo_move_init()
888 ret = mthd->init(chan, drm->ttm.copy.handle); in nouveau_bo_move_init()
890 nvif_object_dtor(&drm->ttm.copy); in nouveau_bo_move_init()
894 drm->ttm.move = mthd->exec; in nouveau_bo_move_init()
895 drm->ttm.chan = chan; in nouveau_bo_move_init()
901 NV_INFO(drm, "MM: using %s for buffer copies\n", name); in nouveau_bo_move_init()
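
Note: nouveau_bo_move_init() probes copy methods against the copy-engine channel
(drm->cechan) when present, else the general channel, and on success caches the
winning hook in drm->ttm.move and the channel in drm->ttm.chan; line 819 shows
nouveau_bo_move_m2mf() later dispatching through exactly that pointer. The table
entries plausibly look like this (a simplified guess; the real table also carries
object classes):

	struct copy_method {		/* illustrative, not the driver's type */
		const char *name;	/* printed as "MM: using %s ..." */
		int (*init)(struct nouveau_channel *, u32 handle);
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_resource *, struct ttm_resource *);
	};
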
938 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_vm_bind() local
939 struct drm_device *dev = drm->dev; in nouveau_bo_vm_bind()
947 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { in nouveau_bo_vm_bind()
960 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_vm_cleanup() local
961 struct drm_device *dev = drm->dev; in nouveau_bo_vm_cleanup()
974 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_move() local
993 NV_WARN(drm, "Moving pinned object %p!\n", nvbo); in nouveau_bo_move()
995 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move()
1022 if (drm->ttm.move) { in nouveau_bo_move()
1044 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move()
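
Note: a pattern that recurs through the whole file: behaviour is gated on the
chipset generation in drm->client.device.info.family, compared against the ordered
NV_DEVICE_INFO_V0_* constants (CELSIUS for the nv10 tiling paths, TESLA and newer
for VMM-based mappings):

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		/* pre-G80: fix up tile regions around the move */
	} else {
		/* G80+: VMM mapping prep instead (nouveau_bo_move_prep) */
	}
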
1058 nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm, in nouveau_ttm_io_mem_free_locked() argument
1063 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { in nouveau_ttm_io_mem_free_locked()
1081 struct nouveau_drm *drm = nouveau_bdev(bdev); in nouveau_ttm_io_mem_reserve() local
1082 struct nvkm_device *device = nvxx_device(&drm->client.device); in nouveau_ttm_io_mem_reserve()
1084 struct nvif_mmu *mmu = &drm->client.mmu; in nouveau_ttm_io_mem_reserve()
1087 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_reserve()
1096 if (drm->agp.bridge) { in nouveau_ttm_io_mem_reserve()
1098 drm->agp.base; in nouveau_ttm_io_mem_reserve()
1099 reg->bus.is_iomem = !drm->agp.cma; in nouveau_ttm_io_mem_reserve()
1103 if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || in nouveau_ttm_io_mem_reserve()
1116 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && in nouveau_ttm_io_mem_reserve()
1117 mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED) in nouveau_ttm_io_mem_reserve()
1122 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { in nouveau_ttm_io_mem_reserve()
1170 nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru, in nouveau_ttm_io_mem_reserve()
1177 nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource); in nouveau_ttm_io_mem_reserve()
1182 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_reserve()
1189 struct nouveau_drm *drm = nouveau_bdev(bdev); in nouveau_ttm_io_mem_free() local
1191 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_free()
1192 nouveau_ttm_io_mem_free_locked(drm, reg); in nouveau_ttm_io_mem_free()
1193 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_free()
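
Note: nouveau_ttm_io_mem_reserve() holds drm->ttm.io_reserve_mutex across the whole
attempt; when mapping fails for lack of BAR space, lines 1170-1177 take the head of
io_reserve_lru (the least recently used BO), drop its mapping with
nouveau_ttm_io_mem_free_locked(), and retry. Roughly (try_map() and the -ENOSPC
trigger are assumptions standing in for the NVIF call and its error code):

	/* hypothetical stand-in for the NVIF BAR-mapping call */
	static int try_map(struct nouveau_drm *drm, struct ttm_resource *reg);

	static int
	io_mem_reserve(struct nouveau_drm *drm, struct ttm_resource *reg)
	{
		struct nouveau_bo *nvbo;
		int ret;

		mutex_lock(&drm->ttm.io_reserve_mutex);
	retry:
		ret = try_map(drm, reg);
		if (ret == -ENOSPC) {
			nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
							struct nouveau_bo,
							io_reserve_lru);
			if (nvbo) {
				list_del_init(&nvbo->io_reserve_lru);
				nouveau_ttm_io_mem_free_locked(drm,
							       nvbo->bo.resource);
				goto retry;
			}
		}
		mutex_unlock(&drm->ttm.io_reserve_mutex);
		return ret;
	}
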
1198 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_ttm_fault_reserve_notify() local
1200 struct nvkm_device *device = nvxx_device(&drm->client.device); in nouveau_ttm_fault_reserve_notify()
1208 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || in nouveau_ttm_fault_reserve_notify()
1219 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA || in nouveau_ttm_fault_reserve_notify()
1251 struct nouveau_drm *drm; in nouveau_ttm_tt_populate() local
1263 drm = nouveau_bdev(bdev); in nouveau_ttm_tt_populate()
1265 return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx); in nouveau_ttm_tt_populate()
1272 struct nouveau_drm *drm; in nouveau_ttm_tt_unpopulate() local
1280 drm = nouveau_bdev(bdev); in nouveau_ttm_tt_unpopulate()
1282 return ttm_pool_free(&drm->ttm.bdev.pool, ttm); in nouveau_ttm_tt_unpopulate()
1290 struct nouveau_drm *drm = nouveau_bdev(bdev); in nouveau_ttm_tt_destroy() local
1291 if (drm->agp.bridge) { in nouveau_ttm_tt_destroy()
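
Note: the populate/unpopulate pair delegates page allocation to the TTM pool
embedded in drm->ttm.bdev. ttm_pool_free() returns void, so the "return
ttm_pool_free(...)" on line 1282 only works because nouveau_ttm_tt_unpopulate()
itself returns void. The essential calls:

	#include <drm/ttm/ttm_pool.h>

	/* populate: back the ttm_tt with pages from the per-device pool */
	ret = ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);

	/* unpopulate: return them; no error to propagate */
	ttm_pool_free(&drm->ttm.bdev.pool, ttm);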