/linux/drivers/gpu/drm/etnaviv/

etnaviv_gpu.c
    403  gpu->identity.model, gpu->identity.revision);  in etnaviv_hw_identify()
    478  gpu->base_rate_core >> gpu->freq_scale);  in etnaviv_gpu_update_clock()
    480  gpu->base_rate_shader >> gpu->freq_scale);  in etnaviv_gpu_update_clock()
   1134  f->gpu = gpu;  in etnaviv_gpu_fence_alloc()
   1339  struct etnaviv_gpu *gpu = submit->gpu;  in etnaviv_gpu_submit() local
   1421  event_free(gpu, gpu->sync_point_event);  in sync_point_worker()
   1491  queue_work(gpu->wq, &gpu->sync_point_work);  in irq_handler()
   1587  if (gpu->initialized && gpu->fe_running) {  in etnaviv_gpu_hw_suspend()
   1709  priv->gpu[priv->num_gpus++] = gpu;  in etnaviv_gpu_bind()
   1802  dev_name(gpu->dev), gpu);  in etnaviv_gpu_platform_probe()
    [all …]

etnaviv_buffer.c
     94  lockdep_assert_held(&gpu->lock);  in etnaviv_cmd_select_pipe()
    102  if (gpu->exec_state == ETNA_PIPE_2D)  in etnaviv_cmd_select_pipe()
    166  lockdep_assert_held(&gpu->lock);  in etnaviv_buffer_init()
    183  lockdep_assert_held(&gpu->lock);  in etnaviv_buffer_config_mmuv2()
    218  lockdep_assert_held(&gpu->lock);  in etnaviv_buffer_config_pta()
    240  lockdep_assert_held(&gpu->lock);  in etnaviv_buffer_end()
    242  if (gpu->exec_state == ETNA_PIPE_2D)  in etnaviv_buffer_end()
    306  lockdep_assert_held(&gpu->lock);  in etnaviv_sync_point_queue()
    354  lockdep_assert_held(&gpu->lock);  in etnaviv_buffer_queue()
    437  gpu->flush_seq = new_flush_seq;  in etnaviv_buffer_queue()
    [all …]

etnaviv_perfmon.c
     18  u32 (*sample)(struct etnaviv_gpu *gpu,
     66  pipe_select(gpu, clock, i);  in pipe_perf_reg_read()
     71  pipe_select(gpu, clock, 0);  in pipe_perf_reg_read()
     85  pipe_select(gpu, clock, i);  in pipe_reg_read()
     86  value += gpu_read(gpu, signal->data);  in pipe_reg_read()
     90  pipe_select(gpu, clock, 0);  in pipe_reg_read()
    106  return gpu_read(gpu, reg);  in hi_total_cycle_read()
    120  return gpu_read(gpu, reg);  in hi_total_idle_cycle_read()
    508  dom = pm_domain(gpu, domain->iter);  in etnaviv_pm_query_dom()
    533  dom = pm_domain(gpu, signal->domain);  in etnaviv_pm_query_sig()
    [all …]

etnaviv_drv.c
     67  struct etnaviv_gpu *gpu = priv->gpu[i];  in etnaviv_open() local
     70  if (gpu) {  in etnaviv_open()
     94  struct etnaviv_gpu *gpu = priv->gpu[i];  in etnaviv_postclose() local
     96  if (gpu)  in etnaviv_postclose()
    214  gpu = priv->gpu[i];  in show_each_gpu()
    256  gpu = priv->gpu[args->pipe];  in etnaviv_ioctl_get_param()
    257  if (!gpu)  in etnaviv_ioctl_get_param()
    352  gpu = priv->gpu[args->pipe];  in etnaviv_ioctl_wait_fence()
    403  gpu = priv->gpu[args->pipe];  in etnaviv_ioctl_gem_wait()
    431  gpu = priv->gpu[args->pipe];  in etnaviv_ioctl_pm_query_dom()
    [all …]

etnaviv_iommu_v2.c
    175  if (gpu->mmu_context)  in etnaviv_iommuv2_restore_nonsec()
    179  prefetch = etnaviv_buffer_config_mmuv2(gpu,  in etnaviv_iommuv2_restore_nonsec()
    182  etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),  in etnaviv_iommuv2_restore_nonsec()
    184  etnaviv_gpu_wait_idle(gpu, 100);  in etnaviv_iommuv2_restore_nonsec()
    199  if (gpu->mmu_context)  in etnaviv_iommuv2_restore_sec()
    203  gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,  in etnaviv_iommuv2_restore_sec()
    205  gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,  in etnaviv_iommuv2_restore_sec()
    224  etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),  in etnaviv_iommuv2_restore_sec()
    226  etnaviv_gpu_wait_idle(gpu, 100);  in etnaviv_iommuv2_restore_sec()
    247  switch (gpu->sec_mode) {  in etnaviv_iommuv2_restore()
    [all …]

etnaviv_sched.c
     89  struct etnaviv_gpu *gpu = submit->gpu;  in etnaviv_sched_timedout_job() local
     94  drm_sched_stop(&gpu->sched, sched_job);  in etnaviv_sched_timedout_job()
    111  gpu->hangcheck_dma_addr = dma_addr;  in etnaviv_sched_timedout_job()
    120  etnaviv_gpu_recover_hang(gpu);  in etnaviv_sched_timedout_job()
    122  drm_sched_resubmit_jobs(&gpu->sched);  in etnaviv_sched_timedout_job()
    124  drm_sched_start(&gpu->sched, true);  in etnaviv_sched_timedout_job()
    129  drm_sched_start(&gpu->sched, true);  in etnaviv_sched_timedout_job()
    159  mutex_lock(&submit->gpu->fence_lock);  in etnaviv_sched_push_job()
    184  mutex_unlock(&submit->gpu->fence_lock);  in etnaviv_sched_push_job()
    196  dev_name(gpu->dev));  in etnaviv_sched_init()
    [all …]

/linux/drivers/gpu/drm/msm/

msm_gpu.c
     58  if (gpu->core_clk && gpu->fast_rate)  in enable_clk()
    159  ret = gpu->funcs->hw_init(gpu);  in msm_gpu_hw_init()
    215  gpu->funcs->show(gpu, state, &p);  in msm_gpu_devcoredump_read()
    275  state = gpu->funcs->gpu_state_get(gpu);  in msm_gpu_crashstate_capture()
    427  gpu->funcs->recover(gpu);  in recover_worker()
    440  gpu->funcs->submit(gpu, submit);  in recover_worker()
    722  update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);  in msm_gpu_retire()
    765  gpu->funcs->submit(gpu, submit);  in msm_gpu_submit()
    778  return gpu->funcs->irq(gpu);  in irq_handler()
    908  gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);  in msm_gpu_init()
    [all …]

msm_gpu_devfreq.c
     33  if (gpu->devfreq.idle_freq) {  in msm_devfreq_target()
     44  if (gpu->funcs->gpu_set_freq)  in msm_devfreq_target()
     45  gpu->funcs->gpu_set_freq(gpu, opp);  in msm_devfreq_target()
     56  if (gpu->devfreq.idle_freq)  in get_freq()
     60  return gpu->funcs->gpu_get_freq(gpu);  in get_freq()
     72  status->busy_time = gpu->funcs->gpu_busy(gpu);  in msm_devfreq_get_dev_status()
     76  gpu->devfreq.time = time;  in msm_devfreq_get_dev_status()
    103  if (!gpu->funcs->gpu_busy)  in msm_devfreq_init()
    129  gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, df->devfreq);  in msm_devfreq_init()
    133  gpu->cooling = NULL;  in msm_devfreq_init()
    [all …]

msm_gpu.h
     46  int (*hw_init)(struct msm_gpu *gpu);
     53  void (*recover)(struct msm_gpu *gpu);
     54  void (*destroy)(struct msm_gpu *gpu);
     70  (struct msm_gpu *gpu);
    341  if (rn >= gpu->nr_rings)  in msm_gpu_convert_priority()
    542  if (gpu->crashstate) {  in msm_gpu_crashstate_get()
    544  state = gpu->crashstate;  in msm_gpu_crashstate_get()
    556  if (gpu->crashstate) {  in msm_gpu_crashstate_put()
    557  if (gpu->funcs->gpu_state_put(gpu->crashstate))  in msm_gpu_crashstate_put()
    558  gpu->crashstate = NULL;  in msm_gpu_crashstate_put()
    [all …]

/linux/drivers/gpu/drm/msm/adreno/

a4xx_gpu.c
    181  return a4xx_idle(gpu);  in a4xx_me_init()
    326  gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));  in a4xx_hw_init()
    364  a4xx_dump(gpu);  in a4xx_recover()
    369  adreno_recover(gpu);  in a4xx_recover()
    377  DBG("%s", gpu->name);  in a4xx_destroy()
    389  if (!adreno_idle(gpu, gpu->rb[0]))  in a4xx_idle()
    419  msm_gpu_retire(gpu);  in a4xx_irq()
    570  adreno_dump(gpu);  in a4xx_dump()
    648  struct msm_gpu *gpu;  in a4xx_gpu_init() local
    686  if (!gpu->aspace) {  in a4xx_gpu_init()
    [all …]

a5xx_gpu.c
    122  msm_gpu_retire(gpu);  in a5xx_submit_in_rb()
    963  a5xx_flush(gpu, gpu->rb[0], true);  in a5xx_hw_init()
    964  if (!a5xx_idle(gpu, gpu->rb[0]))  in a5xx_hw_init()
    981  a5xx_flush(gpu, gpu->rb[0], true);  in a5xx_hw_init()
    982  if (!a5xx_idle(gpu, gpu->rb[0]))  in a5xx_hw_init()
   1016  a5xx_dump(gpu);  in a5xx_recover()
   1220  struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);  in a5xx_fault_detect_irq()
   1244  kthread_queue_work(gpu->worker, &gpu->recover_work);  in a5xx_fault_detect_irq()
   1361  gpu->name,  in a5xx_pm_resume()
   1372  gpu->name);  in a5xx_pm_resume()
    [all …]

a3xx_gpu.c
    110  return a3xx_idle(gpu);  in a3xx_me_init()
    120  DBG("%s", gpu->name);  in a3xx_hw_init()
    277  gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));  in a3xx_hw_init()
    365  a3xx_dump(gpu);  in a3xx_recover()
    370  adreno_recover(gpu);  in a3xx_recover()
    390  if (!adreno_idle(gpu, gpu->rb[0]))  in a3xx_idle()
    416  msm_gpu_retire(gpu);  in a3xx_irq()
    464  adreno_dump(gpu);  in a3xx_dump()
    519  struct msm_gpu *gpu;  in a3xx_gpu_init() local
    558  if (!gpu->aspace) {  in a3xx_gpu_init()
    [all …]

a5xx_power.c
    164  gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));  in a530_lm_setup()
    165  gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);  in a530_lm_setup()
    199  gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));  in a540_lm_setup()
    200  gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);  in a540_lm_setup()
    247  gpu->name);  in a5xx_gpmu_init()
    264  gpu->name);  in a5xx_gpmu_init()
    271  gpu->name, val);  in a5xx_gpmu_init()
    306  a530_lm_setup(gpu);  in a5xx_power_init()
    308  a540_lm_setup(gpu);  in a5xx_power_init()
    311  a5xx_pc_init(gpu);  in a5xx_power_init()
    [all …]

a2xx_gpu.c
     99  return a2xx_idle(gpu);  in a2xx_me_init()
    111  DBG("%s", gpu->name);  in a2xx_hw_init()
    212  gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));  in a2xx_hw_init()
    261  a2xx_dump(gpu);  in a2xx_recover()
    266  adreno_recover(gpu);  in a2xx_recover()
    274  DBG("%s", gpu->name);  in a2xx_destroy()
    284  if (!adreno_idle(gpu, gpu->rb[0]))  in a2xx_idle()
    333  msm_gpu_retire(gpu);  in a2xx_irq()
    434  adreno_dump(gpu);  in a2xx_dump()
    535  if (!gpu->aspace) {  in a2xx_gpu_init()
    [all …]

a6xx_gpu.c
   1105  a6xx_flush(gpu, gpu->rb[0]);  in hw_init()
   1106  if (!a6xx_idle(gpu, gpu->rb[0]))  in hw_init()
   1174  a6xx_dump(gpu);  in a6xx_recover()
   1182  gpu->funcs->pm_suspend(gpu);  in a6xx_recover()
   1183  gpu->funcs->pm_resume(gpu);  in a6xx_recover()
   1245  gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);  in a6xx_fault_handler()
   1291  kthread_queue_work(gpu->worker, &gpu->fault_work);  in a6xx_fault_handler()
   1343  struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);  in a6xx_fault_detect_irq()
   1374  kthread_queue_work(gpu->worker, &gpu->recover_work);  in a6xx_fault_detect_irq()
   1875  msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,  in a6xx_gpu_init()
    [all …]

adreno_gpu.h
    160  return gpu->revn == 225;  in adreno_is_a225()
    165  return gpu->revn == 305;  in adreno_is_a305()
    171  return gpu->revn == 307;  in adreno_is_a306()
    176  return gpu->revn == 320;  in adreno_is_a320()
    181  return gpu->revn == 330;  in adreno_is_a330()
    186  return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);  in adreno_is_a330v2()
    191  return gpu->revn == 405;  in adreno_is_a405()
    196  return gpu->revn == 420;  in adreno_is_a420()
    246  return (gpu->revn == 640) || (gpu->revn == 680);  in adreno_is_a640_family()
    267  return adreno_is_a660(gpu) || adreno_is_7c3(gpu);  in adreno_is_a660_family()
    [all …]

a5xx_preempt.c
     80  struct drm_device *dev = gpu->dev;  in a5xx_preempt_timer()
     86  kthread_queue_work(gpu->worker, &gpu->recover_work);  in a5xx_preempt_timer()
     97  if (gpu->nr_rings == 1)  in a5xx_preempt_trigger()
    108  ring = get_next_ring(gpu);  in a5xx_preempt_trigger()
    181  gpu->name);  in a5xx_preempt_irq()
    182  kthread_queue_work(gpu->worker, &gpu->recover_work);  in a5xx_preempt_irq()
    201  a5xx_gpu->cur_ring = gpu->rb[0];  in a5xx_preempt_hw_init()
    204  if (gpu->nr_rings == 1)  in a5xx_preempt_hw_init()
    287  if (gpu->nr_rings <= 1)  in a5xx_preempt_init()
    296  a5xx_preempt_fini(gpu);  in a5xx_preempt_init()
    [all …]

a6xx_gpu_state.c
    116  SZ_1M, MSM_BO_WC, gpu->aspace,  in a6xx_crashdumper_init()
    209  gpu_write(gpu, ctrl0, reg);  in vbif_debugbus_read()
    212  gpu_write(gpu, ctrl1, i);  in vbif_debugbus_read()
    258  ptr += vbif_debugbus_read(gpu,  in a6xx_get_vbif_debugbus_block()
    264  ptr += vbif_debugbus_read(gpu,  in a6xx_get_vbif_debugbus_block()
    273  ptr += vbif_debugbus_read(gpu,  in a6xx_get_vbif_debugbus_block()
    388  a6xx_get_debugbus_block(gpu,  in a6xx_get_debugbus()
    823  a6xx_get_ahb_gpu_registers(gpu,  in a6xx_get_registers()
    828  a6xx_get_ahb_gpu_registers(gpu,  in a6xx_get_registers()
    832  a6xx_get_ahb_gpu_registers(gpu,  in a6xx_get_registers()
    [all …]

a5xx_debugfs.c
     21  gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, i);  in pfp_print()
     23  gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));  in pfp_print()
     36  gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));  in me_print()
     45  gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);  in meq_print()
     49  gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));  in meq_print()
     79  show(priv->gpu, &p);  in show()
     97  struct msm_gpu *gpu = priv->gpu;  in reset_set() local
    130  gpu->needs_hw_init = true;  in reset_set()
    132  pm_runtime_get_sync(&gpu->pdev->dev);  in reset_set()
    133  gpu->funcs->recover(gpu);  in reset_set()
    [all …]

adreno_gpu.c
    411  VERB("%s", gpu->name);  in adreno_hw_init()
    440  return gpu->funcs->get_rptr(gpu, ring);  in get_rptr()
    445  return gpu->rb[0];  in adreno_active_ring()
    456  gpu->funcs->pm_suspend(gpu);  in adreno_recover()
    457  gpu->funcs->pm_resume(gpu);  in adreno_recover()
    459  ret = msm_gpu_hw_init(gpu);  in adreno_recover()
    483  gpu_write(gpu, reg, wptr);  in adreno_flush()
    847  struct msm_gpu *gpu)  in adreno_get_pwrlevels() argument
    853  gpu->fast_rate = 0;  in adreno_get_pwrlevels()
    868  gpu->fast_rate = freq;  in adreno_get_pwrlevels()
    [all …]

/linux/Documentation/gpu/

drm-kms-helpers.rst
    151  .. kernel-doc:: drivers/gpu/drm/drm_bridge.c
    157  .. kernel-doc:: drivers/gpu/drm/drm_bridge.c
    205  .. kernel-doc:: drivers/gpu/drm/drm_panel.c
    211  .. kernel-doc:: drivers/gpu/drm/drm_panel.c
    229  .. kernel-doc:: drivers/gpu/drm/drm_hdcp.c
    326  .. kernel-doc:: drivers/gpu/drm/drm_dsc.c
    332  .. kernel-doc:: drivers/gpu/drm/drm_dsc.c
    350  .. kernel-doc:: drivers/gpu/drm/drm_edid.c
    388  .. kernel-doc:: drivers/gpu/drm/drm_rect.c
    415  .. kernel-doc:: drivers/gpu/drm/drm_of.c
    [all …]

amdgpu.rst
     13  .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
     49  .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
    158  .. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    169  .. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    175  .. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    181  .. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    187  .. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    193  .. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    199  .. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    205  .. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    [all …]

i915.rst
     19  .. kernel-doc:: drivers/gpu/drm/i915/intel_runtime_pm.c
     22  .. kernel-doc:: drivers/gpu/drm/i915/intel_runtime_pm.c
     25  .. kernel-doc:: drivers/gpu/drm/i915/intel_uncore.c
     31  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
     34  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
     37  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
     40  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
     46  .. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
     49  .. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
     55  .. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
    [all …]

/linux/drivers/gpu/drm/

.built-in.a.cmd
      1  …gpu/drm/built-in.a := echo >/dev/null; rm -f drivers/gpu/drm/built-in.a; /usr/bin/ccache /home/tes…
    [all …]

/linux/drivers/gpu/drm/amd/amdkfd/

kfd_topology.c
    106  if (top_dev->gpu && top_dev->gpu->pdev == pdev) {  in kfd_device_by_pci_dev()
    124  if (top_dev->gpu && top_dev->gpu->kgd == kgd) {  in kfd_device_by_kgd()
    315  if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu))  in mem_show()
    429  if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))  in node_show()
    445  if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))  in node_show()
   1106  if (!gpu)  in kfd_generate_gpu_id()
   1151  dev->gpu = gpu;  in kfd_assign_gpu()
   1155  mem->gpu = dev->gpu;  in kfd_assign_gpu()
   1157  cache->gpu = dev->gpu;  in kfd_assign_gpu()
   1159  iolink->gpu = dev->gpu;  in kfd_assign_gpu()
    [all …]