Lines matching refs:dev_priv (cross-reference hits: source line number, matching code, and enclosing function)

35 bool vmw_supports_3d(struct vmw_private *dev_priv) in vmw_supports_3d() argument
38 const struct vmw_fifo_state *fifo = dev_priv->fifo; in vmw_supports_3d()
40 if (!(dev_priv->capabilities & SVGA_CAP_3D)) in vmw_supports_3d()
43 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { in vmw_supports_3d()
46 if (!dev_priv->has_mob) in vmw_supports_3d()
49 result = vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_3D); in vmw_supports_3d()
54 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) in vmw_supports_3d()
57 BUG_ON(vmw_is_svga_v3(dev_priv)); in vmw_supports_3d()
59 fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN); in vmw_supports_3d()
63 hwversion = vmw_fifo_mem_read(dev_priv, in vmw_supports_3d()
76 if (dev_priv->active_display_unit == vmw_du_legacy) in vmw_supports_3d()
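Read together, the vmw_supports_3d() hits above describe a layered capability check: no SVGA_CAP_3D means no 3D at all; guest-backed-object devices additionally require MOB support and a non-zero SVGA3D_DEVCAP_3D answer; older devices instead need the extended FIFO and a usable 3D hardware version read from it, and the legacy display unit is excluded. Below is a minimal user-space model of that decision order; the flag values, struct fields, and helper names are illustrative stand-ins, not the driver's definitions.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the SVGA capability bits referenced above. */
#define CAP_3D            (1u << 0)
#define CAP_GBOBJECTS     (1u << 1)
#define CAP_EXTENDED_FIFO (1u << 2)

struct dev_model {
	uint32_t capabilities;
	bool     has_mob;             /* MOB (guest-backed) memory available     */
	bool     legacy_display_unit; /* active_display_unit == vmw_du_legacy    */
	uint32_t devcap_3d;           /* what SVGA3D_DEVCAP_3D would report      */
	uint32_t fifo_3d_hwversion;   /* 3D hardware version read from the FIFO  */
};

static bool model_supports_3d(const struct dev_model *d)
{
	if (!(d->capabilities & CAP_3D))
		return false;

	/* Guest-backed-object devices answer through a device capability. */
	if (d->capabilities & CAP_GBOBJECTS)
		return d->has_mob && d->devcap_3d != 0;

	/* Pre-GB devices publish the 3D hardware version in the extended FIFO. */
	if (!(d->capabilities & CAP_EXTENDED_FIFO))
		return false;
	if (d->fifo_3d_hwversion == 0)
		return false;

	/* The legacy display unit path is excluded from 3D. */
	return !d->legacy_display_unit;
}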
82 bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv) in vmw_fifo_have_pitchlock() argument
86 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) in vmw_fifo_have_pitchlock()
89 caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES); in vmw_fifo_have_pitchlock()
96 struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv) in vmw_fifo_create() argument
102 if (!dev_priv->fifo_mem) in vmw_fifo_create()
122 if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO) in vmw_fifo_create()
123 min = vmw_read(dev_priv, SVGA_REG_MEM_REGS); in vmw_fifo_create()
129 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min); in vmw_fifo_create()
130 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size); in vmw_fifo_create()
132 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min); in vmw_fifo_create()
133 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min); in vmw_fifo_create()
134 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0); in vmw_fifo_create()
137 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); in vmw_fifo_create()
139 max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX); in vmw_fifo_create()
140 min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN); in vmw_fifo_create()
141 fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES); in vmw_fifo_create()
143 drm_info(&dev_priv->drm, in vmw_fifo_create()
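The vmw_fifo_create() hits show the bootstrap order: bail out if there is no FIFO memory, size the register area (optionally from SVGA_REG_MEM_REGS on extended-FIFO devices), seed MIN/MAX/NEXT_CMD/STOP/BUSY, flip SVGA_REG_CONFIG_DONE, then read MAX/MIN/CAPABILITIES back for the drm_info banner. The sketch below models that ordering over a plain array standing in for the FIFO memory; the register indices, byte-based sizing, and printf are placeholders rather than the SVGA layout.

#include <stdint.h>
#include <stdio.h>

/* Placeholder word indices into the FIFO bookkeeping area. */
enum { FIFO_MIN, FIFO_MAX, FIFO_NEXT_CMD, FIFO_STOP, FIFO_BUSY,
       FIFO_CAPABILITIES, FIFO_NWORDS };

struct fifo_model {
	uint32_t mem[FIFO_NWORDS]; /* stands in for dev_priv->fifo_mem      */
	uint32_t mem_size;         /* stands in for dev_priv->fifo_mem_size */
	int      config_done;      /* stands in for SVGA_REG_CONFIG_DONE    */
};

/* Seed the bookkeeping registers in the order the hits above show. */
static void fifo_model_create(struct fifo_model *f, uint32_t reg_area_bytes)
{
	uint32_t min = reg_area_bytes;       /* command ring starts past the regs */

	f->mem[FIFO_MIN]      = min;
	f->mem[FIFO_MAX]      = f->mem_size;
	f->mem[FIFO_NEXT_CMD] = min;         /* producer offset: ring is empty */
	f->mem[FIFO_STOP]     = min;         /* consumer offset: ring is empty */
	f->mem[FIFO_BUSY]     = 0;

	f->config_done = 1;                  /* tell the device the layout is ready */

	/* CAPABILITIES is published by the device; here it is only read back. */
	printf("fifo max 0x%08x min 0x%08x cap 0x%08x\n",
	       (unsigned int)f->mem[FIFO_MAX],
	       (unsigned int)f->mem[FIFO_MIN],
	       (unsigned int)f->mem[FIFO_CAPABILITIES]);
}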
151 void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) in vmw_fifo_ping_host() argument
153 u32 *fifo_mem = dev_priv->fifo_mem; in vmw_fifo_ping_host()
155 vmw_write(dev_priv, SVGA_REG_SYNC, reason); in vmw_fifo_ping_host()
159 void vmw_fifo_destroy(struct vmw_private *dev_priv) in vmw_fifo_destroy() argument
161 struct vmw_fifo_state *fifo = dev_priv->fifo; in vmw_fifo_destroy()
176 dev_priv->fifo = NULL; in vmw_fifo_destroy()
179 static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes) in vmw_fifo_is_full() argument
181 uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX); in vmw_fifo_is_full()
182 uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD); in vmw_fifo_is_full()
183 uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN); in vmw_fifo_is_full()
184 uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP); in vmw_fifo_is_full()
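The four register reads in vmw_fifo_is_full() feed a single ring-buffer space test: the command ring occupies [MIN, MAX), NEXT_CMD is the producer offset and STOP the consumer offset. A minimal model of that test follows, assuming the common case in which NEXT_CMD sits at or ahead of STOP (the reserve path handles the wrapped layout separately).

#include <stdbool.h>
#include <stdint.h>

/*
 * Space available to the producer is the tail of the ring, [next_cmd, max),
 * plus whatever the device has already drained at the head, [min, stop).
 * The request is refused when that sum cannot hold `bytes`.
 */
static bool fifo_model_is_full(uint32_t min, uint32_t max,
			       uint32_t next_cmd, uint32_t stop,
			       uint32_t bytes)
{
	return (max - next_cmd) + (stop - min) <= bytes;
}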
189 static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv, in vmw_fifo_wait_noirq() argument
200 prepare_to_wait(&dev_priv->fifo_queue, &__wait, in vmw_fifo_wait_noirq()
203 if (!vmw_fifo_is_full(dev_priv, bytes)) in vmw_fifo_wait_noirq()
216 finish_wait(&dev_priv->fifo_queue, &__wait); in vmw_fifo_wait_noirq()
217 wake_up_all(&dev_priv->fifo_queue); in vmw_fifo_wait_noirq()
222 static int vmw_fifo_wait(struct vmw_private *dev_priv, in vmw_fifo_wait() argument
228 if (likely(!vmw_fifo_is_full(dev_priv, bytes))) in vmw_fifo_wait()
231 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL); in vmw_fifo_wait()
232 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) in vmw_fifo_wait()
233 return vmw_fifo_wait_noirq(dev_priv, bytes, in vmw_fifo_wait()
236 vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS, in vmw_fifo_wait()
237 &dev_priv->fifo_queue_waiters); in vmw_fifo_wait()
241 (dev_priv->fifo_queue, in vmw_fifo_wait()
242 !vmw_fifo_is_full(dev_priv, bytes), timeout); in vmw_fifo_wait()
245 (dev_priv->fifo_queue, in vmw_fifo_wait()
246 !vmw_fifo_is_full(dev_priv, bytes), timeout); in vmw_fifo_wait()
253 vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS, in vmw_fifo_wait()
254 &dev_priv->fifo_queue_waiters); in vmw_fifo_wait()
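vmw_fifo_wait() layers three strategies: return immediately when space is already available, otherwise ping the host with SVGA_SYNC_FIFOFULL and then either poll (devices without SVGA_CAP_IRQMASK, via vmw_fifo_wait_noirq()) or register a FIFO-progress waiter and sleep on the wait queue, interruptibly or not. The user-space sketch below models only that decision ladder; the callback type, polling interval, and the reuse of the polling helper for the interrupt-driven branch are simplifications, not the driver's mechanism.

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

typedef bool (*space_check_fn)(void *ctx, uint32_t bytes);

enum wait_result { WAIT_OK, WAIT_TIMEOUT };

/* Fallback for devices that cannot raise a FIFO-progress interrupt:
 * re-check the space condition at a coarse interval until it passes. */
static enum wait_result wait_noirq_model(space_check_fn has_space, void *ctx,
					 uint32_t bytes, int max_polls)
{
	struct timespec tick = { .tv_sec = 0, .tv_nsec = 10 * 1000 * 1000 };

	for (int i = 0; i < max_polls; i++) {
		if (has_space(ctx, bytes))
			return WAIT_OK;
		nanosleep(&tick, NULL);
	}
	return WAIT_TIMEOUT;
}

/* Decision ladder modeled on the hits above. */
static enum wait_result fifo_wait_model(bool irq_capable,
					space_check_fn has_space, void *ctx,
					uint32_t bytes, int max_polls)
{
	if (has_space(ctx, bytes))
		return WAIT_OK;          /* fast path: no waiting needed */

	/* The driver pings the host (SVGA_SYNC_FIFOFULL) at this point to
	 * make it drain the ring before any waiting starts. */

	if (!irq_capable)
		return wait_noirq_model(has_space, ctx, bytes, max_polls);

	/* With SVGA_CAP_IRQMASK the driver instead registers a waiter and
	 * sleeps until a FIFO-progress interrupt re-checks the condition;
	 * the polling helper stands in for that sleep here. */
	return wait_noirq_model(has_space, ctx, bytes, max_polls);
}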
269 static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv, in vmw_local_fifo_reserve() argument
272 struct vmw_fifo_state *fifo_state = dev_priv->fifo; in vmw_local_fifo_reserve()
273 u32 *fifo_mem = dev_priv->fifo_mem; in vmw_local_fifo_reserve()
281 max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX); in vmw_local_fifo_reserve()
282 min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN); in vmw_local_fifo_reserve()
283 next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD); in vmw_local_fifo_reserve()
294 uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP); in vmw_local_fifo_reserve()
303 else if (vmw_fifo_is_full(dev_priv, bytes)) { in vmw_local_fifo_reserve()
304 ret = vmw_fifo_wait(dev_priv, bytes, in vmw_local_fifo_reserve()
316 ret = vmw_fifo_wait(dev_priv, bytes, in vmw_local_fifo_reserve()
328 vmw_fifo_mem_write(dev_priv, in vmw_local_fifo_reserve()
357 void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, in vmw_cmd_ctx_reserve() argument
362 if (dev_priv->cman) in vmw_cmd_ctx_reserve()
363 ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes, in vmw_cmd_ctx_reserve()
366 ret = vmw_local_fifo_reserve(dev_priv, bytes); in vmw_cmd_ctx_reserve()
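vmw_cmd_ctx_reserve() is the submission dispatch point: when a command-buffer manager (dev_priv->cman) exists, reservation goes through vmw_cmdbuf_reserve(), otherwise it falls back to the legacy FIFO via vmw_local_fifo_reserve(), and callers later pair the reservation with vmw_cmd_commit() or vmw_cmd_commit_flush() for the same byte count. The sketch below models that reserve, fill, commit contract with hypothetical backend callbacks; it is not the driver's API.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical backend: either a command-buffer manager or the FIFO. */
struct cmd_backend {
	void *(*reserve)(void *priv, uint32_t bytes);
	void  (*commit)(void *priv, uint32_t bytes, int flush);
	void  *priv;
};

/* Reserve through whichever backend is present, mirroring the cman check. */
static void *cmd_reserve(struct cmd_backend *cman, struct cmd_backend *fifo,
			 uint32_t bytes)
{
	struct cmd_backend *b = cman ? cman : fifo;

	return b->reserve(b->priv, bytes);
}

static void cmd_commit(struct cmd_backend *cman, struct cmd_backend *fifo,
		       uint32_t bytes, int flush)
{
	struct cmd_backend *b = cman ? cman : fifo;

	b->commit(b->priv, bytes, flush);
}

/* Typical caller pattern, as in the fence and dummy-query emitters below:
 * reserve space, write the command in place, then commit the same size. */
static int emit_example(struct cmd_backend *cman, struct cmd_backend *fifo)
{
	uint32_t *cmd = cmd_reserve(cman, fifo, 2 * sizeof(uint32_t));

	if (!cmd)
		return -1;
	cmd[0] = 0;	/* command id would go here      */
	cmd[1] = 0;	/* command payload would go here */
	cmd_commit(cman, fifo, 2 * sizeof(uint32_t), 0);
	return 0;
}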
419 static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) in vmw_local_fifo_commit() argument
421 struct vmw_fifo_state *fifo_state = dev_priv->fifo; in vmw_local_fifo_commit()
422 uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD); in vmw_local_fifo_commit()
423 uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX); in vmw_local_fifo_commit()
424 uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN); in vmw_local_fifo_commit()
434 vmw_fifo_res_copy(fifo_state, dev_priv, in vmw_local_fifo_commit()
437 vmw_fifo_slow_copy(fifo_state, dev_priv, in vmw_local_fifo_commit()
453 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd); in vmw_local_fifo_commit()
457 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0); in vmw_local_fifo_commit()
460 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); in vmw_local_fifo_commit()
464 void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes) in vmw_cmd_commit() argument
466 if (dev_priv->cman) in vmw_cmd_commit()
467 vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false); in vmw_cmd_commit()
469 vmw_local_fifo_commit(dev_priv, bytes); in vmw_cmd_commit()
479 void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes) in vmw_cmd_commit_flush() argument
481 if (dev_priv->cman) in vmw_cmd_commit_flush()
482 vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true); in vmw_cmd_commit_flush()
484 vmw_local_fifo_commit(dev_priv, bytes); in vmw_cmd_commit_flush()
494 int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible) in vmw_cmd_flush() argument
498 if (dev_priv->cman) in vmw_cmd_flush()
499 return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible); in vmw_cmd_flush()
504 int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) in vmw_cmd_send_fence() argument
511 fm = VMW_CMD_RESERVE(dev_priv, bytes); in vmw_cmd_send_fence()
513 *seqno = atomic_read(&dev_priv->marker_seq); in vmw_cmd_send_fence()
515 (void)vmw_fallback_wait(dev_priv, false, true, *seqno, in vmw_cmd_send_fence()
521 *seqno = atomic_add_return(1, &dev_priv->marker_seq); in vmw_cmd_send_fence()
524 if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) { in vmw_cmd_send_fence()
531 vmw_cmd_commit(dev_priv, 0); in vmw_cmd_send_fence()
538 vmw_cmd_commit_flush(dev_priv, bytes); in vmw_cmd_send_fence()
539 vmw_update_seqno(dev_priv); in vmw_cmd_send_fence()
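vmw_cmd_send_fence() reserves space, advances the global marker sequence, and then branches: without SVGA_FIFO_CAP_FENCE it falls back to vmw_fallback_wait() on the seqno and commits zero bytes, while the capable path writes the fence command, commits with a flush, and refreshes the cached seqno via vmw_update_seqno(). The model below keeps only that control flow; the command id is a placeholder and the sequence-counter handling is simplified.

#include <stdbool.h>
#include <stdint.h>

#define CMD_FENCE_ID 30u            /* placeholder, not the SVGA command id */

struct fence_ctx {
	uint32_t marker_seq;        /* stands in for dev_priv->marker_seq    */
	bool     fifo_has_fence_cap;
	uint32_t cmd_buf[2];        /* stands in for the reserved FIFO space */
	uint32_t committed_bytes;   /* size the final commit covered         */
};

/* Returns the seqno the caller should wait on. */
static uint32_t send_fence_model(struct fence_ctx *ctx)
{
	uint32_t seqno = ++ctx->marker_seq;  /* atomic_add_return() in the driver */

	if (!ctx->fifo_has_fence_cap) {
		/* No fence capability: the driver waits synchronously on the
		 * seqno instead and commits zero bytes of command data. */
		ctx->committed_bytes = 0;
		return seqno;
	}

	/* Fence-capable path: write the command into the reserved space and
	 * commit it with a flush so the device observes it promptly. */
	ctx->cmd_buf[0] = CMD_FENCE_ID;
	ctx->cmd_buf[1] = seqno;
	ctx->committed_bytes = sizeof(ctx->cmd_buf);
	return seqno;
}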
554 static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv, in vmw_cmd_emit_dummy_legacy_query() argument
563 struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base; in vmw_cmd_emit_dummy_legacy_query()
569 cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); in vmw_cmd_emit_dummy_legacy_query()
586 vmw_cmd_commit(dev_priv, sizeof(*cmd)); in vmw_cmd_emit_dummy_legacy_query()
600 static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv, in vmw_cmd_emit_dummy_gb_query() argument
609 struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base; in vmw_cmd_emit_dummy_gb_query()
615 cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); in vmw_cmd_emit_dummy_gb_query()
627 vmw_cmd_commit(dev_priv, sizeof(*cmd)); in vmw_cmd_emit_dummy_gb_query()
651 int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv, in vmw_cmd_emit_dummy_query() argument
654 if (dev_priv->has_mob) in vmw_cmd_emit_dummy_query()
655 return vmw_cmd_emit_dummy_gb_query(dev_priv, cid); in vmw_cmd_emit_dummy_query()
657 return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid); in vmw_cmd_emit_dummy_query()