Lines matching refs:cq (cross-reference hits for the identifier cq in drivers/infiniband/core/cq.c)
43 struct ib_cq *cq = dim->priv; in ib_cq_rdma_dim_work() local
50 trace_cq_modify(cq, comps, usec); in ib_cq_rdma_dim_work()
51 cq->device->ops.modify_cq(cq, comps, usec); in ib_cq_rdma_dim_work()
54 static void rdma_dim_init(struct ib_cq *cq) in rdma_dim_init() argument
58 if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim || in rdma_dim_init()
59 cq->poll_ctx == IB_POLL_DIRECT) in rdma_dim_init()
69 dim->priv = cq; in rdma_dim_init()
70 cq->dim = dim; in rdma_dim_init()
75 static void rdma_dim_destroy(struct ib_cq *cq) in rdma_dim_destroy() argument
77 if (!cq->dim) in rdma_dim_destroy()
80 cancel_work_sync(&cq->dim->work); in rdma_dim_destroy()
81 kfree(cq->dim); in rdma_dim_destroy()
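The three rdma_dim_* hits above are the per-CQ dynamic interrupt moderation (DIM) setup and teardown. A hedged sketch, not part of this listing, of how a provider driver might opt in: the mydrv_* names are hypothetical, while use_cq_dim, ib_device_ops.modify_cq and ib_set_device_ops() are the hooks that rdma_dim_init() checks.

#include <rdma/ib_verbs.h>

/* Hypothetical provider callback: program the HCA so this CQ raises an
 * interrupt after cq_count completions or cq_period microseconds. */
static int mydrv_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
        /* hardware-specific CQ context update elided in this sketch */
        return 0;
}

static const struct ib_device_ops mydrv_dev_ops = {
        .modify_cq = mydrv_modify_cq,
        /* ... remaining ops ... */
};

static void mydrv_init_device(struct ib_device *ibdev)
{
        ib_set_device_ops(ibdev, &mydrv_dev_ops);
        ibdev->use_cq_dim = true;       /* checked by rdma_dim_init() */
}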
84 static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc) in __poll_cq() argument
88 rc = ib_poll_cq(cq, num_entries, wc); in __poll_cq()
89 trace_cq_poll(cq, num_entries, rc); in __poll_cq()
93 static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs, in __ib_process_cq() argument
98 trace_cq_process(cq); in __ib_process_cq()
105 while ((n = __poll_cq(cq, min_t(u32, batch, in __ib_process_cq()
111 wc->wr_cqe->done(cq, wc); in __ib_process_cq()
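The wc->wr_cqe->done(cq, wc) hit in __ib_process_cq() is the per-WR completion callback model. A hedged consumer-side sketch follows; the ulp_* names are hypothetical, while ib_cqe, wr_cqe and container_of() are the real pieces of the pattern.

#include <rdma/ib_verbs.h>

/* Hypothetical ULP request: the completion callback lives in an embedded
 * struct ib_cqe, and __ib_process_cq() invokes it via wc->wr_cqe->done(). */
struct ulp_req {
        struct ib_cqe   cqe;
        /* ... request state ... */
};

static void ulp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ulp_req *req = container_of(wc->wr_cqe, struct ulp_req, cqe);

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                /* flushed or failed; release the request */
                return;
        }
        /* process the received data described by req ... */
}

static int ulp_post_recv(struct ib_qp *qp, struct ulp_req *req,
                         struct ib_sge *sge)
{
        struct ib_recv_wr wr = {
                .wr_cqe  = &req->cqe,
                .sg_list = sge,
                .num_sge = 1,
        };

        req->cqe.done = ulp_recv_done;
        return ib_post_recv(qp, &wr, NULL);
}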
139 int ib_process_cq_direct(struct ib_cq *cq, int budget) in ib_process_cq_direct() argument
143 return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT); in ib_process_cq_direct()
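ib_process_cq_direct() serves CQs allocated with IB_POLL_DIRECT, where the caller drives all polling itself. A hedged usage sketch with hypothetical ulp_* names:

#include <rdma/ib_verbs.h>

/* A CQ in IB_POLL_DIRECT mode gets no interrupt-driven polling at all;
 * the owner calls ib_process_cq_direct() from its own context instead. */
static struct ib_cq *ulp_create_direct_cq(struct ib_device *dev, int nr_cqe)
{
        return ib_alloc_cq(dev, NULL, nr_cqe, 0, IB_POLL_DIRECT);
}

static void ulp_drain_direct_cq(struct ib_cq *cq)
{
        /* small per-call budget; keep going while completions still arrive */
        while (ib_process_cq_direct(cq, 16) > 0)
                ;
}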
147 static void ib_cq_completion_direct(struct ib_cq *cq, void *private) in ib_cq_completion_direct() argument
149 WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq); in ib_cq_completion_direct()
154 struct ib_cq *cq = container_of(iop, struct ib_cq, iop); in ib_poll_handler() local
155 struct dim *dim = cq->dim; in ib_poll_handler()
158 completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH); in ib_poll_handler()
160 irq_poll_complete(&cq->iop); in ib_poll_handler()
161 if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) { in ib_poll_handler()
162 trace_cq_reschedule(cq); in ib_poll_handler()
163 irq_poll_sched(&cq->iop); in ib_poll_handler()
173 static void ib_cq_completion_softirq(struct ib_cq *cq, void *private) in ib_cq_completion_softirq() argument
175 trace_cq_schedule(cq); in ib_cq_completion_softirq()
176 irq_poll_sched(&cq->iop); in ib_cq_completion_softirq()
181 struct ib_cq *cq = container_of(work, struct ib_cq, work); in ib_cq_poll_work() local
184 completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc, in ib_cq_poll_work()
187 ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) in ib_cq_poll_work()
188 queue_work(cq->comp_wq, &cq->work); in ib_cq_poll_work()
189 else if (cq->dim) in ib_cq_poll_work()
190 rdma_dim(cq->dim, completed); in ib_cq_poll_work()
193 static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private) in ib_cq_completion_workqueue() argument
195 trace_cq_schedule(cq); in ib_cq_completion_workqueue()
196 queue_work(cq->comp_wq, &cq->work); in ib_cq_completion_workqueue()
221 struct ib_cq *cq; in __ib_alloc_cq() local
224 cq = rdma_zalloc_drv_obj(dev, ib_cq); in __ib_alloc_cq()
225 if (!cq) in __ib_alloc_cq()
228 cq->device = dev; in __ib_alloc_cq()
229 cq->cq_context = private; in __ib_alloc_cq()
230 cq->poll_ctx = poll_ctx; in __ib_alloc_cq()
231 atomic_set(&cq->usecnt, 0); in __ib_alloc_cq()
232 cq->comp_vector = comp_vector; in __ib_alloc_cq()
234 cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL); in __ib_alloc_cq()
235 if (!cq->wc) in __ib_alloc_cq()
238 rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ); in __ib_alloc_cq()
239 rdma_restrack_set_name(&cq->res, caller); in __ib_alloc_cq()
241 ret = dev->ops.create_cq(cq, &cq_attr, NULL); in __ib_alloc_cq()
245 rdma_dim_init(cq); in __ib_alloc_cq()
247 switch (cq->poll_ctx) { in __ib_alloc_cq()
249 cq->comp_handler = ib_cq_completion_direct; in __ib_alloc_cq()
252 cq->comp_handler = ib_cq_completion_softirq; in __ib_alloc_cq()
254 irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler); in __ib_alloc_cq()
255 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); in __ib_alloc_cq()
259 cq->comp_handler = ib_cq_completion_workqueue; in __ib_alloc_cq()
260 INIT_WORK(&cq->work, ib_cq_poll_work); in __ib_alloc_cq()
261 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); in __ib_alloc_cq()
262 cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ? in __ib_alloc_cq()
270 rdma_restrack_add(&cq->res); in __ib_alloc_cq()
271 trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx); in __ib_alloc_cq()
272 return cq; in __ib_alloc_cq()
275 rdma_dim_destroy(cq); in __ib_alloc_cq()
276 cq->device->ops.destroy_cq(cq, NULL); in __ib_alloc_cq()
278 rdma_restrack_put(&cq->res); in __ib_alloc_cq()
279 kfree(cq->wc); in __ib_alloc_cq()
281 kfree(cq); in __ib_alloc_cq()
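__ib_alloc_cq() above is the body behind the ib_alloc_cq() wrapper; its poll_ctx switch picks one of the completion handlers listed earlier. A hedged consumer-side lifecycle sketch, with hypothetical ulp_* names and error handling trimmed:

#include <rdma/ib_verbs.h>

/* Hypothetical per-connection setup and teardown around ib_alloc_cq() and
 * ib_free_cq(); IB_POLL_WORKQUEUE selects the workqueue completion handler
 * and work item seen in the hits above. */
static struct ib_cq *ulp_create_cq(struct ib_device *dev, int queue_depth,
                                   int comp_vector)
{
        return ib_alloc_cq(dev, NULL, queue_depth, comp_vector,
                           IB_POLL_WORKQUEUE);
}

static void ulp_destroy_cq(struct ib_cq *cq)
{
        /* destroy all QPs attached to this CQ first, or the usecnt
         * WARN_ON_ONCE() in ib_free_cq() will trigger */
        ib_free_cq(cq);
}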
319 void ib_free_cq(struct ib_cq *cq) in ib_free_cq() argument
323 if (WARN_ON_ONCE(atomic_read(&cq->usecnt))) in ib_free_cq()
325 if (WARN_ON_ONCE(cq->cqe_used)) in ib_free_cq()
328 switch (cq->poll_ctx) { in ib_free_cq()
332 irq_poll_disable(&cq->iop); in ib_free_cq()
336 cancel_work_sync(&cq->work); in ib_free_cq()
342 rdma_dim_destroy(cq); in ib_free_cq()
343 trace_cq_free(cq); in ib_free_cq()
344 ret = cq->device->ops.destroy_cq(cq, NULL); in ib_free_cq()
346 rdma_restrack_del(&cq->res); in ib_free_cq()
347 kfree(cq->wc); in ib_free_cq()
348 kfree(cq); in ib_free_cq()
354 struct ib_cq *cq, *n; in ib_cq_pool_cleanup() local
358 list_for_each_entry_safe(cq, n, &dev->cq_pools[i], in ib_cq_pool_cleanup()
360 WARN_ON(cq->cqe_used); in ib_cq_pool_cleanup()
361 list_del(&cq->pool_entry); in ib_cq_pool_cleanup()
362 cq->shared = false; in ib_cq_pool_cleanup()
363 ib_free_cq(cq); in ib_cq_pool_cleanup()
373 struct ib_cq *cq, *n; in ib_alloc_cqs() local
390 cq = ib_alloc_cq(dev, NULL, nr_cqes, i, poll_ctx); in ib_alloc_cqs()
391 if (IS_ERR(cq)) { in ib_alloc_cqs()
392 ret = PTR_ERR(cq); in ib_alloc_cqs()
395 cq->shared = true; in ib_alloc_cqs()
396 list_add_tail(&cq->pool_entry, &tmp_list); in ib_alloc_cqs()
406 list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) { in ib_alloc_cqs()
407 cq->shared = false; in ib_alloc_cqs()
408 ib_free_cq(cq); in ib_alloc_cqs()
435 struct ib_cq *cq, *found = NULL; in ib_cq_pool_get() local
459 list_for_each_entry(cq, &dev->cq_pools[poll_ctx], in ib_cq_pool_get()
465 if (vector != cq->comp_vector) in ib_cq_pool_get()
467 if (cq->cqe_used + nr_cqe > cq->cqe) in ib_cq_pool_get()
469 found = cq; in ib_cq_pool_get()
499 void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe) in ib_cq_pool_put() argument
501 if (WARN_ON_ONCE(nr_cqe > cq->cqe_used)) in ib_cq_pool_put()
504 spin_lock_irq(&cq->device->cq_pools_lock); in ib_cq_pool_put()
505 cq->cqe_used -= nr_cqe; in ib_cq_pool_put()
506 spin_unlock_irq(&cq->device->cq_pools_lock); in ib_cq_pool_put()
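The remaining hits implement the shared CQ pool. A hedged sketch of the consumer side; ib_cq_pool_get() and ib_cq_pool_put() are the real entry points, the ulp_* wrappers are hypothetical:

#include <rdma/ib_verbs.h>

/* Borrow capacity for nr_cqe completions from the device-wide shared CQ
 * pool and hand it back when the consumer (e.g. a QP) goes away. */
static struct ib_cq *ulp_get_pooled_cq(struct ib_device *dev,
                                       unsigned int nr_cqe, int vector_hint)
{
        return ib_cq_pool_get(dev, nr_cqe, vector_hint,
                              IB_POLL_UNBOUND_WORKQUEUE);
}

static void ulp_put_pooled_cq(struct ib_cq *cq, unsigned int nr_cqe)
{
        /* nr_cqe must match what was reserved by ib_cq_pool_get() */
        ib_cq_pool_put(cq, nr_cqe);
}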