Lines matching refs:srq in drivers/infiniband/hw/mthca/mthca_srq.c. Each entry shows the source line number, the matching code, and the enclosing function; matches that declare a parameter or local are tagged "argument" or "local".

74 static void *get_wqe(struct mthca_srq *srq, int n)  in get_wqe()  argument
76 if (srq->is_direct) in get_wqe()
77 return srq->queue.direct.buf + (n << srq->wqe_shift); in get_wqe()
79 return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf + in get_wqe()
80 ((n << srq->wqe_shift) & (PAGE_SIZE - 1)); in get_wqe()
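
get_wqe() resolves WQE n to a kernel virtual address: a direct (contiguous) allocation is a single base-plus-offset, while a chunked allocation first selects the page holding the offset and then the position inside it. A minimal userspace sketch of the same indexing arithmetic, assuming 4 KiB pages; the names here are illustrative, not the driver's:

    #include <stdio.h>

    #define PAGE_SHIFT 12                  /* assume 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Split the linear WQE offset into (chunk, offset-within-chunk),
     * the same arithmetic as the page_list branch above. */
    static void locate_wqe(int n, int wqe_shift)
    {
        unsigned long off = (unsigned long) n << wqe_shift;

        printf("wqe %3d: byte offset 0x%05lx -> chunk %lu, offset 0x%03lx\n",
               n, off, off >> PAGE_SHIFT, off & (PAGE_SIZE - 1));
    }

    int main(void)
    {
        locate_wqe(0, 6);    /* 64-byte stride: chunk 0, offset 0x000 */
        locate_wqe(100, 6);  /* chunk 1, offset 0x900 */
        return 0;
    }
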
99 struct mthca_srq *srq, in mthca_tavor_init_srq_context() argument
108 context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4)); in mthca_tavor_init_srq_context()
110 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); in mthca_tavor_init_srq_context()
120 struct mthca_srq *srq, in mthca_arbel_init_srq_context() argument
134 max = srq->max; in mthca_arbel_init_srq_context()
136 context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn); in mthca_arbel_init_srq_context()
137 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); in mthca_arbel_init_srq_context()
138 context->db_index = cpu_to_be32(srq->db_index); in mthca_arbel_init_srq_context()
139 context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29); in mthca_arbel_init_srq_context()
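
Both context-initialization routines encode the WQE stride in 16-byte units (hence the wqe_shift - 4 on lines 108 and 139), and the Arbel variant packs the log2 queue size next to the 24-bit SRQ number in one word. A standalone sketch of that bit packing with hypothetical values; the real driver additionally byte-swaps each word with cpu_to_be32()/cpu_to_be64():

    #include <stdio.h>

    int main(void)
    {
        unsigned logsize   = 10;        /* SRQ holds 2^10 WQEs */
        unsigned srqn      = 0x000042;  /* 24-bit SRQ number   */
        unsigned wqe_shift = 6;         /* 64-byte WQE stride  */

        /* log2 size in bits 31:24, SRQ number in bits 23:0 (line 136) */
        unsigned state_logsize_srqn = logsize << 24 | srqn;

        /* log2 stride in 16-byte units, in bits 31:29 (line 139) */
        unsigned logstride_usrpage = (wqe_shift - 4) << 29;

        printf("state_logsize_srqn = 0x%08x\n", state_logsize_srqn); /* 0x0a000042 */
        printf("logstride_usrpage  = 0x%08x\n", logstride_usrpage);  /* 0x40000000 */
        return 0;
    }
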
147 static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq) in mthca_free_srq_buf() argument
149 mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue, in mthca_free_srq_buf()
150 srq->is_direct, &srq->mr); in mthca_free_srq_buf()
151 kfree(srq->wrid); in mthca_free_srq_buf()
155 struct mthca_srq *srq, struct ib_udata *udata) in mthca_alloc_srq_buf() argument
165 srq->wrid = kmalloc_array(srq->max, sizeof(u64), GFP_KERNEL); in mthca_alloc_srq_buf()
166 if (!srq->wrid) in mthca_alloc_srq_buf()
169 err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift, in mthca_alloc_srq_buf()
171 &srq->queue, &srq->is_direct, pd, 1, &srq->mr); in mthca_alloc_srq_buf()
173 kfree(srq->wrid); in mthca_alloc_srq_buf()
182 for (i = 0; i < srq->max; ++i) { in mthca_alloc_srq_buf()
185 next = wqe = get_wqe(srq, i); in mthca_alloc_srq_buf()
187 if (i < srq->max - 1) { in mthca_alloc_srq_buf()
189 next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1); in mthca_alloc_srq_buf()
196 (void *) scatter < wqe + (1 << srq->wqe_shift); in mthca_alloc_srq_buf()
201 srq->last = get_wqe(srq, srq->max - 1); in mthca_alloc_srq_buf()
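
mthca_alloc_srq_buf() threads every WQE onto a free list at allocation time: entry i's nda_op holds the byte offset of entry i + 1 with bit 0 set as a valid-link flag, and the last entry becomes srq->last. A compact userspace model of that chaining over an ordinary array (illustrative types, host byte order rather than the driver's htonl()):

    #include <stdio.h>
    #include <stdlib.h>

    struct fake_wqe { unsigned nda_op; };  /* stand-in for struct mthca_next_seg */

    int main(void)
    {
        int max = 8, wqe_shift = 6, i;
        struct fake_wqe *q = calloc(max, sizeof(*q));

        /* Entry i links to entry i + 1 by byte offset; bit 0 marks the
         * link valid.  The last entry keeps nda_op == 0 from calloc(). */
        for (i = 0; i < max - 1; ++i)
            q[i].nda_op = (unsigned) (i + 1) << wqe_shift | 1;

        for (i = 0; i < max; ++i)
            printf("wqe %d: nda_op 0x%03x\n", i, q[i].nda_op);

        free(q);
        return 0;
    }
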
207 struct ib_srq_attr *attr, struct mthca_srq *srq, in mthca_alloc_srq() argument
219 srq->max = attr->max_wr; in mthca_alloc_srq()
220 srq->max_gs = attr->max_sge; in mthca_alloc_srq()
221 srq->counter = 0; in mthca_alloc_srq()
224 srq->max = roundup_pow_of_two(srq->max + 1); in mthca_alloc_srq()
226 srq->max = srq->max + 1; in mthca_alloc_srq()
230 srq->max_gs * sizeof (struct mthca_data_seg))); in mthca_alloc_srq()
235 srq->wqe_shift = ilog2(ds); in mthca_alloc_srq()
237 srq->srqn = mthca_alloc(&dev->srq_table.alloc); in mthca_alloc_srq()
238 if (srq->srqn == -1) in mthca_alloc_srq()
242 err = mthca_table_get(dev, dev->srq_table.table, srq->srqn); in mthca_alloc_srq()
247 srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ, in mthca_alloc_srq()
248 srq->srqn, &srq->db); in mthca_alloc_srq()
249 if (srq->db_index < 0) { in mthca_alloc_srq()
262 err = mthca_alloc_srq_buf(dev, pd, srq, udata); in mthca_alloc_srq()
266 spin_lock_init(&srq->lock); in mthca_alloc_srq()
267 srq->refcount = 1; in mthca_alloc_srq()
268 init_waitqueue_head(&srq->wait); in mthca_alloc_srq()
269 mutex_init(&srq->mutex); in mthca_alloc_srq()
272 mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf, udata); in mthca_alloc_srq()
274 mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf, udata); in mthca_alloc_srq()
276 err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn); in mthca_alloc_srq()
284 if (mthca_array_set(&dev->srq_table.srq, in mthca_alloc_srq()
285 srq->srqn & (dev->limits.num_srqs - 1), in mthca_alloc_srq()
286 srq)) { in mthca_alloc_srq()
294 srq->first_free = 0; in mthca_alloc_srq()
295 srq->last_free = srq->max - 1; in mthca_alloc_srq()
297 attr->max_wr = srq->max - 1; in mthca_alloc_srq()
298 attr->max_sge = srq->max_gs; in mthca_alloc_srq()
303 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn); in mthca_alloc_srq()
309 mthca_free_srq_buf(dev, srq); in mthca_alloc_srq()
316 mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); in mthca_alloc_srq()
319 mthca_table_put(dev, dev->srq_table.table, srq->srqn); in mthca_alloc_srq()
322 mthca_free(&dev->srq_table.alloc, srq->srqn); in mthca_alloc_srq()
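
The sizing logic early in mthca_alloc_srq() rounds the requested depth up to a power of two with one extra slot (which is why line 297 reports max_wr as srq->max - 1), and derives wqe_shift from the smallest power-of-two stride, at least 64 bytes, that fits one next segment plus max_gs data segments. A sketch of that arithmetic; the 16-byte segment sizes below match sizeof(struct mthca_next_seg) and sizeof(struct mthca_data_seg) but are hardcoded here for illustration:

    #include <stdio.h>

    /* Round v up to the next power of two (v >= 1). */
    static unsigned long roundup_pow2(unsigned long v)
    {
        unsigned long r = 1;
        while (r < v)
            r <<= 1;
        return r;
    }

    static unsigned ilog2_ul(unsigned long v)
    {
        unsigned r = 0;
        while (v >>= 1)
            ++r;
        return r;
    }

    int main(void)
    {
        unsigned long max_wr = 1000, max_gs = 4;
        unsigned long next_seg = 16, data_seg = 16; /* illustrative segment sizes */

        unsigned long max = roundup_pow2(max_wr + 1);           /* 1024 */
        unsigned long ds  = roundup_pow2(next_seg + max_gs * data_seg);
        if (ds < 64)
            ds = 64;                                /* hardware minimum stride */

        printf("entries %lu, stride %lu, wqe_shift %u\n",
               max, ds, ilog2_ul(ds));              /* 1024, 128, 7 */
        return 0;
    }
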
327 static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq) in get_srq_refcount() argument
332 c = srq->refcount; in get_srq_refcount()
338 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) in mthca_free_srq() argument
349 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn); in mthca_free_srq()
354 mthca_array_clear(&dev->srq_table.srq, in mthca_free_srq()
355 srq->srqn & (dev->limits.num_srqs - 1)); in mthca_free_srq()
356 --srq->refcount; in mthca_free_srq()
359 wait_event(srq->wait, !get_srq_refcount(dev, srq)); in mthca_free_srq()
361 if (!srq->ibsrq.uobject) { in mthca_free_srq()
362 mthca_free_srq_buf(dev, srq); in mthca_free_srq()
364 mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); in mthca_free_srq()
367 mthca_table_put(dev, dev->srq_table.table, srq->srqn); in mthca_free_srq()
368 mthca_free(&dev->srq_table.alloc, srq->srqn); in mthca_free_srq()
376 struct mthca_srq *srq = to_msrq(ibsrq); in mthca_modify_srq() local
384 u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max; in mthca_modify_srq()
388 mutex_lock(&srq->mutex); in mthca_modify_srq()
389 ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit); in mthca_modify_srq()
390 mutex_unlock(&srq->mutex); in mthca_modify_srq()
399 struct mthca_srq *srq = to_msrq(ibsrq); in mthca_query_srq() local
409 err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox); in mthca_query_srq()
421 srq_attr->max_wr = srq->max - 1; in mthca_query_srq()
422 srq_attr->max_sge = srq->max_gs; in mthca_query_srq()
433 struct mthca_srq *srq; in mthca_srq_event() local
437 srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); in mthca_srq_event()
438 if (srq) in mthca_srq_event()
439 ++srq->refcount; in mthca_srq_event()
442 if (!srq) { in mthca_srq_event()
447 if (!srq->ibsrq.event_handler) in mthca_srq_event()
452 event.element.srq = &srq->ibsrq; in mthca_srq_event()
453 srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); in mthca_srq_event()
457 if (!--srq->refcount) in mthca_srq_event()
458 wake_up(&srq->wait); in mthca_srq_event()
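
mthca_srq_event() shows the pin/unpin discipline that pairs with mthca_free_srq(): look the SRQ up, bump refcount under the table lock so teardown cannot free it mid-dispatch, call the consumer's handler, then drop the reference and wake anyone blocked in wait_event() on line 359. A loose userspace analogue of that lifecycle, with a pthread mutex and condition variable standing in for the spinlock and wait queue (all names hypothetical; build with -pthread):

    #include <stdio.h>
    #include <pthread.h>

    struct obj {
        int refcount;                /* pinned while an event is in flight */
        pthread_mutex_t lock;
        pthread_cond_t  wait;
    };

    /* Event path: pin, run the handler outside the lock, unpin, wake. */
    static void dispatch_event(struct obj *o)
    {
        pthread_mutex_lock(&o->lock);
        ++o->refcount;
        pthread_mutex_unlock(&o->lock);

        printf("handler runs with object pinned\n");

        pthread_mutex_lock(&o->lock);
        if (!--o->refcount)
            pthread_cond_broadcast(&o->wait);
        pthread_mutex_unlock(&o->lock);
    }

    /* Teardown path: drop the initial reference, wait out in-flight events. */
    static void destroy(struct obj *o)
    {
        pthread_mutex_lock(&o->lock);
        --o->refcount;
        while (o->refcount)
            pthread_cond_wait(&o->wait, &o->lock);
        pthread_mutex_unlock(&o->lock);
        printf("safe to free\n");
    }

    int main(void)
    {
        struct obj o = { 1, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };
        dispatch_event(&o);
        destroy(&o);
        return 0;
    }
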
465 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr) in mthca_free_srq_wqe() argument
470 ind = wqe_addr >> srq->wqe_shift; in mthca_free_srq_wqe()
472 spin_lock(&srq->lock); in mthca_free_srq_wqe()
474 last_free = get_wqe(srq, srq->last_free); in mthca_free_srq_wqe()
476 last_free->nda_op = htonl((ind << srq->wqe_shift) | 1); in mthca_free_srq_wqe()
477 *wqe_to_link(get_wqe(srq, ind)) = -1; in mthca_free_srq_wqe()
478 srq->last_free = ind; in mthca_free_srq_wqe()
480 spin_unlock(&srq->lock); in mthca_free_srq_wqe()
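
mthca_free_srq_wqe() returns a completed WQE to the tail of the free list: the old tail's nda_op is pointed at the freed index (line 476) and the freed entry is marked as the new end (lines 477-478). Extending the array model from the allocation sketch above (host byte order, illustrative end marker in place of line 477's -1 link):

    #include <stdio.h>

    struct fake_wqe { unsigned nda_op; };  /* as in the allocation sketch */

    /* Append freed entry `ind` after the current tail: old tail links
     * to `ind`, and `ind` becomes the new end of the free list. */
    static void free_wqe(struct fake_wqe *q, int *last_free, int ind, int wqe_shift)
    {
        q[*last_free].nda_op = (unsigned) ind << wqe_shift | 1; /* line 476 */
        q[ind].nda_op = 0;                 /* end marker, modeling line 477 */
        *last_free = ind;                                       /* line 478 */
    }

    int main(void)
    {
        struct fake_wqe q[4] = { { 0 } };
        int last_free = 3;

        free_wqe(q, &last_free, 1, 6);     /* hardware recycled WQE 1 */
        printf("wqe 3: nda_op 0x%03x, new tail %d\n", q[3].nda_op, last_free);
        return 0;
    }
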
487 struct mthca_srq *srq = to_msrq(ibsrq); in mthca_tavor_post_srq_recv() local
498 spin_lock_irqsave(&srq->lock, flags); in mthca_tavor_post_srq_recv()
500 first_ind = srq->first_free; in mthca_tavor_post_srq_recv()
503 ind = srq->first_free; in mthca_tavor_post_srq_recv()
504 wqe = get_wqe(srq, ind); in mthca_tavor_post_srq_recv()
508 mthca_err(dev, "SRQ %06x full\n", srq->srqn); in mthca_tavor_post_srq_recv()
514 prev_wqe = srq->last; in mthca_tavor_post_srq_recv()
515 srq->last = wqe; in mthca_tavor_post_srq_recv()
522 if (unlikely(wr->num_sge > srq->max_gs)) { in mthca_tavor_post_srq_recv()
525 srq->last = prev_wqe; in mthca_tavor_post_srq_recv()
534 if (i < srq->max_gs) in mthca_tavor_post_srq_recv()
540 srq->wrid[ind] = wr->wr_id; in mthca_tavor_post_srq_recv()
541 srq->first_free = next_ind; in mthca_tavor_post_srq_recv()
553 mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8, in mthca_tavor_post_srq_recv()
557 first_ind = srq->first_free; in mthca_tavor_post_srq_recv()
568 mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq, in mthca_tavor_post_srq_recv()
573 spin_unlock_irqrestore(&srq->lock, flags); in mthca_tavor_post_srq_recv()
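
The Tavor post path accumulates WQEs and rings a UAR doorbell via mthca_write64(), passing the first new index scaled to a byte offset plus the SRQ number and count. Because the doorbell's count field is narrow, the loop flushes early every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests (line 553) and once more at the end for the remainder (line 568). A sketch of that batching shape with the doorbell reduced to a printf; the threshold of 256 matches the driver's constant, everything else is illustrative:

    #include <stdio.h>

    #define MAX_WQES_PER_DB 256   /* flush threshold, as in the driver */

    static void ring_doorbell(int first_ind, int count)
    {
        printf("doorbell: first index %d, %d new WQEs\n", first_ind, count);
    }

    int main(void)
    {
        int total = 600, posted = 0, nreq = 0, first_ind = 0;

        while (posted < total) {
            ++posted;
            if (++nreq == MAX_WQES_PER_DB) {  /* batch full: flush early */
                ring_doorbell(first_ind, nreq);
                first_ind = posted;
                nreq = 0;
            }
        }
        if (nreq)                             /* flush the remainder */
            ring_doorbell(first_ind, nreq);
        return 0;
    }
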
581 struct mthca_srq *srq = to_msrq(ibsrq); in mthca_arbel_post_srq_recv() local
590 spin_lock_irqsave(&srq->lock, flags); in mthca_arbel_post_srq_recv()
593 ind = srq->first_free; in mthca_arbel_post_srq_recv()
594 wqe = get_wqe(srq, ind); in mthca_arbel_post_srq_recv()
598 mthca_err(dev, "SRQ %06x full\n", srq->srqn); in mthca_arbel_post_srq_recv()
609 if (unlikely(wr->num_sge > srq->max_gs)) { in mthca_arbel_post_srq_recv()
620 if (i < srq->max_gs) in mthca_arbel_post_srq_recv()
623 srq->wrid[ind] = wr->wr_id; in mthca_arbel_post_srq_recv()
624 srq->first_free = next_ind; in mthca_arbel_post_srq_recv()
628 srq->counter += nreq; in mthca_arbel_post_srq_recv()
635 *srq->db = cpu_to_be32(srq->counter); in mthca_arbel_post_srq_recv()
638 spin_unlock_irqrestore(&srq->lock, flags); in mthca_arbel_post_srq_recv()
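
Arbel (mem-free) hardware needs no UAR write per batch: the driver advances srq->counter and publishes it through a doorbell record in host memory (line 635), with a write barrier between building the WQEs and the counter store so the HCA never fetches a half-written descriptor. A sketch of that ordering requirement, using a C11 release fence in place of the kernel's wmb() (names illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static unsigned db_record;   /* stand-in for the doorbell record *srq->db */
    static unsigned counter;

    /* Publish `nreq` newly built WQEs.  All descriptor stores must be
     * visible before the counter update; the release fence stands in
     * for the kernel's wmb(). */
    static void publish(int nreq)
    {
        counter += nreq;
        atomic_thread_fence(memory_order_release);
        db_record = counter;     /* the driver also byte-swaps: cpu_to_be32() */
    }

    int main(void)
    {
        publish(3);
        printf("doorbell record now %u\n", db_record);
        return 0;
    }
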
683 err = mthca_array_init(&dev->srq_table.srq, in mthca_init_srq_table()
696 mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs); in mthca_cleanup_srq_table()
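
Lines 285, 355, and 437 all index the SRQ array with srqn & (dev->limits.num_srqs - 1), which works because num_srqs is a power of two: the low bits of the hardware SRQ number select the slot directly. A one-line demonstration with hypothetical values:

    #include <stdio.h>

    int main(void)
    {
        unsigned num_srqs = 1 << 16;  /* table size, a power of two */
        unsigned srqn = 0x12345;      /* hardware SRQ number */

        /* Low bits index the array, as in srqn & (num_srqs - 1). */
        printf("slot 0x%x\n", srqn & (num_srqs - 1));  /* 0x2345 */
        return 0;
    }
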