Lines matching refs:pq
(uses of the per-context packet queue pointer, struct hfi1_user_sdma_pkt_q *pq, in the hfi1 driver's user SDMA code)

41 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
54 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
91 struct hfi1_user_sdma_pkt_q *pq = in defer_packet_queue() local
95 trace_hfi1_usdma_defer(pq, sde, &pq->busy); in defer_packet_queue()
103 xchg(&pq->state, SDMA_PKT_Q_DEFERRED); in defer_packet_queue()
104 if (list_empty(&pq->busy.list)) { in defer_packet_queue()
105 pq->busy.lock = &sde->waitlock; in defer_packet_queue()
106 iowait_get_priority(&pq->busy); in defer_packet_queue()
107 iowait_queue(pkts_sent, &pq->busy, &sde->dmawait); in defer_packet_queue()
118 struct hfi1_user_sdma_pkt_q *pq = in activate_packet_queue() local
121 trace_hfi1_usdma_activate(pq, wait, reason); in activate_packet_queue()
122 xchg(&pq->state, SDMA_PKT_Q_ACTIVE); in activate_packet_queue()
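The defer_packet_queue()/activate_packet_queue() matches above show the queue's flow-control state being flipped with xchg(): the engine defers the queue (parking its iowait on the engine's dmawait list) when descriptors run out, and flips it back to ACTIVE once there is room again. A minimal userspace C11 sketch of that state flip, taking only the SDMA_PKT_Q_* semantics from the driver (everything else is illustrative scaffolding, not driver code):

/*
 * Userspace analogue of the DEFERRED/ACTIVE flip. The kernel uses
 * xchg(); atomic_exchange() gives the same read-modify-write with
 * full ordering.
 */
#include <stdatomic.h>
#include <stdio.h>

enum { PKT_Q_ACTIVE, PKT_Q_DEFERRED };

struct pkt_q {
	_Atomic int state;
};

static void defer(struct pkt_q *q)
{
	/* Descriptor ring full: park the queue until the engine drains. */
	atomic_exchange(&q->state, PKT_Q_DEFERRED);
}

static void activate(struct pkt_q *q)
{
	/* Engine made progress: submitters may run again. */
	atomic_exchange(&q->state, PKT_Q_ACTIVE);
}

int main(void)
{
	struct pkt_q q = { .state = PKT_Q_ACTIVE };

	defer(&q);
	printf("state=%d (DEFERRED)\n", atomic_load(&q.state));
	activate(&q);
	printf("state=%d (ACTIVE)\n", atomic_load(&q.state));
	return 0;
}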
133 struct hfi1_user_sdma_pkt_q *pq; in hfi1_user_sdma_alloc_queues() local
143 pq = kzalloc(sizeof(*pq), GFP_KERNEL); in hfi1_user_sdma_alloc_queues()
144 if (!pq) in hfi1_user_sdma_alloc_queues()
146 pq->dd = dd; in hfi1_user_sdma_alloc_queues()
147 pq->ctxt = uctxt->ctxt; in hfi1_user_sdma_alloc_queues()
148 pq->subctxt = fd->subctxt; in hfi1_user_sdma_alloc_queues()
149 pq->n_max_reqs = hfi1_sdma_comp_ring_size; in hfi1_user_sdma_alloc_queues()
150 atomic_set(&pq->n_reqs, 0); in hfi1_user_sdma_alloc_queues()
151 init_waitqueue_head(&pq->wait); in hfi1_user_sdma_alloc_queues()
152 atomic_set(&pq->n_locked, 0); in hfi1_user_sdma_alloc_queues()
154 iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue, in hfi1_user_sdma_alloc_queues()
156 pq->reqidx = 0; in hfi1_user_sdma_alloc_queues()
158 pq->reqs = kcalloc(hfi1_sdma_comp_ring_size, in hfi1_user_sdma_alloc_queues()
159 sizeof(*pq->reqs), in hfi1_user_sdma_alloc_queues()
161 if (!pq->reqs) in hfi1_user_sdma_alloc_queues()
164 pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size), in hfi1_user_sdma_alloc_queues()
165 sizeof(*pq->req_in_use), in hfi1_user_sdma_alloc_queues()
167 if (!pq->req_in_use) in hfi1_user_sdma_alloc_queues()
172 pq->txreq_cache = kmem_cache_create(buf, in hfi1_user_sdma_alloc_queues()
177 if (!pq->txreq_cache) { in hfi1_user_sdma_alloc_queues()
194 ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq, in hfi1_user_sdma_alloc_queues()
195 &pq->handler); in hfi1_user_sdma_alloc_queues()
201 rcu_assign_pointer(fd->pq, pq); in hfi1_user_sdma_alloc_queues()
211 kmem_cache_destroy(pq->txreq_cache); in hfi1_user_sdma_alloc_queues()
213 kfree(pq->req_in_use); in hfi1_user_sdma_alloc_queues()
215 kfree(pq->reqs); in hfi1_user_sdma_alloc_queues()
217 kfree(pq); in hfi1_user_sdma_alloc_queues()
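hfi1_user_sdma_alloc_queues() builds the queue in stages and, on failure, the error labels (lines 211-217) unwind in reverse order: the zeroed pq itself, a request array sized by hfi1_sdma_comp_ring_size, an in-use bitmap sized in longs via BITS_TO_LONGS(), and a per-queue kmem_cache for tx requests. A userspace sketch of the same allocate-then-unwind shape, with calloc/free standing in for kzalloc/kcalloc/kfree (struct req and the function name are illustrative):

/*
 * Allocate-then-unwind sketch mirroring hfi1_user_sdma_alloc_queues().
 */
#include <limits.h>
#include <stdlib.h>

#define BITS_TO_LONGS(n) \
	(((n) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))

struct req { int unused; };

struct pkt_q {
	unsigned int n_max_reqs;
	struct req *reqs;		/* one slot per completion-ring entry */
	unsigned long *req_in_use;	/* bitmap guarding those slots */
};

static struct pkt_q *alloc_queue(unsigned int ring_size)
{
	struct pkt_q *pq;

	pq = calloc(1, sizeof(*pq));
	if (!pq)
		return NULL;
	pq->n_max_reqs = ring_size;

	pq->reqs = calloc(ring_size, sizeof(*pq->reqs));
	if (!pq->reqs)
		goto free_pq;

	pq->req_in_use = calloc(BITS_TO_LONGS(ring_size),
				sizeof(*pq->req_in_use));
	if (!pq->req_in_use)
		goto free_reqs;

	return pq;

free_reqs:	/* unwind in reverse allocation order, as the driver does */
	free(pq->reqs);
free_pq:
	free(pq);
	return NULL;
}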
222 static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq) in flush_pq_iowait() argument
225 seqlock_t *lock = pq->busy.lock; in flush_pq_iowait()
230 if (!list_empty(&pq->busy.list)) { in flush_pq_iowait()
231 list_del_init(&pq->busy.list); in flush_pq_iowait()
232 pq->busy.lock = NULL; in flush_pq_iowait()
240 struct hfi1_user_sdma_pkt_q *pq; in hfi1_user_sdma_free_queues() local
245 pq = srcu_dereference_check(fd->pq, &fd->pq_srcu, in hfi1_user_sdma_free_queues()
247 if (pq) { in hfi1_user_sdma_free_queues()
248 rcu_assign_pointer(fd->pq, NULL); in hfi1_user_sdma_free_queues()
252 if (pq->handler) in hfi1_user_sdma_free_queues()
253 hfi1_mmu_rb_unregister(pq->handler); in hfi1_user_sdma_free_queues()
254 iowait_sdma_drain(&pq->busy); in hfi1_user_sdma_free_queues()
257 pq->wait, in hfi1_user_sdma_free_queues()
258 !atomic_read(&pq->n_reqs)); in hfi1_user_sdma_free_queues()
259 kfree(pq->reqs); in hfi1_user_sdma_free_queues()
260 kfree(pq->req_in_use); in hfi1_user_sdma_free_queues()
261 kmem_cache_destroy(pq->txreq_cache); in hfi1_user_sdma_free_queues()
262 flush_pq_iowait(pq); in hfi1_user_sdma_free_queues()
263 kfree(pq); in hfi1_user_sdma_free_queues()
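Note the lifetime pattern across these matches: the queue pointer is published with rcu_assign_pointer(fd->pq, pq) at the end of allocation (line 201), read with srcu_dereference() in the request path, and retracted with rcu_assign_pointer(fd->pq, NULL) before teardown drains in-flight work and frees in reverse allocation order. A C11 release/acquire sketch of the publish/retract idea; it approximates the ordering but deliberately omits the SRCU grace period the real teardown relies on before kfree(), and all names are illustrative:

/*
 * Publish/retract sketch of the fd->pq pattern.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct pkt_q { int ctxt; };

static _Atomic(struct pkt_q *) fd_pq;

static void publish(struct pkt_q *pq)
{
	/* Pairs with the acquire load below, like rcu_assign_pointer(). */
	atomic_store_explicit(&fd_pq, pq, memory_order_release);
}

static struct pkt_q *deref(void)
{
	/* Readers see either NULL or a fully initialised queue. */
	return atomic_load_explicit(&fd_pq, memory_order_acquire);
}

static void retract_and_free(void)
{
	struct pkt_q *pq = atomic_exchange(&fd_pq, NULL);

	/* Real code waits out readers (SRCU) before freeing. */
	free(pq);
}

int main(void)
{
	publish(calloc(1, sizeof(struct pkt_q)));
	if (deref())
		retract_and_free();
	return 0;
}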
309 struct hfi1_user_sdma_pkt_q *pq = in hfi1_user_sdma_process_request() local
310 srcu_dereference(fd->pq, &fd->pq_srcu); in hfi1_user_sdma_process_request()
312 struct hfi1_devdata *dd = pq->dd; in hfi1_user_sdma_process_request()
367 if (test_and_set_bit(info.comp_idx, pq->req_in_use)) { in hfi1_user_sdma_process_request()
378 req = pq->reqs + info.comp_idx; in hfi1_user_sdma_process_request()
381 req->pq = pq; in hfi1_user_sdma_process_request()
396 atomic_inc(&pq->n_reqs); in hfi1_user_sdma_process_request()
551 set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); in hfi1_user_sdma_process_request()
552 pq->state = SDMA_PKT_Q_ACTIVE; in hfi1_user_sdma_process_request()
568 pq->busy.wait_dma, in hfi1_user_sdma_process_request()
569 pq->state == SDMA_PKT_Q_ACTIVE, in hfi1_user_sdma_process_request()
572 trace_hfi1_usdma_we(pq, we_ret); in hfi1_user_sdma_process_request()
574 flush_pq_iowait(pq); in hfi1_user_sdma_process_request()
587 wait_event(pq->busy.wait_dma, in hfi1_user_sdma_process_request()
590 pq_update(pq); in hfi1_user_sdma_process_request()
591 set_comp_state(pq, cq, info.comp_idx, ERROR, ret); in hfi1_user_sdma_process_request()
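In hfi1_user_sdma_process_request(), the caller-chosen completion index is claimed atomically with test_and_set_bit() on pq->req_in_use (line 367), so two submissions can never share the slot pq->reqs + info.comp_idx; the bit is released again by clear_bit() in user_sdma_free_request() (line 1436). A C11 sketch of that claim/release pair, with atomic_fetch_or/atomic_fetch_and standing in for the kernel bitops and the map layout following the usual kernel bitmap convention:

/*
 * Claim/release sketch for the completion-ring slots.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define BITS_PER_LONG (8 * (int)sizeof(long))

static bool claim_slot(unsigned int nr, _Atomic unsigned long *map)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long old = atomic_fetch_or(&map[nr / BITS_PER_LONG], mask);

	return !(old & mask);	/* false: slot already busy, reject request */
}

static void release_slot(unsigned int nr, _Atomic unsigned long *map)
{
	atomic_fetch_and(&map[nr / BITS_PER_LONG],
			 ~(1UL << (nr % BITS_PER_LONG)));
}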
644 trace_hfi1_sdma_user_compute_length(req->pq->dd, in compute_data_length()
645 req->pq->ctxt, in compute_data_length()
646 req->pq->subctxt, in compute_data_length()
672 struct hfi1_user_sdma_pkt_q *pq = req->pq; in user_sdma_txadd_ahg() local
695 ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr)); in user_sdma_txadd_ahg()
697 sdma_txclean(pq->dd, &tx->txreq); in user_sdma_txadd_ahg()
712 struct hfi1_user_sdma_pkt_q *pq = req->pq; in user_sdma_txadd() local
721 ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx], in user_sdma_txadd()
749 struct hfi1_user_sdma_pkt_q *pq = NULL; in user_sdma_send_pkts() local
752 if (!req->pq) in user_sdma_send_pkts()
755 pq = req->pq; in user_sdma_send_pkts()
785 tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL); in user_sdma_send_pkts()
900 iowait_get_ib_work(&pq->busy), in user_sdma_send_pkts()
916 sdma_txclean(pq->dd, &tx->txreq); in user_sdma_send_pkts()
918 kmem_cache_free(pq->txreq_cache, tx); in user_sdma_send_pkts()
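user_sdma_send_pkts() draws each tx from the per-queue pq->txreq_cache created during allocation (line 172) and returns it with kmem_cache_free() on error paths and in the completion callback. A single-threaded free-list sketch of that recycle-objects idea; the real kmem_cache adds per-CPU caching, poisoning, and locking that this toy omits, and the names are illustrative:

/*
 * Free-list object cache sketch, modelled on pq->txreq_cache usage.
 */
#include <stdlib.h>

struct txreq {
	struct txreq *next_free;	/* valid only while on the free list */
	/* payload fields elided */
};

struct obj_cache {
	struct txreq *free_list;
};

static struct txreq *cache_alloc(struct obj_cache *c)
{
	struct txreq *tx = c->free_list;

	if (tx)
		c->free_list = tx->next_free;	/* recycle a freed object */
	else
		tx = malloc(sizeof(*tx));	/* cold path: fresh allocation */
	return tx;
}

static void cache_free(struct obj_cache *c, struct txreq *tx)
{
	tx->next_free = c->free_list;
	c->free_list = tx;
}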
922 static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages) in sdma_cache_evict() argument
928 hfi1_mmu_rb_evict(pq->handler, &evict_data); in sdma_cache_evict()
939 struct hfi1_user_sdma_pkt_q *pq = req->pq; in pin_sdma_pages() local
948 if (!hfi1_can_pin_pages(pq->dd, current->mm, in pin_sdma_pages()
949 atomic_read(&pq->n_locked), npages)) { in pin_sdma_pages()
950 cleared = sdma_cache_evict(pq, npages); in pin_sdma_pages()
969 atomic_add(pinned, &pq->n_locked); in pin_sdma_pages()
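pin_sdma_pages() enforces a pinned-memory budget: before pinning npages it checks the running total pq->n_locked via hfi1_can_pin_pages(), asks sdma_cache_evict() to release cached pinnings if the budget would overflow, and advances the counter on success; unpin_sdma_pages() (line 978) subtracts on the way out. A hedged sketch of that reserve/evict/retry shape, where evict() is a stub that merely pretends to unpin and limit plays the role of the hfi1_can_pin_pages() check:

/*
 * Reserve/evict sketch of the pinned-page budget.
 */
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic long n_locked;

static void evict(long want)
{
	/* Stub: pretend `want` cached pages were unpinned. */
	atomic_fetch_sub(&n_locked, want);
}

static bool reserve_pages(long npages, long limit)
{
	if (atomic_load(&n_locked) + npages > limit) {
		evict(npages);
		if (atomic_load(&n_locked) + npages > limit)
			return false;	/* still over budget: fail the pin */
	}
	atomic_fetch_add(&n_locked, npages);
	return true;
}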
978 atomic_sub(node->npages, &node->pq->n_locked); in unpin_sdma_pages()
986 struct hfi1_user_sdma_pkt_q *pq = req->pq; in pin_vector_pages() local
993 hfi1_mmu_rb_remove_unless_exact(pq->handler, in pin_vector_pages()
1014 node->pq = pq; in pin_vector_pages()
1033 ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb); in pin_vector_pages()
1125 struct hfi1_user_sdma_pkt_q *pq = req->pq; in set_txreq_header() local
1222 pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx, in set_txreq_header()
1231 trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt, in set_txreq_header()
1233 return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr)); in set_txreq_header()
1242 struct hfi1_user_sdma_pkt_q *pq = req->pq; in set_txreq_header_ahg() local
1341 trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt, in set_txreq_header_ahg()
1368 struct hfi1_user_sdma_pkt_q *pq; in user_sdma_txreq_cb() local
1376 pq = req->pq; in user_sdma_txreq_cb()
1387 kmem_cache_free(pq->txreq_cache, tx); in user_sdma_txreq_cb()
1394 set_comp_state(pq, cq, req->info.comp_idx, state, status); in user_sdma_txreq_cb()
1395 pq_update(pq); in user_sdma_txreq_cb()
1398 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq) in pq_update() argument
1400 if (atomic_dec_and_test(&pq->n_reqs)) in pq_update()
1401 wake_up(&pq->wait); in pq_update()
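pq_update() is the release half of the queue's lifetime protocol: submission bumps pq->n_reqs (line 396), each completed request drops it here, and the final drop wakes the teardown path sleeping in hfi1_user_sdma_free_queues() until !atomic_read(&pq->n_reqs). The same protocol rendered in portable C, with a mutex/condvar replacing the kernel's atomic_dec_and_test()/wake_up()/wait_event() trio (function names are illustrative):

/*
 * Lifetime-count sketch modelled on pq->n_reqs.
 */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t idle = PTHREAD_COND_INITIALIZER;
static int n_reqs;

static void req_start(void)			/* atomic_inc(&pq->n_reqs) */
{
	pthread_mutex_lock(&lock);
	n_reqs++;
	pthread_mutex_unlock(&lock);
}

static void req_done(void)			/* pq_update() */
{
	pthread_mutex_lock(&lock);
	if (--n_reqs == 0)
		pthread_cond_broadcast(&idle);	/* wake_up(&pq->wait) */
	pthread_mutex_unlock(&lock);
}

static void wait_all_done(void)			/* teardown wait_event() */
{
	pthread_mutex_lock(&lock);
	while (n_reqs)
		pthread_cond_wait(&idle, &lock);
	pthread_mutex_unlock(&lock);
}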
1415 sdma_txclean(req->pq->dd, t); in user_sdma_free_request()
1416 kmem_cache_free(req->pq->txreq_cache, tx); in user_sdma_free_request()
1429 hfi1_mmu_rb_remove(req->pq->handler, in user_sdma_free_request()
1436 clear_bit(req->info.comp_idx, req->pq->req_in_use); in user_sdma_free_request()
1439 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq, in set_comp_state() argument
1448 trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt, in set_comp_state()