Lines matching refs:ppd. Each entry lists the source line number, the matched code, the enclosing function, and whether ppd is a function argument or a local variable on that line.

803 				    struct qib_pportdata *ppd,  in qib_user_sdma_queue_pkts()  argument
905 ppd->ibmaxlen) { in qib_user_sdma_queue_pkts()
1060 static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd, in qib_user_sdma_queue_clean() argument
1063 struct qib_devdata *dd = ppd->dd; in qib_user_sdma_queue_clean()
1082 s64 descd = ppd->sdma_descq_removed - pkt->added; in qib_user_sdma_queue_clean()
1125 static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd) in qib_user_sdma_hwqueue_clean() argument
1130 spin_lock_irqsave(&ppd->sdma_lock, flags); in qib_user_sdma_hwqueue_clean()
1131 ret = qib_sdma_make_progress(ppd); in qib_user_sdma_hwqueue_clean()
1132 spin_unlock_irqrestore(&ppd->sdma_lock, flags); in qib_user_sdma_hwqueue_clean()
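The three matches at 1130-1132 capture the whole of the hardware-queue clean step: progress on the descriptor ring is only ever made while holding ppd->sdma_lock. A minimal reconstruction of qib_user_sdma_hwqueue_clean() from the matched lines (the local declarations and the return are assumed, since the listing does not show them):

static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
{
	unsigned long flags;
	int ret;

	/* sdma_lock serializes all access to the hardware descriptor ring */
	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = qib_sdma_make_progress(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}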
1138 void qib_user_sdma_queue_drain(struct qib_pportdata *ppd, in qib_user_sdma_queue_drain() argument
1141 struct qib_devdata *dd = ppd->dd; in qib_user_sdma_queue_drain()
1154 qib_user_sdma_hwqueue_clean(ppd); in qib_user_sdma_queue_drain()
1155 qib_user_sdma_queue_clean(ppd, pq); in qib_user_sdma_queue_drain()
1166 spin_lock_irqsave(&ppd->sdma_lock, flags); in qib_user_sdma_queue_drain()
1172 &ppd->sdma_userpending, list) { in qib_user_sdma_queue_drain()
1180 spin_unlock_irqrestore(&ppd->sdma_lock, flags); in qib_user_sdma_queue_drain()
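The drain path (1141-1180) first repeats the usual clean-up pair, qib_user_sdma_hwqueue_clean() followed by qib_user_sdma_queue_clean(), and then, under ppd->sdma_lock, walks whatever remains parked on ppd->sdma_userpending. A fragment-level sketch of that final walk; the cursor names and the (empty) loop body are assumptions, since the listing only shows the iteration itself:

	struct qib_user_sdma_pkt *pkt, *pkt_prev;	/* cursor names assumed */
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	list_for_each_entry_safe(pkt, pkt_prev,
				 &ppd->sdma_userpending, list) {
		/*
		 * Detach/free packets that belong to the queue being
		 * drained; the listing does not show this loop body.
		 */
	}
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);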
1221 static void qib_user_sdma_send_frag(struct qib_pportdata *ppd, in qib_user_sdma_send_frag() argument
1231 descqp = &ppd->sdma_descq[tail].qw[0]; in qib_user_sdma_send_frag()
1238 if (ppd->sdma_intrequest) { in qib_user_sdma_send_frag()
1240 ppd->sdma_intrequest = 0; in qib_user_sdma_send_frag()
1248 void qib_user_sdma_send_desc(struct qib_pportdata *ppd, in qib_user_sdma_send_desc() argument
1251 struct qib_devdata *dd = ppd->dd; in qib_user_sdma_send_desc()
1256 nfree = qib_sdma_descq_freecnt(ppd); in qib_user_sdma_send_desc()
1262 tail_c = tail = ppd->sdma_descq_tail; in qib_user_sdma_send_desc()
1263 gen_c = gen = ppd->sdma_generation; in qib_user_sdma_send_desc()
1273 qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen); in qib_user_sdma_send_desc()
1276 if (++tail == ppd->sdma_descq_cnt) { in qib_user_sdma_send_desc()
1279 ppd->sdma_intrequest = 1; in qib_user_sdma_send_desc()
1280 } else if (tail == (ppd->sdma_descq_cnt>>1)) { in qib_user_sdma_send_desc()
1281 ppd->sdma_intrequest = 1; in qib_user_sdma_send_desc()
1295 ppd->sdma_descq[dtail].qw[0] |= in qib_user_sdma_send_desc()
1297 if (++dtail == ppd->sdma_descq_cnt) in qib_user_sdma_send_desc()
1308 ppd->sdma_descq_added += c; in qib_user_sdma_send_desc()
1311 pkt->added = ppd->sdma_descq_added; in qib_user_sdma_send_desc()
1319 if (!nfree || (nsent<<2) > ppd->sdma_descq_cnt) in qib_user_sdma_send_desc()
1324 if (ppd->sdma_descq_tail != tail_c) { in qib_user_sdma_send_desc()
1325 ppd->sdma_generation = gen_c; in qib_user_sdma_send_desc()
1326 dd->f_sdma_update_tail(ppd, tail_c); in qib_user_sdma_send_desc()
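Inside qib_user_sdma_send_desc() (1251-1326), descriptors are written at a locally cached tail and generation; the tail wraps at sdma_descq_cnt (bumping the generation and setting sdma_intrequest so the hardware raises a completion interrupt, which is also requested at the half-way point), and only after a whole packet has been posted are the cached values committed and the new tail pushed to the hardware via dd->f_sdma_update_tail(). A kernel-style sketch of just the wrap logic; the struct and helper below are illustrative assumptions, not the driver's definitions:

/* Illustrative ring state, assumed for this sketch only. */
struct sdma_tail_state {
	u16 tail;	/* next descriptor slot to write */
	u8  gen;	/* advances every time the tail wraps */
};

static void advance_tail_sketch(struct qib_pportdata *ppd,
				struct sdma_tail_state *ts)
{
	if (++ts->tail == ppd->sdma_descq_cnt) {
		/* wrapped: restart at slot 0, new generation, ask for an irq */
		ts->tail = 0;
		ts->gen++;
		ppd->sdma_intrequest = 1;
	} else if (ts->tail == (ppd->sdma_descq_cnt >> 1)) {
		/* half-way point: also request a completion interrupt */
		ppd->sdma_intrequest = 1;
	}
}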
1334 static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd, in qib_user_sdma_push_pkts() argument
1340 if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE))) in qib_user_sdma_push_pkts()
1345 spin_lock_irqsave(&ppd->sdma_lock, flags); in qib_user_sdma_push_pkts()
1346 if (unlikely(!__qib_sdma_running(ppd))) { in qib_user_sdma_push_pkts()
1347 spin_unlock_irqrestore(&ppd->sdma_lock, flags); in qib_user_sdma_push_pkts()
1351 list_splice_tail_init(pktlist, &ppd->sdma_userpending); in qib_user_sdma_push_pkts()
1352 qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending); in qib_user_sdma_push_pkts()
1353 spin_unlock_irqrestore(&ppd->sdma_lock, flags); in qib_user_sdma_push_pkts()
1372 spin_lock_irqsave(&ppd->sdma_lock, flags); in qib_user_sdma_push_pkts()
1373 if (unlikely(!__qib_sdma_running(ppd))) { in qib_user_sdma_push_pkts()
1374 spin_unlock_irqrestore(&ppd->sdma_lock, flags); in qib_user_sdma_push_pkts()
1377 qib_user_sdma_send_desc(ppd, pktlist); in qib_user_sdma_push_pkts()
1379 qib_sdma_make_progress(ppd); in qib_user_sdma_push_pkts()
1380 spin_unlock_irqrestore(&ppd->sdma_lock, flags); in qib_user_sdma_push_pkts()
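The push path (1340-1380) shows two submission variants, both gated on the IB link being ACTIVE and on the SDMA engine running, and both performed under ppd->sdma_lock: either the caller's packet list is spliced onto ppd->sdma_userpending and drained from there, or it is handed to qib_user_sdma_send_desc() directly and qib_sdma_make_progress() is kicked immediately. A condensed sketch of the first variant assembled from the matched lines; the simplified signature and the error codes are assumptions:

static int push_pkts_sketch(struct qib_pportdata *ppd,
			    struct list_head *pktlist)
{
	unsigned long flags;

	/* refuse to post anything while the IB link is not ACTIVE */
	if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
		return -ECOMM;		/* error code assumed for the sketch */

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	/* the engine may stop at any time; re-check under the lock */
	if (unlikely(!__qib_sdma_running(ppd))) {
		spin_unlock_irqrestore(&ppd->sdma_lock, flags);
		return -ECOMM;
	}

	/* queue the new packets behind anything already pending, then drain */
	list_splice_tail_init(pktlist, &ppd->sdma_userpending);
	qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	return 0;
}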
1392 struct qib_pportdata *ppd = rcd->ppd; in qib_user_sdma_writev() local
1402 if (!qib_sdma_running(ppd)) in qib_user_sdma_writev()
1406 if (pq->added > ppd->sdma_descq_removed) in qib_user_sdma_writev()
1407 qib_user_sdma_hwqueue_clean(ppd); in qib_user_sdma_writev()
1410 qib_user_sdma_queue_clean(ppd, pq); in qib_user_sdma_writev()
1416 ret = qib_user_sdma_queue_pkts(dd, ppd, pq, in qib_user_sdma_writev()
1430 if (qib_sdma_descq_freecnt(ppd) < ndesc) { in qib_user_sdma_writev()
1431 qib_user_sdma_hwqueue_clean(ppd); in qib_user_sdma_writev()
1433 qib_user_sdma_queue_clean(ppd, pq); in qib_user_sdma_writev()
1436 ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp); in qib_user_sdma_writev()
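The writev() entry point (1392-1436) ties these steps together: bail out if the engine is not running, reclaim descriptors the hardware has already retired, build packets from the caller's iovecs with qib_user_sdma_queue_pkts(), clean again if the free-descriptor count is still short of what the new packets need, and finally submit through qib_user_sdma_push_pkts(). A condensed sketch of that ordering; the wrapper signature, the elided queue_pkts() call, and the error code are assumptions:

static int writev_flow_sketch(struct qib_pportdata *ppd,
			      struct qib_user_sdma_queue *pq,
			      struct list_head *list)
{
	int mxp = 0, ndesc = 0;	/* would be filled in by queue_pkts() */

	/* nothing can be posted while the SDMA engine is stopped */
	if (!qib_sdma_running(ppd))
		return -ECOMM;		/* error code assumed for the sketch */

	/* reclaim descriptors the hardware has already consumed */
	if (pq->added > ppd->sdma_descq_removed)
		qib_user_sdma_hwqueue_clean(ppd);
	qib_user_sdma_queue_clean(ppd, pq);

	/*
	 * qib_user_sdma_queue_pkts(dd, ppd, pq, ...) would run here to
	 * coalesce the caller's iovecs onto 'list' and compute 'ndesc';
	 * its full argument list is not shown in the listing.
	 */

	/* still short of descriptors? clean once more before pushing */
	if (qib_sdma_descq_freecnt(ppd) < ndesc) {
		qib_user_sdma_hwqueue_clean(ppd);
		qib_user_sdma_queue_clean(ppd, pq);
	}

	return qib_user_sdma_push_pkts(ppd, pq, list, mxp);
}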
1454 int qib_user_sdma_make_progress(struct qib_pportdata *ppd, in qib_user_sdma_make_progress() argument
1460 qib_user_sdma_hwqueue_clean(ppd); in qib_user_sdma_make_progress()
1461 ret = qib_user_sdma_queue_clean(ppd, pq); in qib_user_sdma_make_progress()