Lines matching refs: q_idx
124 u8 q_idx, struct sk_buff *skb, int err) in hfi1_vnic_update_tx_counters() argument
127 struct opa_vnic_stats *stats = &vinfo->stats[q_idx]; in hfi1_vnic_update_tx_counters()
153 u8 q_idx, struct sk_buff *skb, int err) in hfi1_vnic_update_rx_counters() argument
156 struct opa_vnic_stats *stats = &vinfo->stats[q_idx]; in hfi1_vnic_update_rx_counters()
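Lines 124-156 show the same per-queue accounting pattern on both the tx and rx side: q_idx arrives as an argument and indexes one opa_vnic_stats slot in vinfo->stats[]. Below is a minimal standalone C sketch of that indexing, not the driver code; the struct layouts, field names, and NUM_TXQ bound are hypothetical stand-ins for opa_vnic_stats and the vport info.

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_TXQ 8

    struct q_stats {                 /* hypothetical stand-in for opa_vnic_stats */
        uint64_t tx_drop_state;
        uint64_t rx_drop_state;
    };

    struct vnic_info {               /* hypothetical stand-in for the vport info */
        struct q_stats stats[NUM_TXQ];
    };

    static void update_tx_counters(struct vnic_info *vinfo, uint8_t q_idx)
    {
        struct q_stats *stats = &vinfo->stats[q_idx]; /* one slot per queue */

        stats->tx_drop_state++;
    }

    int main(void)
    {
        struct vnic_info vinfo = { 0 };

        update_tx_counters(&vinfo, 2);
        printf("queue 2 tx_drop_state = %llu\n",
               (unsigned long long)vinfo.stats[2].tx_drop_state);
        return 0;
    }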
205 u8 q_idx) in hfi1_vnic_maybe_stop_tx() argument
207 netif_stop_subqueue(vinfo->netdev, q_idx); in hfi1_vnic_maybe_stop_tx()
208 if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx)) in hfi1_vnic_maybe_stop_tx()
211 netif_start_subqueue(vinfo->netdev, q_idx); in hfi1_vnic_maybe_stop_tx()
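Lines 205-211 are the tx flow-control idiom: stop the subqueue first, repeat the SDMA write-space test afterwards, and restart the queue if space appeared in between, which closes the lost-wakeup window against the completion path. A minimal sketch of that stop/recheck/restart ordering, with hypothetical stubs in place of netif_stop_subqueue(), netif_start_subqueue(), and hfi1_vnic_sdma_write_avail():

    #include <stdbool.h>
    #include <stdint.h>

    #define NUM_TXQ 8

    static bool stopped[NUM_TXQ];

    /* stubs standing in for the netif subqueue calls and the sdma check */
    static void stop_subqueue(uint8_t q)  { stopped[q] = true; }
    static void start_subqueue(uint8_t q) { stopped[q] = false; }
    static bool sdma_write_avail(uint8_t q) { return q % 2; /* canned result */ }

    static void maybe_stop_tx(uint8_t q_idx)
    {
        stop_subqueue(q_idx);           /* stop first, unconditionally */
        if (!sdma_write_avail(q_idx))   /* re-check after stopping */
            return;                     /* still no space: stay stopped */

        start_subqueue(q_idx);          /* space appeared in the window */
    }

    int main(void)
    {
        maybe_stop_tx(3);  /* odd queue: stub reports space, queue restarts */
        return stopped[3]; /* exits 0 because the queue was restarted */
    }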
218 u8 pad_len, q_idx = skb->queue_mapping; in hfi1_netdev_start_xmit() local
225 v_dbg("xmit: queue %d skb len %d\n", q_idx, skb->len); in hfi1_netdev_start_xmit()
227 vinfo->stats[q_idx].tx_drop_state++; in hfi1_netdev_start_xmit()
235 vinfo->stats[q_idx].tx_dlid_zero++; in hfi1_netdev_start_xmit()
255 err = dd->process_vnic_dma_send(dd, q_idx, vinfo, skb, pbc, pad_len); in hfi1_netdev_start_xmit()
258 vinfo->stats[q_idx].netstats.tx_fifo_errors++; in hfi1_netdev_start_xmit()
260 vinfo->stats[q_idx].netstats.tx_carrier_errors++; in hfi1_netdev_start_xmit()
266 hfi1_vnic_maybe_stop_tx(vinfo, q_idx); in hfi1_netdev_start_xmit()
273 hfi1_vnic_update_tx_counters(vinfo, q_idx, skb, err); in hfi1_netdev_start_xmit()
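In hfi1_netdev_start_xmit() (lines 218-273) the queue index comes straight from skb->queue_mapping, drops and DMA-send failures are binned into that queue's counters, and flow control runs before the final counter update. The listing elides the branch conditions around lines 258-260, so the sketch below assumes a plausible convention (-ENOMEM counts as a fifo error, other failures except -EBUSY as carrier errors); the skb_stub type and dma_send() helper are made up for illustration.

    #include <errno.h>
    #include <stdint.h>

    #define NUM_TXQ 8

    struct netdev_stats { uint64_t tx_fifo_errors, tx_carrier_errors; };
    struct q_stats      { struct netdev_stats netstats; };

    struct skb_stub { uint16_t queue_mapping; };  /* stand-in for sk_buff */

    static struct q_stats stats[NUM_TXQ];

    static int dma_send(uint8_t q_idx, struct skb_stub *skb)
    {
        (void)q_idx; (void)skb;
        return -ENOMEM;                 /* stub: pretend the ring is full */
    }

    static void start_xmit(struct skb_stub *skb)
    {
        uint8_t q_idx = skb->queue_mapping;  /* subqueue picked by the stack */
        int err = dma_send(q_idx, skb);

        if (err == -ENOMEM)                  /* no descriptors: fifo error */
            stats[q_idx].netstats.tx_fifo_errors++;
        else if (err && err != -EBUSY)       /* other failures: carrier error */
            stats[q_idx].netstats.tx_carrier_errors++;
    }

    int main(void)
    {
        struct skb_stub skb = { .queue_mapping = 1 };

        start_xmit(&skb);
        return stats[1].netstats.tx_fifo_errors == 1 ? 0 : 1;
    }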
339 u8 q_idx; in hfi1_vnic_bypass_rcv() local
369 q_idx = packet->rcd->vnic_q_idx; in hfi1_vnic_bypass_rcv()
370 rxq = &vinfo->rxq[q_idx]; in hfi1_vnic_bypass_rcv()
372 vinfo->stats[q_idx].rx_drop_state++; in hfi1_vnic_bypass_rcv()
378 vinfo->stats[q_idx].netstats.rx_fifo_errors++; in hfi1_vnic_bypass_rcv()
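On the receive side (lines 339-378) q_idx is a local picked up from the receive context, packet->rcd->vnic_q_idx, rather than from the skb, and it selects both the rx queue and the per-queue stats slot. A small sketch of that selection, with hypothetical packet and context types and a plain "interface up" flag standing in for the driver's state check:

    #include <stdint.h>

    #define NUM_RXQ 8

    struct q_stats  { uint64_t rx_drop_state; };
    struct rx_queue { int depth; };          /* hypothetical rx ring */

    struct rcv_ctx { uint8_t vnic_q_idx; };  /* stand-in for the hfi1 rcd */
    struct packet_stub { struct rcv_ctx *rcd; };

    static struct q_stats  stats[NUM_RXQ];
    static struct rx_queue rxq[NUM_RXQ];

    static void bypass_rcv(struct packet_stub *packet, int up)
    {
        uint8_t q_idx = packet->rcd->vnic_q_idx; /* index from rcv context */
        struct rx_queue *q = &rxq[q_idx];

        if (!up) {                               /* interface down: drop */
            stats[q_idx].rx_drop_state++;
            return;
        }
        q->depth++;                              /* queue the packet */
    }

    int main(void)
    {
        struct rcv_ctx rcd = { .vnic_q_idx = 4 };
        struct packet_stub packet = { .rcd = &rcd };

        bypass_rcv(&packet, 0);
        return stats[4].rx_drop_state == 1 ? 0 : 1;
    }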