Home
last modified time | relevance | path

Searched refs: q_idx (Results 1 – 25 of 51) sorted by relevance

123

/linux/drivers/net/ethernet/intel/ice/
A Dice_xsk.c29 memset(&vsi->tx_rings[q_idx]->stats, 0, in ice_qp_reset_stats()
32 memset(&vsi->xdp_rings[q_idx]->stats, 0, in ice_qp_reset_stats()
43 ice_clean_tx_ring(vsi->tx_rings[q_idx]); in ice_qp_clean_rings()
159 if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq) in ice_qp_dis()
162 tx_ring = vsi->tx_rings[q_idx]; in ice_qp_dis()
163 rx_ring = vsi->rx_rings[q_idx]; in ice_qp_dis()
195 ice_qp_clean_rings(vsi, q_idx); in ice_qp_dis()
196 ice_qp_reset_stats(vsi, q_idx); in ice_qp_dis()
217 if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq) in ice_qp_ena()
227 tx_ring = vsi->tx_rings[q_idx]; in ice_qp_ena()
[all …]
A Dice_lib.h15 int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
17 int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
A Dice_lib.c1832 if (q_idx >= vsi->num_rxq) in ice_vsi_cfg_single_rxq()
1835 return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]); in ice_vsi_cfg_single_rxq()
1843 if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx]) in ice_vsi_cfg_single_txq()
1897 u16 q_idx = 0; in ice_vsi_cfg_txqs() local
1906 for (q_idx = 0; q_idx < count; q_idx++) { in ice_vsi_cfg_txqs()
1907 err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf); in ice_vsi_cfg_txqs()
2229 u16 q_idx; in ice_vsi_stop_tx_rings() local
2234 for (q_idx = 0; q_idx < count; q_idx++) { in ice_vsi_stop_tx_rings()
2238 if (!rings || !rings[q_idx]) in ice_vsi_stop_tx_rings()
2241 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); in ice_vsi_stop_tx_rings()
[all …]
/linux/drivers/infiniband/hw/hfi1/
A Dvnic_main.c205 u8 q_idx) in hfi1_vnic_maybe_stop_tx() argument
207 netif_stop_subqueue(vinfo->netdev, q_idx); in hfi1_vnic_maybe_stop_tx()
211 netif_start_subqueue(vinfo->netdev, q_idx); in hfi1_vnic_maybe_stop_tx()
218 u8 pad_len, q_idx = skb->queue_mapping; in hfi1_netdev_start_xmit() local
227 vinfo->stats[q_idx].tx_drop_state++; in hfi1_netdev_start_xmit()
235 vinfo->stats[q_idx].tx_dlid_zero++; in hfi1_netdev_start_xmit()
266 hfi1_vnic_maybe_stop_tx(vinfo, q_idx); in hfi1_netdev_start_xmit()
339 u8 q_idx; in hfi1_vnic_bypass_rcv() local
369 q_idx = packet->rcd->vnic_q_idx; in hfi1_vnic_bypass_rcv()
370 rxq = &vinfo->rxq[q_idx]; in hfi1_vnic_bypass_rcv()
[all …]
A Dvnic_sdma.c125 int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx, in hfi1_vnic_send_dma() argument
129 struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx]; in hfi1_vnic_send_dma()
223 if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx)) in hfi1_vnic_sdma_wakeup()
224 netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx); in hfi1_vnic_sdma_wakeup()
228 u8 q_idx) in hfi1_vnic_sdma_write_avail() argument
230 struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx]; in hfi1_vnic_sdma_write_avail()
248 vnic_sdma->q_idx = i; in hfi1_vnic_sdma_init()
A Dvnic.h49 u8 q_idx; member
113 u8 q_idx);
122 int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
A Dipoib_tx.c57 netif_stop_subqueue(txq->priv->netdev, txq->q_idx); in hfi1_ipoib_stop_txq()
64 netif_wake_subqueue(txq->priv->netdev, txq->q_idx); in hfi1_ipoib_wake_txq()
125 le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx, in hfi1_ipoib_free_tx()
719 txq->q_idx = i; in hfi1_ipoib_txreq_init()
776 txq->q_idx, in hfi1_ipoib_drain_tx_list()
832 __netif_subqueue_stopped(dev, txq->q_idx), in hfi1_ipoib_tx_timeout()
A Dipoib.h114 u8 q_idx; member
/linux/drivers/misc/habanalabs/common/
A Dhw_queue.c408 u32 q_idx; in init_signal_cs() local
411 q_idx = job->hw_queue_id; in init_signal_cs()
420 cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx, in init_signal_cs()
465 u32 q_idx; in init_wait_cs() local
467 q_idx = job->hw_queue_id; in init_wait_cs()
482 cs->encaps_sig_hdl->q_idx, in init_wait_cs()
518 prop->base_mon_id, q_idx, cs->sequence); in init_wait_cs()
525 wait_prop.q_idx = q_idx; in init_wait_cs()
602 encaps_sig_hdl->q_idx, in encaps_sig_first_staged_cs_handler()
954 hw_sob->q_idx = q_idx; in sync_stream_queue_init()
[all …]
A Dcommand_submission.c56 hw_sob->q_idx, hw_sob->sob_id); in hl_sob_reset_error()
1597 q_idx); in hl_cs_signal_sob_wraparound_handler()
1742 job->hw_queue_id = q_idx; in cs_ioctl_signal_wait_create_jobs()
1770 u32 q_idx, u32 count, in cs_ioctl_reserve_signals() argument
1792 q_idx); in cs_ioctl_reserve_signals()
1802 q_idx); in cs_ioctl_reserve_signals()
1829 handle->q_idx = q_idx; in cs_ioctl_reserve_signals()
1890 u32 q_idx, sob_addr; in cs_ioctl_unreserve_signals() local
1904 q_idx = encaps_sig_hdl->q_idx; in cs_ioctl_unreserve_signals()
1991 q_idx = chunk->queue_index; in cs_ioctl_signal_wait()
[all …]
/linux/drivers/net/hyperv/
A Dnetvsc.c755 u16 q_idx = 0; in netvsc_send_tx_complete() local
776 q_idx = packet->q_idx; in netvsc_send_tx_complete()
778 tx_stats = &net_device->chan_table[q_idx].tx_stats; in netvsc_send_tx_complete()
960 &net_device->chan_table[packet->q_idx]; in netvsc_send_pkt()
1090 nvchan = &net_device->chan_table[packet->q_idx]; in netvsc_send()
1249 struct netvsc_device *nvdev, u16 q_idx, in enq_receive_complete() argument
1252 struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx]; in enq_receive_complete()
1266 q_idx, tid); in enq_receive_complete()
1289 u16 q_idx = channel->offermsg.offer.sub_channel_index; in netvsc_receive() local
1372 trace_rndis_recv(ndev, q_idx, data); in netvsc_receive()
[all …]
A Dnetvsc_drv.c300 int q_idx; in netvsc_get_tx_queue() local
306 if (q_idx != old_idx && in netvsc_get_tx_queue()
308 sk_tx_queue_set(sk, q_idx); in netvsc_get_tx_queue()
310 return q_idx; in netvsc_get_tx_queue()
326 int q_idx = sk_tx_queue_get(skb->sk); in netvsc_pick_tx() local
328 if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) { in netvsc_pick_tx()
333 q_idx = skb_get_rx_queue(skb); in netvsc_pick_tx()
335 q_idx = netvsc_get_tx_queue(ndev, skb, q_idx); in netvsc_pick_tx()
338 return q_idx; in netvsc_pick_tx()
578 packet->q_idx = skb_get_queue_mapping(skb); in netvsc_xmit()
[all …]
/linux/drivers/net/ethernet/cavium/thunder/
A Dnicvf_queues.h354 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
355 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
356 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
357 int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
A Dnicvf_queues.c1722 static u64 nicvf_int_type_to_mask(int int_type, int q_idx) in nicvf_int_type_to_mask() argument
1728 reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); in nicvf_int_type_to_mask()
1731 reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); in nicvf_int_type_to_mask()
1734 reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); in nicvf_int_type_to_mask()
1756 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) in nicvf_enable_intr() argument
1758 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); in nicvf_enable_intr()
1770 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) in nicvf_disable_intr() argument
1772 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); in nicvf_disable_intr()
1784 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) in nicvf_clear_intr() argument
1786 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); in nicvf_clear_intr()
[all …]
/linux/drivers/net/ethernet/intel/fm10k/
A Dfm10k_pf.c502 u16 vsi, queue, pc, q_idx; in fm10k_configure_dglort_map_pf() local
519 q_idx = dglort->queue_b; in fm10k_configure_dglort_map_pf()
523 for (queue = 0; queue < queue_count; queue++, q_idx++) { in fm10k_configure_dglort_map_pf()
524 if (q_idx >= FM10K_MAX_QUEUES) in fm10k_configure_dglort_map_pf()
527 fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort); in fm10k_configure_dglort_map_pf()
528 fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort); in fm10k_configure_dglort_map_pf()
538 q_idx = pc + dglort->queue_b; in fm10k_configure_dglort_map_pf()
540 if (q_idx >= FM10K_MAX_QUEUES) in fm10k_configure_dglort_map_pf()
543 txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx)); in fm10k_configure_dglort_map_pf()
546 fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqctl); in fm10k_configure_dglort_map_pf()
[all …]
A Dfm10k_pci.c1176 int q_idx; in fm10k_napi_enable_all() local
1178 for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) { in fm10k_napi_enable_all()
1179 q_vector = interface->q_vector[q_idx]; in fm10k_napi_enable_all()
1873 int q_idx; in fm10k_napi_disable_all() local
1875 for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) { in fm10k_napi_disable_all()
1876 q_vector = interface->q_vector[q_idx]; in fm10k_napi_disable_all()
/linux/drivers/net/ethernet/intel/iavf/
A Diavf_main.c943 int q_idx; in iavf_napi_enable_all() local
947 for (q_idx = 0; q_idx < q_vectors; q_idx++) { in iavf_napi_enable_all()
962 int q_idx; in iavf_napi_disable_all() local
966 for (q_idx = 0; q_idx < q_vectors; q_idx++) { in iavf_napi_disable_all()
1404 int q_idx = 0, num_q_vectors; in iavf_alloc_q_vectors() local
1413 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { in iavf_alloc_q_vectors()
1417 q_vector->v_idx = q_idx; in iavf_alloc_q_vectors()
1418 q_vector->reg_idx = q_idx; in iavf_alloc_q_vectors()
1437 int q_idx, num_q_vectors; in iavf_free_q_vectors() local
1446 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { in iavf_free_q_vectors()
[all …]
/linux/net/sched/
A Dsch_api.c1686 int ret = 0, q_idx = *q_idx_p; in tc_dump_qdisc_root() local
1694 if (q_idx < s_q_idx) { in tc_dump_qdisc_root()
1695 q_idx++; in tc_dump_qdisc_root()
1702 q_idx++; in tc_dump_qdisc_root()
1715 if (q_idx < s_q_idx) { in tc_dump_qdisc_root()
1716 q_idx++; in tc_dump_qdisc_root()
1724 q_idx++; in tc_dump_qdisc_root()
1728 *q_idx_p = q_idx; in tc_dump_qdisc_root()
1738 int idx, q_idx; in tc_dump_qdisc() local
1763 q_idx = 0; in tc_dump_qdisc()
[all …]
/linux/drivers/scsi/mpi3mr/
A Dmpi3mr_fw.c1267 segments = mrioc->req_qinfo[q_idx].q_segments; in mpi3mr_free_op_req_q_segments()
1273 if (mrioc->req_qinfo[q_idx].q_segment_list) { in mpi3mr_free_op_req_q_segments()
1276 mrioc->req_qinfo[q_idx].q_segment_list, in mpi3mr_free_op_req_q_segments()
1281 size = mrioc->req_qinfo[q_idx].num_requests * in mpi3mr_free_op_req_q_segments()
1291 kfree(mrioc->req_qinfo[q_idx].q_segments); in mpi3mr_free_op_req_q_segments()
1292 mrioc->req_qinfo[q_idx].q_segments = NULL; in mpi3mr_free_op_req_q_segments()
1293 mrioc->req_qinfo[q_idx].qid = 0; in mpi3mr_free_op_req_q_segments()
1325 size = mrioc->op_reply_qinfo[q_idx].segment_qd * in mpi3mr_free_op_reply_q_segments()
1336 kfree(mrioc->op_reply_qinfo[q_idx].q_segments); in mpi3mr_free_op_reply_q_segments()
1337 mrioc->op_reply_qinfo[q_idx].q_segments = NULL; in mpi3mr_free_op_reply_q_segments()
[all …]
/linux/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/
A Dtrx.c534 u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx) in rtl92ee_get_available_desc() argument
542 get_desc_addr_fr_q_idx(q_idx)); in rtl92ee_get_available_desc()
889 u8 q_idx = *val; in rtl92ee_set_desc() local
900 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[q_idx]; in rtl92ee_set_desc()
903 if (q_idx == BEACON_QUEUE) { in rtl92ee_set_desc()
914 get_desc_addr_fr_q_idx(q_idx), in rtl92ee_set_desc()
/linux/drivers/net/ethernet/intel/igc/
A Digc_defines.h628 #define IGC_VLANPQF_QSEL(_n, q_idx) ((q_idx) << ((_n) * 4)) argument
/linux/drivers/net/ethernet/qlogic/qed/
A Dqed_hw.h293 u8 q_idx; member
/linux/drivers/net/ethernet/ti/
A Dcpsw.c906 int ret, q_idx; in cpsw_ndo_start_xmit() local
918 q_idx = skb_get_queue_mapping(skb); in cpsw_ndo_start_xmit()
919 if (q_idx >= cpsw->tx_ch_num) in cpsw_ndo_start_xmit()
920 q_idx = q_idx % cpsw->tx_ch_num; in cpsw_ndo_start_xmit()
922 txch = cpsw->txv[q_idx].ch; in cpsw_ndo_start_xmit()
923 txq = netdev_get_tx_queue(ndev, q_idx); in cpsw_ndo_start_xmit()
A Dcpsw_new.c922 int ret, q_idx; in cpsw_ndo_start_xmit() local
934 q_idx = skb_get_queue_mapping(skb); in cpsw_ndo_start_xmit()
935 if (q_idx >= cpsw->tx_ch_num) in cpsw_ndo_start_xmit()
936 q_idx = q_idx % cpsw->tx_ch_num; in cpsw_ndo_start_xmit()
938 txch = cpsw->txv[q_idx].ch; in cpsw_ndo_start_xmit()
939 txq = netdev_get_tx_queue(ndev, q_idx); in cpsw_ndo_start_xmit()
/linux/drivers/net/ethernet/cavium/liquidio/
A Dlio_vf_main.c1423 int q_idx = 0, iq_no = 0; in liquidio_xmit() local
1435 q_idx = skb_iq(lio->oct_dev, skb); in liquidio_xmit()
1436 tag = q_idx; in liquidio_xmit()
1437 iq_no = lio->linfo.txpciq[q_idx].s.q_no; in liquidio_xmit()
1515 spin_lock(&lio->glist_lock[q_idx]); in liquidio_xmit()
1517 lio_list_delete_head(&lio->glist[q_idx]); in liquidio_xmit()
1518 spin_unlock(&lio->glist_lock[q_idx]); in liquidio_xmit()
1612 netif_stop_subqueue(netdev, q_idx); in liquidio_xmit()

Completed in 81 milliseconds

123