/linux/drivers/net/ethernet/huawei/hinic/

hinic_rx.c
    140  skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);  in rx_alloc_skb()
    449  rx_alloc_pkts(rxq);  in rxq_recv()
    484  netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight);  in rx_add_napi()
    526  rx_add_napi(rxq);  in rx_request_irq()
    546  err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);  in rx_request_irq()
    560  rx_del_napi(rxq);  in rx_request_irq()
    570  rx_del_napi(rxq);  in rx_free_irq()
    588  rxq->rq = rq;  in hinic_init_rxq()
    592  rxq_stats_init(rxq);  in hinic_init_rxq()
    596  if (!rxq->irq_name)  in hinic_init_rxq()
    [all …]
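The hinic excerpts show the usual shape of rx-queue bring-up: allocate IP-aligned skbs sized to the queue's buffer, register a NAPI context, then request the queue IRQ. A minimal standalone sketch of that sequence, using hypothetical my_* names and the four-argument netif_napi_add() this tree uses (newer kernels drop the weight parameter):

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_rxq {
        struct net_device *netdev;
        struct napi_struct napi;
        unsigned int buf_sz;
};

static int my_poll(struct napi_struct *napi, int budget)
{
        int done = 0;

        /* ...consume up to budget completed rx descriptors here... */

        if (done < budget)
                napi_complete_done(napi, done);
        return done;
}

static irqreturn_t my_rx_irq(int irq, void *data)
{
        struct my_rxq *rxq = data;

        napi_schedule(&rxq->napi);
        return IRQ_HANDLED;
}

/* netdev_alloc_skb_ip_align() reserves NET_IP_ALIGN bytes of headroom so
 * the IP header lands on a 4-byte boundary after the 14-byte Ethernet
 * header.
 */
static struct sk_buff *my_alloc_skb(struct my_rxq *rxq)
{
        return netdev_alloc_skb_ip_align(rxq->netdev, rxq->buf_sz);
}

static int my_rxq_open(struct my_rxq *rxq, int irq, int weight)
{
        netif_napi_add(rxq->netdev, &rxq->napi, my_poll, weight);
        napi_enable(&rxq->napi);
        return request_irq(irq, my_rx_irq, 0, "my_rxq", rxq);
}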
hinic_rx.h
    44  void hinic_rxq_clean_stats(struct hinic_rxq *rxq);
    46  void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);
    48  int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
    51  void hinic_clean_rxq(struct hinic_rxq *rxq);
/linux/drivers/net/wireless/intel/iwlwifi/pcie/

rx.c
    130  return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);  in iwl_rxq_space()
    192  rxq->write_actual = round_down(rxq->write, 8);  in iwl_pcie_rxq_inc_wr_ptr()
    206  struct iwl_rxq *rxq = &trans_pcie->rxq[i];  in iwl_pcie_rxq_check_wrptr()  (local)
    235  (u32)rxb->vid, rxq->id, rxq->write);  in iwl_pcie_restock_bd()
    269  rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);  in iwl_pcie_rxmq_restock()
    308  rxb = rxq->queue[rxq->write];  in iwl_pcie_rxsq_restock()
    319  rxq->queue[rxq->write] = rxb;  in iwl_pcie_rxsq_restock()
    676  rxq->bd, rxq->bd_dma);  in iwl_pcie_free_rxq_dma()
    687  rxq->used_bd, rxq->used_bd_dma);  in iwl_pcie_free_rxq_dma()
    738  struct iwl_rxq *rxq = &trans_pcie->rxq[i];  in iwl_pcie_alloc_rxq_dma()  (local)
    [all …]
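iwl_rxq_space() and the restock paths rely on queue_size being a power of two, so both the free-slot count and index wrap-around reduce to a mask. Restated standalone (not iwlwifi code):

/* One slot is deliberately kept empty so that read == write always
 * means "ring empty" rather than being ambiguous with "ring full".
 */
static inline unsigned int ring_space(unsigned int read, unsigned int write,
                                      unsigned int queue_size)
{
        return (read - write - 1) & (queue_size - 1);
}

/* Advance an index with wrap-around; queue_size must be a power of two. */
static inline unsigned int ring_advance(unsigned int idx, unsigned int queue_size)
{
        return (idx + 1) & (queue_size - 1);
}

The round_down(rxq->write, 8) at line 192 serves the same batching purpose seen in the iwlegacy drivers further down: hardware is only told about the write pointer in steps of eight descriptors.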
/linux/drivers/infiniband/hw/hfi1/

netdev_rx.c
    194  rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),  in hfi1_netdev_rxq_init()
    197  if (!rx->rxq) {  in hfi1_netdev_rxq_init()
    203  struct hfi1_netdev_rxq *rxq = &rx->rxq[i];  in hfi1_netdev_rxq_init()  (local)
    210  rxq->rx = rx;  in hfi1_netdev_rxq_init()
    211  rxq->rcd->napi = &rxq->napi;  in hfi1_netdev_rxq_init()
    230  struct hfi1_netdev_rxq *rxq = &rx->rxq[i];  in hfi1_netdev_rxq_init()  (local)
    238  kfree(rx->rxq);  in hfi1_netdev_rxq_init()
    239  rx->rxq = NULL;  in hfi1_netdev_rxq_init()
    250  struct hfi1_netdev_rxq *rxq = &rx->rxq[i];  in hfi1_netdev_rxq_deinit()  (local)
    268  struct hfi1_netdev_rxq *rxq = &rx->rxq[i];  in enable_queues()  (local)
    [all …]
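hfi1_netdev_rxq_init() allocates its queue array NUMA-locally and NULLs the pointer on the failure path so a later deinit stays safe. A minimal sketch of the pattern, with illustrative types:

#include <linux/slab.h>

struct my_rxq {
        int idx;
};

struct my_rx {
        int num_rx_q;
        struct my_rxq *rxq;
};

static int my_rxq_init(struct my_rx *rx, int node)
{
        int i;

        /* kcalloc_node() zeroes the array and places it on the NUMA
         * node closest to the device rather than the allocating CPU.
         */
        rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),
                               GFP_KERNEL, node);
        if (!rx->rxq)
                return -ENOMEM;

        for (i = 0; i < rx->num_rx_q; i++)
                rx->rxq[i].idx = i;

        return 0;
}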
vnic_main.c
    295  struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;  in hfi1_vnic_decap_skb()
    303  vinfo->stats[rxq->idx].rx_oversize++;  in hfi1_vnic_decap_skb()
    305  vinfo->stats[rxq->idx].rx_runt++;  in hfi1_vnic_decap_skb()
    336  struct hfi1_vnic_rx_queue *rxq;  in hfi1_vnic_bypass_rcv()  (local)
    370  rxq = &vinfo->rxq[q_idx];  in hfi1_vnic_bypass_rcv()
    389  rc = hfi1_vnic_decap_skb(rxq, skb);  in hfi1_vnic_bypass_rcv()
    401  napi_gro_receive(&rxq->napi, skb);  in hfi1_vnic_bypass_rcv()
    599  struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];  in hfi1_vnic_alloc_rn()  (local)
    601  rxq->idx = i;  in hfi1_vnic_alloc_rn()
    602  rxq->vinfo = vinfo;  in hfi1_vnic_alloc_rn()
    [all …]
/linux/drivers/net/ethernet/qlogic/qede/

qede_fp.c
    56    sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];  in qede_alloc_rx_buffer()
    68    rxq->sw_rx_prod++;  in qede_alloc_rx_buffer()
    519   rxq->sw_rx_cons++;  in qede_rx_bd_ring_consume()
    532   curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];  in qede_reuse_page()
    553   curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];  in qede_recycle_rx_bd_ring()
    669   struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &  in qede_fill_frag_skb()
    974   struct qede_rx_queue *rxq = fp->rxq;  in qede_tpa_end()  (local)
    1088  xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);  in qede_rx_xdp()
    1182  u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :  in qede_rx_build_jumbo()
    1339  struct qede_rx_queue *rxq = fp->rxq;  in qede_rx_int()  (local)
    [all …]
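qede lets sw_rx_prod and sw_rx_cons run freely as 16-bit counters and masks them only at array-access time (the & NUM_RX_BDS_MAX above); with a power-of-two ring, natural integer wrap keeps prod - cons meaningful across overflow. A generic sketch, with a hypothetical RING_SIZE standing in for the driver's constants:

#include <linux/types.h>

#define RING_SIZE 256                   /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)       /* plays the NUM_RX_BDS_MAX role */

struct sw_ring {
        void *bufs[RING_SIZE];
        u16 prod;       /* free-running; wraps naturally at 65536 */
        u16 cons;
};

static void ring_produce(struct sw_ring *r, void *buf)
{
        r->bufs[r->prod & RING_MASK] = buf;
        r->prod++;
}

static void *ring_consume(struct sw_ring *r)
{
        void *buf = r->bufs[r->cons & RING_MASK];

        r->cons++;
        return buf;
}

/* Fill level stays correct across wrap because the subtraction is mod 2^16 */
static u16 ring_fill(const struct sw_ring *r)
{
        return r->prod - r->cons;
}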
qede_main.c
    884   if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))  in qede_free_fp_array()
    886   kfree(fp->rxq);  in qede_free_fp_array()
    955   fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);  in qede_alloc_fp_array()
    956   if (!fp->rxq)  in qede_alloc_fp_array()
    1427  for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {  in qede_free_rx_buffers()
    1448  kfree(rxq->sw_rx_ring);  in qede_free_mem_rxq()
    1481  size = rxq->rx_headroom +  in qede_alloc_mem_rxq()
    1502  if (!rxq->sw_rx_ring) {  in qede_alloc_mem_rxq()
    1527  rxq->filled_buffers = 0;  in qede_alloc_mem_rxq()
    1539  qede_set_tpa_param(rxq);  in qede_alloc_mem_rxq()
    [all …]
/linux/drivers/net/ethernet/marvell/

mvneta.c
    131   #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)  (argument)
    890   prefetch(rxq->descs + rxq->next_desc_to_proc);  in mvneta_rxq_next_desc_get()
    1463  for (rxq = 0; rxq < rxq_number; rxq++)  in mvneta_defaults_set()
    2016  rxq->id, i, rxq->refill_num);  in mvneta_rx_refill_queue()
    3266  if (mvneta_rx_refill(pp, rxq->descs + i, rxq,  in mvneta_rxq_fill()
    3316  rxq->last_desc = rxq->size - 1;  in mvneta_rxq_sw_init()
    3329  mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);  in mvneta_rxq_hw_init()
    3330  mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);  in mvneta_rxq_hw_init()
    3339  mvneta_rxq_fill(pp, rxq, rxq->size);  in mvneta_rxq_hw_init()
    4156  int rxq;  in mvneta_percpu_elect()  (local)
    [all …]
mv643xx_eth.c
    442   return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);  in rxq_to_mp()
    518   rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];  in rxq_process()
    525   skb = rxq->rx_skb[rxq->rx_curr_desc];  in rxq_process()
    526   rxq->rx_skb[rxq->rx_curr_desc] = NULL;  in rxq_process()
    529   if (rxq->rx_curr_desc == rxq->rx_ring_size)  in rxq_process()
    623   if (rxq->rx_used_desc == rxq->rx_ring_size)  in rxq_refill()
    1935  struct rx_queue *rxq = mp->rxq + index;  in rxq_init()  (local)
    1968  rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),  in rxq_init()
    2005  rxq_disable(rxq);  in rxq_deinit()
    2024  rxq->rx_desc_area, rxq->rx_desc_dma);  in rxq_deinit()
    [all …]
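rxq_to_mp() at line 442 is a nice container_of() variation: because the member expression is rxq[rxq->index], the computed offset is that of the specific array slot the queue occupies, so any element of the embedded array maps back to its owner. An illustrative restatement:

#include <linux/kernel.h>

struct rx_queue {
        int index;      /* which slot of the owner's array this is */
};

struct eth_private {
        struct rx_queue rxq[8];
};

static struct eth_private *rxq_to_priv(struct rx_queue *rxq)
{
        /* offsetof(struct eth_private, rxq[rxq->index]) is computed at
         * runtime, so the same expression works for every element.
         */
        return container_of(rxq, struct eth_private, rxq[rxq->index]);
}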
/linux/drivers/atm/

ambassador.c
    673  amb_rxq * rxq = &dev->rxq[pool];  in rx_give()  (local)
    680  if (rxq->pending < rxq->maximum) {  in rx_give()
    685  rxq->in.ptr = NEXTQ (rxq->in.ptr, rxq->in.start, rxq->in.limit);  in rx_give()
    698  amb_rxq * rxq = &dev->rxq[pool];  in rx_take()  (local)
    705  if (rxq->pending && (rxq->out.ptr->status || rxq->out.ptr->length)) {  in rx_take()
    713  rxq->out.ptr = NEXTQ (rxq->out.ptr, rxq->out.start, rxq->out.limit);  in rx_take()
    715  if (rxq->pending < rxq->low)  in rx_take()
    716  rxq->low = rxq->pending;  in rx_take()
    731  amb_rxq * rxq = &dev->rxq[pool];  in drain_rx_pool()  (local)
    776  rxq = &dev->rxq[pool];  in fill_rx_pool()  (local)
    [all …]
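NEXTQ() advances a queue pointer and wraps from the end of the ring back to its start. A plausible reading of the macro, sketched generically (the driver's actual definition lives in ambassador.h):

/* Step to the next entry; wrap from limit (one past the end) to start. */
#define NEXTQ(ptr, start, limit) \
        ((ptr) + 1 < (limit) ? (ptr) + 1 : (start))

Keeping start/limit as explicit parameters lets the same macro serve both the "in" and "out" rings seen above.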
/linux/drivers/net/ethernet/microsoft/mana/

mana_en.c
    896   if (rxq->buf_index == rxq->num_rx_buf)  in mana_post_pkt_rxq()
    1008  rxq->gdma_id, cq->gdma_id, rxq->rxobj);  in mana_process_rx_cqe()
    1285  if (!rxq)  in mana_destroy_rxq()
    1316  kfree(rxq);  in mana_destroy_rxq()
    1332  WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);  in mana_alloc_rx_wqe()
    1411  if (!rxq)  in mana_create_rxq()
    1439  cq->rxq = rxq;  in mana_create_rxq()
    1473  rxq->gdma_id = rxq->gdma_rq->id;  in mana_create_rxq()
    1493  return rxq;  in mana_create_rxq()
    1515  if (!rxq) {  in mana_add_rx_queues()
    [all …]
/linux/drivers/vdpa/vdpa_sim/

vdpa_sim_net.c
    37  struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];  in vdpasim_net_work()  (local)
    48  if (!txq->ready || !rxq->ready)  in vdpasim_net_work()
    58  err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->in_iov,  in vdpasim_net_work()
    59  &rxq->head, GFP_ATOMIC);  in vdpasim_net_work()
    72  write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,  in vdpasim_net_work()
    84  vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);  in vdpasim_net_work()
    92  if (vringh_need_notify_iotlb(&rxq->vring) > 0)  in vdpasim_net_work()
    93  vringh_notify(&rxq->vring);  in vdpasim_net_work()
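vdpasim_net_work() shows the canonical vringh receive sequence: fetch a descriptor chain, push payload into its writable iov, then complete and (if needed) notify. A condensed sketch of that sequence as a helper; error handling is trimmed and the caller is assumed to hold the queue lock:

#include <linux/vringh.h>

static void rx_deliver(struct vringh *vring, struct vringh_kiov *in_iov,
                       const void *buf, size_t len)
{
        ssize_t written;
        u16 head;
        int err;

        /* riov is NULL: the rx path only writes into the guest buffer */
        err = vringh_getdesc_iotlb(vring, NULL, in_iov, &head, GFP_ATOMIC);
        if (err <= 0)
                return;         /* 0 means no descriptor is available */

        written = vringh_iov_push_iotlb(vring, in_iov, buf, len);
        if (written < 0)
                return;

        /* Tell the guest how many bytes were written to this head */
        vringh_complete_iotlb(vring, head, written);

        if (vringh_need_notify_iotlb(vring) > 0)
                vringh_notify(vring);
}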
/linux/drivers/net/ethernet/atheros/alx/

main.c
    74   struct alx_rx_queue *rxq = alx->qnapi[0]->rxq;  in alx_refill_rx_ring()  (local)
    229  rrd = &rxq->rrd[rxq->rrd_read_idx];  in alx_clean_rx_irq()
    242  rxb = &rxq->bufs[rxq->read_idx];  in alx_clean_rx_irq()
    282  if (++rxq->read_idx == rxq->count)  in alx_clean_rx_irq()
    284  if (++rxq->rrd_read_idx == rxq->count)  in alx_clean_rx_irq()
    308  if (np->rxq)  in alx_poll()
    468  if (np->rxq) {  in alx_init_ring_ptrs()
    512  if (!rxq->bufs)  in alx_free_rxring_buf()
    779  rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);  in alx_alloc_napis()
    780  if (!rxq)  in alx_alloc_napis()
    [all …]
/linux/drivers/net/ethernet/hisilicon/

hisi_femac.c
    212  struct hisi_femac_queue *rxq = &priv->rxq;  in hisi_femac_rx_refill()  (local)
    218  pos = rxq->head;  in hisi_femac_rx_refill()
    220  if (!CIRC_SPACE(pos, rxq->tail, rxq->num))  in hisi_femac_rx_refill()
    238  rxq->skb[pos] = skb;  in hisi_femac_rx_refill()
    242  rxq->head = pos;  in hisi_femac_rx_refill()
    248  struct hisi_femac_queue *rxq = &priv->rxq;  in hisi_femac_rx()  (local)
    253  pos = rxq->tail;  in hisi_femac_rx()
    292  rxq->tail = pos;  in hisi_femac_rx()
    388  struct hisi_femac_queue *rxq = &priv->rxq;  in hisi_femac_free_skb_rings()  (local)
    393  pos = rxq->tail;  in hisi_femac_free_skb_rings()
    [all …]
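hisi_femac_rx_refill() bounds its producer with CIRC_SPACE() from <linux/circ_buf.h>, which reports how many entries can still be written before head would catch tail. A compact sketch of that refill shape, with an illustrative queue type:

#include <linux/circ_buf.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_queue {
        struct sk_buff **skb;
        unsigned int head;      /* producer position */
        unsigned int tail;      /* consumer position */
        unsigned int num;       /* ring size, power of two */
};

static void my_rx_refill(struct my_queue *q, struct net_device *ndev,
                         unsigned int buf_len)
{
        unsigned int pos = q->head;

        /* CIRC_SPACE() keeps one slot empty so "full" and "empty" stay
         * distinguishable.
         */
        while (CIRC_SPACE(pos, q->tail, q->num)) {
                struct sk_buff *skb = netdev_alloc_skb(ndev, buf_len);

                if (!skb)
                        break;
                q->skb[pos] = skb;
                pos = (pos + 1) & (q->num - 1);
        }
        q->head = pos;  /* publish only after the slots are filled */
}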
/linux/drivers/net/wireless/intel/iwlegacy/

3945-mac.c
    929   struct il_rx_queue *rxq = &il->rxq;  in il3945_rx_queue_restock()  (local)
    942   rxq->bd[rxq->write] =  in il3945_rx_queue_restock()
    944   rxq->queue[rxq->write] = rxb;  in il3945_rx_queue_restock()
    945   rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;  in il3945_rx_queue_restock()
    956   if (rxq->write_actual != (rxq->write & ~0x7) ||  in il3945_rx_queue_restock()
    957   abs(rxq->write - rxq->read) > 7) {  in il3945_rx_queue_restock()
    976   struct il_rx_queue *rxq = &il->rxq;  in il3945_rx_allocate()  (local)
    1070  list_add_tail(&rxq->pool[i].list, &rxq->rx_used);  in il3945_rx_queue_reset()
    1075  rxq->read = rxq->write = 0;  in il3945_rx_queue_reset()
    1125  rxq->rb_stts, rxq->rb_stts_dma);  in il3945_rx_queue_free()
    [all …]
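The test at lines 956-957 batches doorbell writes: hardware only learns the new write pointer once it has crossed an 8-descriptor boundary (write & ~0x7), or when the ring is nearly caught up. A standalone restatement, not iwlegacy code (the 256-entry size is illustrative):

#include <linux/kernel.h>

#define RX_QUEUE_SIZE 256
#define RX_QUEUE_MASK (RX_QUEUE_SIZE - 1)

/* Advance the software write index with wrap-around. */
static unsigned int rx_write_advance(unsigned int write)
{
        return (write + 1) & RX_QUEUE_MASK;
}

/* write_actual is what hardware last saw, always a multiple of 8.
 * Ring the doorbell only when write crossed another 8-descriptor
 * boundary, or when fewer than 8 descriptors separate write and read.
 */
static bool rx_need_doorbell(int write, int write_actual, int read)
{
        return write_actual != (write & ~0x7) || abs(write - read) > 7;
}

Batching the update amortizes the (slow) MMIO write over many descriptors while the near-empty check guards against stalling a starved ring.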
4965-mac.c
    104  list_add_tail(&rxq->pool[i].list, &rxq->rx_used);  in il4965_rx_queue_reset()
    112  rxq->read = rxq->write = 0;  in il4965_rx_queue_reset()
    184  struct il_rx_queue *rxq = &il->rxq;  in il4965_hw_nic_init()  (local)
    253  struct il_rx_queue *rxq = &il->rxq;  in il4965_rx_queue_restock()  (local)
    261  rxb = rxq->queue[rxq->write];  in il4965_rx_queue_restock()
    270  rxq->bd[rxq->write] =  in il4965_rx_queue_restock()
    272  rxq->queue[rxq->write] = rxb;  in il4965_rx_queue_restock()
    273  rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;  in il4965_rx_queue_restock()
    303  struct il_rx_queue *rxq = &il->rxq;  in il4965_rx_allocate()  (local)
    424  rxq->rb_stts, rxq->rb_stts_dma);  in il4965_rx_queue_free()
    [all …]
/linux/samples/bpf/

xdp_rxq_info_user.c
    194  struct record *rxq;  (member)
    234  rec->rxq = alloc_record_per_rxq();  in alloc_stats_record()
    236  rec->rxq[i].cpu = alloc_record_per_cpu();  in alloc_stats_record()
    248  free(r->rxq[i].cpu);  in free_stats_record()
    250  free(r->rxq);  in free_stats_record()
    294  map_collect_percpu(fd, i, &rec->rxq[i]);  in stats_collect()
    343  int rxq;  in stats_print()  (local)
    383  for (rxq = 0; rxq < nr_rxqs; rxq++) {  in stats_print()
    387  int rxq_ = rxq;  in stats_print()
    393  rec = &stats_rec->rxq[rxq];  in stats_print()
    [all …]
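The sample keeps one record per rx queue, each holding a per-CPU array (per-CPU BPF maps return one value slot per possible CPU). A trimmed userspace sketch of that nested allocation; the types and counts are simplified stand-ins for the sample's own:

#include <stdlib.h>

struct datarec {
        unsigned long processed;
};

struct record {
        struct datarec *cpu;    /* one slot per possible CPU */
};

struct stats_record {
        struct record *rxq;     /* one record per rx queue */
};

static struct stats_record *alloc_stats_record(int nr_rxqs, int nr_cpus)
{
        struct stats_record *rec;
        int i;

        rec = calloc(1, sizeof(*rec));
        if (!rec)
                return NULL;

        rec->rxq = calloc(nr_rxqs, sizeof(*rec->rxq));
        if (!rec->rxq) {
                free(rec);
                return NULL;
        }

        for (i = 0; i < nr_rxqs; i++)
                rec->rxq[i].cpu = calloc(nr_cpus, sizeof(struct datarec));

        return rec;
}

Teardown mirrors this shape, as in free_stats_record() above: free each rxq's cpu array, then the rxq array, then the record.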
/linux/drivers/vhost/

net.c
    153  if (rxq->tail != rxq->head)  in vhost_net_buf_get_ptr()
    154  return rxq->queue[rxq->head];  in vhost_net_buf_get_ptr()
    161  return rxq->tail - rxq->head;  in vhost_net_buf_get_size()
    166  return rxq->tail == rxq->head;  in vhost_net_buf_is_empty()
    172  ++rxq->head;  in vhost_net_buf_consume()
    178  struct vhost_net_buf *rxq = &nvq->rxq;  in vhost_net_buf_produce()  (local)
    188  struct vhost_net_buf *rxq = &nvq->rxq;  in vhost_net_buf_unproduce()  (local)
    191  ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,  in vhost_net_buf_unproduce()
    194  rxq->head = rxq->tail = 0;  in vhost_net_buf_unproduce()
    211  struct vhost_net_buf *rxq = &nvq->rxq;  in vhost_net_buf_peek()  (local)
    [all …]
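vhost_net_buf is a small batch cache in front of a ptr_ring: a batch is consumed into a flat array, handed out one pointer at a time through head/tail, and anything unconsumed is pushed back with ptr_ring_unconsume() at teardown. A sketch with illustrative names and batch size:

#include <linux/ptr_ring.h>

#define MY_BATCH 64

struct my_buf {
        void *queue[MY_BATCH];
        int tail;       /* number of entries fetched into queue[] */
        int head;       /* next entry to hand out */
};

static void *my_buf_peek_ptr(struct my_buf *b)
{
        return b->tail != b->head ? b->queue[b->head] : NULL;
}

static void *my_buf_consume(struct my_buf *b)
{
        void *ptr = my_buf_peek_ptr(b);

        if (ptr)
                ++b->head;
        return ptr;
}

/* Refill the cache with one batched consume from the ring. */
static void my_buf_produce(struct my_buf *b, struct ptr_ring *ring)
{
        b->head = 0;
        b->tail = ptr_ring_consume_batched(ring, b->queue, MY_BATCH);
}

/* Return anything fetched but never handed out. */
static void my_buf_unproduce(struct my_buf *b, struct ptr_ring *ring,
                             void (*destroy)(void *))
{
        ptr_ring_unconsume(ring, b->queue + b->head, b->tail - b->head,
                           destroy);
        b->head = b->tail = 0;
}

Batching the consume amortizes the ring's synchronization cost over up to MY_BATCH packets.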
/linux/drivers/net/ethernet/chelsio/cxgb4vf/

cxgb4vf_main.c
    383   int rxq, msi, err;  in request_msix_queue_irqs()  (local)
    409   while (--rxq >= 0)  in request_msix_queue_irqs()
    421   int rxq, msi;  in free_msix_queue_irqs()  (local)
    452   int rxq;  in enable_rx()  (local)
    477   int rxq;  in quiesce_rx()  (local)
    650   memset(&rxq->stats, 0, sizeof(rxq->stats));  in setup_sge_queues()
    668   IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;  in setup_sge_queues()
    680   rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;  in setup_sge_queues()
    681   EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;  in setup_sge_queues()
    1242  rxq++;  in cxgb4vf_poll_controller()
    [all …]
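The while (--rxq >= 0) at line 409 is the standard error-unwind idiom: if request_irq() fails for queue N, walk back over queues 0..N-1 and release the IRQs already requested. A generic sketch with hypothetical names:

#include <linux/interrupt.h>

struct my_rxq {
        int vec;                /* MSI-X vector for this queue */
        char name[32];
};

static irqreturn_t my_rxq_intr(int irq, void *data)
{
        /* would schedule NAPI for the queue passed in data */
        return IRQ_HANDLED;
}

static int request_queue_irqs(struct my_rxq *qs, int nqueues)
{
        int rxq, err;

        for (rxq = 0; rxq < nqueues; rxq++) {
                err = request_irq(qs[rxq].vec, my_rxq_intr, 0,
                                  qs[rxq].name, &qs[rxq]);
                if (err)
                        goto err_unwind;
        }
        return 0;

err_unwind:
        /* free only the IRQs that were successfully requested */
        while (--rxq >= 0)
                free_irq(qs[rxq].vec, &qs[rxq]);
        return err;
}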
sge.c
    1594  rxq->stats.vlan_ex++;  in do_gro()
    1602  rxq->stats.pkts++;  in do_gro()
    1603  rxq->stats.rx_cso++;  in do_gro()
    1633  do_gro(rxq, gl, pkt);  in t4vf_ethrx_handler()
    1650  rxq->stats.pkts++;  in t4vf_ethrx_handler()
    1656  rxq->stats.rx_cso++;  in t4vf_ethrx_handler()
    1661  rxq->stats.rx_cso++;  in t4vf_ethrx_handler()
    1810  sdesc = &rxq->fl.sdesc[rxq->fl.cidx];  in process_responses()
    1872  fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)  in process_responses()
    2574  if (rxq->rspq.desc)  in t4vf_free_sge_resources()
    [all …]
/linux/drivers/net/ethernet/alacritech/

slicoss.c
    122  return slic_get_free_queue_descs(rxq->put_idx, rxq->done_idx, rxq->len);  in slic_get_free_rx_descs()
    396  struct slic_rx_queue *rxq = &sdev->rxq;  in slic_refill_rx_queue()  (local)
    437  buff = &rxq->rxbuffs[rxq->put_idx];  in slic_refill_rx_queue()
    446  rxq->put_idx = slic_next_queue_idx(rxq->put_idx, rxq->len);  in slic_refill_rx_queue()
    549  struct slic_rx_queue *rxq = &sdev->rxq;  in slic_handle_receive()  (local)
    559  while (todo && (rxq->done_idx != rxq->put_idx)) {  in slic_handle_receive()
    560  buff = &rxq->rxbuffs[rxq->done_idx];  in slic_handle_receive()
    613  rxq->done_idx = slic_next_queue_idx(rxq->done_idx, rxq->len);  in slic_handle_receive()
    918  struct slic_rx_queue *rxq = &sdev->rxq;  in slic_init_rx_queue()  (local)
    923  rxq->put_idx = 0;  in slic_init_rx_queue()
    [all …]
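slicoss tracks its ring with a put (producer) and done (consumer) index; the free count is the distance from put back around to done, minus one reserved slot so put == done always means "empty". A generic restatement of the arithmetic (assumed shapes for the two helpers, not the driver's exact code):

/* Wrap-around advance; len is the ring length. */
static unsigned int next_queue_idx(unsigned int idx, unsigned int len)
{
        return (idx + 1) % len;
}

/* Free descriptors between producer (put) and consumer (done). */
static unsigned int free_queue_descs(unsigned int put, unsigned int done,
                                     unsigned int len)
{
        if (put >= done)
                return len - (put - done) - 1;
        return done - put - 1;
}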
/linux/include/net/

xdp.h
    74   struct xdp_rxq_info *rxq;  (member)
    80   xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)  in xdp_init_buff()  (argument)
    83   xdp->rxq = rxq;  in xdp_init_buff()
    219  if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)  in xdp_convert_buff_to_frame()
    228  xdp_frame->mem = xdp->rxq->mem;  in xdp_convert_buff_to_frame()
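xdp_init_buff() ties a per-packet xdp_buff to its queue's xdp_rxq_info; a driver then lays out the frame and runs the attached program (compare the qede_rx_xdp() call earlier in this listing). A driver-side sketch; xdp_prepare_buff() is the companion helper from the same header:

#include <linux/filter.h>       /* bpf_prog_run_xdp() */
#include <net/xdp.h>

static u32 my_run_xdp(struct bpf_prog *prog, struct xdp_rxq_info *xdp_rxq,
                      void *hard_start, int headroom, int data_len,
                      u32 frame_sz)
{
        struct xdp_buff xdp;

        /* Bind the buff to this rx queue; frame_sz covers headroom,
         * packet data, and tailroom for the buffer.
         */
        xdp_init_buff(&xdp, frame_sz, xdp_rxq);

        /* Lay out data pointers; data_meta disabled (last arg false). */
        xdp_prepare_buff(&xdp, hard_start, headroom, data_len, false);

        return bpf_prog_run_xdp(prog, &xdp);    /* XDP_PASS, XDP_DROP, ... */
}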
/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/

ktls_rx.c
    54   u32 rxq;  (member)
    345  c = resync->priv->channels.c[priv_rx->rxq];  in resync_handle_work()
    533  c = priv->channels.c[priv_rx->rxq];  in mlx5e_ktls_rx_resync()
    575  int rxq = sk_rx_queue_get(sk);  in mlx5e_ktls_sk_get_rxq()  (local)
    577  if (unlikely(rxq == -1))  in mlx5e_ktls_sk_get_rxq()
    578  rxq = 0;  in mlx5e_ktls_sk_get_rxq()
    580  return rxq;  in mlx5e_ktls_sk_get_rxq()
    592  int rxq, err;  in mlx5e_ktls_add_rx()  (local)
    610  rxq = mlx5e_ktls_sk_get_rxq(sk);  in mlx5e_ktls_add_rx()
    611  priv_rx->rxq = rxq;  in mlx5e_ktls_add_rx()
    [all …]
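mlx5e_ktls_sk_get_rxq() resolves which rx queue a socket's traffic lands on via the generic sk_rx_queue_get() helper, which returns -1 while no queue mapping has been recorded; the driver falls back to queue 0. Restated minimally:

#include <net/sock.h>

/* Map a socket to its last-seen rx queue, defaulting to queue 0 when
 * the kernel has not recorded a mapping for this socket yet.
 */
static int sk_to_rxq(const struct sock *sk)
{
        int rxq = sk_rx_queue_get(sk);

        return rxq < 0 ? 0 : rxq;
}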
/linux/drivers/net/ethernet/marvell/mvpp2/

mvpp2_main.c
    1181  int rxq;  in mvpp2_swf_bm_pool_init_shared()  (local)
    1204  for (rxq = 0; rxq < port->nrxqs; rxq++)  in mvpp2_swf_bm_pool_init_shared()
    1217  for (rxq = 0; rxq < port->nrxqs; rxq++)  in mvpp2_swf_bm_pool_init_shared()
    2406  rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);  in mvpp2_rxq_next_desc_get()
    2407  prefetch(rxq->descs + rxq->next_desc_to_proc);  in mvpp2_rxq_next_desc_get()
    2932  rxq->last_desc = rxq->size - 1;  in mvpp2_rxq_init()
    2960  mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);  in mvpp2_rxq_init()
    2997  rxq->descs, rxq->descs_dma);  in mvpp2_rxq_init()
    4508  if (!rxq)  in mvpp2_poll()
    5969  rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);  in mvpp2_port_init()
    [all …]
/linux/drivers/net/ethernet/chelsio/cxgb4/

cudbg_lib.h
    253  static inline void cudbg_fill_qdesc_rxq(const struct sge_rspq *rxq,  in cudbg_fill_qdesc_rxq()  (argument)
    258  entry->qid = rxq->cntxt_id;  in cudbg_fill_qdesc_rxq()
    259  entry->desc_size = rxq->iqe_len;  in cudbg_fill_qdesc_rxq()
    260  entry->num_desc = rxq->size;  in cudbg_fill_qdesc_rxq()
    261  entry->data_size = rxq->size * rxq->iqe_len;  in cudbg_fill_qdesc_rxq()
    262  memcpy(entry->data, rxq->desc, entry->data_size);  in cudbg_fill_qdesc_rxq()