Lines matching refs: tx_ring

Each entry below shows the source line number, the matched line, and the enclosing function in the ENETC Ethernet driver; entries tagged "argument" mark places where tx_ring appears as a function parameter.

27 							struct enetc_bdr *tx_ring)  in enetc_rx_ring_from_xdp_tx_ring()  argument
29 int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring; in enetc_rx_ring_from_xdp_tx_ring()
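These two hits show how an XDP TX ring is paired back to its RX ring with nothing but pointer arithmetic: the XDP TX rings are a tail slice of the TX ring pointer array, so subtracting the slice base from the current ring's slot gives the index of the paired RX ring. A minimal standalone model of that arithmetic follows; the array size and the slice offset are invented for illustration and are not taken from the driver.

    #include <stdio.h>

    struct bdr { int index; };

    int main(void)
    {
        struct bdr r0 = { 0 }, r1 = { 1 }, r2 = { 2 }, r3 = { 3 };
        struct bdr *tx_ring[] = { &r0, &r1, &r2, &r3 };  /* all TX rings (pointer array) */
        struct bdr **xdp_tx_ring = &tx_ring[2];          /* XDP TX rings start at slot 2 */
        struct bdr *cur = tx_ring[3];                    /* ring that owns the buffer    */

        /* same shape as line 29: &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring */
        int index = (int)(&tx_ring[cur->index] - xdp_tx_ring);

        printf("paired RX ring index: %d\n", index);     /* prints 1 */
        return 0;
    }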
51 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring, in enetc_unmap_tx_buff() argument
59 dma_unmap_page(tx_ring->dev, tx_swbd->dma, in enetc_unmap_tx_buff()
63 dma_unmap_single(tx_ring->dev, tx_swbd->dma, in enetc_unmap_tx_buff()
68 static void enetc_free_tx_frame(struct enetc_bdr *tx_ring, in enetc_free_tx_frame() argument
75 enetc_unmap_tx_buff(tx_ring, tx_swbd); in enetc_free_tx_frame()
87 static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring) in enetc_update_tx_ring_tail() argument
90 enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use); in enetc_update_tx_ring_tail()
126 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) in enetc_map_tx_buffs() argument
129 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); in enetc_map_tx_buffs()
143 i = tx_ring->next_to_use; in enetc_map_tx_buffs()
144 txbd = ENETC_TXBD(*tx_ring, i); in enetc_map_tx_buffs()
147 dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE); in enetc_map_tx_buffs()
148 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in enetc_map_tx_buffs()
155 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_buffs()
180 if (tx_ring->tsd_enable) in enetc_map_tx_buffs()
201 if (unlikely(i == tx_ring->bd_count)) { in enetc_map_tx_buffs()
203 tx_swbd = tx_ring->tx_swbd; in enetc_map_tx_buffs()
204 txbd = ENETC_TXBD(*tx_ring, 0); in enetc_map_tx_buffs()
259 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, in enetc_map_tx_buffs()
261 if (dma_mapping_error(tx_ring->dev, dma)) in enetc_map_tx_buffs()
271 if (unlikely(i == tx_ring->bd_count)) { in enetc_map_tx_buffs()
273 tx_swbd = tx_ring->tx_swbd; in enetc_map_tx_buffs()
274 txbd = ENETC_TXBD(*tx_ring, 0); in enetc_map_tx_buffs()
293 tx_ring->tx_swbd[i].is_eof = true; in enetc_map_tx_buffs()
294 tx_ring->tx_swbd[i].skb = skb; in enetc_map_tx_buffs()
296 enetc_bdr_idx_inc(tx_ring, &i); in enetc_map_tx_buffs()
297 tx_ring->next_to_use = i; in enetc_map_tx_buffs()
301 enetc_update_tx_ring_tail(tx_ring); in enetc_map_tx_buffs()
306 dev_err(tx_ring->dev, "DMA map error"); in enetc_map_tx_buffs()
309 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_buffs()
310 enetc_free_tx_frame(tx_ring, tx_swbd); in enetc_map_tx_buffs()
312 i = tx_ring->bd_count; in enetc_map_tx_buffs()
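The enetc_map_tx_buffs() hits together outline a common producer pattern: walk next_to_use forward while mapping the linear part and each fragment, wrap the index back to 0 when it reaches bd_count (lines 201 and 271), ring the tail once at the end (line 301), and on a DMA mapping error unwind the same index backwards, wrapping at 0, to free whatever was already mapped (lines 306-312). Below is a self-contained model of the forward walk plus the unwind; the ring size, the fragment count, and the failing fragment are invented, and fake_map() merely stands in for dma_map_single()/skb_frag_dma_map().

    #include <stdbool.h>
    #include <stdio.h>

    #define BD_COUNT 8

    static bool bd_mapped[BD_COUNT];            /* stand-in for tx_swbd[] bookkeeping */

    static bool fake_map(int frag)
    {
        return frag != 3;                       /* pretend fragment 3 fails to map */
    }

    int main(void)
    {
        int next_to_use = 6, i = next_to_use, count = 0;

        for (int frag = 0; frag < 5; frag++) {
            if (!fake_map(frag))
                goto dma_err;
            bd_mapped[i] = true;
            count++;
            if (++i == BD_COUNT)                /* wrap, as at lines 201/271 */
                i = 0;
        }
        printf("mapped %d BDs, new next_to_use=%d\n", count, i);
        return 0;

    dma_err:
        while (count--) {                       /* unwind with wrap, as at lines 306-312 */
            if (i == 0)
                i = BD_COUNT;
            i--;
            bd_mapped[i] = false;
        }
        printf("rolled back, next_to_use stays %d\n", i);
        return 1;
    }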
319 static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, in enetc_map_tx_tso_hdr() argument
329 addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; in enetc_map_tx_tso_hdr()
355 enetc_bdr_idx_inc(tx_ring, i); in enetc_map_tx_tso_hdr()
356 txbd = ENETC_TXBD(*tx_ring, *i); in enetc_map_tx_tso_hdr()
357 tx_swbd = &tx_ring->tx_swbd[*i]; in enetc_map_tx_tso_hdr()
372 static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb, in enetc_map_tx_tso_data() argument
383 addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); in enetc_map_tx_tso_data()
384 if (unlikely(dma_mapping_error(tx_ring->dev, addr))) { in enetc_map_tx_tso_data()
385 netdev_err(tx_ring->ndev, "DMA map error\n"); in enetc_map_tx_tso_data()
438 static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso, in enetc_tso_complete_csum() argument
468 static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) in enetc_map_tx_tso_buffs() argument
481 i = tx_ring->next_to_use; in enetc_map_tx_tso_buffs()
487 txbd = ENETC_TXBD(*tx_ring, i); in enetc_map_tx_tso_buffs()
488 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_tso_buffs()
496 hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE; in enetc_map_tx_tso_buffs()
501 enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len); in enetc_map_tx_tso_buffs()
511 enetc_bdr_idx_inc(tx_ring, &i); in enetc_map_tx_tso_buffs()
512 txbd = ENETC_TXBD(*tx_ring, i); in enetc_map_tx_tso_buffs()
513 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_tso_buffs()
524 err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd, in enetc_map_tx_tso_buffs()
539 enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum); in enetc_map_tx_tso_buffs()
545 enetc_bdr_idx_inc(tx_ring, &i); in enetc_map_tx_tso_buffs()
548 tx_ring->next_to_use = i; in enetc_map_tx_tso_buffs()
549 enetc_update_tx_ring_tail(tx_ring); in enetc_map_tx_tso_buffs()
554 dev_err(tx_ring->dev, "DMA map error"); in enetc_map_tx_tso_buffs()
558 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_tso_buffs()
559 enetc_free_tx_frame(tx_ring, tx_swbd); in enetc_map_tx_tso_buffs()
561 i = tx_ring->bd_count; in enetc_map_tx_tso_buffs()
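Lines 329 and 496 show how TSO headers are staged: each ring keeps a flat header buffer with one TSO_HEADER_SIZE slot per descriptor index, and the same i * TSO_HEADER_SIZE offset is applied to both the CPU-side base (tso_headers) and the device-side base (tso_headers_dma). A small model of that dual addressing; the slot size, ring length, and DMA base are illustrative only.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define TSO_HDR_SLOT 256                    /* stand-in for TSO_HEADER_SIZE */
    #define BD_COUNT     8

    int main(void)
    {
        char *tso_headers = calloc(BD_COUNT, TSO_HDR_SLOT);   /* CPU view of the header area */
        uint64_t tso_headers_dma = 0x80000000ull;             /* pretend bus address         */
        int i = 5;                                            /* descriptor index            */

        if (!tso_headers)
            return 1;

        char *hdr = tso_headers + i * TSO_HDR_SLOT;                   /* like line 496 */
        uint64_t addr = tso_headers_dma + (uint64_t)i * TSO_HDR_SLOT; /* like line 329 */

        printf("cpu slot offset %td, dma addr 0x%llx\n",
               hdr - tso_headers, (unsigned long long)addr);
        free(tso_headers);
        return 0;
    }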
572 struct enetc_bdr *tx_ring; in enetc_start_xmit() local
584 tx_ring = priv->tx_ring[skb->queue_mapping]; in enetc_start_xmit()
587 if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) { in enetc_start_xmit()
588 netif_stop_subqueue(ndev, tx_ring->index); in enetc_start_xmit()
593 count = enetc_map_tx_tso_buffs(tx_ring, skb); in enetc_start_xmit()
601 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { in enetc_start_xmit()
602 netif_stop_subqueue(ndev, tx_ring->index); in enetc_start_xmit()
612 count = enetc_map_tx_buffs(tx_ring, skb); in enetc_start_xmit()
619 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED) in enetc_start_xmit()
620 netif_stop_subqueue(ndev, tx_ring->index); in enetc_start_xmit()
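The enetc_start_xmit() hits show the flow-control policy around the ring: stop the subqueue up front when the ring cannot hold the descriptors this frame needs (tso_count_descs() for TSO at line 587, ENETC_TXBDS_NEEDED(count) otherwise at line 601), and after queuing stop it preemptively if a worst-case frame would no longer fit (line 619); the matching wake is in the clean path (lines 878-880). A minimal model of those two checks; the ring size, the worst-case constant, and the free-space helper are assumptions standing in for the driver's enetc_bd_unused(), which is not among the hits above.

    #include <stdio.h>

    #define BD_COUNT          64
    #define TXBDS_MAX_NEEDED  40    /* invented stand-in for ENETC_TXBDS_MAX_NEEDED */

    /* free descriptors between producer and consumer, keeping one slot spare */
    static int bd_unused(int next_to_clean, int next_to_use)
    {
        if (next_to_clean > next_to_use)
            return next_to_clean - next_to_use - 1;
        return BD_COUNT + next_to_clean - next_to_use - 1;
    }

    int main(void)
    {
        int next_to_use = 50, next_to_clean = 20, needed = 5;

        if (bd_unused(next_to_clean, next_to_use) < needed) {
            puts("stop queue: this frame does not fit");
            return 0;
        }

        next_to_use = (next_to_use + needed) % BD_COUNT;   /* queue the frame */

        if (bd_unused(next_to_clean, next_to_use) < TXBDS_MAX_NEEDED)
            puts("stop queue: a worst-case frame would not fit next time");
        else
            puts("queue left running");
        return 0;
    }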
703 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci) in enetc_bd_ready_count() argument
705 int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; in enetc_bd_ready_count()
707 return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi; in enetc_bd_ready_count()
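Line 707 is the wrap-aware distance from the software consumer index ci to the hardware index pi just read from tcir: once pi has wrapped past the end of the ring, the count becomes bd_count - ci + pi. A two-case check of that formula as a standalone helper (ring size and indices chosen arbitrarily):

    #include <assert.h>
    #include <stdio.h>

    static int ready_count(int pi, int ci, int bd_count)
    {
        return pi >= ci ? pi - ci : bd_count - ci + pi;   /* same shape as line 707 */
    }

    int main(void)
    {
        assert(ready_count(10, 4, 64) == 6);    /* no wrap                    */
        assert(ready_count(2, 60, 64) == 6);    /* pi wrapped past the end    */
        puts("ready_count model ok");
        return 0;
    }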
754 static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring, in enetc_recycle_xdp_tx_buff() argument
757 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); in enetc_recycle_xdp_tx_buff()
767 rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring); in enetc_recycle_xdp_tx_buff()
793 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) in enetc_clean_tx_ring() argument
795 struct net_device *ndev = tx_ring->ndev; in enetc_clean_tx_ring()
803 i = tx_ring->next_to_clean; in enetc_clean_tx_ring()
804 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_clean_tx_ring()
806 bds_to_clean = enetc_bd_ready_count(tx_ring, i); in enetc_clean_tx_ring()
816 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); in enetc_clean_tx_ring()
827 enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd); in enetc_clean_tx_ring()
829 enetc_unmap_tx_buff(tx_ring, tx_swbd); in enetc_clean_tx_ring()
856 if (unlikely(i == tx_ring->bd_count)) { in enetc_clean_tx_ring()
858 tx_swbd = tx_ring->tx_swbd; in enetc_clean_tx_ring()
865 enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) | in enetc_clean_tx_ring()
866 BIT(16 + tx_ring->index)); in enetc_clean_tx_ring()
870 bds_to_clean = enetc_bd_ready_count(tx_ring, i); in enetc_clean_tx_ring()
873 tx_ring->next_to_clean = i; in enetc_clean_tx_ring()
874 tx_ring->stats.packets += tx_frm_cnt; in enetc_clean_tx_ring()
875 tx_ring->stats.bytes += tx_byte_cnt; in enetc_clean_tx_ring()
878 __netif_subqueue_stopped(ndev, tx_ring->index) && in enetc_clean_tx_ring()
879 (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) { in enetc_clean_tx_ring()
880 netif_wake_subqueue(ndev, tx_ring->index); in enetc_clean_tx_ring()
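Taken together, the enetc_clean_tx_ring() hits outline the completion loop: start at next_to_clean, ask the hardware how many descriptors are done, release each one (recycle for XDP at line 827, DMA-unmap otherwise at line 829), wrap at bd_count (line 856), re-arm the interrupt and re-read the ready count after draining a batch (lines 865-870), then publish next_to_clean, bump the stats, and wake the subqueue once enough room has opened (lines 873-880). A stripped-down model of that skeleton, with a fixed fake hardware index in place of the tcir read:

    #include <stdio.h>

    #define BD_COUNT 16

    static int hw_ci = 11;                      /* pretend value behind tcir */

    static int ready_count(int ci)
    {
        int pi = hw_ci;                         /* stand-in for reading tcir */
        return pi >= ci ? pi - ci : BD_COUNT - ci + pi;
    }

    int main(void)
    {
        int next_to_clean = 5, i = next_to_clean;
        int bds_to_clean = ready_count(i), bds_cleaned = 0;

        while (bds_to_clean) {
            /* release tx_swbd[i] here: recycle the XDP page or DMA-unmap it */
            bds_cleaned++;
            if (++i == BD_COUNT)                /* wrap, as at line 856 */
                i = 0;
            if (--bds_to_clean == 0)
                bds_to_clean = ready_count(i);  /* re-check after a batch, as at line 870 */
        }

        next_to_clean = i;                      /* publish progress, as at line 873 */
        printf("cleaned %d BDs, next_to_clean=%d\n", bds_cleaned, next_to_clean);
        return 0;
    }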
1220 static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i, in enetc_xdp_map_tx_buff() argument
1224 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); in enetc_xdp_map_tx_buff()
1233 memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd)); in enetc_xdp_map_tx_buff()
1239 static bool enetc_xdp_tx(struct enetc_bdr *tx_ring, in enetc_xdp_tx() argument
1245 if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd))) in enetc_xdp_tx()
1253 i = tx_ring->next_to_use; in enetc_xdp_tx()
1258 enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len); in enetc_xdp_tx()
1262 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); in enetc_xdp_tx()
1267 enetc_bdr_idx_inc(tx_ring, &i); in enetc_xdp_tx()
1270 tx_ring->next_to_use = i; in enetc_xdp_tx()
1275 static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring, in enetc_xdp_frame_to_xdp_tx_swbd() argument
1288 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); in enetc_xdp_frame_to_xdp_tx_swbd()
1289 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { in enetc_xdp_frame_to_xdp_tx_swbd()
1290 netdev_err(tx_ring->ndev, "DMA map error\n"); in enetc_xdp_frame_to_xdp_tx_swbd()
1311 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); in enetc_xdp_frame_to_xdp_tx_swbd()
1312 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { in enetc_xdp_frame_to_xdp_tx_swbd()
1315 enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]); in enetc_xdp_frame_to_xdp_tx_swbd()
1317 netdev_err(tx_ring->ndev, "DMA map error\n"); in enetc_xdp_frame_to_xdp_tx_swbd()
1343 struct enetc_bdr *tx_ring; in enetc_xdp_xmit() local
1349 tx_ring = priv->xdp_tx_ring[smp_processor_id()]; in enetc_xdp_xmit()
1351 prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use)); in enetc_xdp_xmit()
1354 xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring, in enetc_xdp_xmit()
1360 if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr, in enetc_xdp_xmit()
1363 enetc_unmap_tx_buff(tx_ring, in enetc_xdp_xmit()
1365 tx_ring->stats.xdp_tx_drops++; in enetc_xdp_xmit()
1373 enetc_update_tx_ring_tail(tx_ring); in enetc_xdp_xmit()
1375 tx_ring->stats.xdp_tx += xdp_tx_frm_cnt; in enetc_xdp_xmit()
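The enetc_xdp_xmit() hits show the XDP transmit path batching its doorbell: frames are converted and queued one by one on the current CPU's TX ring (line 1349), drops are counted per frame, and the tail register is written once for the whole batch (line 1373) rather than per frame, much as the RX-side XDP_TX path defers it to line 1642. A tiny model of that batch-then-one-doorbell shape; the ring size, the batch size, and the plain variable standing in for the tpir MMIO write are all illustrative.

    #include <stdio.h>

    #define BD_COUNT 64

    static unsigned int tpir_reg;               /* stand-in for the tail MMIO register */

    static void update_tail(int next_to_use)
    {
        tpir_reg = (unsigned int)next_to_use;   /* like line 90: write next_to_use to tpir */
        printf("doorbell: tail=%u\n", tpir_reg);
    }

    int main(void)
    {
        int next_to_use = 10, queued = 0;

        for (int frame = 0; frame < 4; frame++) {       /* enqueue a batch of frames */
            next_to_use = (next_to_use + 1) % BD_COUNT;
            queued++;
        }

        if (queued)
            update_tail(next_to_use);           /* one doorbell for the whole batch */
        return 0;
    }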
1511 struct enetc_bdr *tx_ring; in enetc_clean_rx_ring_xdp() local
1572 tx_ring = priv->xdp_tx_ring[rx_ring->index]; in enetc_clean_rx_ring_xdp()
1577 if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) { in enetc_clean_rx_ring_xdp()
1579 tx_ring->stats.xdp_tx_drops++; in enetc_clean_rx_ring_xdp()
1581 tx_ring->stats.xdp_tx += xdp_tx_bd_cnt; in enetc_clean_rx_ring_xdp()
1642 enetc_update_tx_ring_tail(tx_ring); in enetc_clean_rx_ring_xdp()
1664 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) in enetc_poll()
1812 err = enetc_alloc_txbdr(priv->tx_ring[i]); in enetc_alloc_tx_resources()
1822 enetc_free_txbdr(priv->tx_ring[i]); in enetc_alloc_tx_resources()
1832 enetc_free_txbdr(priv->tx_ring[i]); in enetc_free_tx_resources()
1903 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring) in enetc_free_tx_ring() argument
1907 if (!tx_ring->tx_swbd) in enetc_free_tx_ring()
1910 for (i = 0; i < tx_ring->bd_count; i++) { in enetc_free_tx_ring()
1911 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; in enetc_free_tx_ring()
1913 enetc_free_tx_frame(tx_ring, tx_swbd); in enetc_free_tx_ring()
1916 tx_ring->next_to_clean = 0; in enetc_free_tx_ring()
1917 tx_ring->next_to_use = 0; in enetc_free_tx_ring()
1952 enetc_free_tx_ring(priv->tx_ring[i]); in enetc_free_rxtx_rings()
2033 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) in enetc_setup_txbdr() argument
2035 int idx = tx_ring->index; in enetc_setup_txbdr()
2039 lower_32_bits(tx_ring->bd_dma_base)); in enetc_setup_txbdr()
2042 upper_32_bits(tx_ring->bd_dma_base)); in enetc_setup_txbdr()
2044 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */ in enetc_setup_txbdr()
2046 ENETC_RTBLENR_LEN(tx_ring->bd_count)); in enetc_setup_txbdr()
2049 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR); in enetc_setup_txbdr()
2050 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR); in enetc_setup_txbdr()
2056 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) in enetc_setup_txbdr()
2062 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR); in enetc_setup_txbdr()
2063 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR); in enetc_setup_txbdr()
2064 tx_ring->idr = hw->reg + ENETC_SITXIDR; in enetc_setup_txbdr()
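The enetc_setup_txbdr() hits show what ring bring-up programs: the 64-bit descriptor base split into lower and upper 32-bit registers (lines 2039 and 2042), a ring length that must be a multiple of 64 (line 2044), software indices seeded from the hardware producer/consumer registers (lines 2049-2050), and the tpir/tcir/idr MMIO addresses cached for the hot path (lines 2062-2064). A small standalone model of the base-address split and the length check; the base address and length are illustrative, and lower_32()/upper_32() mimic the kernel's lower_32_bits()/upper_32_bits() helpers.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
    static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
        uint64_t bd_dma_base = 0x0000000123400000ull;   /* illustrative DMA base       */
        unsigned int bd_count = 256;                    /* must stay a multiple of 64  */

        if (bd_count % 64)                              /* mirrors the WARN_ON at 2044 */
            puts("bad ring length");

        printf("base lo=0x%08x hi=0x%08x len=%u\n",
               lower_32(bd_dma_base), upper_32(bd_dma_base), bd_count);
        return 0;
    }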
2116 enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]); in enetc_setup_bdrs()
2130 static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) in enetc_clear_txbdr() argument
2133 int idx = tx_ring->index; in enetc_clear_txbdr()
2146 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n", in enetc_clear_txbdr()
2155 enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]); in enetc_clear_bdrs()
2190 int idx = v->tx_ring[j].index; in enetc_setup_irqs()
2433 struct enetc_bdr *tx_ring; in enetc_setup_tc_mqprio() local
2448 tx_ring = priv->tx_ring[i]; in enetc_setup_tc_mqprio()
2449 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0); in enetc_setup_tc_mqprio()
2467 tx_ring = priv->tx_ring[i]; in enetc_setup_tc_mqprio()
2468 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i); in enetc_setup_tc_mqprio()
2568 packets += priv->tx_ring[i]->stats.packets; in enetc_get_stats()
2569 bytes += priv->tx_ring[i]->stats.bytes; in enetc_get_stats()
2766 v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL); in enetc_alloc_msix()
2812 bdr = &v->tx_ring[j]; in enetc_alloc_msix()
2817 priv->tx_ring[idx] = bdr; in enetc_alloc_msix()
2822 priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring]; in enetc_alloc_msix()
2861 priv->tx_ring[i] = NULL; in enetc_free_msix()