Lines matching refs: tx_ring

37 	struct ice_tx_ring *tx_ring;  in ice_prgm_fdir_fltr()  local
46 tx_ring = vsi->tx_rings[0]; in ice_prgm_fdir_fltr()
47 if (!tx_ring || !tx_ring->desc) in ice_prgm_fdir_fltr()
49 dev = tx_ring->dev; in ice_prgm_fdir_fltr()
52 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) { in ice_prgm_fdir_fltr()
65 i = tx_ring->next_to_use; in ice_prgm_fdir_fltr()
66 first = &tx_ring->tx_buf[i]; in ice_prgm_fdir_fltr()
67 f_desc = ICE_TX_FDIRDESC(tx_ring, i); in ice_prgm_fdir_fltr()
71 i = (i < tx_ring->count) ? i : 0; in ice_prgm_fdir_fltr()
72 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_prgm_fdir_fltr()
73 tx_buf = &tx_ring->tx_buf[i]; in ice_prgm_fdir_fltr()
76 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ice_prgm_fdir_fltr()
100 writel(tx_ring->next_to_use, tx_ring->tail); in ice_prgm_fdir_fltr()
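
The ice_prgm_fdir_fltr() hits above show the flow-director programming path consuming two consecutive ring slots (a filter-programming descriptor followed by a data descriptor holding the raw packet) and then publishing next_to_use through the tail register. Below is a minimal userspace model of that index arithmetic only, not the driver code: the struct layout, the desc_unused() formula, and the helper names are assumptions made for illustration.

#include <stdio.h>

struct model_ring {
	unsigned int next_to_use;
	unsigned int next_to_clean;
	unsigned int count;
	unsigned int tail;		/* stands in for the MMIO tail register */
};

/* Assumed shape of ICE_DESC_UNUSED(): free slots between clean and use. */
static unsigned int desc_unused(const struct model_ring *r)
{
	return ((r->next_to_clean > r->next_to_use) ? 0 : r->count) +
	       r->next_to_clean - r->next_to_use - 1;
}

/* Consume two consecutive descriptors (program + data) and publish the tail. */
static void program_fdir_filter(struct model_ring *r)
{
	unsigned int i = r->next_to_use;

	i++;				/* slot for the filter-programming descriptor */
	i = (i < r->count) ? i : 0;

	i++;				/* slot for the data descriptor (raw packet) */
	r->next_to_use = (i < r->count) ? i : 0;

	r->tail = r->next_to_use;	/* writel(next_to_use, tail) in the driver */
}

int main(void)
{
	struct model_ring r = { .count = 64 };

	if (desc_unused(&r) >= 2)
		program_fdir_filter(&r);
	printf("next_to_use=%u unused=%u\n", r.next_to_use, desc_unused(&r));
	return 0;
}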
147 void ice_clean_tx_ring(struct ice_tx_ring *tx_ring) in ice_clean_tx_ring() argument
152 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in ice_clean_tx_ring()
153 ice_xsk_clean_xdp_ring(tx_ring); in ice_clean_tx_ring()
158 if (!tx_ring->tx_buf) in ice_clean_tx_ring()
162 for (i = 0; i < tx_ring->count; i++) in ice_clean_tx_ring()
163 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); in ice_clean_tx_ring()
166 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count); in ice_clean_tx_ring()
168 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_clean_tx_ring()
171 memset(tx_ring->desc, 0, size); in ice_clean_tx_ring()
173 tx_ring->next_to_use = 0; in ice_clean_tx_ring()
174 tx_ring->next_to_clean = 0; in ice_clean_tx_ring()
176 if (!tx_ring->netdev) in ice_clean_tx_ring()
180 netdev_tx_reset_queue(txring_txq(tx_ring)); in ice_clean_tx_ring()
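
The ice_clean_tx_ring() hits show the teardown-to-pristine pattern: free every per-slot buffer, zero both the software tx_buf[] array and the descriptor memory, and rewind next_to_use/next_to_clean. A compilable userspace sketch of the same pattern follows; the types are simplified placeholders, and the DMA unmapping, XDP/XSK handling, and netdev queue reset done by the real function are omitted.

#include <stdlib.h>
#include <string.h>

struct model_tx_buf { void *data; };
struct model_tx_desc { unsigned long long qw[2]; };

struct model_tx_ring {
	struct model_tx_buf *tx_buf;
	struct model_tx_desc *desc;
	unsigned int count;
	unsigned int next_to_use;
	unsigned int next_to_clean;
};

static void model_clean_tx_ring(struct model_tx_ring *r)
{
	unsigned int i;

	if (!r->tx_buf)
		return;

	/* release any buffer still attached to a slot */
	for (i = 0; i < r->count; i++) {
		free(r->tx_buf[i].data);
		r->tx_buf[i].data = NULL;
	}

	/* wipe software state and the descriptor area */
	memset(r->tx_buf, 0, sizeof(*r->tx_buf) * r->count);
	memset(r->desc, 0, sizeof(*r->desc) * r->count);

	/* rewind the ring indices */
	r->next_to_use = 0;
	r->next_to_clean = 0;
}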
189 void ice_free_tx_ring(struct ice_tx_ring *tx_ring) in ice_free_tx_ring() argument
193 ice_clean_tx_ring(tx_ring); in ice_free_tx_ring()
194 devm_kfree(tx_ring->dev, tx_ring->tx_buf); in ice_free_tx_ring()
195 tx_ring->tx_buf = NULL; in ice_free_tx_ring()
197 if (tx_ring->desc) { in ice_free_tx_ring()
198 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_free_tx_ring()
200 dmam_free_coherent(tx_ring->dev, size, in ice_free_tx_ring()
201 tx_ring->desc, tx_ring->dma); in ice_free_tx_ring()
202 tx_ring->desc = NULL; in ice_free_tx_ring()
213 static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget) in ice_clean_tx_irq() argument
217 struct ice_vsi *vsi = tx_ring->vsi; in ice_clean_tx_irq()
218 s16 i = tx_ring->next_to_clean; in ice_clean_tx_irq()
222 tx_buf = &tx_ring->tx_buf[i]; in ice_clean_tx_irq()
223 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_clean_tx_irq()
224 i -= tx_ring->count; in ice_clean_tx_irq()
237 ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); in ice_clean_tx_irq()
254 dma_unmap_single(tx_ring->dev, in ice_clean_tx_irq()
265 ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf); in ice_clean_tx_irq()
270 i -= tx_ring->count; in ice_clean_tx_irq()
271 tx_buf = tx_ring->tx_buf; in ice_clean_tx_irq()
272 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_clean_tx_irq()
277 dma_unmap_page(tx_ring->dev, in ice_clean_tx_irq()
284 ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf); in ice_clean_tx_irq()
291 i -= tx_ring->count; in ice_clean_tx_irq()
292 tx_buf = tx_ring->tx_buf; in ice_clean_tx_irq()
293 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_clean_tx_irq()
302 i += tx_ring->count; in ice_clean_tx_irq()
303 tx_ring->next_to_clean = i; in ice_clean_tx_irq()
305 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes); in ice_clean_tx_irq()
307 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, in ice_clean_tx_irq()
311 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) && in ice_clean_tx_irq()
312 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { in ice_clean_tx_irq()
317 if (__netif_subqueue_stopped(tx_ring->netdev, in ice_clean_tx_irq()
318 tx_ring->q_index) && in ice_clean_tx_irq()
320 netif_wake_subqueue(tx_ring->netdev, in ice_clean_tx_irq()
321 tx_ring->q_index); in ice_clean_tx_irq()
322 ++tx_ring->tx_stats.restart_q; in ice_clean_tx_irq()
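
ice_clean_tx_irq() walks completed descriptors with a biased index: next_to_clean is offset by -count up front so the wrap test becomes a cheap "did i reach zero" check instead of a compare against count on every step, and the bias is removed again before storing next_to_clean. The sketch below models only that arithmetic; the completion test, unmapping, byte/packet accounting, and queue-wake logic are reduced to placeholders.

#include <stdio.h>

#define COUNT 8

struct slot { int completed; };

static void walk_completed(struct slot *ring, int count, short *next_to_clean)
{
	short i = *next_to_clean;

	i -= count;			/* bias: i now lives in [-count, 0) */

	while (ring[i + count].completed) {
		ring[i + count].completed = 0;	/* "clean" the slot */

		i++;
		if (!i)			/* walked past the last slot */
			i -= count;	/* restart at slot 0, still biased */
	}

	i += count;			/* un-bias back to a real index */
	*next_to_clean = i;
}

int main(void)
{
	struct slot ring[COUNT] = { {1}, {1}, {1}, {0} };
	short ntc = 0;

	walk_completed(ring, COUNT, &ntc);
	printf("next_to_clean=%d\n", ntc);	/* 3 with the data above */
	return 0;
}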
335 int ice_setup_tx_ring(struct ice_tx_ring *tx_ring) in ice_setup_tx_ring() argument
337 struct device *dev = tx_ring->dev; in ice_setup_tx_ring()
344 WARN_ON(tx_ring->tx_buf); in ice_setup_tx_ring()
345 tx_ring->tx_buf = in ice_setup_tx_ring()
346 devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count, in ice_setup_tx_ring()
348 if (!tx_ring->tx_buf) in ice_setup_tx_ring()
352 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_setup_tx_ring()
354 tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma, in ice_setup_tx_ring()
356 if (!tx_ring->desc) { in ice_setup_tx_ring()
362 tx_ring->next_to_use = 0; in ice_setup_tx_ring()
363 tx_ring->next_to_clean = 0; in ice_setup_tx_ring()
364 tx_ring->tx_stats.prev_pkt = -1; in ice_setup_tx_ring()
368 devm_kfree(dev, tx_ring->tx_buf); in ice_setup_tx_ring()
369 tx_ring->tx_buf = NULL; in ice_setup_tx_ring()
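
ice_setup_tx_ring() and ice_free_tx_ring() above pair the allocations: the per-slot software state (tx_buf[]) comes from devm_kcalloc() and the hardware descriptor array from dmam_alloc_coherent(), the tx_buf[] allocation is rolled back if the descriptor allocation fails, and teardown releases both and clears the pointers. A plain-C model of that pairing, with calloc()/free() standing in for the devres APIs and the indices reset on setup as in the driver:

#include <stdlib.h>

struct model_tx_buf { void *data; };
struct model_tx_desc { unsigned long long qw[2]; };

struct model_tx_ring {
	struct model_tx_buf *tx_buf;
	struct model_tx_desc *desc;
	unsigned int count;
	unsigned int next_to_use;
	unsigned int next_to_clean;
};

static int model_setup_tx_ring(struct model_tx_ring *r)
{
	r->tx_buf = calloc(r->count, sizeof(*r->tx_buf));
	if (!r->tx_buf)
		return -1;

	r->desc = calloc(r->count, sizeof(*r->desc));
	if (!r->desc) {
		/* descriptor allocation failed: roll back the buffer array */
		free(r->tx_buf);
		r->tx_buf = NULL;
		return -1;
	}

	r->next_to_use = 0;
	r->next_to_clean = 0;
	return 0;
}

static void model_free_tx_ring(struct model_tx_ring *r)
{
	free(r->tx_buf);
	r->tx_buf = NULL;
	free(r->desc);
	r->desc = NULL;
}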
1277 struct ice_tx_ring *tx_ring; in __ice_update_sample() local
1279 ice_for_each_tx_ring(tx_ring, *rc) { in __ice_update_sample()
1280 packets += tx_ring->stats.pkts; in __ice_update_sample()
1281 bytes += tx_ring->stats.bytes; in __ice_update_sample()
1442 struct ice_tx_ring *tx_ring; in ice_napi_poll() local
1451 ice_for_each_tx_ring(tx_ring, q_vector->tx) { in ice_napi_poll()
1454 if (tx_ring->xsk_pool) in ice_napi_poll()
1455 wd = ice_clean_tx_irq_zc(tx_ring, budget); in ice_napi_poll()
1456 else if (ice_ring_is_xdp(tx_ring)) in ice_napi_poll()
1459 wd = ice_clean_tx_irq(tx_ring, budget); in ice_napi_poll()
1525 static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) in __ice_maybe_stop_tx() argument
1527 netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); in __ice_maybe_stop_tx()
1532 if (likely(ICE_DESC_UNUSED(tx_ring) < size)) in __ice_maybe_stop_tx()
1536 netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); in __ice_maybe_stop_tx()
1537 ++tx_ring->tx_stats.restart_q; in __ice_maybe_stop_tx()
1548 static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) in ice_maybe_stop_tx() argument
1550 if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) in ice_maybe_stop_tx()
1553 return __ice_maybe_stop_tx(tx_ring, size); in ice_maybe_stop_tx()
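
__ice_maybe_stop_tx() and ice_maybe_stop_tx() implement the usual stop-then-recheck flow control: the fast path avoids any queue operation when enough descriptors are free; otherwise the subqueue is stopped first and the free count is tested again, so a completion that raced in between can immediately restart the queue instead of leaving it stalled. A self-contained sketch of that ordering follows, with a bool standing in for the netdev subqueue state and the ICE_DESC_UNUSED() formula assumed.

#include <errno.h>
#include <stdbool.h>

struct model_ring {
	unsigned int count;
	unsigned int next_to_use;
	unsigned int next_to_clean;
	bool stopped;			/* stands in for the netdev subqueue state */
};

/* Assumed shape of ICE_DESC_UNUSED(): free slots between clean and use. */
static unsigned int desc_unused(const struct model_ring *r)
{
	return ((r->next_to_clean > r->next_to_use) ? 0 : r->count) +
	       r->next_to_clean - r->next_to_use - 1;
}

static int maybe_stop_tx(struct model_ring *r, unsigned int needed)
{
	if (desc_unused(r) >= needed)
		return 0;			/* fast path: plenty of room */

	r->stopped = true;			/* netif_stop_subqueue() */

	/* Re-check after stopping: a concurrent clean may have freed
	 * descriptors between the first test and the stop.
	 */
	if (desc_unused(r) < needed)
		return -EBUSY;			/* genuinely full, stay stopped */

	r->stopped = false;			/* netif_start_subqueue() */
	return 0;
}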
1567 ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, in ice_tx_map() argument
1571 u16 i = tx_ring->next_to_use; in ice_tx_map()
1587 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_tx_map()
1595 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ice_tx_map()
1602 if (dma_mapping_error(tx_ring->dev, dma)) in ice_tx_map()
1624 if (i == tx_ring->count) { in ice_tx_map()
1625 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_tx_map()
1645 if (i == tx_ring->count) { in ice_tx_map()
1646 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_tx_map()
1653 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ice_tx_map()
1656 tx_buf = &tx_ring->tx_buf[i]; in ice_tx_map()
1660 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in ice_tx_map()
1666 if (i == tx_ring->count) in ice_tx_map()
1685 tx_ring->next_to_use = i; in ice_tx_map()
1687 ice_maybe_stop_tx(tx_ring, DESC_NEEDED); in ice_tx_map()
1690 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) in ice_tx_map()
1691 writel(i, tx_ring->tail); in ice_tx_map()
1698 tx_buf = &tx_ring->tx_buf[i]; in ice_tx_map()
1699 ice_unmap_and_free_tx_buf(tx_ring, tx_buf); in ice_tx_map()
1703 i = tx_ring->count; in ice_tx_map()
1707 tx_ring->next_to_use = i; in ice_tx_map()
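
ice_tx_map() fills one data descriptor per mapped segment (the linear skb data, then each page fragment), wrapping the index back to zero when it reaches count, and only writes the tail doorbell when the stack reports no further frames queued behind this one (netif_xmit_stopped() or !netdev_xmit_more()). The sketch below models that fill/wrap/doorbell shape only; DMA mapping, descriptor field encoding, and the error-unwind path that walks buffers back to first are left out, and all names here are placeholders.

#include <stdbool.h>

struct model_desc { unsigned long long addr; unsigned int len; };

struct model_ring {
	struct model_desc *desc;
	unsigned int count;
	unsigned int next_to_use;
	unsigned int tail;		/* stands in for the MMIO tail register */
};

static void tx_map(struct model_ring *r, const unsigned long long *addrs,
		   const unsigned int *lens, unsigned int nsegs, bool xmit_more)
{
	unsigned int i = r->next_to_use;
	unsigned int s;

	for (s = 0; s < nsegs; s++) {
		r->desc[i].addr = addrs[s];
		r->desc[i].len = lens[s];

		i++;
		if (i == r->count)	/* wrap back to the first descriptor */
			i = 0;
	}

	r->next_to_use = i;

	/* only ring the doorbell when the stack has nothing else queued */
	if (!xmit_more)
		r->tail = i;
}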
1903 ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first) in ice_tx_prepare_vlan_flags() argument
1919 ice_tx_prepare_vlan_flags_dcb(tx_ring, first); in ice_tx_prepare_vlan_flags()
2209 ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb, in ice_tstamp() argument
2218 if (!tx_ring->ptp_tx) in ice_tstamp()
2226 idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb); in ice_tstamp()
2244 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) in ice_xmit_frame_ring() argument
2247 struct ice_vsi *vsi = tx_ring->vsi; in ice_xmit_frame_ring()
2253 ice_trace(xmit_frame_ring, tx_ring, skb); in ice_xmit_frame_ring()
2260 tx_ring->tx_stats.tx_linearize++; in ice_xmit_frame_ring()
2269 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + in ice_xmit_frame_ring()
2271 tx_ring->tx_stats.tx_busy++; in ice_xmit_frame_ring()
2275 offload.tx_ring = tx_ring; in ice_xmit_frame_ring()
2278 first = &tx_ring->tx_buf[tx_ring->next_to_use]; in ice_xmit_frame_ring()
2285 ice_tx_prepare_vlan_flags(tx_ring, first); in ice_xmit_frame_ring()
2307 ice_tstamp(tx_ring, skb, first, &offload); in ice_xmit_frame_ring()
2313 u16 i = tx_ring->next_to_use; in ice_xmit_frame_ring()
2316 cdesc = ICE_TX_CTX_DESC(tx_ring, i); in ice_xmit_frame_ring()
2318 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ice_xmit_frame_ring()
2327 ice_tx_map(tx_ring, first, &offload); in ice_xmit_frame_ring()
2331 ice_trace(xmit_frame_ring_drop, tx_ring, skb); in ice_xmit_frame_ring()
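
Near the end of ice_xmit_frame_ring(), a context descriptor is emitted only when the gathered offloads need one: a single slot is taken at next_to_use with the same (i < count) ? i : 0 wrap before ice_tx_map() lays down the data descriptors. A minimal model of that step is below; the qw0/qw1 layout and names are placeholders, not the real ice_tx_ctx_desc.

struct model_ctx_desc { unsigned long long qw0, qw1; };

struct model_ctx_ring {
	struct model_ctx_desc *ctx;
	unsigned int count;
	unsigned int next_to_use;
};

static void emit_context_desc(struct model_ctx_ring *r, unsigned long long qw1)
{
	unsigned int i = r->next_to_use;
	struct model_ctx_desc *cdesc = &r->ctx[i];

	/* advance and wrap before the data descriptors are written */
	i++;
	r->next_to_use = (i < r->count) ? i : 0;

	cdesc->qw0 = 0;
	cdesc->qw1 = qw1;
}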
2347 struct ice_tx_ring *tx_ring; in ice_start_xmit() local
2349 tx_ring = vsi->tx_rings[skb->queue_mapping]; in ice_start_xmit()
2357 return ice_xmit_frame_ring(skb, tx_ring); in ice_start_xmit()
2397 void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring) in ice_clean_ctrl_tx_irq() argument
2399 struct ice_vsi *vsi = tx_ring->vsi; in ice_clean_ctrl_tx_irq()
2400 s16 i = tx_ring->next_to_clean; in ice_clean_ctrl_tx_irq()
2405 tx_buf = &tx_ring->tx_buf[i]; in ice_clean_ctrl_tx_irq()
2406 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_clean_ctrl_tx_irq()
2407 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2434 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2435 tx_buf = tx_ring->tx_buf; in ice_clean_ctrl_tx_irq()
2436 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_clean_ctrl_tx_irq()
2441 dma_unmap_single(tx_ring->dev, in ice_clean_ctrl_tx_irq()
2446 devm_kfree(tx_ring->dev, tx_buf->raw_buf); in ice_clean_ctrl_tx_irq()
2461 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2462 tx_buf = tx_ring->tx_buf; in ice_clean_ctrl_tx_irq()
2463 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_clean_ctrl_tx_irq()
2469 i += tx_ring->count; in ice_clean_ctrl_tx_irq()
2470 tx_ring->next_to_clean = i; in ice_clean_ctrl_tx_irq()
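
ice_clean_ctrl_tx_irq() repeats the same biased-index walk as ice_clean_tx_irq() for the control (flow-director) ring, but the completed buffer is a raw_buf released with dma_unmap_single() plus devm_kfree() rather than an skb, and there is no netdev transmit queue to account against or wake.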