Lines matching refs:tx_ring in the Intel i40e driver Tx/Rx code (i40e_txrx.c)
21 static void i40e_fdir(struct i40e_ring *tx_ring, in i40e_fdir() argument
25 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_fdir()
30 i = tx_ring->next_to_use; in i40e_fdir()
31 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); in i40e_fdir()
34 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_fdir()
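The i40e_fdir() group above shows the driver's basic pattern for claiming a descriptor slot: read next_to_use, look up that slot, then advance the index with an explicit wrap back to zero at the end of the ring (the same pattern recurs in i40e_atr() and i40e_create_tx_ctx() further down). A minimal userspace sketch of just that wrap arithmetic, using a hypothetical toy_ring in place of the real struct i40e_ring:

#include <stdio.h>

/* Hypothetical toy ring; the real struct i40e_ring tracks much more state. */
struct toy_ring {
    unsigned int count;        /* number of descriptor slots */
    unsigned int next_to_use;  /* next slot software will fill */
};

/* Claim one slot and advance next_to_use, wrapping to 0 at the end of the
 * ring: the same arithmetic as the driver's
 * "i++; tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;" */
static unsigned int toy_claim_slot(struct toy_ring *r)
{
    unsigned int i = r->next_to_use;

    r->next_to_use = (i + 1 < r->count) ? i + 1 : 0;
    return i;
}

int main(void)
{
    struct toy_ring r = { .count = 4, .next_to_use = 2 };

    for (int n = 0; n < 6; n++)
        printf("claimed slot %u\n", toy_claim_slot(&r));  /* 2 3 0 1 2 3 */
    return 0;
}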
91 struct i40e_ring *tx_ring; in i40e_program_fdir_filter() local
103 tx_ring = vsi->tx_rings[0]; in i40e_program_fdir_filter()
104 dev = tx_ring->dev; in i40e_program_fdir_filter()
107 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) { in i40e_program_fdir_filter()
119 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
120 first = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
121 i40e_fdir(tx_ring, fdir_data, add); in i40e_program_fdir_filter()
124 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
125 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_program_fdir_filter()
126 tx_buf = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
128 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0; in i40e_program_fdir_filter()
153 writel(tx_ring->next_to_use, tx_ring->tail); in i40e_program_fdir_filter()
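i40e_program_fdir_filter() adds two details on top of that: it waits, with a bounded retry budget (the I40E_FD_CLEAN_DELAY loop at line 107), until at least two descriptors are free, and once the filter has been written it notifies hardware by storing the new next_to_use into the ring's tail register (the writel() at line 153). A hedged sketch of the bounded-retry idea only, with hypothetical names and usleep() standing in for the driver's short sleep:

#include <stdio.h>
#include <unistd.h>

#define TOY_CLEAN_DELAY 10   /* hypothetical retry budget, like I40E_FD_CLEAN_DELAY */

/* Stand-in for I40E_DESC_UNUSED(tx_ring): pretend space frees up over time. */
static unsigned int fake_unused;
static unsigned int toy_unused_descriptors(void)
{
    return fake_unused++;
}

/* Wait until at least 'needed' descriptors are free, giving up (-1; the
 * driver returns -EAGAIN) once the retry budget is exhausted. */
static int toy_wait_for_space(unsigned int needed)
{
    for (unsigned int i = TOY_CLEAN_DELAY; toy_unused_descriptors() < needed; i--) {
        if (!i)
            return -1;
        usleep(1000);   /* the real driver sleeps roughly 1 ms per retry */
    }
    return 0;
}

int main(void)
{
    printf("wait result: %d\n", toy_wait_for_space(2));
    return 0;
}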
788 void i40e_clean_tx_ring(struct i40e_ring *tx_ring) in i40e_clean_tx_ring() argument
793 if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in i40e_clean_tx_ring()
794 i40e_xsk_clean_tx_ring(tx_ring); in i40e_clean_tx_ring()
797 if (!tx_ring->tx_bi) in i40e_clean_tx_ring()
801 for (i = 0; i < tx_ring->count; i++) in i40e_clean_tx_ring()
802 i40e_unmap_and_free_tx_resource(tx_ring, in i40e_clean_tx_ring()
803 &tx_ring->tx_bi[i]); in i40e_clean_tx_ring()
806 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_clean_tx_ring()
807 memset(tx_ring->tx_bi, 0, bi_size); in i40e_clean_tx_ring()
810 memset(tx_ring->desc, 0, tx_ring->size); in i40e_clean_tx_ring()
812 tx_ring->next_to_use = 0; in i40e_clean_tx_ring()
813 tx_ring->next_to_clean = 0; in i40e_clean_tx_ring()
815 if (!tx_ring->netdev) in i40e_clean_tx_ring()
819 netdev_tx_reset_queue(txring_txq(tx_ring)); in i40e_clean_tx_ring()
828 void i40e_free_tx_resources(struct i40e_ring *tx_ring) in i40e_free_tx_resources() argument
830 i40e_clean_tx_ring(tx_ring); in i40e_free_tx_resources()
831 kfree(tx_ring->tx_bi); in i40e_free_tx_resources()
832 tx_ring->tx_bi = NULL; in i40e_free_tx_resources()
833 kfree(tx_ring->xsk_descs); in i40e_free_tx_resources()
834 tx_ring->xsk_descs = NULL; in i40e_free_tx_resources()
836 if (tx_ring->desc) { in i40e_free_tx_resources()
837 dma_free_coherent(tx_ring->dev, tx_ring->size, in i40e_free_tx_resources()
838 tx_ring->desc, tx_ring->dma); in i40e_free_tx_resources()
839 tx_ring->desc = NULL; in i40e_free_tx_resources()
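The i40e_clean_tx_ring() and i40e_free_tx_resources() groups show the teardown ordering: first drop every pending buffer and reset the software indices, and only then release the tx_bi bookkeeping array, the xsk_descs array and the DMA-coherent descriptor block (dma_free_coherent() at line 837). NULLing each pointer after freeing keeps a repeated call harmless. A userspace sketch of that shape, with plain malloc()/free() and hypothetical names standing in for the kernel allocators:

#include <stdlib.h>
#include <string.h>

/* Hypothetical, heavily trimmed ring standing in for struct i40e_ring. */
struct toy_buf  { void *data; };
struct toy_ring {
    void           *desc;    /* descriptor memory (DMA-coherent in the driver) */
    size_t          size;
    struct toy_buf *tx_bi;   /* per-descriptor bookkeeping */
    unsigned int    count;
    unsigned int    next_to_use, next_to_clean;
};

/* Step 1 (i40e_clean_tx_ring shape): drop pending buffers, zero the
 * bookkeeping and descriptor memory, reset the software indices. */
static void toy_clean_tx_ring(struct toy_ring *r)
{
    if (!r->tx_bi)
        return;
    for (unsigned int i = 0; i < r->count; i++) {
        free(r->tx_bi[i].data);
        r->tx_bi[i].data = NULL;
    }
    memset(r->tx_bi, 0, sizeof(*r->tx_bi) * r->count);
    memset(r->desc, 0, r->size);
    r->next_to_use = r->next_to_clean = 0;
}

/* Step 2 (i40e_free_tx_resources shape): clean first, then release the
 * arrays; NULLing the pointers keeps a second call harmless. */
static void toy_free_tx_resources(struct toy_ring *r)
{
    toy_clean_tx_ring(r);
    free(r->tx_bi);
    r->tx_bi = NULL;
    if (r->desc) {
        free(r->desc);          /* dma_free_coherent() in the driver */
        r->desc = NULL;
    }
}

int main(void)
{
    struct toy_ring r = { .count = 8 };

    r.tx_bi = calloc(r.count, sizeof(*r.tx_bi));
    r.size  = r.count * 16;
    r.desc  = calloc(1, r.size);
    toy_free_tx_resources(&r);
    toy_free_tx_resources(&r);  /* repeated teardown is harmless */
    return 0;
}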
879 struct i40e_ring *tx_ring = NULL; in i40e_detect_recover_hung() local
898 tx_ring = vsi->tx_rings[i]; in i40e_detect_recover_hung()
899 if (tx_ring && tx_ring->desc) { in i40e_detect_recover_hung()
907 packets = tx_ring->stats.packets & INT_MAX; in i40e_detect_recover_hung()
908 if (tx_ring->tx_stats.prev_pkt_ctr == packets) { in i40e_detect_recover_hung()
909 i40e_force_wb(vsi, tx_ring->q_vector); in i40e_detect_recover_hung()
917 tx_ring->tx_stats.prev_pkt_ctr = in i40e_detect_recover_hung()
918 i40e_get_tx_pending(tx_ring, true) ? packets : -1; in i40e_detect_recover_hung()
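i40e_detect_recover_hung() is the watchdog side of the Tx path: each pass it compares the ring's completed-packet counter with the snapshot taken on the previous pass (prev_pkt_ctr, kept at -1 while the ring is idle); if the counter has not moved while work is still pending, the queue is assumed stalled and a descriptor write-back is forced (i40e_force_wb() at line 909). A small self-contained sketch of that snapshot-and-compare logic, with hypothetical names:

#include <stdio.h>
#include <stdbool.h>
#include <limits.h>

/* Hypothetical ring view: just the counters the hang check needs. */
struct toy_ring {
    unsigned long packets;       /* completed packets, ever increasing */
    int           prev_pkt_ctr;  /* snapshot from the previous pass, -1 = idle */
    bool          pending;       /* descriptors still waiting for completion? */
};

/* One watchdog pass, shaped like i40e_detect_recover_hung(): if the packet
 * count has not moved since the last pass, report a possible hang; otherwise
 * refresh the snapshot (or park it at -1 when nothing is pending). */
static bool toy_check_hung(struct toy_ring *r)
{
    int packets = (int)(r->packets & INT_MAX);

    /* prev_pkt_ctr is -1 when the ring was idle, so an idle queue can
     * never look hung (packets & INT_MAX is always non-negative). */
    if (r->prev_pkt_ctr == packets)
        return true;    /* the driver would force a write-back here */

    r->prev_pkt_ctr = r->pending ? packets : -1;
    return false;
}

int main(void)
{
    struct toy_ring r = { .packets = 100, .prev_pkt_ctr = -1, .pending = true };

    printf("pass 1 hung? %d\n", toy_check_hung(&r));  /* 0: snapshot taken */
    printf("pass 2 hung? %d\n", toy_check_hung(&r));  /* 1: no progress    */
    r.packets++;
    printf("pass 3 hung? %d\n", toy_check_hung(&r));  /* 0: progress made  */
    return 0;
}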
932 struct i40e_ring *tx_ring, int napi_budget) in i40e_clean_tx_irq() argument
934 int i = tx_ring->next_to_clean; in i40e_clean_tx_irq()
941 tx_buf = &tx_ring->tx_bi[i]; in i40e_clean_tx_irq()
942 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_clean_tx_irq()
943 i -= tx_ring->count; in i40e_clean_tx_irq()
945 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); in i40e_clean_tx_irq()
957 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); in i40e_clean_tx_irq()
970 if (ring_is_xdp(tx_ring)) in i40e_clean_tx_irq()
976 dma_unmap_single(tx_ring->dev, in i40e_clean_tx_irq()
988 tx_ring, tx_desc, tx_buf); in i40e_clean_tx_irq()
994 i -= tx_ring->count; in i40e_clean_tx_irq()
995 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
996 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
1001 dma_unmap_page(tx_ring->dev, in i40e_clean_tx_irq()
1014 i -= tx_ring->count; in i40e_clean_tx_irq()
1015 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
1016 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
1025 i += tx_ring->count; in i40e_clean_tx_irq()
1026 tx_ring->next_to_clean = i; in i40e_clean_tx_irq()
1027 i40e_update_tx_stats(tx_ring, total_packets, total_bytes); in i40e_clean_tx_irq()
1028 i40e_arm_wb(tx_ring, vsi, budget); in i40e_clean_tx_irq()
1030 if (ring_is_xdp(tx_ring)) in i40e_clean_tx_irq()
1034 netdev_tx_completed_queue(txring_txq(tx_ring), in i40e_clean_tx_irq()
1038 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in i40e_clean_tx_irq()
1039 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { in i40e_clean_tx_irq()
1044 if (__netif_subqueue_stopped(tx_ring->netdev, in i40e_clean_tx_irq()
1045 tx_ring->queue_index) && in i40e_clean_tx_irq()
1047 netif_wake_subqueue(tx_ring->netdev, in i40e_clean_tx_irq()
1048 tx_ring->queue_index); in i40e_clean_tx_irq()
1049 ++tx_ring->tx_stats.restart_queue; in i40e_clean_tx_irq()
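The long i40e_clean_tx_irq() group is the Tx completion path: it walks descriptors from next_to_clean, unmaps and frees each buffer, accumulates byte and packet counts, and finally wakes the subqueue once at least TX_WAKE_THRESHOLD descriptors are free again (lines 1038-1049). One idiom worth calling out is the negative-index trick at lines 943, 994, 1014 and 1025: the walk index is biased by -count so the wrap test is just "did it reach zero?". A self-contained sketch of that arithmetic, with hypothetical names:

#include <stdio.h>

/* Hypothetical ring view for the completion walk. */
struct toy_ring {
    unsigned int count;
    unsigned int next_to_clean;
};

/* Clean 'done' descriptors, shaped like the index handling in
 * i40e_clean_tx_irq(): bias the index by -count, wrap when it hits zero,
 * then remove the bias before storing next_to_clean. */
static unsigned int toy_clean(struct toy_ring *r, unsigned int done)
{
    int i = (int)r->next_to_clean;
    unsigned int cleaned = 0;

    i -= (int)r->count;                 /* bias: i is now in [-count, 0) */
    while (done--) {
        cleaned++;
        i++;
        if (!i)                         /* wrapped past the last slot */
            i -= (int)r->count;
    }
    i += (int)r->count;                 /* remove the bias again */
    r->next_to_clean = (unsigned int)i;
    return cleaned;
}

int main(void)
{
    struct toy_ring r = { .count = 8, .next_to_clean = 6 };
    unsigned int cleaned = toy_clean(&r, 5);

    /* cleans slots 6, 7, 0, 1, 2 and lands on 3 */
    printf("cleaned %u, next_to_clean now %u\n", cleaned, r.next_to_clean);
    return 0;
}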
1421 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) in i40e_setup_tx_descriptors() argument
1423 struct device *dev = tx_ring->dev; in i40e_setup_tx_descriptors()
1430 WARN_ON(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1431 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_setup_tx_descriptors()
1432 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); in i40e_setup_tx_descriptors()
1433 if (!tx_ring->tx_bi) in i40e_setup_tx_descriptors()
1436 if (ring_is_xdp(tx_ring)) { in i40e_setup_tx_descriptors()
1437 tx_ring->xsk_descs = kcalloc(I40E_MAX_NUM_DESCRIPTORS, sizeof(*tx_ring->xsk_descs), in i40e_setup_tx_descriptors()
1439 if (!tx_ring->xsk_descs) in i40e_setup_tx_descriptors()
1443 u64_stats_init(&tx_ring->syncp); in i40e_setup_tx_descriptors()
1446 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); in i40e_setup_tx_descriptors()
1450 tx_ring->size += sizeof(u32); in i40e_setup_tx_descriptors()
1451 tx_ring->size = ALIGN(tx_ring->size, 4096); in i40e_setup_tx_descriptors()
1452 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in i40e_setup_tx_descriptors()
1453 &tx_ring->dma, GFP_KERNEL); in i40e_setup_tx_descriptors()
1454 if (!tx_ring->desc) { in i40e_setup_tx_descriptors()
1456 tx_ring->size); in i40e_setup_tx_descriptors()
1460 tx_ring->next_to_use = 0; in i40e_setup_tx_descriptors()
1461 tx_ring->next_to_clean = 0; in i40e_setup_tx_descriptors()
1462 tx_ring->tx_stats.prev_pkt_ctr = -1; in i40e_setup_tx_descriptors()
1466 kfree(tx_ring->xsk_descs); in i40e_setup_tx_descriptors()
1467 tx_ring->xsk_descs = NULL; in i40e_setup_tx_descriptors()
1468 kfree(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1469 tx_ring->tx_bi = NULL; in i40e_setup_tx_descriptors()
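i40e_setup_tx_descriptors() is the mirror image of the free path: a zeroed tx_bi bookkeeping array (plus an xsk_descs array for XDP rings), then a single DMA-coherent block for the descriptors themselves, sized as count times the descriptor size plus a u32 for the head write-back word and rounded up to 4 KiB (lines 1446-1451); on any failure the arrays allocated so far are freed again. The size arithmetic alone, as a tiny sketch; the 512-descriptor, 16-byte figures are illustrative:

#include <stdio.h>
#include <stdint.h>

/* Round x up to a multiple of a (a power of two), like the kernel's ALIGN(). */
#define TOY_ALIGN(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
    /* Hypothetical numbers: 512 descriptors of 16 bytes each, following the
     * sizing at lines 1446-1451. */
    size_t count = 512, desc_size = 16;
    size_t size = count * desc_size;

    size += sizeof(uint32_t);          /* extra u32 for the head write-back word */
    size = TOY_ALIGN(size, 4096);      /* 4 KiB granularity for the DMA area */

    printf("descriptor area: %zu bytes\n", size);   /* 12288 */
    return 0;
}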
2794 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_atr() argument
2798 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_atr()
2818 if (!tx_ring->atr_sample_rate) in i40e_atr()
2864 tx_ring->atr_count++; in i40e_atr()
2870 (tx_ring->atr_count < tx_ring->atr_sample_rate)) in i40e_atr()
2873 tx_ring->atr_count = 0; in i40e_atr()
2876 i = tx_ring->next_to_use; in i40e_atr()
2877 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); in i40e_atr()
2880 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_atr()
2882 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & in i40e_atr()
2890 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; in i40e_atr()
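The i40e_atr() group is the ATR (application targeted routing) sampler: each eligible packet bumps atr_count, and only when the counter reaches atr_sample_rate is a flow-director descriptor actually built, after which the counter resets (the full function also forces a sample on TCP SYN/FIN/RST). A sketch of just the counting part, with hypothetical names:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical sampler shaped like the atr_count/atr_sample_rate check in
 * i40e_atr(): only every Nth eligible packet triggers the expensive
 * filter-programming step. */
struct toy_sampler {
    unsigned int count;
    unsigned int sample_rate;    /* 0 disables sampling entirely */
};

static bool toy_should_sample(struct toy_sampler *s)
{
    if (!s->sample_rate)
        return false;
    if (++s->count < s->sample_rate)
        return false;
    s->count = 0;
    return true;
}

int main(void)
{
    struct toy_sampler s = { .count = 0, .sample_rate = 4 };

    for (int pkt = 1; pkt <= 10; pkt++)
        if (toy_should_sample(&s))
            printf("packet %d: program a filter\n", pkt);   /* packets 4 and 8 */
    return 0;
}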
2940 struct i40e_ring *tx_ring, in i40e_tx_prepare_vlan_flags() argument
2947 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in i40e_tx_prepare_vlan_flags()
2976 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) in i40e_tx_prepare_vlan_flags()
3128 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tsyn() argument
3143 pf = i40e_netdev_to_pf(tx_ring->netdev); in i40e_tsyn()
3174 struct i40e_ring *tx_ring, in i40e_tx_enable_csum() argument
3339 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, in i40e_create_tx_ctx() argument
3344 int i = tx_ring->next_to_use; in i40e_create_tx_ctx()
3351 context_desc = I40E_TX_CTXTDESC(tx_ring, i); in i40e_create_tx_ctx()
3354 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_create_tx_ctx()
3370 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in __i40e_maybe_stop_tx() argument
3372 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
3377 if (likely(I40E_DESC_UNUSED(tx_ring) < size)) in __i40e_maybe_stop_tx()
3381 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
3382 ++tx_ring->tx_stats.restart_queue; in __i40e_maybe_stop_tx()
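__i40e_maybe_stop_tx() is the usual stop/recheck/restart dance network drivers use to avoid racing with the completion path: stop the subqueue first, then (after a memory barrier in the full source) check free space again; if cleanup freed descriptors in the meantime, restart the queue immediately rather than waiting for the next Tx interrupt. A toy, single-threaded sketch of that control flow, with hypothetical names and the barrier reduced to a comment:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical queue state; in the driver the "stopped" flag lives in the
 * netdev Tx subqueue and the barrier is a real smp_mb(). */
struct toy_queue {
    bool         stopped;
    unsigned int unused_descs;
};

/* Shaped like __i40e_maybe_stop_tx(): stop first, then re-check free space;
 * if the cleanup path made room in between, undo the stop. Stopping before
 * the re-check is what closes the race with the completion side. */
static int toy_maybe_stop(struct toy_queue *q, unsigned int needed)
{
    q->stopped = true;
    /* smp_mb() here in the driver, ordering the stop against the check */

    if (q->unused_descs < needed)
        return -1;          /* -EBUSY in the driver: stay stopped */

    q->stopped = false;     /* a reprieve: space appeared, keep going */
    return 0;
}

int main(void)
{
    struct toy_queue q = { .stopped = false, .unused_descs = 3 };
    int rc = toy_maybe_stop(&q, 8);

    printf("need 8 -> %d (stopped=%d)\n", rc, q.stopped);
    q.unused_descs = 20;
    rc = toy_maybe_stop(&q, 8);
    printf("need 8 -> %d (stopped=%d)\n", rc, q.stopped);
    return 0;
}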
3482 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tx_map() argument
3491 u16 i = tx_ring->next_to_use; in i40e_tx_map()
3504 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in i40e_tx_map()
3506 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_tx_map()
3512 if (dma_mapping_error(tx_ring->dev, dma)) in i40e_tx_map()
3532 if (i == tx_ring->count) { in i40e_tx_map()
3533 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_tx_map()
3554 if (i == tx_ring->count) { in i40e_tx_map()
3555 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_tx_map()
3562 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in i40e_tx_map()
3565 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
3568 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in i40e_tx_map()
3571 if (i == tx_ring->count) in i40e_tx_map()
3574 tx_ring->next_to_use = i; in i40e_tx_map()
3576 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); in i40e_tx_map()
3584 desc_count |= ++tx_ring->packet_stride; in i40e_tx_map()
3589 tx_ring->packet_stride = 0; in i40e_tx_map()
3609 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { in i40e_tx_map()
3610 writel(i, tx_ring->tail); in i40e_tx_map()
3616 dev_info(tx_ring->dev, "TX DMA map failed\n"); in i40e_tx_map()
3620 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
3621 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); in i40e_tx_map()
3625 i = tx_ring->count; in i40e_tx_map()
3629 tx_ring->next_to_use = i; in i40e_tx_map()
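i40e_tx_map() is where the skb meets the ring: the linear head is mapped with dma_map_single(), each page fragment with skb_frag_dma_map(), descriptors are filled as the index wraps, and the tail register is written only when the queue is stopped or the stack is not batching further frames (the check at line 3609). The dma_error path at lines 3616-3629 walks backwards and releases everything that did get mapped. A simplified, flat-array sketch of that map-then-unwind shape, with malloc() standing in for DMA mapping and no ring wrap:

#include <stdlib.h>
#include <stdio.h>

/* Hypothetical stand-in for DMA mapping: returns NULL on failure, the way
 * dma_map_single()/skb_frag_dma_map() report errors in the driver. */
static void *toy_map(size_t len)
{
    return malloc(len);
}

/* Map 'n' fragments; on failure, unmap everything mapped so far and report
 * the error, the same unwind shape as the dma_error path in i40e_tx_map(). */
static int toy_map_all(void **handles, const size_t *lens, unsigned int n)
{
    for (unsigned int i = 0; i < n; i++) {
        handles[i] = toy_map(lens[i]);
        if (!handles[i]) {
            while (i--) {            /* walk back over what did get mapped */
                free(handles[i]);
                handles[i] = NULL;
            }
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    void  *h[3];
    size_t lens[3] = { 256, 1024, 4096 };

    if (toy_map_all(h, lens, 3) == 0) {
        puts("all fragments mapped");
        for (int i = 0; i < 3; i++)
            free(h[i]);
    }
    return 0;
}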
3745 struct i40e_ring *tx_ring) in i40e_xmit_frame_ring() argument
3761 i40e_trace(xmit_frame_ring, skb, tx_ring); in i40e_xmit_frame_ring()
3770 tx_ring->tx_stats.tx_linearize++; in i40e_xmit_frame_ring()
3779 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { in i40e_xmit_frame_ring()
3780 tx_ring->tx_stats.tx_busy++; in i40e_xmit_frame_ring()
3785 first = &tx_ring->tx_bi[tx_ring->next_to_use]; in i40e_xmit_frame_ring()
3791 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) in i40e_xmit_frame_ring()
3812 tx_ring, &cd_tunneling); in i40e_xmit_frame_ring()
3816 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); in i40e_xmit_frame_ring()
3824 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, in i40e_xmit_frame_ring()
3831 i40e_atr(tx_ring, skb, tx_flags); in i40e_xmit_frame_ring()
3833 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, in i40e_xmit_frame_ring()
3840 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); in i40e_xmit_frame_ring()
3845 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev); in i40e_xmit_frame_ring()
3866 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; in i40e_lan_xmit_frame() local
3874 return i40e_xmit_frame_ring(skb, tx_ring); in i40e_lan_xmit_frame()