Lines matching refs: tx_ring

735 static int fm10k_tso(struct fm10k_ring *tx_ring,  in fm10k_tso()  argument
768 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); in fm10k_tso()
775 tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; in fm10k_tso()
777 netdev_err(tx_ring->netdev, in fm10k_tso()
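
The fm10k_tso() hunks above show both sides of the TSO path: on success the routine fetches a context descriptor with FM10K_TX_DESC() at next_to_use, and when it meets a tunnel header it cannot offload it clears NETIF_F_GSO_UDP_TUNNEL from the netdev features and logs the problem, so later frames of that kind are segmented in software. A minimal sketch of the "grab the descriptor slot at next_to_use" idiom follows; the types and the helper name are simplified stand-ins, not the driver's:

    #include <stdint.h>

    /* Simplified stand-ins; the real descriptor has more fields. */
    struct txd  { uint64_t buffer_addr; uint16_t buflen; uint8_t flags; };
    struct ring { struct txd *desc; uint16_t count; uint16_t next_to_use; };

    /* FM10K_TX_DESC(ring, i) amounts to indexing a contiguous descriptor
     * array; the context descriptor for the frame being built lives at
     * next_to_use. */
    static struct txd *next_context_desc(struct ring *r)
    {
        return &r->desc[r->next_to_use];
    }
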
782 static void fm10k_tx_csum(struct fm10k_ring *tx_ring, in fm10k_tx_csum() argument
804 dev_warn(tx_ring->dev, in fm10k_tx_csum()
806 tx_ring->tx_stats.csum_err++; in fm10k_tx_csum()
847 dev_warn(tx_ring->dev, in fm10k_tx_csum()
852 tx_ring->tx_stats.csum_err++; in fm10k_tx_csum()
858 tx_ring->tx_stats.csum_good++; in fm10k_tx_csum()
862 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); in fm10k_tx_csum()
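
fm10k_tx_csum() follows the same shape: it validates the headers, warns and bumps csum_err when the checksum cannot be offloaded, bumps csum_good otherwise, and finally writes its context through the descriptor at next_to_use. A tiny sketch of the per-ring counter bookkeeping, with hypothetical field names rather than the driver's tx_stats layout:

    #include <stdbool.h>
    #include <stdint.h>

    struct tx_stats { uint64_t csum_good; uint64_t csum_err; };

    /* Count offload outcomes per ring so the stats path can report how
     * often checksum offload had to be skipped. */
    static void account_csum(struct tx_stats *s, bool offloaded)
    {
        if (offloaded)
            s->csum_good++;
        else
            s->csum_err++;
    }
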
884 static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring, in fm10k_tx_desc_push() argument
898 return i == tx_ring->count; in fm10k_tx_desc_push()
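
fm10k_tx_desc_push() writes one descriptor and reports, via `i == tx_ring->count`, whether the caller's running index just walked off the end of the ring; the call sites in fm10k_tx_map() react by resetting tx_desc to FM10K_TX_DESC(tx_ring, 0). A standalone sketch of that contract, with simplified stand-in types and without the driver's writeback/interrupt flag handling:

    #include <stdbool.h>
    #include <stdint.h>

    struct txd  { uint64_t addr; uint16_t len; uint8_t flags; };
    struct ring { struct txd *desc; uint16_t count; };

    /* Fill descriptor i and tell the caller whether the next index runs
     * past the end of the ring, i.e. whether it must wrap to slot 0. */
    static bool desc_push(struct ring *r, struct txd *d, uint16_t i,
                          uint64_t dma, uint16_t size, uint8_t flags)
    {
        d->addr  = dma;
        d->len   = size;
        d->flags = flags;
        return ++i == r->count;   /* true => caller wraps to index 0 */
    }
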
901 static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) in __fm10k_maybe_stop_tx() argument
903 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __fm10k_maybe_stop_tx()
909 if (likely(fm10k_desc_unused(tx_ring) < size)) in __fm10k_maybe_stop_tx()
913 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __fm10k_maybe_stop_tx()
914 ++tx_ring->tx_stats.restart_queue; in __fm10k_maybe_stop_tx()
918 static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) in fm10k_maybe_stop_tx() argument
920 if (likely(fm10k_desc_unused(tx_ring) >= size)) in fm10k_maybe_stop_tx()
922 return __fm10k_maybe_stop_tx(tx_ring, size); in fm10k_maybe_stop_tx()
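
The two routines above implement the usual Tx flow-control split: the inline fast path returns immediately while enough descriptors are free, and the slow path stops the subqueue, re-checks after the stop is visible, and restarts the queue (counting restart_queue) if a concurrent cleanup freed space in the meantime. A standalone model of that check/stop/re-check hysteresis; the `stopped` flag and helper names are hypothetical stand-ins for the netif_*_subqueue() calls:

    #include <stdbool.h>
    #include <stdint.h>

    struct ring {
        uint16_t count, next_to_use, next_to_clean;
        uint64_t restart_queue;
        bool     stopped;              /* stands in for netif_*_subqueue() */
    };

    /* Free descriptors, mirroring the usual desc_unused() computation:
     * one slot is always left unused, hence the extra -1. */
    static uint16_t desc_unused(const struct ring *r)
    {
        int unused = r->next_to_clean - r->next_to_use - 1;
        return unused < 0 ? (uint16_t)(unused + r->count) : (uint16_t)unused;
    }

    static int maybe_stop_tx_slow(struct ring *r, uint16_t size)
    {
        r->stopped = true;             /* netif_stop_subqueue() */
        /* The driver issues a memory barrier here before re-reading the
         * indices so a racing cleanup cannot be missed. */
        if (desc_unused(r) < size)
            return -1;                 /* -EBUSY: stay stopped */
        r->stopped = false;            /* netif_start_subqueue() */
        r->restart_queue++;
        return 0;
    }

    static inline int maybe_stop_tx(struct ring *r, uint16_t size)
    {
        return desc_unused(r) >= size ? 0 : maybe_stop_tx_slow(r, size);
    }
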
925 static void fm10k_tx_map(struct fm10k_ring *tx_ring, in fm10k_tx_map() argument
936 u16 i = tx_ring->next_to_use; in fm10k_tx_map()
939 tx_desc = FM10K_TX_DESC(tx_ring, i); in fm10k_tx_map()
950 dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); in fm10k_tx_map()
956 if (dma_mapping_error(tx_ring->dev, dma)) in fm10k_tx_map()
964 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma, in fm10k_tx_map()
966 tx_desc = FM10K_TX_DESC(tx_ring, 0); in fm10k_tx_map()
977 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, in fm10k_tx_map()
979 tx_desc = FM10K_TX_DESC(tx_ring, 0); in fm10k_tx_map()
986 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in fm10k_tx_map()
989 tx_buffer = &tx_ring->tx_buffer[i]; in fm10k_tx_map()
995 if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags)) in fm10k_tx_map()
999 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in fm10k_tx_map()
1016 tx_ring->next_to_use = i; in fm10k_tx_map()
1019 fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED); in fm10k_tx_map()
1022 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { in fm10k_tx_map()
1023 writel(i, tx_ring->tail); in fm10k_tx_map()
1028 dev_err(tx_ring->dev, "TX DMA map failed\n"); in fm10k_tx_map()
1032 tx_buffer = &tx_ring->tx_buffer[i]; in fm10k_tx_map()
1033 fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer); in fm10k_tx_map()
1037 i = tx_ring->count; in fm10k_tx_map()
1041 tx_ring->next_to_use = i; in fm10k_tx_map()
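
fm10k_tx_map() DMA-maps the skb head and each page fragment, pushes one descriptor per mapping (wrapping when fm10k_tx_desc_push() reports the end of the ring), reports the bytes to BQL via netdev_tx_sent_queue(), publishes next_to_use, and only writes the tail doorbell when the queue is stopped or netdev_xmit_more() says no further frame is queued; on a mapping failure it walks back over the buffers it already filled and releases them. A compact standalone model of the unwind and the doorbell policy, with hypothetical types and no real DMA:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct buf  { void *data; size_t len; bool mapped; };
    struct ring {
        struct buf        *tx_buffer;
        volatile uint32_t *tail;
        uint16_t           count, next_to_use;
    };

    /* Error unwind: starting at the failing slot i, walk backwards and
     * release every buffer filled so far, wrapping from 0 back to the
     * top of the ring, until the first buffer of the frame is reached. */
    static void unwind_map_error(struct ring *r, struct buf *first, uint16_t i)
    {
        for (;;) {
            struct buf *b = &r->tx_buffer[i];
            b->mapped = false;          /* dma_unmap + free in the driver */
            if (b == first)
                break;
            if (i == 0)
                i = r->count;
            i--;
        }
        r->next_to_use = i;             /* ring state rolls back to 'first' */
    }

    /* Success path: publish next_to_use, then ring the tail doorbell only
     * when no further frame is queued right behind this one, so bursts of
     * transmits collapse into a single MMIO write. */
    static void publish_and_kick(struct ring *r, uint16_t i,
                                 bool queue_stopped, bool xmit_more)
    {
        r->next_to_use = i;
        if (queue_stopped || !xmit_more)
            *r->tail = i;               /* writel(i, tx_ring->tail) */
    }
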
1045 struct fm10k_ring *tx_ring) in fm10k_xmit_frame_ring() argument
1064 if (fm10k_maybe_stop_tx(tx_ring, count + 3)) { in fm10k_xmit_frame_ring()
1065 tx_ring->tx_stats.tx_busy++; in fm10k_xmit_frame_ring()
1070 first = &tx_ring->tx_buffer[tx_ring->next_to_use]; in fm10k_xmit_frame_ring()
1078 tso = fm10k_tso(tx_ring, first); in fm10k_xmit_frame_ring()
1082 fm10k_tx_csum(tx_ring, first); in fm10k_xmit_frame_ring()
1084 fm10k_tx_map(tx_ring, first); in fm10k_xmit_frame_ring()
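
fm10k_xmit_frame_ring() first makes sure the worst case fits (the data descriptors plus three more of headroom), counting tx_busy and letting the stack retry otherwise; it then records the skb in the buffer slot at next_to_use, runs the TSO path, falls back to the plain checksum path when there is nothing to segment, and hands off to fm10k_tx_map(). A standalone model of that ordering; every helper here is a hypothetical stub standing in for the driver's routines:

    #include <stdbool.h>
    #include <stdint.h>

    enum tx_ret { TX_OK, TX_BUSY };

    struct frame { int gso_size; };     /* stand-in for the skb */
    struct ring  { uint64_t tx_busy; };

    /* Hypothetical stubs so the sketch compiles on its own. */
    static bool ring_has_room(struct ring *r, uint16_t n) { (void)r; (void)n; return true; }
    static int  do_tso(struct ring *r, struct frame *f)   { (void)r; return f->gso_size ? 1 : 0; }
    static void do_csum(struct ring *r, struct frame *f)  { (void)r; (void)f; }
    static void do_map(struct ring *r, struct frame *f)   { (void)r; (void)f; }

    static enum tx_ret xmit_frame(struct ring *r, struct frame *f,
                                  uint16_t desc_count)
    {
        /* Reserve headroom beyond the data descriptors (count + 3 in the
         * listing) so a context descriptor always fits. */
        if (!ring_has_room(r, desc_count + 3)) {
            r->tx_busy++;
            return TX_BUSY;             /* stack requeues the frame */
        }

        int tso = do_tso(r, f);         /* writes a context descriptor if GSO */
        if (tso < 0)
            return TX_OK;               /* frame dropped on TSO error */
        if (!tso)
            do_csum(r, f);              /* plain checksum offload instead */

        do_map(r, f);                   /* DMA map, descriptors, doorbell */
        return TX_OK;
    }
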
1122 bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring) in fm10k_check_tx_hang() argument
1124 u32 tx_done = fm10k_get_tx_completed(tx_ring); in fm10k_check_tx_hang()
1125 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in fm10k_check_tx_hang()
1126 u32 tx_pending = fm10k_get_tx_pending(tx_ring, true); in fm10k_check_tx_hang()
1128 clear_check_for_tx_hang(tx_ring); in fm10k_check_tx_hang()
1140 tx_ring->tx_stats.tx_done_old = tx_done; in fm10k_check_tx_hang()
1142 clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state); in fm10k_check_tx_hang()
1148 return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state); in fm10k_check_tx_hang()
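
fm10k_check_tx_hang() requires the ring to stall twice in a row before it reports a hang: if completions advanced, or nothing is pending, it refreshes tx_done_old and disarms the detector; otherwise test_and_set_bit() arms it, and only a second stalled check returns true. A standalone model of that two-strike pattern, with a plain bool in place of the __FM10K_HANG_CHECK_ARMED ring-state bit:

    #include <stdbool.h>
    #include <stdint.h>

    struct ring {
        uint32_t tx_done_old;   /* completion count at the previous check */
        bool     hang_armed;    /* stands in for __FM10K_HANG_CHECK_ARMED */
    };

    /* Returns true only when the ring made no progress across two
     * consecutive checks while descriptors were still outstanding. */
    static bool check_tx_hang(struct ring *r, uint32_t tx_done, uint32_t tx_pending)
    {
        if (!tx_pending || tx_done != r->tx_done_old) {
            r->tx_done_old = tx_done;   /* remember progress ... */
            r->hang_armed  = false;     /* ... and disarm the detector */
            return false;
        }

        /* No progress with work pending: first hit arms, second fires
         * (test_and_set_bit() in the driver). */
        bool was_armed = r->hang_armed;
        r->hang_armed  = true;
        return was_armed;
    }
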
1172 struct fm10k_ring *tx_ring, int napi_budget) in fm10k_clean_tx_irq() argument
1179 unsigned int i = tx_ring->next_to_clean; in fm10k_clean_tx_irq()
1184 tx_buffer = &tx_ring->tx_buffer[i]; in fm10k_clean_tx_irq()
1185 tx_desc = FM10K_TX_DESC(tx_ring, i); in fm10k_clean_tx_irq()
1186 i -= tx_ring->count; in fm10k_clean_tx_irq()
1213 dma_unmap_single(tx_ring->dev, in fm10k_clean_tx_irq()
1228 i -= tx_ring->count; in fm10k_clean_tx_irq()
1229 tx_buffer = tx_ring->tx_buffer; in fm10k_clean_tx_irq()
1230 tx_desc = FM10K_TX_DESC(tx_ring, 0); in fm10k_clean_tx_irq()
1235 dma_unmap_page(tx_ring->dev, in fm10k_clean_tx_irq()
1248 i -= tx_ring->count; in fm10k_clean_tx_irq()
1249 tx_buffer = tx_ring->tx_buffer; in fm10k_clean_tx_irq()
1250 tx_desc = FM10K_TX_DESC(tx_ring, 0); in fm10k_clean_tx_irq()
1260 i += tx_ring->count; in fm10k_clean_tx_irq()
1261 tx_ring->next_to_clean = i; in fm10k_clean_tx_irq()
1262 u64_stats_update_begin(&tx_ring->syncp); in fm10k_clean_tx_irq()
1263 tx_ring->stats.bytes += total_bytes; in fm10k_clean_tx_irq()
1264 tx_ring->stats.packets += total_packets; in fm10k_clean_tx_irq()
1265 u64_stats_update_end(&tx_ring->syncp); in fm10k_clean_tx_irq()
1269 if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) { in fm10k_clean_tx_irq()
1273 netif_err(interface, drv, tx_ring->netdev, in fm10k_clean_tx_irq()
1279 tx_ring->queue_index, in fm10k_clean_tx_irq()
1280 fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)), in fm10k_clean_tx_irq()
1281 fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)), in fm10k_clean_tx_irq()
1282 tx_ring->next_to_use, i); in fm10k_clean_tx_irq()
1284 netif_stop_subqueue(tx_ring->netdev, in fm10k_clean_tx_irq()
1285 tx_ring->queue_index); in fm10k_clean_tx_irq()
1287 netif_info(interface, probe, tx_ring->netdev, in fm10k_clean_tx_irq()
1290 tx_ring->queue_index); in fm10k_clean_tx_irq()
1299 netdev_tx_completed_queue(txring_txq(tx_ring), in fm10k_clean_tx_irq()
1303 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in fm10k_clean_tx_irq()
1304 (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { in fm10k_clean_tx_irq()
1309 if (__netif_subqueue_stopped(tx_ring->netdev, in fm10k_clean_tx_irq()
1310 tx_ring->queue_index) && in fm10k_clean_tx_irq()
1312 netif_wake_subqueue(tx_ring->netdev, in fm10k_clean_tx_irq()
1313 tx_ring->queue_index); in fm10k_clean_tx_irq()
1314 ++tx_ring->tx_stats.restart_queue; in fm10k_clean_tx_irq()
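
The fm10k_clean_tx_irq() hunks show the cleanup loop walking from next_to_clean, unmapping the head with dma_unmap_single() and the fragments with dma_unmap_page(), publishing byte and packet totals under the u64_stats sync pair, completing the BQL budget, logging head/tail registers and stopping the subqueue on a detected hang, and waking the queue again once TX_WAKE_THRESHOLD descriptors are free. The repeated `i -= tx_ring->count` lines are the driver's biased-index trick: the running index is kept offset by -count so the wrap test is a cheap check for zero. A runnable model of just that trick:

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the biased-index walk: the running index is kept at
     * (real_index - count), relying on unsigned wraparound, so a ring
     * wrap is detected by testing for zero instead of comparing against
     * count on every step. */
    static void walk_ring(uint16_t next_to_clean, uint16_t count, uint16_t steps)
    {
        unsigned int i = next_to_clean;
        i -= count;                     /* bias: unsigned wrap is intended */

        while (steps--) {
            i++;
            if (!i)                     /* real index just hit 'count' ...  */
                i -= count;             /* ... so wrap it back to slot 0    */
            printf("real index now %u\n", i + count);
        }
        /* The driver recovers the real index with i += count at the end
         * and stores it as next_to_clean. */
    }

    int main(void)
    {
        walk_ring(254, 256, 5);         /* prints 255, 0, 1, 2, 3 */
        return 0;
    }
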
1629 interface->tx_ring[txr_idx] = ring; in fm10k_alloc_q_vector()
1691 interface->tx_ring[ring->queue_index] = NULL; in fm10k_free_q_vector()
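
fm10k_alloc_q_vector() publishes each newly initialised Tx ring in the interface-wide tx_ring[] array at its txr_idx, and fm10k_free_q_vector() clears the slot again by queue_index, so hotpath and stats code can find a ring by queue number without walking the q_vectors. A trivial sketch of that registration pattern, with hypothetical types and array size:

    #include <stddef.h>

    #define MAX_QUEUES 128              /* illustrative, not the driver's limit */

    struct ring      { unsigned int queue_index; };
    struct interface { struct ring *tx_ring[MAX_QUEUES]; };

    /* Register the ring under its queue index so lookups by queue number
     * are O(1); clear the slot again when the vector is torn down. */
    static void publish_ring(struct interface *iface, struct ring *r,
                             unsigned int idx)
    {
        r->queue_index = idx;
        iface->tx_ring[idx] = r;
    }

    static void unpublish_ring(struct interface *iface, struct ring *r)
    {
        iface->tx_ring[r->queue_index] = NULL;
    }
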
1870 interface->tx_ring[offset + i]->reg_idx = q_idx; in fm10k_cache_ring_qos()
1871 interface->tx_ring[offset + i]->qos_pc = pc; in fm10k_cache_ring_qos()
1895 interface->tx_ring[i]->reg_idx = i; in fm10k_cache_ring_rss()
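
The last two hunks set up the logical-queue to hardware-register mapping: in QoS mode each ring records its priority class in qos_pc and gets a reg_idx derived from that class (strided across the class's slice of hardware queues), while fm10k_cache_ring_rss() is the plain case where ring i simply maps to register index i. A sketch of the two layouts; the class count, ring count, and stride here are illustrative assumptions, not the driver's real configuration:

    #include <stdio.h>

    #define NUM_PCS    2    /* priority classes (QoS mode), illustrative */
    #define RSS_I      4    /* rings per class, illustrative */
    #define PC_STRIDE  2    /* distance between a class's hw queues, illustrative */

    int main(void)
    {
        /* QoS layout: ring (offset + i) of class pc lands on hardware
         * queue pc + i * stride and remembers its class in qos_pc. */
        for (int pc = 0, offset = 0; pc < NUM_PCS; pc++, offset += RSS_I)
            for (int i = 0; i < RSS_I; i++)
                printf("qos: ring %d -> reg_idx %d (qos_pc %d)\n",
                       offset + i, pc + i * PC_STRIDE, pc);

        /* RSS layout: the identity mapping, ring i -> hardware queue i. */
        for (int i = 0; i < NUM_PCS * RSS_I; i++)
            printf("rss: ring %d -> reg_idx %d\n", i, i);

        return 0;
    }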