Lines Matching refs:tx_ring

71 static void ena_unmask_interrupt(struct ena_ring *tx_ring,
73 static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
75 static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
89 static void ena_ring_tx_doorbell(struct ena_ring *tx_ring) in ena_ring_tx_doorbell() argument
91 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); in ena_ring_tx_doorbell()
92 ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp); in ena_ring_tx_doorbell()
358 xdp_ring = &adapter->tx_ring[qid]; in ena_xdp_xmit()
693 txr = &adapter->tx_ring[i]; in ena_init_io_rings()
723 rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues]; in ena_init_io_rings()
736 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_setup_tx_resources() local
740 if (tx_ring->tx_buffer_info) { in ena_setup_tx_resources()
746 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; in ena_setup_tx_resources()
749 tx_ring->tx_buffer_info = vzalloc_node(size, node); in ena_setup_tx_resources()
750 if (!tx_ring->tx_buffer_info) { in ena_setup_tx_resources()
751 tx_ring->tx_buffer_info = vzalloc(size); in ena_setup_tx_resources()
752 if (!tx_ring->tx_buffer_info) in ena_setup_tx_resources()
756 size = sizeof(u16) * tx_ring->ring_size; in ena_setup_tx_resources()
757 tx_ring->free_ids = vzalloc_node(size, node); in ena_setup_tx_resources()
758 if (!tx_ring->free_ids) { in ena_setup_tx_resources()
759 tx_ring->free_ids = vzalloc(size); in ena_setup_tx_resources()
760 if (!tx_ring->free_ids) in ena_setup_tx_resources()
764 size = tx_ring->tx_max_header_size; in ena_setup_tx_resources()
765 tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node); in ena_setup_tx_resources()
766 if (!tx_ring->push_buf_intermediate_buf) { in ena_setup_tx_resources()
767 tx_ring->push_buf_intermediate_buf = vzalloc(size); in ena_setup_tx_resources()
768 if (!tx_ring->push_buf_intermediate_buf) in ena_setup_tx_resources()
773 for (i = 0; i < tx_ring->ring_size; i++) in ena_setup_tx_resources()
774 tx_ring->free_ids[i] = i; in ena_setup_tx_resources()
777 memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats)); in ena_setup_tx_resources()
779 tx_ring->next_to_use = 0; in ena_setup_tx_resources()
780 tx_ring->next_to_clean = 0; in ena_setup_tx_resources()
781 tx_ring->cpu = ena_irq->cpu; in ena_setup_tx_resources()
785 vfree(tx_ring->free_ids); in ena_setup_tx_resources()
786 tx_ring->free_ids = NULL; in ena_setup_tx_resources()
788 vfree(tx_ring->tx_buffer_info); in ena_setup_tx_resources()
789 tx_ring->tx_buffer_info = NULL; in ena_setup_tx_resources()
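
Aside on the ena_setup_tx_resources() matches above: they seed the ring's free-ID table with the identity mapping and zero the producer/consumer indices. A minimal userspace sketch of that free-ID indirection, assuming a power-of-two ring size (the struct, function names and main() driver here are illustrative stand-ins, not the driver's types):

	#include <stdint.h>
	#include <stdlib.h>
	#include <stdio.h>

	/* Illustrative stand-in for the driver's per-ring bookkeeping. */
	struct tx_ring_sketch {
		uint16_t *free_ids;     /* indirection table of free request ids */
		uint32_t ring_size;     /* power-of-two number of descriptors    */
		uint32_t next_to_use;   /* producer index into free_ids          */
		uint32_t next_to_clean; /* consumer index into free_ids          */
	};

	/* Mirrors the identity seeding visible in ena_setup_tx_resources(). */
	static int ring_init(struct tx_ring_sketch *r, uint32_t ring_size)
	{
		r->free_ids = calloc(ring_size, sizeof(*r->free_ids));
		if (!r->free_ids)
			return -1;
		for (uint32_t i = 0; i < ring_size; i++)
			r->free_ids[i] = (uint16_t)i;
		r->ring_size = ring_size;
		r->next_to_use = 0;
		r->next_to_clean = 0;
		return 0;
	}

	int main(void)
	{
		struct tx_ring_sketch r;

		if (ring_init(&r, 8))
			return 1;

		/* Transmit path: pick the next free request id. */
		uint16_t req_id = r.free_ids[r.next_to_use];
		r.next_to_use = (r.next_to_use + 1) & (r.ring_size - 1);

		/* Completion path: recycle the id for later transmissions. */
		r.free_ids[r.next_to_clean] = req_id;
		r.next_to_clean = (r.next_to_clean + 1) & (r.ring_size - 1);

		printf("recycled req_id %u\n", req_id);
		free(r.free_ids);
		return 0;
	}

The transmit path consumes ids through next_to_use and the completion path recycles them through next_to_clean, which is the same flow the ena_start_xmit() and ena_clean_tx_irq() matches below show.
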
802 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_free_tx_resources() local
804 vfree(tx_ring->tx_buffer_info); in ena_free_tx_resources()
805 tx_ring->tx_buffer_info = NULL; in ena_free_tx_resources()
807 vfree(tx_ring->free_ids); in ena_free_tx_resources()
808 tx_ring->free_ids = NULL; in ena_free_tx_resources()
810 vfree(tx_ring->push_buf_intermediate_buf); in ena_free_tx_resources()
811 tx_ring->push_buf_intermediate_buf = NULL; in ena_free_tx_resources()
1162 static void ena_unmap_tx_buff(struct ena_ring *tx_ring, in ena_unmap_tx_buff() argument
1176 dma_unmap_single(tx_ring->dev, in ena_unmap_tx_buff()
1186 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), in ena_unmap_tx_buff()
1195 static void ena_free_tx_bufs(struct ena_ring *tx_ring) in ena_free_tx_bufs() argument
1200 for (i = 0; i < tx_ring->ring_size; i++) { in ena_free_tx_bufs()
1201 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; in ena_free_tx_bufs()
1207 netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev, in ena_free_tx_bufs()
1209 tx_ring->qid, i); in ena_free_tx_bufs()
1212 netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev, in ena_free_tx_bufs()
1214 tx_ring->qid, i); in ena_free_tx_bufs()
1217 ena_unmap_tx_buff(tx_ring, tx_info); in ena_free_tx_bufs()
1221 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, in ena_free_tx_bufs()
1222 tx_ring->qid)); in ena_free_tx_bufs()
1227 struct ena_ring *tx_ring; in ena_free_all_tx_bufs() local
1231 tx_ring = &adapter->tx_ring[i]; in ena_free_all_tx_bufs()
1232 ena_free_tx_bufs(tx_ring); in ena_free_all_tx_bufs()
1289 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) in validate_tx_req_id() argument
1293 tx_info = &tx_ring->tx_buffer_info[req_id]; in validate_tx_req_id()
1297 return handle_invalid_req_id(tx_ring, req_id, tx_info, false); in validate_tx_req_id()
1311 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) in ena_clean_tx_irq() argument
1322 next_to_clean = tx_ring->next_to_clean; in ena_clean_tx_irq()
1323 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid); in ena_clean_tx_irq()
1329 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, in ena_clean_tx_irq()
1333 handle_invalid_req_id(tx_ring, req_id, NULL, in ena_clean_tx_irq()
1339 rc = validate_tx_req_id(tx_ring, req_id); in ena_clean_tx_irq()
1343 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_clean_tx_irq()
1352 ena_unmap_tx_buff(tx_ring, tx_info); in ena_clean_tx_irq()
1354 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, in ena_clean_tx_irq()
1355 "tx_poll: q %d skb %p completed\n", tx_ring->qid, in ena_clean_tx_irq()
1363 tx_ring->free_ids[next_to_clean] = req_id; in ena_clean_tx_irq()
1365 tx_ring->ring_size); in ena_clean_tx_irq()
1368 tx_ring->next_to_clean = next_to_clean; in ena_clean_tx_irq()
1369 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done); in ena_clean_tx_irq()
1370 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); in ena_clean_tx_irq()
1374 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, in ena_clean_tx_irq()
1376 tx_ring->qid, tx_pkts); in ena_clean_tx_irq()
1383 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_clean_tx_irq()
1388 ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_clean_tx_irq()
1391 test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) { in ena_clean_tx_irq()
1393 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, in ena_clean_tx_irq()
1394 &tx_ring->syncp); in ena_clean_tx_irq()
1827 static void ena_unmask_interrupt(struct ena_ring *tx_ring, in ena_unmask_interrupt() argument
1845 tx_ring->smoothed_interval, in ena_unmask_interrupt()
1848 ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1, in ena_unmask_interrupt()
1849 &tx_ring->syncp); in ena_unmask_interrupt()
1856 ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg); in ena_unmask_interrupt()
1859 static void ena_update_ring_numa_node(struct ena_ring *tx_ring, in ena_update_ring_numa_node() argument
1866 if (likely(tx_ring->cpu == cpu)) in ena_update_ring_numa_node()
1873 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node); in ena_update_ring_numa_node()
1879 tx_ring->cpu = cpu; in ena_update_ring_numa_node()
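
Aside on the ena_update_ring_numa_node() matches above: the ring caches the last servicing CPU so the NUMA hint is only refreshed when that CPU actually changes. A small userspace sketch of the caching pattern, using Linux's sched_getcpu(); the single-node placeholder is an assumption, not the driver's lookup:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	/* Illustrative ring state: only the fields the pattern needs. */
	struct ring_sketch {
		int cpu;       /* last CPU seen servicing this ring    */
		int numa_node; /* last NUMA hint pushed to the device  */
	};

	/* Fast path is a cheap compare-and-return; the expensive update
	 * runs only when the servicing CPU has moved. */
	static void update_ring_numa_node(struct ring_sketch *ring)
	{
		int cpu = sched_getcpu();

		if (cpu == ring->cpu)	/* nothing moved, bail out early */
			return;

		/* Userspace stand-in for resolving the CPU's NUMA node and
		 * telling the device about it (the driver does this through
		 * ena_com_update_numa_node()). */
		ring->numa_node = 0;	/* placeholder: single-node assumption */
		ring->cpu = cpu;
		printf("ring migrated to cpu %d, refreshed NUMA hint\n", cpu);
	}

	int main(void)
	{
		struct ring_sketch ring = { .cpu = -1, .numa_node = -1 };

		update_ring_numa_node(&ring);	/* first call always refreshes */
		update_ring_numa_node(&ring);	/* usually a no-op afterwards  */
		return 0;
	}
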
1954 struct ena_ring *tx_ring, *rx_ring; in ena_io_poll() local
1961 tx_ring = ena_napi->tx_ring; in ena_io_poll()
1964 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER; in ena_io_poll()
1966 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || in ena_io_poll()
1967 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) { in ena_io_poll()
1972 tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); in ena_io_poll()
1982 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || in ena_io_poll()
1983 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) { in ena_io_poll()
2003 ena_unmask_interrupt(tx_ring, rx_ring); in ena_io_poll()
2006 ena_update_ring_numa_node(tx_ring, rx_ring); in ena_io_poll()
2013 u64_stats_update_begin(&tx_ring->syncp); in ena_io_poll()
2014 tx_ring->tx_stats.napi_comp += napi_comp_call; in ena_io_poll()
2015 tx_ring->tx_stats.tx_poll++; in ena_io_poll()
2016 u64_stats_update_end(&tx_ring->syncp); in ena_io_poll()
2018 tx_ring->tx_stats.last_napi_jiffies = jiffies; in ena_io_poll()
2288 napi->tx_ring = &adapter->tx_ring[i]; in ena_init_napi_in_range()
2290 napi->xdp_ring = &adapter->tx_ring[i]; in ena_init_napi_in_range()
2376 struct ena_ring *tx_ring; in ena_create_io_tx_queue() local
2383 tx_ring = &adapter->tx_ring[qid]; in ena_create_io_tx_queue()
2393 ctx.queue_size = tx_ring->ring_size; in ena_create_io_tx_queue()
2394 ctx.numa_node = cpu_to_node(tx_ring->cpu); in ena_create_io_tx_queue()
2405 &tx_ring->ena_com_io_sq, in ena_create_io_tx_queue()
2406 &tx_ring->ena_com_io_cq); in ena_create_io_tx_queue()
2415 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); in ena_create_io_tx_queue()
2520 adapter->tx_ring[i].ring_size = new_tx_size; in set_io_rings_size()
2594 cur_tx_ring_size = adapter->tx_ring[0].ring_size; in create_queues_with_size_backoff()
2668 ena_unmask_interrupt(&adapter->tx_ring[i], in ena_up()
2914 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, in ena_check_and_linearize_skb() argument
2922 if (num_frags < tx_ring->sgl_size) in ena_check_and_linearize_skb()
2925 if ((num_frags == tx_ring->sgl_size) && in ena_check_and_linearize_skb()
2926 (header_len < tx_ring->tx_max_header_size)) in ena_check_and_linearize_skb()
2929 ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp); in ena_check_and_linearize_skb()
2933 ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1, in ena_check_and_linearize_skb()
2934 &tx_ring->syncp); in ena_check_and_linearize_skb()
2940 static int ena_tx_map_skb(struct ena_ring *tx_ring, in ena_tx_map_skb() argument
2946 struct ena_adapter *adapter = tx_ring->adapter; in ena_tx_map_skb()
2958 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { in ena_tx_map_skb()
2969 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); in ena_tx_map_skb()
2971 tx_ring->push_buf_intermediate_buf); in ena_tx_map_skb()
2974 ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1, in ena_tx_map_skb()
2975 &tx_ring->syncp); in ena_tx_map_skb()
2982 tx_ring->tx_max_header_size); in ena_tx_map_skb()
2990 dma = dma_map_single(tx_ring->dev, skb->data + push_len, in ena_tx_map_skb()
2992 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in ena_tx_map_skb()
3017 dma = skb_frag_dma_map(tx_ring->dev, frag, delta, in ena_tx_map_skb()
3019 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in ena_tx_map_skb()
3032 ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1, in ena_tx_map_skb()
3033 &tx_ring->syncp); in ena_tx_map_skb()
3039 ena_unmap_tx_buff(tx_ring, tx_info); in ena_tx_map_skb()
3050 struct ena_ring *tx_ring; in ena_start_xmit() local
3059 tx_ring = &adapter->tx_ring[qid]; in ena_start_xmit()
3062 rc = ena_check_and_linearize_skb(tx_ring, skb); in ena_start_xmit()
3068 next_to_use = tx_ring->next_to_use; in ena_start_xmit()
3069 req_id = tx_ring->free_ids[next_to_use]; in ena_start_xmit()
3070 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_start_xmit()
3075 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len); in ena_start_xmit()
3087 ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching); in ena_start_xmit()
3090 tx_ring, in ena_start_xmit()
3104 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_start_xmit()
3105 tx_ring->sgl_size + 2))) { in ena_start_xmit()
3110 ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1, in ena_start_xmit()
3111 &tx_ring->syncp); in ena_start_xmit()
3123 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_start_xmit()
3126 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, in ena_start_xmit()
3127 &tx_ring->syncp); in ena_start_xmit()
3135 ena_ring_tx_doorbell(tx_ring); in ena_start_xmit()
3140 ena_unmap_tx_buff(tx_ring, tx_info); in ena_start_xmit()
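
Aside on the ena_start_xmit() matches above: they include the stop/re-check/wake dance around descriptor headroom (tx_ring->sgl_size + 2). A simplified, single-threaded sketch of that control flow; the re-check only matters in the driver because the completion path can free descriptors concurrently, and the field names plus NEEDED_DESCS are illustrative:

	#include <stdbool.h>
	#include <stdio.h>

	#define NEEDED_DESCS 4	/* stand-in for tx_ring->sgl_size + 2 */

	/* Illustrative queue state for the stop/re-check/wake pattern. */
	struct txq_sketch {
		unsigned int free_descs;
		bool stopped;
	};

	/* Stop the queue when headroom for a worst-case packet is gone,
	 * then re-check: completions may have freed descriptors between
	 * the first check and the stop, in which case wake immediately. */
	static void maybe_stop_tx_queue(struct txq_sketch *q)
	{
		if (q->free_descs >= NEEDED_DESCS)
			return;

		q->stopped = true;
		printf("queue stopped, %u descriptors left\n", q->free_descs);

		/* Re-check after stopping to close the race with completions. */
		if (q->free_descs >= NEEDED_DESCS) {
			q->stopped = false;
			printf("queue woken right back up\n");
		}
	}

	int main(void)
	{
		struct txq_sketch q = { .free_descs = 2, .stopped = false };

		maybe_stop_tx_queue(&q);
		return 0;
	}
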
3271 struct ena_ring *rx_ring, *tx_ring; in ena_get_stats64() local
3283 tx_ring = &adapter->tx_ring[i]; in ena_get_stats64()
3286 start = u64_stats_fetch_begin_irq(&tx_ring->syncp); in ena_get_stats64()
3287 packets = tx_ring->tx_stats.cnt; in ena_get_stats64()
3288 bytes = tx_ring->tx_stats.bytes; in ena_get_stats64()
3289 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); in ena_get_stats64()
3720 struct ena_ring *tx_ring) in check_missing_comp_in_tx_queue() argument
3722 struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi); in check_missing_comp_in_tx_queue()
3731 for (i = 0; i < tx_ring->ring_size; i++) { in check_missing_comp_in_tx_queue()
3732 tx_buf = &tx_ring->tx_buffer_info[i]; in check_missing_comp_in_tx_queue()
3748 tx_ring->qid); in check_missing_comp_in_tx_queue()
3760 time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies); in check_missing_comp_in_tx_queue()
3764 tx_ring->qid, i, time_since_last_napi, missing_tx_comp_to); in check_missing_comp_in_tx_queue()
3783 ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx, in check_missing_comp_in_tx_queue()
3784 &tx_ring->syncp); in check_missing_comp_in_tx_queue()
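
Aside on the check_missing_comp_in_tx_queue() matches above: the watchdog walks every descriptor, counts completions outstanding past a timeout, and bumps missed_tx once a threshold is crossed. A userspace sketch of that scan, with time() standing in for jiffies and both limits chosen arbitrarily for illustration:

	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	#define RING_SIZE 8
	#define COMPLETION_TIMEOUT_SEC 5  /* stand-in for missing_tx_comp_to */
	#define MISSED_TX_THRESHOLD 2     /* stand-in for the reset threshold */

	/* Illustrative per-buffer state: just what the watchdog needs. */
	struct tx_buf_sketch {
		bool in_flight;   /* packet handed to the device, not completed */
		time_t queued_at; /* when it was queued                          */
	};

	/* Scan every descriptor, count those outstanding past the timeout,
	 * and signal a reset once the count crosses the threshold. */
	static int check_missing_completions(struct tx_buf_sketch *bufs, time_t now)
	{
		unsigned int missed = 0;

		for (int i = 0; i < RING_SIZE; i++) {
			if (!bufs[i].in_flight)
				continue;
			if (now - bufs[i].queued_at < COMPLETION_TIMEOUT_SEC)
				continue;

			missed++;
			printf("descriptor %d missing completion for %lds\n",
			       i, (long)(now - bufs[i].queued_at));
		}

		return missed > MISSED_TX_THRESHOLD ? -1 : 0;
	}

	int main(void)
	{
		struct tx_buf_sketch bufs[RING_SIZE] = { 0 };
		time_t now = time(NULL);

		/* Pretend three packets have been stuck for ten seconds. */
		for (int i = 0; i < 3; i++) {
			bufs[i].in_flight = true;
			bufs[i].queued_at = now - 10;
		}

		if (check_missing_completions(bufs, now))
			printf("too many missed completions, would trigger reset\n");
		return 0;
	}
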
3791 struct ena_ring *tx_ring; in check_for_missing_completions() local
3812 tx_ring = &adapter->tx_ring[i]; in check_for_missing_completions()
3815 rc = check_missing_comp_in_tx_queue(adapter, tx_ring); in check_for_missing_completions()