Lines matching refs: rx_ring
577 struct ixgbe_ring *rx_ring; in ixgbe_dump() local
716 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
718 n, rx_ring->next_to_use, rx_ring->next_to_clean); in ixgbe_dump()
773 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
775 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); in ixgbe_dump()
786 for (i = 0; i < rx_ring->count; i++) { in ixgbe_dump()
789 if (i == rx_ring->next_to_use) in ixgbe_dump()
791 else if (i == rx_ring->next_to_clean) in ixgbe_dump()
796 rx_buffer_info = &rx_ring->rx_buffer_info[i]; in ixgbe_dump()
797 rx_desc = IXGBE_RX_DESC(rx_ring, i); in ixgbe_dump()
822 ixgbe_rx_bufsz(rx_ring), true); in ixgbe_dump()
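The ixgbe_dump() fragments above iterate every RX queue, print the ring cursors, and walk each descriptor, tagging the slots at next_to_use and next_to_clean. A minimal sketch of that marker logic; the hex dump of the descriptor words and buffer contents is elided:

    /* Tag the producer (NTU) and consumer (NTC) positions while
     * walking the ring; every other slot prints untagged.
     */
    for (i = 0; i < rx_ring->count; i++) {
        const char *next_desc;

        if (i == rx_ring->next_to_use)
            next_desc = " NTU";
        else if (i == rx_ring->next_to_clean)
            next_desc = " NTC";
        else
            next_desc = "";
        /* the real function dumps rx_desc and rx_buffer_info here */
    }
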
1313 struct ixgbe_ring *rx_ring, in ixgbe_update_rx_dca() argument
1318 u8 reg_idx = rx_ring->reg_idx; in ixgbe_update_rx_dca()
1321 rxctrl = dca3_get_tag(rx_ring->dev, cpu); in ixgbe_update_rx_dca()
1526 static unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring) in ixgbe_rx_offset() argument
1528 return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0; in ixgbe_rx_offset()
1531 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, in ixgbe_alloc_mapped_page() argument
1542 page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring)); in ixgbe_alloc_mapped_page()
1544 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbe_alloc_mapped_page()
1549 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in ixgbe_alloc_mapped_page()
1550 ixgbe_rx_pg_size(rx_ring), in ixgbe_alloc_mapped_page()
1558 if (dma_mapping_error(rx_ring->dev, dma)) { in ixgbe_alloc_mapped_page()
1559 __free_pages(page, ixgbe_rx_pg_order(rx_ring)); in ixgbe_alloc_mapped_page()
1561 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbe_alloc_mapped_page()
1567 bi->page_offset = rx_ring->rx_offset; in ixgbe_alloc_mapped_page()
1570 rx_ring->rx_stats.alloc_rx_page++; in ixgbe_alloc_mapped_page()
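The fragments above show the allocation contract of ixgbe_alloc_mapped_page(): count a failure whether the page allocation or the DMA mapping fails, and never leave a half-initialized buffer behind. A condensed sketch, assuming the upstream field names; the page refcount bias bookkeeping is trimmed:

    page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
    if (unlikely(!page)) {
        rx_ring->rx_stats.alloc_rx_page_failed++;
        return false;
    }

    dma = dma_map_page_attrs(rx_ring->dev, page, 0,
                             ixgbe_rx_pg_size(rx_ring),
                             DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR);
    if (dma_mapping_error(rx_ring->dev, dma)) {
        /* unwind: an unmapped page must be freed, not recycled */
        __free_pages(page, ixgbe_rx_pg_order(rx_ring));
        rx_ring->rx_stats.alloc_rx_page_failed++;
        return false;
    }

    bi->dma = dma;
    bi->page = page;
    bi->page_offset = rx_ring->rx_offset;
    rx_ring->rx_stats.alloc_rx_page++;
    return true;
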
1580 void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) in ixgbe_alloc_rx_buffers() argument
1584 u16 i = rx_ring->next_to_use; in ixgbe_alloc_rx_buffers()
1591 rx_desc = IXGBE_RX_DESC(rx_ring, i); in ixgbe_alloc_rx_buffers()
1592 bi = &rx_ring->rx_buffer_info[i]; in ixgbe_alloc_rx_buffers()
1593 i -= rx_ring->count; in ixgbe_alloc_rx_buffers()
1595 bufsz = ixgbe_rx_bufsz(rx_ring); in ixgbe_alloc_rx_buffers()
1598 if (!ixgbe_alloc_mapped_page(rx_ring, bi)) in ixgbe_alloc_rx_buffers()
1602 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ixgbe_alloc_rx_buffers()
1616 rx_desc = IXGBE_RX_DESC(rx_ring, 0); in ixgbe_alloc_rx_buffers()
1617 bi = rx_ring->rx_buffer_info; in ixgbe_alloc_rx_buffers()
1618 i -= rx_ring->count; in ixgbe_alloc_rx_buffers()
1627 i += rx_ring->count; in ixgbe_alloc_rx_buffers()
1629 if (rx_ring->next_to_use != i) { in ixgbe_alloc_rx_buffers()
1630 rx_ring->next_to_use = i; in ixgbe_alloc_rx_buffers()
1633 rx_ring->next_to_alloc = i; in ixgbe_alloc_rx_buffers()
1641 writel(i, rx_ring->tail); in ixgbe_alloc_rx_buffers()
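The `i -= rx_ring->count` at line 1593 is the refill loop's index-biasing idiom: the cursor runs offset by -count (via unsigned wraparound) so the end-of-ring test is a cheap `!i`. A condensed sketch of the loop, with the per-buffer dma_sync_single_range_for_device() call trimmed:

    u16 i = rx_ring->next_to_use;

    rx_desc = IXGBE_RX_DESC(rx_ring, i);
    bi = &rx_ring->rx_buffer_info[i];
    i -= rx_ring->count;            /* bias: ring end now reads as i == 0 */

    do {
        if (!ixgbe_alloc_mapped_page(rx_ring, bi))
            break;
        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

        rx_desc++;
        bi++;
        i++;
        if (unlikely(!i)) {         /* wrapped: restart at the ring base */
            rx_desc = IXGBE_RX_DESC(rx_ring, 0);
            bi = rx_ring->rx_buffer_info;
            i -= rx_ring->count;
        }

        rx_desc->wb.upper.length = 0;   /* clear status for writeback */
        cleaned_count--;
    } while (cleaned_count);

    i += rx_ring->count;            /* undo the bias */
    if (rx_ring->next_to_use != i) {
        rx_ring->next_to_use = i;
        rx_ring->next_to_alloc = i;
        /* descriptor stores must be visible before the tail bump */
        wmb();
        writel(i, rx_ring->tail);
    }
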
1656 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, in ixgbe_update_rsc_stats() argument
1663 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; in ixgbe_update_rsc_stats()
1664 rx_ring->rx_stats.rsc_flush++; in ixgbe_update_rsc_stats()
1666 ixgbe_set_rsc_gso_size(rx_ring, skb); in ixgbe_update_rsc_stats()
1682 void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, in ixgbe_process_skb_fields() argument
1686 struct net_device *dev = rx_ring->netdev; in ixgbe_process_skb_fields()
1687 u32 flags = rx_ring->q_vector->adapter->flags; in ixgbe_process_skb_fields()
1689 ixgbe_update_rsc_stats(rx_ring, skb); in ixgbe_process_skb_fields()
1691 ixgbe_rx_hash(rx_ring, rx_desc, skb); in ixgbe_process_skb_fields()
1693 ixgbe_rx_checksum(rx_ring, rx_desc, skb); in ixgbe_process_skb_fields()
1696 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); in ixgbe_process_skb_fields()
1705 ixgbe_ipsec_rx(rx_ring, rx_desc, skb); in ixgbe_process_skb_fields()
1709 skb_record_rx_queue(skb, rx_ring->queue_index); in ixgbe_process_skb_fields()
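Taken together, the ixgbe_process_skb_fields() fragments show the order in which receive metadata is attached before the skb goes up the stack: RSC accounting first, then the RSS hash and checksum offload results, then the optional hardware timestamp, and finally the queue and protocol fields the stack needs. A condensed sketch; VLAN and IPsec handling are trimmed, and the hwtstamp flag test is assumed from the upstream driver:

    ixgbe_update_rsc_stats(rx_ring, skb);
    ixgbe_rx_hash(rx_ring, rx_desc, skb);
    ixgbe_rx_checksum(rx_ring, rx_desc, skb);

    if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
        ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);

    /* ... VLAN tag extraction and ixgbe_ipsec_rx() elided ... */

    skb_record_rx_queue(skb, rx_ring->queue_index);
    skb->protocol = eth_type_trans(skb, rx_ring->netdev);
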
1734 static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, in ixgbe_is_non_eop() argument
1738 u32 ntc = rx_ring->next_to_clean + 1; in ixgbe_is_non_eop()
1741 ntc = (ntc < rx_ring->count) ? ntc : 0; in ixgbe_is_non_eop()
1742 rx_ring->next_to_clean = ntc; in ixgbe_is_non_eop()
1744 prefetch(IXGBE_RX_DESC(rx_ring, ntc)); in ixgbe_is_non_eop()
1747 if (ring_is_rsc_enabled(rx_ring)) { in ixgbe_is_non_eop()
1769 rx_ring->rx_buffer_info[ntc].skb = skb; in ixgbe_is_non_eop()
1770 rx_ring->rx_stats.non_eop_descs++; in ixgbe_is_non_eop()
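ixgbe_is_non_eop() advances the consumer cursor for every descriptor and decides whether the frame is complete. When a frame spans descriptors, the partially built skb is parked on the next buffer slot so the next loop iteration can pick it up. A sketch, with the RSC append-count handling at line 1747 trimmed:

    u32 ntc = rx_ring->next_to_clean + 1;

    ntc = (ntc < rx_ring->count) ? ntc : 0;     /* wrap */
    rx_ring->next_to_clean = ntc;
    prefetch(IXGBE_RX_DESC(rx_ring, ntc));

    if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
        return false;                           /* frame complete */

    rx_ring->rx_buffer_info[ntc].skb = skb;     /* park in-progress skb */
    rx_ring->rx_stats.non_eop_descs++;
    return true;
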
1787 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, in ixgbe_pull_tail() argument
1827 static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, in ixgbe_dma_sync_frag() argument
1830 if (ring_uses_build_skb(rx_ring)) { in ixgbe_dma_sync_frag()
1831 unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1; in ixgbe_dma_sync_frag()
1834 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_dma_sync_frag()
1842 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_dma_sync_frag()
1851 dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, in ixgbe_dma_sync_frag()
1852 ixgbe_rx_pg_size(rx_ring), in ixgbe_dma_sync_frag()
1880 bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, in ixgbe_cleanup_headers() argument
1884 struct net_device *netdev = rx_ring->netdev; in ixgbe_cleanup_headers()
1903 ixgbe_pull_tail(rx_ring, skb); in ixgbe_cleanup_headers()
1907 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) in ixgbe_cleanup_headers()
1925 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, in ixgbe_reuse_rx_page() argument
1929 u16 nta = rx_ring->next_to_alloc; in ixgbe_reuse_rx_page()
1931 new_buff = &rx_ring->rx_buffer_info[nta]; in ixgbe_reuse_rx_page()
1935 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ixgbe_reuse_rx_page()
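ixgbe_reuse_rx_page() is the recycle half of the page-reuse scheme: a just-cleaned page, still DMA-mapped, moves to the next_to_alloc slot so the refill loop can hand it straight back to hardware without allocating or remapping. A sketch assuming the upstream buffer fields:

    u16 nta = rx_ring->next_to_alloc;
    struct ixgbe_rx_buffer *new_buff = &rx_ring->rx_buffer_info[nta];

    nta++;
    rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;  /* wrap */

    /* transfer the page and its live DMA mapping to the new slot */
    new_buff->dma = old_buff->dma;
    new_buff->page = old_buff->page;
    new_buff->page_offset = old_buff->page_offset;
    new_buff->pagecnt_bias = old_buff->pagecnt_bias;
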
2000 static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, in ixgbe_add_rx_frag() argument
2006 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; in ixgbe_add_rx_frag()
2008 unsigned int truesize = rx_ring->rx_offset ? in ixgbe_add_rx_frag()
2009 SKB_DATA_ALIGN(rx_ring->rx_offset + size) : in ixgbe_add_rx_frag()
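The two truesize lines above are the two halves of a compile-time split: on small-page systems each (power-of-two) receive page is split in half and half a page is charged per fragment; with 8 KiB-plus pages the aligned data footprint is charged instead. A sketch of the surrounding conditional as it appears upstream; the XDP-aware variant at line 2261 appears to additionally reserve SKB_DATA_ALIGN(sizeof(struct skb_shared_info)):

    #if (PAGE_SIZE < 8192)
        unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
    #else
        unsigned int truesize = rx_ring->rx_offset ?
                                SKB_DATA_ALIGN(rx_ring->rx_offset + size) :
                                SKB_DATA_ALIGN(size);
    #endif
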
2021 static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, in ixgbe_get_rx_buffer() argument
2029 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbe_get_rx_buffer()
2048 ixgbe_dma_sync_frag(rx_ring, *skb); in ixgbe_get_rx_buffer()
2052 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_get_rx_buffer()
2063 static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, in ixgbe_put_rx_buffer() argument
2070 ixgbe_reuse_rx_page(rx_ring, rx_buffer); in ixgbe_put_rx_buffer()
2077 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in ixgbe_put_rx_buffer()
2078 ixgbe_rx_pg_size(rx_ring), in ixgbe_put_rx_buffer()
2091 static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, in ixgbe_construct_skb() argument
2098 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; in ixgbe_construct_skb()
2125 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE); in ixgbe_construct_skb()
2150 static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, in ixgbe_build_skb() argument
2157 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; in ixgbe_build_skb()
2198 struct ixgbe_ring *rx_ring, in ixgbe_run_xdp() argument
2207 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ixgbe_run_xdp()
2242 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ixgbe_run_xdp()
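The READ_ONCE() at line 2207 pairs with the xchg() in the ixgbe_xdp_setup() fragment further down: the program pointer can be swapped at runtime, so it is sampled exactly once per frame. A simplified sketch of the verdict handling; the real function signals consumed frames back to the caller through an ERR_PTR-style sentinel, omitted here:

    struct bpf_prog *xdp_prog = READ_ONCE(rx_ring->xdp_prog);
    u32 act;

    if (!xdp_prog)
        return NULL;        /* no program: take the regular skb path */

    act = bpf_prog_run_xdp(xdp_prog, xdp);
    switch (act) {
    case XDP_PASS:
        break;              /* hand the frame to the skb path */
    case XDP_TX:
    case XDP_REDIRECT:
        break;              /* frame consumed by XDP (details omitted) */
    default:
    case XDP_ABORTED:
        trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
        fallthrough;
    case XDP_DROP:
        break;
    }
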
2252 static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring, in ixgbe_rx_frame_truesize() argument
2258 truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ in ixgbe_rx_frame_truesize()
2260 truesize = rx_ring->rx_offset ? in ixgbe_rx_frame_truesize()
2261 SKB_DATA_ALIGN(rx_ring->rx_offset + size) + in ixgbe_rx_frame_truesize()
2268 static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring, in ixgbe_rx_buffer_flip() argument
2272 unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size); in ixgbe_rx_buffer_flip()
2294 struct ixgbe_ring *rx_ring, in ixgbe_clean_rx_irq() argument
2303 u16 cleaned_count = ixgbe_desc_unused(rx_ring); in ixgbe_clean_rx_irq()
2304 unsigned int offset = rx_ring->rx_offset; in ixgbe_clean_rx_irq()
2310 frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0); in ixgbe_clean_rx_irq()
2312 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); in ixgbe_clean_rx_irq()
2323 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); in ixgbe_clean_rx_irq()
2327 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); in ixgbe_clean_rx_irq()
2338 rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt); in ixgbe_clean_rx_irq()
2349 xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size); in ixgbe_clean_rx_irq()
2351 skb = ixgbe_run_xdp(adapter, rx_ring, &xdp); in ixgbe_clean_rx_irq()
2359 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size); in ixgbe_clean_rx_irq()
2366 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); in ixgbe_clean_rx_irq()
2367 } else if (ring_uses_build_skb(rx_ring)) { in ixgbe_clean_rx_irq()
2368 skb = ixgbe_build_skb(rx_ring, rx_buffer, in ixgbe_clean_rx_irq()
2371 skb = ixgbe_construct_skb(rx_ring, rx_buffer, in ixgbe_clean_rx_irq()
2377 rx_ring->rx_stats.alloc_rx_buff_failed++; in ixgbe_clean_rx_irq()
2382 ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt); in ixgbe_clean_rx_irq()
2386 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) in ixgbe_clean_rx_irq()
2390 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) in ixgbe_clean_rx_irq()
2397 ixgbe_process_skb_fields(rx_ring, rx_desc, skb); in ixgbe_clean_rx_irq()
2401 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { in ixgbe_clean_rx_irq()
2406 mss = rx_ring->netdev->mtu - in ixgbe_clean_rx_irq()
2439 u64_stats_update_begin(&rx_ring->syncp); in ixgbe_clean_rx_irq()
2440 rx_ring->stats.packets += total_rx_packets; in ixgbe_clean_rx_irq()
2441 rx_ring->stats.bytes += total_rx_bytes; in ixgbe_clean_rx_irq()
2442 u64_stats_update_end(&rx_ring->syncp); in ixgbe_clean_rx_irq()
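The ixgbe_clean_rx_irq() fragments above form the RX poll loop itself. Its shape: refill in batches, treat a nonzero descriptor length as the "writeback done" signal, fence with dma_rmb() before trusting the rest of the descriptor, then build or extend the skb, and only count a packet once its last descriptor is seen. A condensed skeleton with the XDP and FCoE branches trimmed:

    while (likely(total_rx_packets < budget)) {
        /* refill in batches rather than per packet */
        if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
            ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
            cleaned_count = 0;
        }

        rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
        size = le16_to_cpu(rx_desc->wb.upper.length);
        if (!size)
            break;          /* hardware has not written this one back */
        dma_rmb();          /* read length before the rest of the desc */

        rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size,
                                        &rx_buffer_pgcnt);
        /* ... ixgbe_add_rx_frag() / ixgbe_build_skb() /
         *     ixgbe_construct_skb() selection elided ... */
        ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
        cleaned_count++;

        if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
            continue;       /* frame spans more descriptors */
        if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
            continue;       /* frame was dropped */

        total_rx_bytes += skb->len;
        ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
        ixgbe_rx_skb(q_vector, skb);
        total_rx_packets++;
    }

    u64_stats_update_begin(&rx_ring->syncp);
    rx_ring->stats.packets += total_rx_packets;
    rx_ring->stats.bytes += total_rx_bytes;
    u64_stats_update_end(&rx_ring->syncp);
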
3689 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); in ixgbe_set_rx_drop_en()
3692 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); in ixgbe_set_rx_drop_en()
3699 struct ixgbe_ring *rx_ring) in ixgbe_configure_srrctl() argument
3703 u8 reg_idx = rx_ring->reg_idx; in ixgbe_configure_srrctl()
3719 if (rx_ring->xsk_pool) { in ixgbe_configure_srrctl()
3720 u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool); in ixgbe_configure_srrctl()
3734 } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) { in ixgbe_configure_srrctl()
4257 struct ixgbe_ring *rx_ring; in ixgbe_set_rx_buffer_len() local
4291 rx_ring = adapter->rx_ring[i]; in ixgbe_set_rx_buffer_len()
4293 clear_ring_rsc_enabled(rx_ring); in ixgbe_set_rx_buffer_len()
4294 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4295 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4298 set_ring_rsc_enabled(rx_ring); in ixgbe_set_rx_buffer_len()
4300 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) in ixgbe_set_rx_buffer_len()
4301 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4306 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4310 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4314 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
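ixgbe_set_rx_buffer_len() reconfigures every ring with a clear-then-set pattern: all mode bits are dropped first so no state from the previous MTU/feature combination can leak through, then only the bits the current configuration needs are set again. A sketch of the per-ring body, assuming the upstream flag names:

    clear_ring_rsc_enabled(rx_ring);
    clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
    clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);

    if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
        set_ring_rsc_enabled(rx_ring);
    if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
        set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
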
4401 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); in ixgbe_configure_rx()
4509 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_vlan_strip_disable()
4547 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_vlan_strip_enable()
5278 queue = adapter->rx_ring[ring]->reg_idx; in ixgbe_fdir_filter_restore()
5295 static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) in ixgbe_clean_rx_ring() argument
5297 u16 i = rx_ring->next_to_clean; in ixgbe_clean_rx_ring()
5298 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; in ixgbe_clean_rx_ring()
5300 if (rx_ring->xsk_pool) { in ixgbe_clean_rx_ring()
5301 ixgbe_xsk_clean_rx_ring(rx_ring); in ixgbe_clean_rx_ring()
5306 while (i != rx_ring->next_to_alloc) { in ixgbe_clean_rx_ring()
5310 dma_unmap_page_attrs(rx_ring->dev, in ixgbe_clean_rx_ring()
5312 ixgbe_rx_pg_size(rx_ring), in ixgbe_clean_rx_ring()
5321 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_clean_rx_ring()
5324 ixgbe_rx_bufsz(rx_ring), in ixgbe_clean_rx_ring()
5328 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in ixgbe_clean_rx_ring()
5329 ixgbe_rx_pg_size(rx_ring), in ixgbe_clean_rx_ring()
5337 if (i == rx_ring->count) { in ixgbe_clean_rx_ring()
5339 rx_buffer = rx_ring->rx_buffer_info; in ixgbe_clean_rx_ring()
5344 rx_ring->next_to_alloc = 0; in ixgbe_clean_rx_ring()
5345 rx_ring->next_to_clean = 0; in ixgbe_clean_rx_ring()
5346 rx_ring->next_to_use = 0; in ixgbe_clean_rx_ring()
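ixgbe_clean_rx_ring() walks only the live region of the ring, from next_to_clean up to next_to_alloc, releasing each buffer, then zeroes all three cursors. A condensed sketch; the parked-skb handling and the dma_sync_single_range_for_cpu() before the unmap are trimmed:

    u16 i = rx_ring->next_to_clean;
    struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];

    while (i != rx_ring->next_to_alloc) {
        dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
                             ixgbe_rx_pg_size(rx_ring),
                             DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR);
        __page_frag_cache_drain(rx_buffer->page,
                                rx_buffer->pagecnt_bias);

        i++;
        rx_buffer++;
        if (i == rx_ring->count) {      /* wrap back to the ring base */
            i = 0;
            rx_buffer = rx_ring->rx_buffer_info;
        }
    }

    rx_ring->next_to_alloc = 0;
    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;
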
5371 adapter->rx_ring[baseq + i]->netdev = vdev; in ixgbe_fwd_ring_up()
5390 adapter->rx_ring[baseq + i]->netdev = NULL; in ixgbe_fwd_ring_up()
5774 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_disable_rx()
5817 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_disable_rx()
6071 ixgbe_clean_rx_ring(adapter->rx_ring[i]); in ixgbe_clean_all_rx_rings()
6547 static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring) in ixgbe_rx_napi_id() argument
6549 struct ixgbe_q_vector *q_vector = rx_ring->q_vector; in ixgbe_rx_napi_id()
6562 struct ixgbe_ring *rx_ring) in ixgbe_setup_rx_resources() argument
6564 struct device *dev = rx_ring->dev; in ixgbe_setup_rx_resources()
6569 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; in ixgbe_setup_rx_resources()
6571 if (rx_ring->q_vector) in ixgbe_setup_rx_resources()
6572 ring_node = rx_ring->q_vector->numa_node; in ixgbe_setup_rx_resources()
6574 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node); in ixgbe_setup_rx_resources()
6575 if (!rx_ring->rx_buffer_info) in ixgbe_setup_rx_resources()
6576 rx_ring->rx_buffer_info = vmalloc(size); in ixgbe_setup_rx_resources()
6577 if (!rx_ring->rx_buffer_info) in ixgbe_setup_rx_resources()
6581 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); in ixgbe_setup_rx_resources()
6582 rx_ring->size = ALIGN(rx_ring->size, 4096); in ixgbe_setup_rx_resources()
6585 rx_ring->desc = dma_alloc_coherent(dev, in ixgbe_setup_rx_resources()
6586 rx_ring->size, in ixgbe_setup_rx_resources()
6587 &rx_ring->dma, in ixgbe_setup_rx_resources()
6590 if (!rx_ring->desc) in ixgbe_setup_rx_resources()
6591 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in ixgbe_setup_rx_resources()
6592 &rx_ring->dma, GFP_KERNEL); in ixgbe_setup_rx_resources()
6593 if (!rx_ring->desc) in ixgbe_setup_rx_resources()
6596 rx_ring->next_to_clean = 0; in ixgbe_setup_rx_resources()
6597 rx_ring->next_to_use = 0; in ixgbe_setup_rx_resources()
6600 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, in ixgbe_setup_rx_resources()
6601 rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0) in ixgbe_setup_rx_resources()
6604 rx_ring->xdp_prog = adapter->xdp_prog; in ixgbe_setup_rx_resources()
6608 vfree(rx_ring->rx_buffer_info); in ixgbe_setup_rx_resources()
6609 rx_ring->rx_buffer_info = NULL; in ixgbe_setup_rx_resources()
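ixgbe_setup_rx_resources() allocates with a NUMA-local-first, any-node-fallback strategy for both the vmalloc'd buffer bookkeeping and the DMA-coherent descriptor area, and rounds the descriptor area up to a full page. A condensed sketch; the err unwind is the vfree() visible at lines 6608-6609:

    size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;

    rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
    if (!rx_ring->rx_buffer_info)
        rx_ring->rx_buffer_info = vmalloc(size);    /* any-node fallback */
    if (!rx_ring->rx_buffer_info)
        goto err;

    /* round the descriptor ring up to a full page */
    rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
    rx_ring->size = ALIGN(rx_ring->size, 4096);

    set_dev_node(dev, ring_node);
    rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                       &rx_ring->dma, GFP_KERNEL);
    set_dev_node(dev, orig_node);
    if (!rx_ring->desc)                             /* any-node fallback */
        rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
    if (!rx_ring->desc)
        goto err;
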
6629 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); in ixgbe_setup_all_rx_resources()
6645 ixgbe_free_rx_resources(adapter->rx_ring[i]); in ixgbe_setup_all_rx_resources()
6696 void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) in ixgbe_free_rx_resources() argument
6698 ixgbe_clean_rx_ring(rx_ring); in ixgbe_free_rx_resources()
6700 rx_ring->xdp_prog = NULL; in ixgbe_free_rx_resources()
6701 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ixgbe_free_rx_resources()
6702 vfree(rx_ring->rx_buffer_info); in ixgbe_free_rx_resources()
6703 rx_ring->rx_buffer_info = NULL; in ixgbe_free_rx_resources()
6706 if (!rx_ring->desc) in ixgbe_free_rx_resources()
6709 dma_free_coherent(rx_ring->dev, rx_ring->size, in ixgbe_free_rx_resources()
6710 rx_ring->desc, rx_ring->dma); in ixgbe_free_rx_resources()
6712 rx_ring->desc = NULL; in ixgbe_free_rx_resources()
6730 if (adapter->rx_ring[i]->desc) in ixgbe_free_all_rx_resources()
6731 ixgbe_free_rx_resources(adapter->rx_ring[i]); in ixgbe_free_all_rx_resources()
6751 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_change_mtu()
7058 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; in ixgbe_update_stats()
7059 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; in ixgbe_update_stats()
7066 struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]); in ixgbe_update_stats() local
7068 if (!rx_ring) in ixgbe_update_stats()
7070 non_eop_descs += rx_ring->rx_stats.non_eop_descs; in ixgbe_update_stats()
7071 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; in ixgbe_update_stats()
7072 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; in ixgbe_update_stats()
7073 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; in ixgbe_update_stats()
7074 hw_csum_rx_error += rx_ring->rx_stats.csum_err; in ixgbe_update_stats()
7075 bytes += rx_ring->stats.bytes; in ixgbe_update_stats()
7076 packets += rx_ring->stats.packets; in ixgbe_update_stats()
8940 struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); in ixgbe_get_stats64()
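Two mechanisms are visible in the stats fragments: READ_ONCE() on the ring pointer guards against a ring being torn down and replaced during a reset, and the syncp seqcount (written in the ixgbe_clean_rx_irq() fragment above) makes the 64-bit packet/byte counters tear-free on 32-bit hosts. A reader-side sketch as ixgbe_get_stats64() would use it; note the fetch helpers were named u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() in older kernels:

    struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
    unsigned int start;
    u64 bytes, packets;

    if (ring) {
        do {
            start   = u64_stats_fetch_begin(&ring->syncp);
            packets = ring->stats.packets;
            bytes   = ring->stats.bytes;
        } while (u64_stats_fetch_retry(&ring->syncp, start));
    }
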
9275 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; in get_macvlan_queue()
10056 struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i]; in ixgbe_fwd_del()
10133 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_xdp_setup()
10168 (void)xchg(&adapter->rx_ring[i]->xdp_prog, in ixgbe_xdp_setup()
10352 struct ixgbe_ring *rx_ring) in ixgbe_disable_rxr_hw() argument
10356 u8 reg_idx = rx_ring->reg_idx; in ixgbe_disable_rxr_hw()
10396 static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring) in ixgbe_reset_rxr_stats() argument
10398 memset(&rx_ring->stats, 0, sizeof(rx_ring->stats)); in ixgbe_reset_rxr_stats()
10399 memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); in ixgbe_reset_rxr_stats()
10412 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; in ixgbe_txrx_ring_disable() local
10414 rx_ring = adapter->rx_ring[ring]; in ixgbe_txrx_ring_disable()
10421 ixgbe_disable_rxr_hw(adapter, rx_ring); in ixgbe_txrx_ring_disable()
10427 napi_disable(&rx_ring->q_vector->napi); in ixgbe_txrx_ring_disable()
10432 ixgbe_clean_rx_ring(rx_ring); in ixgbe_txrx_ring_disable()
10437 ixgbe_reset_rxr_stats(rx_ring); in ixgbe_txrx_ring_disable()
10450 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; in ixgbe_txrx_ring_enable() local
10452 rx_ring = adapter->rx_ring[ring]; in ixgbe_txrx_ring_enable()
10457 napi_enable(&rx_ring->q_vector->napi); in ixgbe_txrx_ring_enable()
10462 ixgbe_configure_rx_ring(adapter, rx_ring); in ixgbe_txrx_ring_enable()
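The disable/enable fragments are ordering-sensitive: disable quiesces the hardware queue before NAPI so no writebacks land mid-teardown, then frees buffers and zeroes counters; enable runs the mirror image. A sketch of the RX-side ordering only, wrapped in a hypothetical helper name (rx_ring_quiesce) for illustration; in the real functions the TX and XDP rings interleave with these steps:

    static void rx_ring_quiesce(struct ixgbe_adapter *adapter, int ring)
    {
        struct ixgbe_ring *rx_ring = adapter->rx_ring[ring];

        ixgbe_disable_rxr_hw(adapter, rx_ring);    /* 1. stop hardware */
        napi_disable(&rx_ring->q_vector->napi);    /* 2. stop the poller */
        ixgbe_clean_rx_ring(rx_ring);              /* 3. free buffers */
        ixgbe_reset_rxr_stats(rx_ring);            /* 4. zero counters */
        /* re-enable: napi_enable(), then ixgbe_configure_rx_ring() */
    }
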
10961 u64_stats_init(&adapter->rx_ring[i]->syncp); in ixgbe_probe()