Lines matching refs:rx: every reference to the RX ring pointer rx in what appears to be the GQI receive path of the Google gve Ethernet driver (gve_rx.c). Each entry gives the source line number, the matching source line, the enclosing function, and whether rx is a function argument or a local variable there.

23 static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)  in gve_rx_unfill_pages()  argument
25 u32 slots = rx->mask + 1; in gve_rx_unfill_pages()
28 if (rx->data.raw_addressing) { in gve_rx_unfill_pages()
30 gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i], in gve_rx_unfill_pages()
31 &rx->data.data_ring[i]); in gve_rx_unfill_pages()
34 page_ref_sub(rx->data.page_info[i].page, in gve_rx_unfill_pages()
35 rx->data.page_info[i].pagecnt_bias - 1); in gve_rx_unfill_pages()
36 gve_unassign_qpl(priv, rx->data.qpl->id); in gve_rx_unfill_pages()
37 rx->data.qpl = NULL; in gve_rx_unfill_pages()
39 kvfree(rx->data.page_info); in gve_rx_unfill_pages()
40 rx->data.page_info = NULL; in gve_rx_unfill_pages()
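The gve_rx_unfill_pages() hits above cover nearly the whole function. A rough reconstruction (the two loops and the if/else split are inferred from the fragments rather than quoted verbatim) shows the two release paths: raw-addressing slots unmap and free their own page, while QPL slots only drop the extra page references taken up front and hand the queue page list back to the pool:

static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
{
    u32 slots = rx->mask + 1;
    int i;

    if (rx->data.raw_addressing) {
        /* Each slot owns its page: unmap and free it. */
        for (i = 0; i < slots; i++)
            gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
                               &rx->data.data_ring[i]);
    } else {
        /* QPL pages stay registered with the device: drop the
         * reference bias taken at setup, then return the queue
         * page list to the pool.
         */
        for (i = 0; i < slots; i++)
            page_ref_sub(rx->data.page_info[i].page,
                         rx->data.page_info[i].pagecnt_bias - 1);
        gve_unassign_qpl(priv, rx->data.qpl->id);
        rx->data.qpl = NULL;
    }
    kvfree(rx->data.page_info);
    rx->data.page_info = NULL;
}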
45 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_free_ring() local
47 u32 slots = rx->mask + 1; in gve_rx_free_ring()
53 dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus); in gve_rx_free_ring()
54 rx->desc.desc_ring = NULL; in gve_rx_free_ring()
56 dma_free_coherent(dev, sizeof(*rx->q_resources), in gve_rx_free_ring()
57 rx->q_resources, rx->q_resources_bus); in gve_rx_free_ring()
58 rx->q_resources = NULL; in gve_rx_free_ring()
60 gve_rx_unfill_pages(priv, rx); in gve_rx_free_ring()
62 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_free_ring()
63 dma_free_coherent(dev, bytes, rx->data.data_ring, in gve_rx_free_ring()
64 rx->data.data_bus); in gve_rx_free_ring()
65 rx->data.data_ring = NULL; in gve_rx_free_ring()
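gve_rx_free_ring() then tears the queue down in roughly the reverse order of allocation: descriptor ring, per-queue resources, the backing pages (via gve_rx_unfill_pages()), and finally the data-slot ring. A sketch assembled from the fragments; the descriptor-ring size computation is assumed, since that line does not appear in the listing:

static void gve_rx_free_ring(struct gve_priv *priv, int idx)
{
    struct gve_rx_ring *rx = &priv->rx[idx];
    struct device *dev = &priv->pdev->dev;
    u32 slots = rx->mask + 1;
    size_t bytes;

    /* Descriptor ring (one entry per configured RX descriptor). */
    bytes = sizeof(*rx->desc.desc_ring) * priv->rx_desc_cnt;
    dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
    rx->desc.desc_ring = NULL;

    /* Queue resources shared with the device (doorbell/counter indices). */
    dma_free_coherent(dev, sizeof(*rx->q_resources),
                      rx->q_resources, rx->q_resources_bus);
    rx->q_resources = NULL;

    /* Release the backing pages before freeing the data-slot ring. */
    gve_rx_unfill_pages(priv, rx);

    bytes = sizeof(*rx->data.data_ring) * slots;
    dma_free_coherent(dev, bytes, rx->data.data_ring, rx->data.data_bus);
    rx->data.data_ring = NULL;
}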
97 static int gve_prefill_rx_pages(struct gve_rx_ring *rx) in gve_prefill_rx_pages() argument
99 struct gve_priv *priv = rx->gve; in gve_prefill_rx_pages()
107 slots = rx->mask + 1; in gve_prefill_rx_pages()
109 rx->data.page_info = kvzalloc(slots * in gve_prefill_rx_pages()
110 sizeof(*rx->data.page_info), GFP_KERNEL); in gve_prefill_rx_pages()
111 if (!rx->data.page_info) in gve_prefill_rx_pages()
114 if (!rx->data.raw_addressing) { in gve_prefill_rx_pages()
115 rx->data.qpl = gve_assign_rx_qpl(priv); in gve_prefill_rx_pages()
116 if (!rx->data.qpl) { in gve_prefill_rx_pages()
117 kvfree(rx->data.page_info); in gve_prefill_rx_pages()
118 rx->data.page_info = NULL; in gve_prefill_rx_pages()
123 if (!rx->data.raw_addressing) { in gve_prefill_rx_pages()
124 struct page *page = rx->data.qpl->pages[i]; in gve_prefill_rx_pages()
127 gve_setup_rx_buffer(&rx->data.page_info[i], addr, page, in gve_prefill_rx_pages()
128 &rx->data.data_ring[i].qpl_offset); in gve_prefill_rx_pages()
131 err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i], in gve_prefill_rx_pages()
132 &rx->data.data_ring[i]); in gve_prefill_rx_pages()
141 &rx->data.page_info[i], in gve_prefill_rx_pages()
142 &rx->data.data_ring[i]); in gve_prefill_rx_pages()
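The gve_prefill_rx_pages() fragments outline how every slot gets a backing buffer before the ring goes live: QPL rings reuse the pre-registered pages and only record a per-slot offset, while raw-addressing rings allocate and DMA-map a fresh page per slot and unwind on failure. A sketch with the return values, the per-page offset, and the unwind loop inferred from the fragments:

static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
{
    struct gve_priv *priv = rx->gve;
    u32 slots = rx->mask + 1;
    int err;
    int i;

    rx->data.page_info = kvzalloc(slots * sizeof(*rx->data.page_info),
                                  GFP_KERNEL);
    if (!rx->data.page_info)
        return -ENOMEM;

    if (!rx->data.raw_addressing) {
        /* QPL mode: borrow a pre-registered queue page list. */
        rx->data.qpl = gve_assign_rx_qpl(priv);
        if (!rx->data.qpl) {
            kvfree(rx->data.page_info);
            rx->data.page_info = NULL;
            return -ENOMEM;
        }
    }

    for (i = 0; i < slots; i++) {
        if (!rx->data.raw_addressing) {
            /* Slot i points into page i of the QPL at a fixed offset. */
            struct page *page = rx->data.qpl->pages[i];
            dma_addr_t addr = i * PAGE_SIZE;

            gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
                                &rx->data.data_ring[i].qpl_offset);
            continue;
        }
        /* Raw addressing: allocate and map a page for this slot. */
        err = gve_rx_alloc_buffer(priv, &priv->pdev->dev,
                                  &rx->data.page_info[i],
                                  &rx->data.data_ring[i]);
        if (err)
            goto alloc_err;
    }
    return slots;

alloc_err:
    while (i--)
        gve_rx_free_buffer(&priv->pdev->dev,
                           &rx->data.page_info[i],
                           &rx->data.data_ring[i]);
    return err;
}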
158 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_alloc_ring() local
167 memset(rx, 0, sizeof(*rx)); in gve_rx_alloc_ring()
169 rx->gve = priv; in gve_rx_alloc_ring()
170 rx->q_num = idx; in gve_rx_alloc_ring()
173 rx->mask = slots - 1; in gve_rx_alloc_ring()
174 rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT; in gve_rx_alloc_ring()
177 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_alloc_ring()
178 rx->data.data_ring = dma_alloc_coherent(hdev, bytes, in gve_rx_alloc_ring()
179 &rx->data.data_bus, in gve_rx_alloc_ring()
181 if (!rx->data.data_ring) in gve_rx_alloc_ring()
183 filled_pages = gve_prefill_rx_pages(rx); in gve_rx_alloc_ring()
188 rx->fill_cnt = filled_pages; in gve_rx_alloc_ring()
193 rx->q_resources = in gve_rx_alloc_ring()
195 sizeof(*rx->q_resources), in gve_rx_alloc_ring()
196 &rx->q_resources_bus, in gve_rx_alloc_ring()
198 if (!rx->q_resources) { in gve_rx_alloc_ring()
203 (unsigned long)rx->data.data_bus); in gve_rx_alloc_ring()
213 rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus, in gve_rx_alloc_ring()
215 if (!rx->desc.desc_ring) { in gve_rx_alloc_ring()
219 rx->cnt = 0; in gve_rx_alloc_ring()
220 rx->db_threshold = priv->rx_desc_cnt / 2; in gve_rx_alloc_ring()
221 rx->desc.seqno = 1; in gve_rx_alloc_ring()
226 rx->packet_buffer_size = PAGE_SIZE / 2; in gve_rx_alloc_ring()
227 gve_rx_ctx_clear(&rx->ctx); in gve_rx_alloc_ring()
233 dma_free_coherent(hdev, sizeof(*rx->q_resources), in gve_rx_alloc_ring()
234 rx->q_resources, rx->q_resources_bus); in gve_rx_alloc_ring()
235 rx->q_resources = NULL; in gve_rx_alloc_ring()
237 gve_rx_unfill_pages(priv, rx); in gve_rx_alloc_ring()
239 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_alloc_ring()
240 dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus); in gve_rx_alloc_ring()
241 rx->data.data_ring = NULL; in gve_rx_alloc_ring()
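gve_rx_alloc_ring() is the mirror image: it sets up the software state (mask, addressing mode, sequence number, doorbell threshold, buffer size), allocates the data-slot ring, prefills the pages, then allocates the queue resources and the descriptor ring, and unwinds in reverse order on failure. A condensed sketch of the allocation order and the goto-based unwind; device registration, debug prints, the exact label names, and the source of the slot count are assumed rather than taken from the listing:

static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
{
    struct gve_rx_ring *rx = &priv->rx[idx];
    struct device *hdev = &priv->pdev->dev;
    u32 slots = priv->rx_data_slot_cnt;     /* assumed source of the slot count */
    int filled_pages;
    size_t bytes;
    int err;

    memset(rx, 0, sizeof(*rx));
    rx->gve = priv;
    rx->q_num = idx;
    rx->mask = slots - 1;
    rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;

    /* 1. Data-slot ring: one buffer address/offset per slot. */
    bytes = sizeof(*rx->data.data_ring) * slots;
    rx->data.data_ring = dma_alloc_coherent(hdev, bytes,
                                            &rx->data.data_bus, GFP_KERNEL);
    if (!rx->data.data_ring)
        return -ENOMEM;

    /* 2. Back every slot with a page. */
    filled_pages = gve_prefill_rx_pages(rx);
    if (filled_pages < 0) {
        err = -ENOMEM;
        goto abort_with_slots;
    }
    rx->fill_cnt = filled_pages;

    /* 3. Per-queue resources (doorbell/counter indices). */
    rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources),
                                         &rx->q_resources_bus, GFP_KERNEL);
    if (!rx->q_resources) {
        err = -ENOMEM;
        goto abort_filled;
    }

    /* 4. Descriptor ring the device completes packets into. */
    bytes = sizeof(*rx->desc.desc_ring) * priv->rx_desc_cnt;
    rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
                                            GFP_KERNEL);
    if (!rx->desc.desc_ring) {
        err = -ENOMEM;
        goto abort_with_q_resources;
    }

    rx->cnt = 0;
    rx->db_threshold = priv->rx_desc_cnt / 2;
    rx->desc.seqno = 1;
    rx->packet_buffer_size = PAGE_SIZE / 2;
    gve_rx_ctx_clear(&rx->ctx);
    return 0;

abort_with_q_resources:
    dma_free_coherent(hdev, sizeof(*rx->q_resources),
                      rx->q_resources, rx->q_resources_bus);
    rx->q_resources = NULL;
abort_filled:
    gve_rx_unfill_pages(priv, rx);
abort_with_slots:
    bytes = sizeof(*rx->data.data_ring) * slots;
    dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus);
    rx->data.data_ring = NULL;
    return err;
}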
278 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx) in gve_rx_write_doorbell() argument
280 u32 db_idx = be32_to_cpu(rx->q_resources->db_index); in gve_rx_write_doorbell()
282 iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]); in gve_rx_write_doorbell()
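The doorbell write is tiny and the two fragments above are essentially the whole function: the device publishes which doorbell slot this queue owns (big-endian, in the shared q_resources block), and the driver posts its current fill count there so the NIC knows how many buffers are available:

void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
{
    u32 db_idx = be32_to_cpu(rx->q_resources->db_index);

    /* Tell the device how far the driver has filled the buffer ring. */
    iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]);
}

Note that the raw free-running fill counter, not a masked ring index, is what gets posted.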
367 struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info, in gve_rx_qpl() argument
371 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx_qpl()
380 skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx); in gve_rx_qpl()
392 u64_stats_update_begin(&rx->statss); in gve_rx_qpl()
393 rx->rx_frag_copy_cnt++; in gve_rx_qpl()
394 u64_stats_update_end(&rx->statss); in gve_rx_qpl()
406 static bool gve_rx_ctx_init(struct gve_rx_ctx *ctx, struct gve_rx_ring *rx) in gve_rx_ctx_init() argument
408 bool qpl_mode = !rx->data.raw_addressing, packet_size_error = false; in gve_rx_ctx_init()
411 struct gve_priv *priv = rx->gve; in gve_rx_ctx_init()
412 u32 idx = rx->cnt & rx->mask; in gve_rx_ctx_init()
428 desc = &rx->desc.desc_ring[idx]; in gve_rx_ctx_init()
430 if (GVE_SEQNO(desc->flags_seq) != rx->desc.seqno) { in gve_rx_ctx_init()
434 rx->desc.seqno, GVE_SEQNO(desc->flags_seq)); in gve_rx_ctx_init()
438 if (frag_size > rx->packet_buffer_size) { in gve_rx_ctx_init()
442 rx->packet_buffer_size, be16_to_cpu(desc->len)); in gve_rx_ctx_init()
444 page_info = &rx->data.page_info[idx]; in gve_rx_ctx_init()
450 idx = (idx + 1) & rx->mask; in gve_rx_ctx_init()
451 rx->desc.seqno = gve_next_seqno(rx->desc.seqno); in gve_rx_ctx_init()
454 prefetch(rx->desc.desc_ring + idx); in gve_rx_ctx_init()
463 u64_stats_update_begin(&rx->statss); in gve_rx_ctx_init()
464 rx->rx_cont_packet_cnt++; in gve_rx_ctx_init()
465 u64_stats_update_end(&rx->statss); in gve_rx_ctx_init()
468 u64_stats_update_begin(&rx->statss); in gve_rx_ctx_init()
469 rx->rx_copied_pkt++; in gve_rx_ctx_init()
470 u64_stats_update_end(&rx->statss); in gve_rx_ctx_init()
479 u64_stats_update_begin(&rx->statss); in gve_rx_ctx_init()
480 rx->rx_desc_err_dropped_pkt++; in gve_rx_ctx_init()
481 u64_stats_update_end(&rx->statss); in gve_rx_ctx_init()
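gve_rx_ctx_init() walks all descriptors that make up one (possibly multi-fragment) packet before anything is handed to the stack, checking for each fragment that the device's sequence number is the expected one and that the reported length fits the posted buffer; any violation marks the whole packet for dropping and is counted in rx_desc_err_dropped_pkt. A condensed sketch of the per-fragment checks only; the accumulation of sizes and fragment counts into the context, and the exact warning wording, are omitted or assumed:

/* Inside gve_rx_ctx_init(): validate one fragment's descriptor. */
desc = &rx->desc.desc_ring[idx];
if (GVE_SEQNO(desc->flags_seq) != rx->desc.seqno) {
    /* The device skipped or replayed a descriptor: count and drop. */
    seqno_error = true;
    netdev_warn(priv->dev, "RX seqno error: expected=%d, got=%d\n",
                rx->desc.seqno, GVE_SEQNO(desc->flags_seq));
}
frag_size = be16_to_cpu(desc->len);
if (frag_size > rx->packet_buffer_size) {
    /* The device claims more bytes than fit in the posted buffer. */
    packet_size_error = true;
    netdev_warn(priv->dev, "RX fragment error: packet_buffer_size=%d, frag len=%d\n",
                rx->packet_buffer_size, be16_to_cpu(desc->len));
}
page_info = &rx->data.page_info[idx];

/* Advance to the next descriptor of this packet. */
idx = (idx + 1) & rx->mask;
rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
prefetch(rx->desc.desc_ring + idx);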
487 static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx, in gve_rx_skb() argument
492 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx_skb()
499 u64_stats_update_begin(&rx->statss); in gve_rx_skb()
500 rx->rx_copied_pkt++; in gve_rx_skb()
501 rx->rx_frag_copy_cnt++; in gve_rx_skb()
502 rx->rx_copybreak_pkt++; in gve_rx_skb()
503 u64_stats_update_end(&rx->statss); in gve_rx_skb()
506 if (rx->data.raw_addressing) { in gve_rx_skb()
515 u64_stats_update_begin(&rx->statss); in gve_rx_skb()
516 rx->rx_frag_flip_cnt++; in gve_rx_skb()
517 u64_stats_update_end(&rx->statss); in gve_rx_skb()
522 rx->packet_buffer_size, ctx); in gve_rx_skb()
525 u64_stats_update_begin(&rx->statss); in gve_rx_skb()
526 rx->rx_frag_flip_cnt++; in gve_rx_skb()
527 u64_stats_update_end(&rx->statss); in gve_rx_skb()
529 skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx, in gve_rx_skb()
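The counters in gve_rx_skb() point at three outcomes: small packets are copied into a fresh skb (copybreak), raw-addressing buffers are handed to the stack zero-copy as page fragments, and QPL buffers go through gve_rx_qpl(), which chooses between flipping the page halves and copying. A deliberately collapsed sketch of that decision; the copy helper and padding constant are assumed names, and the buffer-recycling bookkeeping that accompanies the zero-copy path is not visible in the listing and is omitted here:

static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
                                  struct gve_rx_slot_page_info *page_info,
                                  struct napi_struct *napi, u16 len,
                                  union gve_rx_data_slot *data_slot)
{
    struct gve_rx_ctx *ctx = &rx->ctx;
    struct sk_buff *skb = NULL;

    if (len <= priv->rx_copybreak) {
        /* Small packet: copy it so the ring buffer can be reposted
         * immediately (gve_rx_copy()/GVE_RX_PAD assumed here).
         */
        skb = gve_rx_copy(priv->dev, napi, page_info, len, GVE_RX_PAD, ctx);
        if (skb) {
            u64_stats_update_begin(&rx->statss);
            rx->rx_copied_pkt++;
            rx->rx_frag_copy_cnt++;
            rx->rx_copybreak_pkt++;
            u64_stats_update_end(&rx->statss);
        }
    } else if (rx->data.raw_addressing) {
        /* Zero-copy: attach the DMA-mapped page as an skb fragment
         * and count the page "flip".
         */
        u64_stats_update_begin(&rx->statss);
        rx->rx_frag_flip_cnt++;
        u64_stats_update_end(&rx->statss);
        skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size,
                               len, ctx);
    } else {
        /* QPL mode: gve_rx_qpl() flips between the two halves of the
         * registered page, or falls back to a copy.
         */
        skb = gve_rx_qpl(&priv->pdev->dev, priv->dev, rx,
                         page_info, len, napi, data_slot);
    }
    return skb;
}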
536 static bool gve_rx(struct gve_rx_ring *rx, netdev_features_t feat, in gve_rx() argument
540 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx()
542 struct gve_priv *priv = rx->gve; in gve_rx()
553 idx = rx->cnt & rx->mask; in gve_rx()
554 first_desc = &rx->desc.desc_ring[idx]; in gve_rx()
556 napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_rx()
558 if (unlikely(!gve_rx_ctx_init(ctx, rx))) in gve_rx()
563 page_info = &rx->data.page_info[(idx + 2) & rx->mask]; in gve_rx()
572 page_info = &rx->data.page_info[idx]; in gve_rx()
573 data_slot = &rx->data.data_ring[idx]; in gve_rx()
574 page_bus = rx->data.raw_addressing ? in gve_rx()
576 rx->data.qpl->page_buses[idx]; in gve_rx()
579 skb = gve_rx_skb(priv, rx, page_info, napi, len, data_slot); in gve_rx()
581 u64_stats_update_begin(&rx->statss); in gve_rx()
582 rx->rx_skb_alloc_fail++; in gve_rx()
583 u64_stats_update_end(&rx->statss); in gve_rx()
588 rx->cnt++; in gve_rx()
589 idx = rx->cnt & rx->mask; in gve_rx()
591 desc = &rx->desc.desc_ring[idx]; in gve_rx()
625 rx->cnt++; in gve_rx()
632 bool gve_rx_work_pending(struct gve_rx_ring *rx) in gve_rx_work_pending() argument
638 next_idx = rx->cnt & rx->mask; in gve_rx_work_pending()
639 desc = rx->desc.desc_ring + next_idx; in gve_rx_work_pending()
645 return (GVE_SEQNO(flags_seq) == rx->desc.seqno); in gve_rx_work_pending()
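gve_rx_work_pending() and the seqno fields above rely on a small trick: the device stamps what looks like a small (3-bit) sequence number into each descriptor it completes, the driver keeps the value it expects next (initialised to 1 in gve_rx_alloc_ring()), and a descriptor is ready exactly when the two match. Because the expected value wraps from 7 back to 1 and never takes the value 0, a descriptor the device has not written yet (still zeroed) can never look ready. A self-contained illustration of that wrap, assuming the scheme implied by the fragments:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Next expected sequence number: 1..7, wrapping past 7 back to 1,
 * so 0 is reserved for "descriptor not yet written by the device".
 */
static uint8_t next_seqno(uint8_t seq)
{
    return (uint8_t)((seq + 1) == 8 ? 1 : seq + 1);
}

int main(void)
{
    uint8_t seq = 1;           /* rx->desc.seqno starts at 1 */
    uint8_t zeroed_desc = 0;   /* freshly allocated, never completed */

    for (int i = 0; i < 16; i++) {
        /* A blank descriptor never matches the expected value. */
        assert(zeroed_desc != seq);
        printf("expected seqno %u\n", seq);
        seq = next_seqno(seq);
    }
    return 0;
}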
648 static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx) in gve_rx_refill_buffers() argument
650 int refill_target = rx->mask + 1; in gve_rx_refill_buffers()
651 u32 fill_cnt = rx->fill_cnt; in gve_rx_refill_buffers()
653 while (fill_cnt - rx->cnt < refill_target) { in gve_rx_refill_buffers()
655 u32 idx = fill_cnt & rx->mask; in gve_rx_refill_buffers()
657 page_info = &rx->data.page_info[idx]; in gve_rx_refill_buffers()
663 &rx->data.data_ring[idx]; in gve_rx_refill_buffers()
678 if (!rx->data.raw_addressing) in gve_rx_refill_buffers()
685 &rx->data.data_ring[idx]; in gve_rx_refill_buffers()
691 u64_stats_update_begin(&rx->statss); in gve_rx_refill_buffers()
692 rx->rx_buf_alloc_fail++; in gve_rx_refill_buffers()
693 u64_stats_update_end(&rx->statss); in gve_rx_refill_buffers()
700 rx->fill_cnt = fill_cnt; in gve_rx_refill_buffers()
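gve_rx_refill_buffers(), and the fill_cnt/cnt arithmetic throughout the file, use free-running 32-bit producer/consumer counters: fill_cnt - cnt is the number of posted-but-unconsumed buffers, and counter & mask picks the slot in the power-of-two ring, which stays correct across u32 wraparound. That is also why the refill_target fragment above is simply rx->mask + 1, i.e. "refill until the ring is full". A minimal self-contained illustration:

#include <assert.h>
#include <stdint.h>

#define RING_SIZE 1024u                 /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

int main(void)
{
    /* Free-running counters, never reset; they may wrap around. */
    uint32_t fill_cnt = 0xfffffff0u;    /* producer: buffers posted */
    uint32_t cnt      = 0xffffffe8u;    /* consumer: buffers used   */

    /* Occupancy is plain unsigned subtraction, valid across wrap. */
    assert(fill_cnt - cnt == 8);

    /* Post buffers until the ring is full again. */
    while (fill_cnt - cnt < RING_SIZE) {
        uint32_t idx = fill_cnt & RING_MASK;   /* slot to (re)fill */
        (void)idx;                             /* ... refill slot idx ... */
        fill_cnt++;
    }
    assert(fill_cnt - cnt == RING_SIZE);
    return 0;
}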
704 static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget, in gve_clean_rx_done() argument
708 struct gve_priv *priv = rx->gve; in gve_clean_rx_done()
709 u32 idx = rx->cnt & rx->mask; in gve_clean_rx_done()
713 desc = &rx->desc.desc_ring[idx]; in gve_clean_rx_done()
714 while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) && in gve_clean_rx_done()
722 rx->q_num, idx, desc, desc->flags_seq); in gve_clean_rx_done()
725 rx->q_num, GVE_SEQNO(desc->flags_seq), in gve_clean_rx_done()
726 rx->desc.seqno); in gve_clean_rx_done()
728 dropped = !gve_rx(rx, feat, &packet_size_bytes, &work_cnt); in gve_clean_rx_done()
734 idx = rx->cnt & rx->mask; in gve_clean_rx_done()
735 desc = &rx->desc.desc_ring[idx]; in gve_clean_rx_done()
739 if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold) in gve_clean_rx_done()
743 u64_stats_update_begin(&rx->statss); in gve_clean_rx_done()
744 rx->rpackets += ok_packet_cnt; in gve_clean_rx_done()
745 rx->rbytes += bytes; in gve_clean_rx_done()
746 u64_stats_update_end(&rx->statss); in gve_clean_rx_done()
750 if (!rx->data.raw_addressing) { in gve_clean_rx_done()
752 rx->fill_cnt += work_done; in gve_clean_rx_done()
753 } else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) { in gve_clean_rx_done()
757 if (!gve_rx_refill_buffers(priv, rx)) in gve_clean_rx_done()
763 if (rx->fill_cnt - rx->cnt <= rx->db_threshold) { in gve_clean_rx_done()
764 gve_rx_write_doorbell(priv, rx); in gve_clean_rx_done()
769 gve_rx_write_doorbell(priv, rx); in gve_clean_rx_done()
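The tail of gve_clean_rx_done() is where buffer accounting differs by mode: in QPL mode every processed descriptor frees its slot for reuse, so fill_cnt simply advances by work_done; in raw-addressing mode buffers must actually be re-allocated, so the driver refills only when the outstanding count drops to the doorbell threshold, and asks to be polled again if the refill falls short. A condensed sketch of that logic; the descriptor-processing loop above it, and the local variables it fills in (work_done, ok_packet_cnt, bytes), are taken as given:

/* End of gve_clean_rx_done(): publish stats, replenish buffers, ring doorbell. */
if (work_done) {
    u64_stats_update_begin(&rx->statss);
    rx->rpackets += ok_packet_cnt;
    rx->rbytes += bytes;
    u64_stats_update_end(&rx->statss);
}

if (!rx->data.raw_addressing) {
    /* QPL: every consumed descriptor immediately frees its slot. */
    rx->fill_cnt += work_done;
} else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
    /* RDA: running low on posted buffers, try to re-allocate pages. */
    if (!gve_rx_refill_buffers(priv, rx))
        return 0;

    /* Still at or below the threshold: ring the doorbell with what we
     * have and report the full budget so NAPI polls this queue again.
     */
    if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
        gve_rx_write_doorbell(priv, rx);
        return budget;
    }
}

gve_rx_write_doorbell(priv, rx);
return work_done;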
775 struct gve_rx_ring *rx = block->rx; in gve_rx_poll() local
786 work_done = gve_clean_rx_done(rx, budget, feat); in gve_rx_poll()
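Finally, gve_rx_poll() is the NAPI entry point for this ring: it looks up the ring from its notify block, reads the netdev feature flags the fast path needs, and spends at most budget descriptors in gve_clean_rx_done(). A sketch of the plumbing suggested by the two fragments; the feature lookup and the budget guard are assumptions:

int gve_rx_poll(struct gve_notify_block *block, int budget)
{
    struct gve_rx_ring *rx = block->rx;
    netdev_features_t feat;
    int work_done = 0;

    /* Feature flags (e.g. RX checksum offload) steer the fast path. */
    feat = block->napi.dev->features;

    if (budget > 0)
        work_done = gve_clean_rx_done(rx, budget, feat);

    return work_done;
}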