Lines Matching refs:rxo (drivers/net/ethernet/emulex/benet/be_main.c)

615 			       struct be_rx_obj *rxo, u32 erx_stat)  in populate_erx_stats()  argument
618 rx_stats(rxo)->rx_drops_no_frags = erx_stat; in populate_erx_stats()
623 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags, in populate_erx_stats()
630 struct be_rx_obj *rxo; in be_parse_stats() local
646 for_all_rx_queues(adapter, rxo, i) { in be_parse_stats()
647 erx_stat = erx->rx_drops_no_fragments[rxo->q.id]; in be_parse_stats()
648 populate_erx_stats(adapter, rxo, erx_stat); in be_parse_stats()
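
The populate_erx_stats() and be_parse_stats() matches above pull the per-queue rx_drops_no_fragments value from the erx block and either store it straight into rx_stats(rxo)->rx_drops_no_frags or fold it in with accumulate_16bit_val(), because the hardware counter wraps at 65535. Below is a standalone sketch of that wrap handling, assuming the accumulator keeps the last observed hardware value in its low 16 bits; the helper is illustrative, not the driver's exact body.

#include <stdint.h>

/* Fold a 16-bit hardware counter that wraps at 65535 into a wider
 * 32-bit software accumulator, in the spirit of accumulate_16bit_val(). */
static void accumulate_16bit(uint32_t *acc, uint16_t hw_val)
{
	uint16_t last = (uint16_t)(*acc & 0xFFFFu);     /* low 16 bits track the HW counter */
	uint32_t next = (*acc & 0xFFFF0000u) + hw_val;

	if (hw_val < last)          /* the HW counter rolled over since the last read */
		next += 0x10000u;   /* credit one full 16-bit wrap */
	*acc = next;
}
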
658 struct be_rx_obj *rxo; in be_get_stats64() local
664 for_all_rx_queues(adapter, rxo, i) { in be_get_stats64()
665 const struct be_rx_stats *rx_stats = rx_stats(rxo); in be_get_stats64()
669 pkts = rx_stats(rxo)->rx_pkts; in be_get_stats64()
670 bytes = rx_stats(rxo)->rx_bytes; in be_get_stats64()
674 stats->multicast += rx_stats(rxo)->rx_mcast_pkts; in be_get_stats64()
675 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs + in be_get_stats64()
676 rx_stats(rxo)->rx_drops_no_frags; in be_get_stats64()
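
The be_get_stats64() matches snapshot each queue's rx_pkts and rx_bytes and add the drop counters into the netdev totals; the u64_stats begin/retry pair guarding those reads never names rxo, so it does not show up in this listing (the same loop is visible in the be_get_new_eqd() matches below). A hedged kernel-context sketch of that reader, assuming the driver's struct be_rx_obj and rx_stats() accessor from be.h; the helper name is illustrative.

#include <linux/u64_stats_sync.h>
/* Also assumes the driver's be.h for struct be_rx_obj and rx_stats(). */

/* Snapshot one queue's 64-bit counters consistently, even on 32-bit hosts,
 * by retrying while a writer holds the u64_stats sequence. */
static void be_rx_snapshot(struct be_rx_obj *rxo, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
		*pkts = rx_stats(rxo)->rx_pkts;
		*bytes = rx_stats(rxo)->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
}
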
2141 struct be_rx_obj *rxo; in be_get_new_eqd() local
2156 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { in be_get_new_eqd()
2158 start = u64_stats_fetch_begin_irq(&rxo->stats.sync); in be_get_new_eqd()
2159 rx_pkts += rxo->stats.rx_pkts; in be_get_new_eqd()
2160 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start)); in be_get_new_eqd()
2250 static void be_rx_stats_update(struct be_rx_obj *rxo, in be_rx_stats_update() argument
2253 struct be_rx_stats *stats = rx_stats(rxo); in be_rx_stats_update()
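
be_rx_stats_update() is the writer side for those same counters; its u64_stats_update_begin()/end() calls do not reference rxo and so are absent here. A hedged sketch of that writer pattern, limited to counters that appear elsewhere in this listing; the driver's actual field set and conditions may differ, and the helper name is illustrative.

/* Writer side: bump counters under u64_stats_update_begin()/end() so the
 * reader loops in be_get_stats64() and be_get_new_eqd() see consistent
 * 64-bit values. Assumes the driver's be.h definitions. */
static void be_rx_counters_bump(struct be_rx_obj *rxo, u32 pkt_size, bool mcast)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_pkts++;
	stats->rx_bytes += pkt_size;
	if (mcast)
		stats->rx_mcast_pkts++;
	u64_stats_update_end(&stats->sync);
}
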
2277 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo) in get_rx_page_info() argument
2279 struct be_adapter *adapter = rxo->adapter; in get_rx_page_info()
2281 struct be_queue_info *rxq = &rxo->q; in get_rx_page_info()
2284 rx_page_info = &rxo->page_info_tbl[frag_idx]; in get_rx_page_info()
2304 static void be_rx_compl_discard(struct be_rx_obj *rxo, in be_rx_compl_discard() argument
2311 page_info = get_rx_page_info(rxo); in be_rx_compl_discard()
2321 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb, in skb_fill_rx_data() argument
2329 page_info = get_rx_page_info(rxo); in skb_fill_rx_data()
2366 page_info = get_rx_page_info(rxo); in skb_fill_rx_data()
2393 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi, in be_rx_compl_process() argument
2396 struct be_adapter *adapter = rxo->adapter; in be_rx_compl_process()
2402 rx_stats(rxo)->rx_drops_no_skbs++; in be_rx_compl_process()
2403 be_rx_compl_discard(rxo, rxcp); in be_rx_compl_process()
2407 skb_fill_rx_data(rxo, skb, rxcp); in be_rx_compl_process()
2415 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]); in be_rx_compl_process()
2429 static void be_rx_compl_process_gro(struct be_rx_obj *rxo, in be_rx_compl_process_gro() argument
2433 struct be_adapter *adapter = rxo->adapter; in be_rx_compl_process_gro()
2441 be_rx_compl_discard(rxo, rxcp); in be_rx_compl_process_gro()
2447 page_info = get_rx_page_info(rxo); in be_rx_compl_process_gro()
2473 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]); in be_rx_compl_process_gro()
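
Both be_rx_compl_process() and be_rx_compl_process_gro() derive the RX queue index for skb_record_rx_queue() with pointer arithmetic: rxo minus the base of the adapter's rx_obj array. A tiny standalone illustration of that idiom; the names below are made up for the example.

#include <stdio.h>

struct rx_obj { int placeholder; };

int main(void)
{
	struct rx_obj rx_obj_array[4];
	struct rx_obj *rxo = &rx_obj_array[2];

	/* Subtracting the array base from an element pointer yields the element
	 * index; the driver passes exactly this to skb_record_rx_queue(). */
	printf("queue index = %td\n", rxo - &rx_obj_array[0]);
	return 0;
}
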
2530 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) in be_rx_compl_get() argument
2532 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq); in be_rx_compl_get()
2533 struct be_rx_compl_info *rxcp = &rxo->rxcp; in be_rx_compl_get()
2534 struct be_adapter *adapter = rxo->adapter; in be_rx_compl_get()
2571 queue_tail_inc(&rxo->cq); in be_rx_compl_get()
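
be_rx_compl_get() peeks the entry at the completion queue's tail with queue_tail_node(), decodes it into the per-queue rxo->rxcp scratch buffer, and then advances the tail with queue_tail_inc(). A generic standalone sketch of that tail-consumer idiom; the struct and helpers below are illustrative rather than the driver's definitions.

#include <stddef.h>
#include <stdint.h>

struct ring {
	uint8_t  *entries;    /* base of the entry array */
	uint32_t  entry_size; /* bytes per entry */
	uint32_t  len;        /* number of entries in the ring */
	uint32_t  tail;       /* index of the next entry to consume */
};

static void *ring_tail_node(struct ring *q)
{
	/* Peek the tail entry without consuming it (cf. queue_tail_node()). */
	return q->entries + (size_t)q->tail * q->entry_size;
}

static void ring_tail_inc(struct ring *q)
{
	/* Consume: advance the tail and wrap at the end (cf. queue_tail_inc()). */
	if (++q->tail == q->len)
		q->tail = 0;
}
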
2588 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed) in be_post_rx_frags() argument
2590 struct be_adapter *adapter = rxo->adapter; in be_post_rx_frags()
2592 struct be_queue_info *rxq = &rxo->q; in be_post_rx_frags()
2599 page_info = &rxo->page_info_tbl[rxq->head]; in be_post_rx_frags()
2604 rx_stats(rxo)->rx_post_fail++; in be_post_rx_frags()
2641 page_info = &rxo->page_info_tbl[rxq->head]; in be_post_rx_frags()
2654 if (rxo->rx_post_starved) in be_post_rx_frags()
2655 rxo->rx_post_starved = false; in be_post_rx_frags()
2663 rxo->rx_post_starved = true; in be_post_rx_frags()
2811 static void be_rxq_clean(struct be_rx_obj *rxo) in be_rxq_clean() argument
2813 struct be_queue_info *rxq = &rxo->q; in be_rxq_clean()
2817 page_info = get_rx_page_info(rxo); in be_rxq_clean()
2826 static void be_rx_cq_clean(struct be_rx_obj *rxo) in be_rx_cq_clean() argument
2828 struct be_queue_info *rx_cq = &rxo->cq; in be_rx_cq_clean()
2830 struct be_adapter *adapter = rxo->adapter; in be_rx_cq_clean()
2840 rxcp = be_rx_compl_get(rxo); in be_rx_cq_clean()
2855 be_rx_compl_discard(rxo, rxcp); in be_rx_cq_clean()
3106 struct be_rx_obj *rxo; in be_rx_cqs_destroy() local
3109 for_all_rx_queues(adapter, rxo, i) { in be_rx_cqs_destroy()
3110 q = &rxo->cq; in be_rx_cqs_destroy()
3120 struct be_rx_obj *rxo; in be_rx_cqs_create() local
3139 for_all_rx_queues(adapter, rxo, i) { in be_rx_cqs_create()
3140 rxo->adapter = adapter; in be_rx_cqs_create()
3141 cq = &rxo->cq; in be_rx_cqs_create()
3147 u64_stats_init(&rxo->stats.sync); in be_rx_cqs_create()
3205 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, in be_process_rx() argument
3208 struct be_adapter *adapter = rxo->adapter; in be_process_rx()
3209 struct be_queue_info *rx_cq = &rxo->cq; in be_process_rx()
3215 rxcp = be_rx_compl_get(rxo); in be_process_rx()
3225 be_rx_compl_discard(rxo, rxcp); in be_process_rx()
3234 be_rx_compl_discard(rxo, rxcp); in be_process_rx()
3239 be_rx_compl_process_gro(rxo, napi, rxcp); in be_process_rx()
3241 be_rx_compl_process(rxo, napi, rxcp); in be_process_rx()
3245 be_rx_stats_update(rxo, rxcp); in be_process_rx()
3254 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM && in be_process_rx()
3255 !rxo->rx_post_starved) in be_process_rx()
3256 be_post_rx_frags(rxo, GFP_ATOMIC, in be_process_rx()
3298 struct be_rx_obj *rxo; in be_poll() local
3311 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { in be_poll()
3312 work = be_process_rx(rxo, napi, budget); in be_poll()
3567 struct be_rx_obj *rxo; in be_rx_qs_destroy() local
3570 for_all_rx_queues(adapter, rxo, i) { in be_rx_qs_destroy()
3571 q = &rxo->q; in be_rx_qs_destroy()
3580 be_rx_cq_clean(rxo); in be_rx_qs_destroy()
3582 be_post_rx_frags(rxo, GFP_KERNEL, in be_rx_qs_destroy()
3587 be_rx_cq_clean(rxo); in be_rx_qs_destroy()
3588 be_rxq_clean(rxo); in be_rx_qs_destroy()
3685 struct be_rx_obj *rxo; in be_rx_qs_create() local
3688 for_all_rx_queues(adapter, rxo, i) { in be_rx_qs_create()
3689 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN, in be_rx_qs_create()
3696 rxo = default_rxo(adapter); in be_rx_qs_create()
3697 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, in be_rx_qs_create()
3699 false, &rxo->rss_id); in be_rx_qs_create()
3704 for_all_rss_queues(adapter, rxo, i) { in be_rx_qs_create()
3705 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, in be_rx_qs_create()
3707 true, &rxo->rss_id); in be_rx_qs_create()
3714 for_all_rss_queues(adapter, rxo, i) { in be_rx_qs_create()
3717 rss->rsstable[j + i] = rxo->rss_id; in be_rx_qs_create()
3746 for_all_rx_queues(adapter, rxo, i) in be_rx_qs_create()
3747 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1); in be_rx_qs_create()
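
In the be_rx_qs_create() matches, rss->rsstable[j + i] = rxo->rss_id spreads the RSS queue IDs round-robin across the indirection table. A standalone sketch of that fill pattern; the table length, queue-ID array, and function name are illustrative.

#include <stdint.h>

static void fill_rss_indir(uint8_t *table, unsigned int table_len,
			   const uint8_t *queue_ids, unsigned int nqueues)
{
	unsigned int i, j;

	if (!nqueues)
		return;

	/* Walk the table in strides of nqueues so every queue ID repeats
	 * evenly, mirroring rss->rsstable[j + i] = rxo->rss_id above. */
	for (j = 0; j < table_len; j += nqueues)
		for (i = 0; i < nqueues && j + i < table_len; i++)
			table[j + i] = queue_ids[i];
}
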
3796 struct be_rx_obj *rxo; in be_open() local
3813 for_all_rx_queues(adapter, rxo, i) in be_open()
3814 be_cq_notify(adapter, rxo->cq.id, true, 0); in be_open()
5492 struct be_rx_obj *rxo; in be_worker() local
5517 for_all_rx_queues(adapter, rxo, i) { in be_worker()
5521 if (rxo->rx_post_starved) in be_worker()
5522 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); in be_worker()
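
Taken together, the be_post_rx_frags() call sites in this listing outline the refill policy: queues are filled with GFP_KERNEL at setup (RX_Q_LEN - 1 frags), NAPI tops them up with GFP_ATOMIC once usage drops below RX_FRAGS_REFILL_WM, and be_worker() retries starved queues with GFP_KERNEL and MAX_RX_POST. A hedged kernel-context consolidation of that decision; the GFP_ATOMIC refill count is truncated in the listing, so it stays a parameter here, and the helper itself does not exist in the driver.

/* Illustrative consolidation of the refill decisions visible above;
 * assumes the driver's be.h definitions. */
static void be_rx_refill_policy(struct be_rx_obj *rxo, bool in_napi, u32 frags_needed)
{
	if (in_napi) {
		/* NAPI path: non-sleeping allocation, and skipped while starved
		 * so that be_worker() owns the recovery. */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC, frags_needed);
	} else if (rxo->rx_post_starved) {
		/* Worker path: sleeping allocation is fine here. */
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}
}
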