Lines matching refs: rxq — Marvell mvneta Ethernet driver (drivers/net/ethernet/marvell/mvneta.c)
106 #define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3)) argument
131 #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq) argument
820 struct mvneta_rx_queue *rxq, in mvneta_rxq_non_occup_desc_add() argument
827 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
833 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
839 struct mvneta_rx_queue *rxq) in mvneta_rxq_busy_desc_num_get() argument
843 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); in mvneta_rxq_busy_desc_num_get()
851 struct mvneta_rx_queue *rxq, in mvneta_rxq_desc_num_update() argument
859 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
879 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
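The three status helpers above (lines 820-879) implement the descriptor-accounting protocol: MVNETA_RXQ_STATUS_REG reports how many descriptors the hardware has filled, and MVNETA_RXQ_STATUS_UPDATE_REG accepts two 8-bit counts (processed and newly refilled), which is why each updater has two mvreg_write() call sites — counts above 255 must be split into chunks. A reconstructed sketch of two of the three helpers; the mask/shift macro names are best-effort assumptions:

/* Sketch: how many RX descriptors the hardware has filled in. */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));

	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;	/* assumed mask name */
}

/* Sketch: acknowledge rx_done processed and rx_filled refilled
 * descriptors. The status-update register only takes 8-bit counts,
 * so larger updates are written in chunks of at most 255.
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if (rx_done <= 0xff && rx_filled <= 0xff) {
		val = rx_done | (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	while (rx_done > 0 || rx_filled > 0) {
		int done = min(rx_done, 0xff);
		int filled = min(rx_filled, 0xff);

		val = done | (filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		rx_done -= done;
		rx_filled -= filled;
	}
}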
885 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq) in mvneta_rxq_next_desc_get() argument
887 int rx_desc = rxq->next_desc_to_proc; in mvneta_rxq_next_desc_get()
889 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc); in mvneta_rxq_next_desc_get()
890 prefetch(rxq->descs + rxq->next_desc_to_proc); in mvneta_rxq_next_desc_get()
891 return rxq->descs + rx_desc; in mvneta_rxq_next_desc_get()
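mvneta_rxq_next_desc_get() is nearly complete in the fragments above: it returns the descriptor at next_desc_to_proc, advances the index with wraparound, and prefetches the following descriptor to hide memory latency. The wraparound macro itself, reconstructed as an assumption consistent with last_desc being size - 1:

/* Advance a ring index, wrapping to 0 after the last descriptor. */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? (index) + 1 : 0)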
909 struct mvneta_rx_queue *rxq, in mvneta_rxq_offset_set() argument
914 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_offset_set()
919 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_offset_set()
966 struct mvneta_rx_queue *rxq, in mvneta_rxq_buf_size_set() argument
971 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id)); in mvneta_rxq_buf_size_set()
976 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val); in mvneta_rxq_buf_size_set()
981 struct mvneta_rx_queue *rxq) in mvneta_rxq_bm_disable() argument
985 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_bm_disable()
987 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_bm_disable()
992 struct mvneta_rx_queue *rxq) in mvneta_rxq_bm_enable() argument
996 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_bm_enable()
998 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_bm_enable()
1003 struct mvneta_rx_queue *rxq) in mvneta_rxq_long_pool_set() argument
1007 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_long_pool_set()
1011 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_long_pool_set()
1016 struct mvneta_rx_queue *rxq) in mvneta_rxq_short_pool_set() argument
1020 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_short_pool_set()
1024 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_short_pool_set()
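Every per-queue configuration helper between lines 909 and 1024 (packet offset, buffer size, BM enable/disable, long/short pool selection) follows the same read-modify-write pattern on MVNETA_RXQ_CONFIG_REG or MVNETA_RXQ_SIZE_REG. One representative sketch; the field-mask macro names are assumptions:

/* Sketch: set the packet offset inside each RX buffer. */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;		/* assumed mask name */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);	/* offset in 8-byte units */
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}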
1232 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_port_up() local
1234 if (rxq->descs) in mvneta_port_up()
1461 int rxq, txq; in mvneta_defaults_set() local
1463 for (rxq = 0; rxq < rxq_number; rxq++) in mvneta_defaults_set()
1464 if ((rxq % max_cpu) == cpu) in mvneta_defaults_set()
1465 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); in mvneta_defaults_set()
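mvneta_defaults_set() here, and mvneta_percpu_elect() further down (lines 4156-4160), distribute receive queues across CPUs round-robin: queue rxq belongs to CPU rxq % max_cpu, and ownership is expressed as a bitmap via MVNETA_CPU_RXQ_ACCESS (BIT(rxq), line 131). The mapping loop as it appears at both call sites:

/* Build the per-CPU RX queue access bitmap: CPU n owns every queue
 * whose index is congruent to n modulo the number of CPUs.
 */
int rxq;
u32 rxq_map = 0;

for (rxq = 0; rxq < rxq_number; rxq++)
	if ((rxq % max_cpu) == cpu)
		rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);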
1649 struct mvneta_rx_queue *rxq, u32 value) in mvneta_rx_pkts_coal_set() argument
1651 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), in mvneta_rx_pkts_coal_set()
1659 struct mvneta_rx_queue *rxq, u32 value) in mvneta_rx_time_coal_set() argument
1667 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); in mvneta_rx_time_coal_set()
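The two coalescing setters above program when the queue raises an RX interrupt: after a packet-count threshold or after a time threshold. The time register counts core-clock cycles, so the microsecond value from ethtool is scaled by the port clock rate first. A sketch; the clock conversion via pp->clk is an assumption reconstructed from the elided lines 1660-1666:

/* Sketch: interrupt after `value` received frames. */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), value);
}

/* Sketch: interrupt after `value` microseconds, converted to clock
 * cycles using the port core clock.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val = (clk_get_rate(pp->clk) / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
}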
1687 struct mvneta_rx_queue *rxq) in mvneta_rx_desc_fill() argument
1692 i = rx_desc - rxq->descs; in mvneta_rx_desc_fill()
1693 rxq->buf_virt_addr[i] = virt_addr; in mvneta_rx_desc_fill()
1899 struct mvneta_rx_queue *rxq, in mvneta_rx_refill() argument
1905 page = page_pool_alloc_pages(rxq->page_pool, in mvneta_rx_refill()
1911 mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq); in mvneta_rx_refill()
1949 struct mvneta_rx_queue *rxq) in mvneta_rxq_drop_pkts() argument
1953 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); in mvneta_rxq_drop_pkts()
1955 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); in mvneta_rxq_drop_pkts()
1960 mvneta_rxq_next_desc_get(rxq); in mvneta_rxq_drop_pkts()
1972 for (i = 0; i < rxq->size; i++) { in mvneta_rxq_drop_pkts()
1973 struct mvneta_rx_desc *rx_desc = rxq->descs + i; in mvneta_rxq_drop_pkts()
1974 void *data = rxq->buf_virt_addr[i]; in mvneta_rxq_drop_pkts()
1978 page_pool_put_full_page(rxq->page_pool, data, false); in mvneta_rxq_drop_pkts()
1980 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) in mvneta_rxq_drop_pkts()
1981 xdp_rxq_info_unreg(&rxq->xdp_rxq); in mvneta_rxq_drop_pkts()
1982 page_pool_destroy(rxq->page_pool); in mvneta_rxq_drop_pkts()
1983 rxq->page_pool = NULL; in mvneta_rxq_drop_pkts()
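The tail of mvneta_rxq_drop_pkts() (lines 1972-1983) tears down the software-buffer-management state, and the order matters: buffers are returned to the page pool first, the XDP rxq info is unregistered next, and only then is the pool destroyed. A sketch of that teardown, with the per-slot guard condition an assumption (only slots that still hold a mapped page are returned):

/* Sketch: release every outstanding buffer, then the pool itself. */
for (i = 0; i < rxq->size; i++) {
	struct mvneta_rx_desc *rx_desc = rxq->descs + i;
	void *data = rxq->buf_virt_addr[i];

	if (!data || !rx_desc->buf_phys_addr)	/* assumed guard */
		continue;

	page_pool_put_full_page(rxq->page_pool, data, false);
}

if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
	xdp_rxq_info_unreg(&rxq->xdp_rxq);
page_pool_destroy(rxq->page_pool);
rxq->page_pool = NULL;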
2003 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) in mvneta_rx_refill_queue() argument
2006 int curr_desc = rxq->first_to_refill; in mvneta_rx_refill_queue()
2009 for (i = 0; (i < rxq->refill_num) && (i < 64); i++) { in mvneta_rx_refill_queue()
2010 rx_desc = rxq->descs + curr_desc; in mvneta_rx_refill_queue()
2012 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) { in mvneta_rx_refill_queue()
2016 rxq->id, i, rxq->refill_num); in mvneta_rx_refill_queue()
2025 curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc); in mvneta_rx_refill_queue()
2027 rxq->refill_num -= i; in mvneta_rx_refill_queue()
2028 rxq->first_to_refill = curr_desc; in mvneta_rx_refill_queue()
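mvneta_rx_refill_queue() refills opportunistically, capped at 64 buffers per call so a single NAPI pass cannot spend unbounded time allocating; on failure it stops early and first_to_refill remembers where to resume. A reconstruction from the fragments above, with error-statistics bookkeeping elided:

int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
{
	struct mvneta_rx_desc *rx_desc;
	int curr_desc = rxq->first_to_refill;
	int i;

	for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
		rx_desc = rxq->descs + curr_desc;
		if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
			pr_err("Can't refill queue %d. Done %d from %d\n",
			       rxq->id, i, rxq->refill_num);
			break;
		}
		curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
	}

	/* Whatever was not refilled stays pending for the next pass. */
	rxq->refill_num -= i;
	rxq->first_to_refill = curr_desc;

	return i;
}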
2034 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, in mvneta_xdp_put_buff() argument
2041 page_pool_put_full_page(rxq->page_pool, in mvneta_xdp_put_buff()
2043 page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), in mvneta_xdp_put_buff()
2176 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, in mvneta_run_xdp() argument
2201 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); in mvneta_run_xdp()
2212 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); in mvneta_run_xdp()
2221 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); in mvneta_run_xdp()
2236 struct mvneta_rx_queue *rxq, in mvneta_swbm_rx_frame() argument
2255 dma_dir = page_pool_get_dma_dir(rxq->page_pool); in mvneta_swbm_rx_frame()
2274 struct mvneta_rx_queue *rxq, in mvneta_swbm_add_rx_fragment() argument
2290 dma_dir = page_pool_get_dma_dir(rxq->page_pool); in mvneta_swbm_add_rx_fragment()
2303 page_pool_put_full_page(rxq->page_pool, page, true); in mvneta_swbm_add_rx_fragment()
2350 struct mvneta_rx_queue *rxq) in mvneta_rx_swbm() argument
2360 xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq); in mvneta_rx_swbm()
2366 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); in mvneta_rx_swbm()
2372 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); in mvneta_rx_swbm()
2377 index = rx_desc - rxq->descs; in mvneta_rx_swbm()
2378 page = (struct page *)rxq->buf_virt_addr[index]; in mvneta_rx_swbm()
2382 rxq->refill_num++; in mvneta_rx_swbm()
2395 mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, in mvneta_rx_swbm()
2400 page_pool_put_full_page(rxq->page_pool, page, in mvneta_rx_swbm()
2405 mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, in mvneta_rx_swbm()
2414 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); in mvneta_rx_swbm()
2419 mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps)) in mvneta_rx_swbm()
2422 skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status); in mvneta_rx_swbm()
2426 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); in mvneta_rx_swbm()
2447 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); in mvneta_rx_swbm()
2456 refill = mvneta_rx_refill_queue(pp, rxq); in mvneta_rx_swbm()
2459 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill); in mvneta_rx_swbm()
2467 struct mvneta_rx_queue *rxq) in mvneta_rx_hwbm() argument
2475 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); in mvneta_rx_hwbm()
2484 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); in mvneta_rx_hwbm()
2592 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); in mvneta_rx_hwbm()
3213 struct mvneta_rx_queue *rxq, int size) in mvneta_create_page_pool() argument
3228 rxq->page_pool = page_pool_create(&pp_params); in mvneta_create_page_pool()
3229 if (IS_ERR(rxq->page_pool)) { in mvneta_create_page_pool()
3230 err = PTR_ERR(rxq->page_pool); in mvneta_create_page_pool()
3231 rxq->page_pool = NULL; in mvneta_create_page_pool()
3235 err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0); in mvneta_create_page_pool()
3239 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, in mvneta_create_page_pool()
3240 rxq->page_pool); in mvneta_create_page_pool()
3247 xdp_rxq_info_unreg(&rxq->xdp_rxq); in mvneta_create_page_pool()
3249 page_pool_destroy(rxq->page_pool); in mvneta_create_page_pool()
3250 rxq->page_pool = NULL; in mvneta_create_page_pool()
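mvneta_create_page_pool() sets up one page pool per RX queue and registers it with the XDP core so page recycling works for both the skb and XDP paths; the error paths at lines 3247-3250 unwind in reverse order. A reconstructed sketch; the page_pool_params values are assumptions (order-0 pages, with the pool DMA-mapping and syncing buffers on the driver's behalf):

static int mvneta_create_page_pool(struct mvneta_port *pp,
				   struct mvneta_rx_queue *rxq, int size)
{
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = size,
		.nid = NUMA_NO_NODE,
		.dev = pp->dev->dev.parent,
		.dma_dir = DMA_FROM_DEVICE,	/* DMA_BIDIRECTIONAL with XDP attached */
	};
	int err;

	rxq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rxq->page_pool)) {
		err = PTR_ERR(rxq->page_pool);
		rxq->page_pool = NULL;
		return err;
	}

	err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0);
	if (err < 0)
		goto err_free_pp;

	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->page_pool);
	if (err)
		goto err_unregister_rxq;

	return 0;

err_unregister_rxq:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);
err_free_pp:
	page_pool_destroy(rxq->page_pool);
	rxq->page_pool = NULL;
	return err;
}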
3255 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, in mvneta_rxq_fill() argument
3260 err = mvneta_create_page_pool(pp, rxq, num); in mvneta_rxq_fill()
3265 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); in mvneta_rxq_fill()
3266 if (mvneta_rx_refill(pp, rxq->descs + i, rxq, in mvneta_rxq_fill()
3270 __func__, rxq->id, i, num); in mvneta_rxq_fill()
3278 mvneta_rxq_non_occup_desc_add(pp, rxq, i); in mvneta_rxq_fill()
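mvneta_rxq_fill() pre-populates the ring at init time, stopping early on allocation failure and exposing to the hardware only the descriptors that were actually filled. A sketch assembled from the fragments above:

static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i, err;

	err = mvneta_create_page_pool(pp, rxq, num);
	if (err < 0)
		return err;

	for (i = 0; i < num; i++) {
		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
		if (mvneta_rx_refill(pp, rxq->descs + i, rxq, GFP_KERNEL)) {
			netdev_err(pp->dev,
				   "%s:rxq %d, %d of %d buffs filled\n",
				   __func__, rxq->id, i, num);
			break;
		}
	}

	/* Hand only the successfully filled descriptors to the hardware. */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return i;
}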
3305 struct mvneta_rx_queue *rxq) in mvneta_rxq_sw_init() argument
3307 rxq->size = pp->rx_ring_size; in mvneta_rxq_sw_init()
3310 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_rxq_sw_init()
3311 rxq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_rxq_sw_init()
3312 &rxq->descs_phys, GFP_KERNEL); in mvneta_rxq_sw_init()
3313 if (!rxq->descs) in mvneta_rxq_sw_init()
3316 rxq->last_desc = rxq->size - 1; in mvneta_rxq_sw_init()
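mvneta_rxq_sw_init() allocates the descriptor ring in coherent DMA memory, so NIC and CPU share it without explicit sync calls. The fragments show most of the body; filling in the return values as assumptions:

static int mvneta_rxq_sw_init(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* One coherent allocation holds the whole ring; descs_phys is
	 * what gets programmed into MVNETA_RXQ_BASE_ADDR_REG later.
	 */
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	return 0;
}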
3322 struct mvneta_rx_queue *rxq) in mvneta_rxq_hw_init() argument
3325 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); in mvneta_rxq_hw_init()
3326 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); in mvneta_rxq_hw_init()
3329 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); in mvneta_rxq_hw_init()
3330 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); in mvneta_rxq_hw_init()
3334 mvneta_rxq_offset_set(pp, rxq, 0); in mvneta_rxq_hw_init()
3335 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ? in mvneta_rxq_hw_init()
3338 mvneta_rxq_bm_disable(pp, rxq); in mvneta_rxq_hw_init()
3339 mvneta_rxq_fill(pp, rxq, rxq->size); in mvneta_rxq_hw_init()
3342 mvneta_rxq_offset_set(pp, rxq, in mvneta_rxq_hw_init()
3345 mvneta_rxq_bm_enable(pp, rxq); in mvneta_rxq_hw_init()
3347 mvneta_rxq_long_pool_set(pp, rxq); in mvneta_rxq_hw_init()
3348 mvneta_rxq_short_pool_set(pp, rxq); in mvneta_rxq_hw_init()
3349 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); in mvneta_rxq_hw_init()
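mvneta_rxq_hw_init() programs the ring base and size plus the coalescing thresholds, then branches on buffer-management mode: software BM (page pool) disables the hardware buffer manager and pre-fills the ring itself, while hardware BM sets the packet offset, enables BM, and selects the long/short pools. A condensed sketch; the branch condition and offset constants are assumptions:

static void mvneta_rxq_hw_init(struct mvneta_port *pp,
			       struct mvneta_rx_queue *rxq)
{
	/* Tell the NIC where the ring lives and how big it is. */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Interrupt coalescing: packet-count and time thresholds. */
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

	if (!pp->bm_priv) {		/* assumed SW-BM condition */
		/* Software buffer management: page pool fills the ring. */
		mvneta_rxq_offset_set(pp, rxq, 0);
		mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
					MVNETA_MAX_RX_BUF_SIZE :
					MVNETA_RX_BUF_SIZE(pp->pkt_size));
		mvneta_rxq_bm_disable(pp, rxq);
		mvneta_rxq_fill(pp, rxq, rxq->size);
	} else {
		/* Hardware buffer management: BM pools supply buffers. */
		mvneta_rxq_offset_set(pp, rxq,
				      NET_SKB_PAD - pp->rx_offset_correction);
		mvneta_rxq_bm_enable(pp, rxq);
		mvneta_rxq_long_pool_set(pp, rxq);
		mvneta_rxq_short_pool_set(pp, rxq);
		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
	}
}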
3355 struct mvneta_rx_queue *rxq) in mvneta_rxq_init() argument
3360 ret = mvneta_rxq_sw_init(pp, rxq); in mvneta_rxq_init()
3364 mvneta_rxq_hw_init(pp, rxq); in mvneta_rxq_init()
3371 struct mvneta_rx_queue *rxq) in mvneta_rxq_deinit() argument
3373 mvneta_rxq_drop_pkts(pp, rxq); in mvneta_rxq_deinit()
3375 if (rxq->descs) in mvneta_rxq_deinit()
3377 rxq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_rxq_deinit()
3378 rxq->descs, in mvneta_rxq_deinit()
3379 rxq->descs_phys); in mvneta_rxq_deinit()
3381 rxq->descs = NULL; in mvneta_rxq_deinit()
3382 rxq->last_desc = 0; in mvneta_rxq_deinit()
3383 rxq->next_desc_to_proc = 0; in mvneta_rxq_deinit()
3384 rxq->descs_phys = 0; in mvneta_rxq_deinit()
3385 rxq->first_to_refill = 0; in mvneta_rxq_deinit()
3386 rxq->refill_num = 0; in mvneta_rxq_deinit()
4156 int rxq; in mvneta_percpu_elect() local
4158 for (rxq = 0; rxq < rxq_number; rxq++) in mvneta_percpu_elect()
4159 if ((rxq % max_cpu) == cpu) in mvneta_percpu_elect()
4160 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); in mvneta_percpu_elect()
4497 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_ethtool_set_coalesce() local
4498 rxq->time_coal = c->rx_coalesce_usecs; in mvneta_ethtool_set_coalesce()
4499 rxq->pkts_coal = c->rx_max_coalesced_frames; in mvneta_ethtool_set_coalesce()
4500 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); in mvneta_ethtool_set_coalesce()
4501 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); in mvneta_ethtool_set_coalesce()
5045 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_init() local
5046 rxq->id = queue; in mvneta_init()
5047 rxq->size = pp->rx_ring_size; in mvneta_init()
5048 rxq->pkts_coal = MVNETA_RX_COAL_PKTS; in mvneta_init()
5049 rxq->time_coal = MVNETA_RX_COAL_USEC; in mvneta_init()
5050 rxq->buf_virt_addr in mvneta_init()
5052 rxq->size, in mvneta_init()
5053 sizeof(*rxq->buf_virt_addr), in mvneta_init()
5055 if (!rxq->buf_virt_addr) in mvneta_init()
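mvneta_init() sets up the per-queue software state at probe time. The buf_virt_addr array shadows the hardware ring so a completed descriptor can be mapped back to its page (see line 2378). A sketch of the per-queue setup; the devres-managed allocator is an assumption consistent with the argument shape at lines 5050-5055:

rxq->id = queue;
rxq->size = pp->rx_ring_size;
rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
rxq->time_coal = MVNETA_RX_COAL_USEC;

/* Shadow array: one virtual-address slot per hardware descriptor,
 * freed automatically with the device (devres).
 */
rxq->buf_virt_addr = devm_kmalloc_array(pp->dev->dev.parent,
					rxq->size,
					sizeof(*rxq->buf_virt_addr),
					GFP_KERNEL);
if (!rxq->buf_virt_addr)
	return -ENOMEM;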
5449 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_suspend() local
5451 mvneta_rxq_drop_pkts(pp, rxq); in mvneta_suspend()
5501 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_resume() local
5503 rxq->next_desc_to_proc = 0; in mvneta_resume()
5504 mvneta_rxq_hw_init(pp, rxq); in mvneta_resume()