/linux/include/net/

page_pool.h
     85  struct page_pool {  (struct)
    172  void page_pool_destroy(struct page_pool *pool);
    175  void page_pool_put_page_bulk(struct page_pool *pool, void **data,
    178  static inline void page_pool_destroy(struct page_pool *pool)  (in page_pool_destroy())
    182  static inline void page_pool_use_xdp_mem(struct page_pool *pool,  (in page_pool_use_xdp_mem())
    186  static inline void page_pool_release_page(struct page_pool *pool,  (in page_pool_release_page())
    197  void page_pool_put_page(struct page_pool *pool, struct page *page,
    272  static inline bool page_pool_put(struct page_pool *pool)  (in page_pool_put())
    278  void page_pool_update_nid(struct page_pool *pool, int new_nid);
    285  static inline void page_pool_ring_lock(struct page_pool *pool)  (in page_pool_ring_lock())
    [all …]

xdp_priv.h
     12  struct page_pool *page_pool;  (member)
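Read together, these declarations are the allocator's public surface: one pool per RX ring, pages taken out with the alloc helpers and handed back with the put/release helpers. A minimal sketch of that flow, assuming a v5.x kernel with this include/net/page_pool.h; the mydrv_* names and the ring depth are made up:

    #include <linux/dma-mapping.h>
    #include <linux/numa.h>
    #include <net/page_pool.h>

    /* One pool per RX ring; recycled pages stay cached per ring. */
    static struct page_pool *mydrv_create_pool(struct device *dev)
    {
            struct page_pool_params pp_params = {
                    .flags     = PP_FLAG_DMA_MAP,   /* pool maps pages for us */
                    .order     = 0,                 /* single pages */
                    .pool_size = 256,               /* hypothetical ring depth */
                    .nid       = NUMA_NO_NODE,
                    .dev       = dev,
                    .dma_dir   = DMA_FROM_DEVICE,
            };

            return page_pool_create(&pp_params);    /* ERR_PTR() on failure */
    }

    static struct page *mydrv_refill_one(struct page_pool *pool, dma_addr_t *dma)
    {
            struct page *page = page_pool_dev_alloc_pages(pool);

            if (!page)
                    return NULL;

            /* With PP_FLAG_DMA_MAP the page is already mapped; the driver
             * just writes this address into its RX descriptor. */
            *dma = page_pool_get_dma_addr(page);
            return page;
    }

On teardown the driver returns pages with page_pool_put_full_page() and frees the pool with page_pool_destroy(), both declared above.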
/linux/net/core/

page_pool.c
     29  static int page_pool_init(struct page_pool *pool,  (in page_pool_init())
     92  struct page_pool *pool;  (in page_pool_create())
    215  static void page_pool_set_pp_info(struct page_pool *pool,  (in page_pool_set_pp_info())
    330  static s32 page_pool_inflight(struct page_pool *pool)  (in page_pool_inflight())
    408  struct page_pool *pool)  (in page_pool_recycle_in_cache())
    546  static void page_pool_free_frag(struct page_pool *pool)  (in page_pool_free_frag())
    618  static void page_pool_free(struct page_pool *pool)  (in page_pool_free())
    648  static void page_pool_scrub(struct page_pool *pool)  (in page_pool_scrub())
    659  static int page_pool_release(struct page_pool *pool)  (in page_pool_release())
    700  void page_pool_destroy(struct page_pool *pool)  (in page_pool_destroy())
    [all …]

xdp.c
    134  page_pool_destroy(xa->page_pool);  (in xdp_rxq_info_unreg_mem_model())
    354  page_pool_put_full_page(xa->page_pool, page, napi_direct);  (in __xdp_return())
    404  page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);  (in xdp_flush_frame_bulk())
    456  page_pool_release_page(xa->page_pool, page);  (in __xdp_release_frame())
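The xdp.c hunks are where recycling is wired up: once a pool is registered as an rxq's memory model, __xdp_return() and xdp_flush_frame_bulk() feed pages back through page_pool_put_full_page() and page_pool_put_page_bulk(). A hedged sketch of that registration, assuming the four-argument xdp_rxq_info_reg() of v5.11+ and hypothetical mydrv_* names:

    #include <net/page_pool.h>
    #include <net/xdp.h>

    static int mydrv_reg_rxq(struct net_device *ndev, struct xdp_rxq_info *rxq,
                             struct page_pool *pool, u32 queue_idx)
    {
            int err;

            err = xdp_rxq_info_reg(rxq, ndev, queue_idx, 0);
            if (err)
                    return err;

            /* From here on, XDP frames returned on this rxq are recycled
             * through the pool instead of being freed outright. */
            err = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
            if (err)
                    xdp_rxq_info_unreg(rxq);
            return err;
    }

Judging by page_pool_use_xdp_mem() and page_pool_put() in the header above, the pool is reference-counted, so the driver's own page_pool_destroy() and the one issued from xdp_rxq_info_unreg_mem_model() (line 134 above) are both accounted for and the pool is torn down only once.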
built-in.a
     32  page_pool.o/

.page_pool.o.cmd
      1  …page_pool.o := /usr/bin/ccache /home/test/workspace/code/optee_3.16/build/../toolchains/aarch64/bi…
      3  source_net/core/page_pool.o := net/core/page_pool.c
      5  deps_net/core/page_pool.o := \
    931  include/net/page_pool.h \
   1186  include/trace/events/page_pool.h \
   1202  net/core/page_pool.o: $(deps_net/core/page_pool.o)
   1204  $(deps_net/core/page_pool.o):

Makefile
     17  obj-$(CONFIG_PAGE_POOL) += page_pool.o

.built-in.a.cmd
      1  ….o net/core/xdp.o net/core/flow_offload.o net/core/net-sysfs.o net/core/page_pool.o net/core/net-p…
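The Makefile hunk compiles page_pool.o only when CONFIG_PAGE_POOL is enabled. PAGE_POOL is a non-visible Kconfig symbol, so drivers pull it in with select; a sketch, with MYDRV as a stand-in for a real driver symbol:

    config MYDRV
            tristate "Hypothetical page_pool-backed NIC driver"
            select PAGE_POOL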
/linux/include/trace/events/

page_pool.h
      3  #define TRACE_SYSTEM page_pool
     16  TP_PROTO(const struct page_pool *pool,
     22  __field(const struct page_pool *, pool)
     44  TP_PROTO(const struct page_pool *pool,
     50  __field(const struct page_pool *, pool)
     69  TP_PROTO(const struct page_pool *pool,
     75  __field(const struct page_pool *, pool)
     94  TP_PROTO(const struct page_pool *pool, int new_nid),
     99  __field(const struct page_pool *, pool)
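Judging by the TP_PROTOs, these are the four page_pool events (page_pool_release, page_pool_state_release, page_pool_state_hold and page_pool_update_nid). They can be watched at runtime through tracefs, assuming it is mounted at /sys/kernel/tracing:

    echo 1 > /sys/kernel/tracing/events/page_pool/enable
    cat /sys/kernel/tracing/trace_pipe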
/linux/Documentation/networking/

page_pool.rst
      7  The page_pool allocator is optimized for the XDP mode that uses one frame
     15  when it is safe to free a page_pool object. Thus, API users
     16  must run page_pool_release_page() when a page is leaving the page_pool or
    122  /* internal DMA mapping in page_pool */
    128  page_pool = page_pool_create(&pp_params);
    147  dma_dir = page_pool_get_dma_dir(dring->page_pool);
    150  page_pool_recycle_direct(page_pool, page);
    153  page_pool_recycle_direct(page_pool, page);
    155  page_pool_release_page(page_pool, page);
    156  new_page = page_pool_dev_alloc_pages(page_pool);
    [all …]

index.rst
     27  page_pool
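Lines 147-156 of the .rst sketch the canonical RX path. Reconstructed as a hedged fragment (mydrv_run_xdp(), its stub verdict and the ring struct are placeholders): on XDP_DROP the page can be recycled directly because we are in NAPI context, while a page that leaves for the network stack must first be unmapped with page_pool_release_page().

    #include <linux/bpf.h>
    #include <net/page_pool.h>
    #include <net/xdp.h>

    struct mydrv_ring {
            struct page_pool *page_pool;
    };

    static u32 mydrv_run_xdp(struct mydrv_ring *dring, struct page *page)
    {
            return XDP_DROP;        /* placeholder verdict */
    }

    static void mydrv_rx_one(struct mydrv_ring *dring, struct page *page)
    {
            switch (mydrv_run_xdp(dring, page)) {
            case XDP_DROP:
                    /* NAPI context: lockless recycle into the pool cache. */
                    page_pool_recycle_direct(dring->page_pool, page);
                    break;
            case XDP_PASS:
                    /* Page leaves the pool's control: unmap it before the
                     * skb is handed to the stack. */
                    page_pool_release_page(dring->page_pool, page);
                    /* ... build the skb and napi_gro_receive() it ... */
                    break;
            }
    }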
/linux/drivers/net/ethernet/apm/xgene/

Note: in this driver "page_pool" is xgene's own RX buffer ring (a struct xgene_enet_desc_ring), not the net/core allocator above; only the identifier matches.

xgene_enet_main.c
    690  page_pool = rx_ring->page_pool;  (in xgene_enet_rx_frame())
   1077  page_pool = ring->page_pool;  (in xgene_enet_delete_desc_rings())
   1078  if (page_pool) {  (in xgene_enet_delete_desc_rings())
   1175  page_pool = ring->page_pool;  (in xgene_enet_free_desc_rings())
   1176  if (page_pool) {  (in xgene_enet_free_desc_rings())
   1369  if (!page_pool) {  (in xgene_enet_create_desc_rings())
   1391  page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool);  (in xgene_enet_create_desc_rings())
   1392  rx_ring->page_pool = page_pool;  (in xgene_enet_create_desc_rings())
   1831  page_pool = pdata->rx_ring[i]->page_pool;  (in xgene_enet_init_hw())
   1865  page_pool = pdata->rx_ring[0]->page_pool;  (in xgene_enet_init_hw())
   [all …]

xgene_enet_cle.c
    709  if (pdata->rx_ring[idx]->page_pool) {  (in xgene_cle_set_rss_idt())
    710  pool_id = pdata->rx_ring[idx]->page_pool->id;  (in xgene_cle_set_rss_idt())
    786  if (pdata->rx_ring[0]->page_pool) {  (in xgene_enet_cle_init())
    787  pool_id = pdata->rx_ring[0]->page_pool->id;  (in xgene_enet_cle_init())
/linux/block/

Note: this page_pool is a mempool_t backing bounce buffers, unrelated to the network allocator; only the identifier matches.

bounce.c
     32  static mempool_t page_pool;  (variable)
     61  ret = mempool_init_page_pool(&page_pool, POOL_SIZE, 0);  (in init_emergency_pool())
    116  mempool_free(bvec->bv_page, &page_pool);  (in bounce_end_io())
    248  bounce_page = mempool_alloc(&page_pool, GFP_NOIO);  (in __blk_queue_bounce())
/linux/drivers/net/ethernet/socionext/

netsec.c
    286  struct page_pool *page_pool;  (member)
    742  page = page_pool_dev_alloc_pages(dring->page_pool);  (in netsec_alloc_rx_data())
    863  page_pool_get_dma_dir(rx_ring->page_pool);  (in netsec_xdp_queue_one())
    966  dma_dir = page_pool_get_dma_dir(dring->page_pool);  (in netsec_process_rx())
   1047  page_pool_release_page(dring->page_pool, page);  (in netsec_process_rx())
   1227  page_pool_destroy(dring->page_pool);  (in netsec_uninit_pkt_dring())
   1308  dring->page_pool = page_pool_create(&pp_params);  (in netsec_setup_rx_dring())
   1309  if (IS_ERR(dring->page_pool)) {  (in netsec_setup_rx_dring())
   1310  err = PTR_ERR(dring->page_pool);  (in netsec_setup_rx_dring())
   1311  dring->page_pool = NULL;  (in netsec_setup_rx_dring())
   [all …]
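netsec's pp_params (around the page_pool_create() call at line 1308) also hand the device-side DMA sync to the pool via PP_FLAG_DMA_SYNC_DEV. A sketch of such a configuration, under the assumption that only the area past the XDP headroom ever holds packet data; the sizes are illustrative, not netsec's actual values:

    #include <linux/bpf.h>
    #include <net/page_pool.h>

    static struct page_pool *mydrv_create_xdp_pool(struct device *dev)
    {
            struct page_pool_params pp_params = {
                    /* Pool maps pages and dma-syncs the used area for the
                     * device when pages are recycled for reuse. */
                    .flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                    .pool_size = 512,
                    .dev       = dev,
                    .dma_dir   = DMA_BIDIRECTIONAL,  /* XDP_TX writes too */
                    .offset    = XDP_PACKET_HEADROOM,
                    .max_len   = PAGE_SIZE - XDP_PACKET_HEADROOM,
            };

            return page_pool_create(&pp_params);
    }

The IS_ERR()/PTR_ERR() lines above show the matching error handling: page_pool_create() never returns NULL, only a valid pool or an ERR_PTR().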
/linux/mm/

Note: here page_pool is a local LIST_HEAD batching pages for readahead, unrelated to the network allocator.

readahead.c
    178  LIST_HEAD(page_pool);  (in page_cache_ra_unbounded())
    210  read_pages(ractl, &page_pool, true);  (in page_cache_ra_unbounded())
    220  list_add(&page->lru, &page_pool);  (in page_cache_ra_unbounded())
    224  read_pages(ractl, &page_pool, true);  (in page_cache_ra_unbounded())
    238  read_pages(ractl, &page_pool, false);  (in page_cache_ra_unbounded())
/linux/drivers/net/ethernet/ti/

cpsw_priv.c
   1103  struct page_pool *pool;  (in cpsw_fill_rx_channels())
   1110  pool = cpsw->page_pool[ch];  (in cpsw_fill_rx_channels())
   1144  static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,  (in cpsw_create_page_pool())
   1148  struct page_pool *pool;  (in cpsw_create_page_pool())
   1166  struct page_pool *pool;  (in cpsw_create_rx_pool())
   1174  cpsw->page_pool[ch] = pool;  (in cpsw_create_rx_pool())
   1183  struct page_pool *pool;  (in cpsw_ndev_create_xdp_rxq())
   1186  pool = cpsw->page_pool[ch];  (in cpsw_ndev_create_xdp_rxq())
   1224  page_pool_destroy(cpsw->page_pool[ch]);  (in cpsw_destroy_xdp_rxqs())
   1225  cpsw->page_pool[ch] = NULL;  (in cpsw_destroy_xdp_rxqs())
   [all …]

cpsw_priv.h
    358  struct page_pool *page_pool[CPSW_MAX_QUEUES];  (member)
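cpsw_priv.h line 358 shows the pattern of one pool per DMA channel in a fixed array. A small sketch of that shape with hypothetical names (page_pool_destroy() tolerates NULL, which is why the destroy loop needs no check):

    #include <net/page_pool.h>

    #define MYDRV_MAX_QUEUES 8      /* stand-in for CPSW_MAX_QUEUES */

    struct mydrv_common {
            struct page_pool *page_pool[MYDRV_MAX_QUEUES];
    };

    static void mydrv_destroy_pools(struct mydrv_common *cmn, int num_ch)
    {
            int ch;

            for (ch = 0; ch < num_ch; ch++) {
                    page_pool_destroy(cmn->page_pool[ch]);
                    cmn->page_pool[ch] = NULL;  /* as cpsw does at line 1225 */
            }
    }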
/linux/drivers/net/ethernet/marvell/mvpp2/

mvpp2_main.c
    104  static struct page_pool *
    361  struct page_pool *page_pool)  (in mvpp2_frag_alloc(), argument)
    363  if (page_pool)  (in mvpp2_frag_alloc())
    373  struct page_pool *page_pool, void *data)  (in mvpp2_frag_free(), argument)
    375  if (page_pool)  (in mvpp2_frag_free())
    495  struct page_pool *pp = NULL;  (in mvpp2_bm_bufs_free())
    637  priv->page_pool[i] =  (in mvpp2_bm_init())
    738  struct page_pool *page_pool,  (in mvpp2_buf_alloc(), argument)
    751  if (page_pool) {  (in mvpp2_buf_alloc())
   3563  struct page_pool *page_pool, int pool)  (in mvpp2_rx_refill(), argument)
   [all …]
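mvpp2_frag_alloc()/mvpp2_frag_free() (lines 361-375) treat the pool as optional per buffer pool: buffers come from the page_pool when one is attached and from the generic allocators otherwise. A hedged reconstruction of that shape, not mvpp2's exact code:

    #include <linux/mm.h>
    #include <linux/skbuff.h>
    #include <net/page_pool.h>

    static void *mydrv_frag_alloc(struct page_pool *page_pool, size_t frag_size)
    {
            if (page_pool) {
                    struct page *page = page_pool_dev_alloc_pages(page_pool);

                    return page ? page_address(page) : NULL;
            }
            return netdev_alloc_frag(frag_size);
    }

    static void mydrv_frag_free(struct page_pool *page_pool, void *data)
    {
            /* Return the buffer the same way it was allocated. */
            if (page_pool)
                    page_pool_put_full_page(page_pool, virt_to_head_page(data),
                                            false);
            else
                    skb_free_frag(data);
    }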
/linux/drivers/net/

xen-netfront.c
    154  struct page_pool *page_pool;  (member)
    276  page = page_pool_dev_alloc_pages(queue->page_pool);  (in xennet_alloc_one_rx_buffer())
   1778  page_pool_destroy(queue->page_pool);  (in xennet_disconnect_backend())
   2137  queue->page_pool = page_pool_create(&pp_params);  (in xennet_create_page_pool())
   2138  if (IS_ERR(queue->page_pool)) {  (in xennet_create_page_pool())
   2139  err = PTR_ERR(queue->page_pool);  (in xennet_create_page_pool())
   2140  queue->page_pool = NULL;  (in xennet_create_page_pool())
   2152  MEM_TYPE_PAGE_POOL, queue->page_pool);  (in xennet_create_page_pool())
   2162  page_pool_destroy(queue->page_pool);  (in xennet_create_page_pool())
   2163  queue->page_pool = NULL;  (in xennet_create_page_pool())
/linux/drivers/net/ethernet/marvell/

mvneta.c
    682  struct page_pool *page_pool;  (member)
   1905  page = page_pool_alloc_pages(rxq->page_pool,  (in mvneta_rx_refill())
   1982  page_pool_destroy(rxq->page_pool);  (in mvneta_rxq_drop_pkts())
   1983  rxq->page_pool = NULL;  (in mvneta_rxq_drop_pkts())
   2041  page_pool_put_full_page(rxq->page_pool,  (in mvneta_xdp_put_buff())
   3229  if (IS_ERR(rxq->page_pool)) {  (in mvneta_create_page_pool())
   3230  err = PTR_ERR(rxq->page_pool);  (in mvneta_create_page_pool())
   3231  rxq->page_pool = NULL;  (in mvneta_create_page_pool())
   3240  rxq->page_pool);  (in mvneta_create_page_pool())
   3249  page_pool_destroy(rxq->page_pool);  (in mvneta_create_page_pool())
   [all …]
/linux/drivers/net/ethernet/hisilicon/hns3/

hns3_debugfs.c
   1085  READ_ONCE(ring->page_pool->pages_state_hold_cnt));  (in hns3_dump_page_pool_info())
   1087  atomic_read(&ring->page_pool->pages_state_release_cnt));  (in hns3_dump_page_pool_info())
   1088  sprintf(result[j++], "%u", ring->page_pool->p.pool_size);  (in hns3_dump_page_pool_info())
   1089  sprintf(result[j++], "%u", ring->page_pool->p.order);  (in hns3_dump_page_pool_info())
   1090  sprintf(result[j++], "%d", ring->page_pool->p.nid);  (in hns3_dump_page_pool_info())
   1091  sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024);  (in hns3_dump_page_pool_info())
   1110  if (!priv->ring[h->kinfo.num_tqps].page_pool) {  (in hns3_dbg_page_pool_info())

hns3_enet.h
    462  struct page_pool *page_pool;  (member)
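hns3's debugfs dump reads the pool's two lifetime counters directly; their (wrapping) difference is what page_pool_inflight() in page_pool.c computes, and what must reach zero before page_pool_release() can free the pool. The arithmetic, using the field names visible in the hunks above:

    #include <net/page_pool.h>

    /* Pages handed out minus pages returned. The u32 wrap-around is
     * deliberate; the signed result stays correct as long as fewer than
     * 2^31 pages are in flight at once. */
    static s32 mydrv_pool_inflight(const struct page_pool *pool)
    {
            u32 hold = READ_ONCE(pool->pages_state_hold_cnt);
            u32 released = atomic_read(&pool->pages_state_release_cnt);

            return (s32)(hold - released);
    }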
/linux/drivers/net/ethernet/stmicro/stmmac/

stmmac.h
    100  struct page_pool *page_pool;  (member)
/linux/drivers/net/ethernet/mellanox/mlx5/core/

en_rx.c
    277  dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);  (in mlx5e_page_alloc_pool())
    284  page_pool_recycle_direct(rq->page_pool, dma_info->page);  (in mlx5e_page_alloc_pool())
    315  page_pool_recycle_direct(rq->page_pool, dma_info->page);  (in mlx5e_page_release_dynamic())
    318  page_pool_release_page(rq->page_pool, dma_info->page);  (in mlx5e_page_release_dynamic())
    782  if (rq->page_pool)  (in mlx5e_post_rx_wqes())
    783  page_pool_nid_changed(rq->page_pool, numa_mem_id());  (in mlx5e_post_rx_wqes())
    965  if (rq->page_pool)  (in mlx5e_post_rx_mpwqes())
    966  page_pool_nid_changed(rq->page_pool, numa_mem_id());  (in mlx5e_post_rx_mpwqes())
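mlx5 refreshes the pool's NUMA node from the RX polling path (lines 782-783 and 965-966), so new pages land on the node where NAPI is actually running. A sketch of the same pattern; page_pool_nid_changed() is a cheap inline wrapper around the page_pool_update_nid() declared in the header above:

    #include <linux/topology.h>
    #include <net/page_pool.h>

    static void mydrv_napi_refill(struct page_pool *pool)
    {
            /* No-op unless the pool's configured node differs from the
             * node backing the CPU this NAPI poll runs on. */
            page_pool_nid_changed(pool, numa_mem_id());

            /* ... then allocate and post RX buffers as usual ... */
    }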