Searched refs:nb_entries (Results 1 – 4 of 4) sorted by relevance
/linux/net/xdp/
xsk_queue.h

    xskq_cons_read_desc_batch() (nb_entries is a local):

        212  u32 cached_cons = q->cached_cons, nb_entries = 0;
        214  while (cached_cons != q->cached_prod && nb_entries < max) {
        218          descs[nb_entries] = ring->desc[idx];
        219          if (unlikely(!xskq_cons_is_valid_desc(q, &descs[nb_entries], pool))) {
        225          nb_entries++;
        229  return nb_entries;

    xskq_prod_reserve_addr_batch() (nb_entries is a local):

        372  u32 nb_entries, i, cached_prod;
        374  nb_entries = xskq_prod_nb_free(q, max);
        378  for (i = 0; i < nb_entries; i++)
        382  return nb_entries;

    [all …]
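The xskq_cons_read_desc_batch() fragments show the cached-index batch-consume pattern: snapshot the consumer index, copy descriptors out of the ring until either the producer index or the caller's max is reached, skip invalid entries, and return how many were taken. Below is a minimal self-contained sketch of that pattern; the struct layout, the mask computation, the validity check, and the final write-back of cached_cons are simplifying assumptions, since the matched lines do not show them.

    /*
     * Minimal sketch of the batch-consume pattern visible in the
     * xskq_cons_read_desc_batch() matches above. Types and helpers
     * here are simplified stand-ins, not the kernel's definitions.
     */
    #include <stdbool.h>
    #include <stdint.h>

    struct desc { uint64_t addr; uint32_t len; };

    struct queue {
        uint32_t cached_prod;   /* snapshot of the producer index */
        uint32_t cached_cons;   /* consumer index, advanced locally */
        uint32_t mask;          /* ring size - 1, size a power of two */
        struct desc *ring;      /* the descriptor ring itself */
    };

    /* Stand-in for xskq_cons_is_valid_desc(): accept non-empty entries. */
    static bool is_valid_desc(const struct desc *d)
    {
        return d->len != 0;
    }

    /* Copy out up to max descriptors; return how many were taken,
     * mirroring the nb_entries counter in the fragments. */
    static uint32_t cons_read_desc_batch(struct queue *q, struct desc *descs,
                                         uint32_t max)
    {
        uint32_t cached_cons = q->cached_cons, nb_entries = 0;

        while (cached_cons != q->cached_prod && nb_entries < max) {
            uint32_t idx = cached_cons++ & q->mask;

            descs[nb_entries] = q->ring[idx];
            if (!is_valid_desc(&descs[nb_entries]))
                continue;   /* skip the entry, keep scanning */

            nb_entries++;
        }

        /* Assumption for self-containment: commit the consumer index
         * here. The matched lines do not show where the kernel does it. */
        q->cached_cons = cached_cons;
        return nb_entries;
    }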
xsk_buff_pool.c

    xp_alloc_new_from_fq() (nb_entries is a local):

        521  u32 i, cached_cons, nb_entries;
        528  nb_entries = max;
        541  nb_entries--;
        559  return nb_entries;

    xp_alloc_reused() (nb_entries is an argument):

        562  static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries)
        567  nb_entries = min_t(u32, nb_entries, pool->free_list_cnt);
        569  i = nb_entries;
        577  pool->free_list_cnt -= nb_entries;
        579  return nb_entries;
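Both allocators in xsk_buff_pool.c converge on the same contract: ask for up to nb_entries buffers, deliver what is actually available, and return the real count. xp_alloc_new_from_fq() starts optimistic (nb_entries = max at line 528) and decrements for entries it cannot use (line 541); xp_alloc_reused() instead clamps the request against the free list. Here is a self-contained sketch of the reuse path; struct pool and struct buf are simplified stand-ins for the kernel's xsk_buff_pool and xdp_buff_xsk, and a singly linked free list is assumed in place of the kernel's list_head machinery.

    /*
     * Sketch of the xp_alloc_reused() shape: clamp, pop, account,
     * report. All types here are illustrative, not the kernel's.
     */
    #include <stdint.h>

    struct buf {
        struct buf *next;       /* free-list linkage */
        /* ... buffer payload ... */
    };

    struct pool {
        struct buf *free_list;  /* reusable buffers */
        uint32_t free_list_cnt; /* length of free_list */
    };

    static uint32_t alloc_reused(struct pool *pool, struct buf **out,
                                 uint32_t nb_entries)
    {
        uint32_t i;

        /* Never hand out more than the free list holds
         * (the min_t(u32, ...) clamp at line 567). */
        if (nb_entries > pool->free_list_cnt)
            nb_entries = pool->free_list_cnt;

        for (i = 0; i < nb_entries; i++) {
            struct buf *b = pool->free_list;

            pool->free_list = b->next;
            out[i] = b;
        }

        pool->free_list_cnt -= nb_entries;  /* line 577 */
        return nb_entries;                  /* line 579 */
    }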
xsk.c

    xsk_tx_completed() (nb_entries is an argument):

        296  void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
        298          xskq_prod_submit_n(pool->cq, nb_entries);
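The xsk.c match shows the whole body that matters: completing nb_entries TX frames means submitting that many entries to the pool's completion queue (pool->cq), which is where user space reclaims transmitted buffers. A hedged sketch of a typical call site in a driver's TX-clean path follows; struct my_ring and my_count_done() are hypothetical names invented for illustration, and only xsk_tx_completed() itself comes from the sources above.

    /*
     * Hypothetical driver TX-clean path. Only xsk_tx_completed()
     * is real (declared in <net/xdp_sock_drv.h>).
     */
    #include <net/xdp_sock_drv.h>

    struct my_ring {
        struct xsk_buff_pool *xsk_pool; /* pool bound to this TX queue */
        /* ... hardware descriptor ring state ... */
    };

    /* Provided elsewhere in this hypothetical driver: how many TX
     * descriptors the NIC has finished with since the last clean. */
    u32 my_count_done(struct my_ring *ring);

    static void my_clean_xsk_tx(struct my_ring *ring)
    {
        u32 completed = my_count_done(ring);

        if (completed)
            /* Advance the completion queue producer so user space
             * can reclaim the completed umem buffers. */
            xsk_tx_completed(ring->xsk_pool, completed);
    }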
/linux/include/net/
xdp_sock_drv.h

    xsk_tx_completed() (a declaration, plus a static inline taking nb_entries as an argument):

         14  void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
        135  static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
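The two xdp_sock_drv.h matches, a plain declaration at line 14 and a static inline at line 135, fit the header's usual build-time split: drivers always get a callable xsk_tx_completed(), which collapses to a no-op when AF_XDP support is compiled out. The sketch below shows that pattern; the exact #ifdef placement is inferred from the two match lines, not shown by the search output.

    /*
     * Inferred pattern: real declaration when AF_XDP is built in,
     * empty inline stub otherwise, so callers need no #ifdefs.
     */
    struct xsk_buff_pool;   /* forward declaration for the sketch */

    #ifdef CONFIG_XDP_SOCKETS
    void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
    #else
    static inline void xsk_tx_completed(struct xsk_buff_pool *pool,
                                        u32 nb_entries)
    {
    }
    #endif /* CONFIG_XDP_SOCKETS */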