Searched refs:pool (Results 1 – 25 of 628) sorted by relevance

/linux/net/core/
page_pool.c
34 memcpy(&pool->p, params, sizeof(pool->p)); in page_pool_init()
96 if (!pool) in page_pool_create()
143 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_refill_alloc_cache()
158 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_refill_alloc_cache()
172 page = pool->alloc.cache[--pool->alloc.count]; in __page_pool_get_cached()
269 return pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_pages_slow()
290 pool->alloc.cache[pool->alloc.count++] = page; in __page_pool_alloc_pages_slow()
299 page = pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_pages_slow()
414 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_recycle_in_cache()
621 pool->disconnect(pool); in page_pool_free()
[all …]
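
The hits above cover page_pool's per-CPU alloc.cache: refill, slow-path allocation, and recycling. From a driver's point of view that machinery sits behind a small setup API; below is a minimal sketch, where the device pointer, pool size, and DMA direction are assumptions, not values taken from this code.

#include <linux/dma-mapping.h>
#include <net/page_pool.h>

/* Hypothetical RX setup: one pool per receive queue is the usual layout. */
static struct page_pool *rx_page_pool_create(struct device *dev)
{
        struct page_pool_params pp_params = {
                .order          = 0,                    /* one page per frame */
                .flags          = PP_FLAG_DMA_MAP,      /* pool maps pages for us */
                .pool_size      = 256,                  /* assumed RX ring depth */
                .nid            = NUMA_NO_NODE,
                .dev            = dev,
                .dma_dir        = DMA_FROM_DEVICE,
        };

        return page_pool_create(&pp_params);            /* ERR_PTR() on failure */
}

Pages then come from page_pool_dev_alloc_pages() (hit 143 under include/net below) and return through page_pool_put_full_page(), which feeds the recycle paths seen in these hits.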
/linux/net/xdp/
xsk_buff_pool.c
37 if (!pool) in xp_destroy()
41 kvfree(pool); in xp_destroy()
54 if (!pool) in xp_create_and_assign_umem()
84 xskb->pool = pool; in xp_create_and_assign_umem()
90 xp_init_xskb_addr(xskb, pool, i * pool->chunk_size); in xp_create_and_assign_umem()
173 bpf.xsk.pool = pool; in xp_assign_dev()
206 if (!pool->fq || !pool->cq) in xp_assign_dev_shared()
222 xsk_clear_pool_at_qid(pool->netdev, pool->queue_id); in xp_clear_dev()
257 if (!pool) in xp_put_pool()
442 *addr + pool->chunk_size > pool->addrs_cnt || in xp_check_unaligned()
[all …]
/linux/mm/
mempool.c
123 BUG_ON(pool->curr_nr >= pool->min_nr); in add_element()
126 pool->elements[pool->curr_nr++] = element; in add_element()
131 void *element = pool->elements[--pool->curr_nr]; in remove_element()
154 pool->free(element, pool->pool_data); in mempool_exit()
198 while (pool->curr_nr < pool->min_nr) { in mempool_init_node()
201 element = pool->alloc(gfp_mask, pool->pool_data); in mempool_init_node()
312 pool->free(element, pool->pool_data); in mempool_resize()
339 while (pool->curr_nr < pool->min_nr) { in mempool_resize()
345 if (pool->curr_nr < pool->min_nr) { in mempool_resize()
493 if (likely(pool->curr_nr < pool->min_nr)) { in mempool_free()
[all …]
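
mempool keeps a reserve of min_nr preallocated elements (the elements[] stack managed by add_element()/remove_element() above) so allocation on critical I/O paths can always make forward progress. A minimal slab-backed sketch; the cache name and sizes are assumptions:

#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *obj_cache;    /* hypothetical 256-byte objects */
static mempool_t *obj_pool;

static int obj_pool_init(void)
{
        obj_cache = kmem_cache_create("demo_obj", 256, 0, 0, NULL);
        if (!obj_cache)
                return -ENOMEM;

        /* Reserve 16 elements; mempool_alloc() falls back to the reserve
         * when kmem_cache_alloc() fails, then sleeps for a free element. */
        obj_pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab,
                                  obj_cache);
        if (!obj_pool) {
                kmem_cache_destroy(obj_cache);
                return -ENOMEM;
        }
        return 0;
}

Elements flow through mempool_alloc(obj_pool, GFP_NOIO) and mempool_free(obj, obj_pool); a freed element refills the reserve first, which is the curr_nr < min_nr test at hit 493 above.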
zbud.c
226 if (!pool) in zbud_create_pool()
234 pool->ops = ops; in zbud_create_pool()
235 return pool; in zbud_create_pool()
246 kfree(pool); in zbud_destroy_pool()
422 if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) || in zbud_reclaim_page()
447 ret = pool->ops->evict(pool, first_handle); in zbud_reclaim_page()
452 ret = pool->ops->evict(pool, last_handle); in zbud_reclaim_page()
529 if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict) in zbud_zpool_evict()
530 return pool->zpool_ops->evict(pool->zpool, handle); in zbud_zpool_evict()
546 if (pool) { in zbud_zpool_create()
[all …]
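
zbud packs up to two compressed objects into each page, and as the zbud_zpool_evict() hits (529–530) show, it is normally reached through the zpool facade rather than called directly. A sketch of one store round trip via zpool, assuming a kernel of this generation with CONFIG_ZPOOL and zbud available; the pool name and length are placeholders, and eviction ops are omitted:

#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zpool.h>

static int zbud_roundtrip(const void *src, size_t len)
{
        unsigned long handle;
        struct zpool *zp;
        void *dst;

        zp = zpool_create_pool("zbud", "demo", GFP_KERNEL, NULL);
        if (!zp)
                return -ENOMEM;

        if (zpool_malloc(zp, len, GFP_KERNEL, &handle)) {
                zpool_destroy_pool(zp);
                return -ENOMEM;
        }

        dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO); /* write-only map */
        memcpy(dst, src, len);
        zpool_unmap_handle(zp, handle);

        zpool_free(zp, handle);
        zpool_destroy_pool(zp);
        return 0;
}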
dmapool.c
95 pages * (pool->allocation / pool->size), in pools_show()
226 page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, in pool_alloc_page()
278 if (pool->dev && list_empty(&pool->dev->dma_pools)) in dma_pool_destroy()
287 if (pool->dev) in dma_pool_destroy()
300 kfree(pool); in dma_pool_destroy()
354 if (pool->dev) in dma_pool_alloc()
414 if (pool->dev) in dma_pool_free()
429 if (pool->dev) in dma_pool_free()
445 if (pool->dev) in dma_pool_free()
508 if (pool) in dmam_pool_create()
[all …]
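
dma_pool exists for hardware descriptors too small to justify a whole coherent page each: it slices fixed-size, alignment-guaranteed blocks out of dma_alloc_coherent() allocations (hit 226). A minimal sketch; the name, block size, and alignment are assumptions:

#include <linux/dmapool.h>

static int desc_pool_demo(struct device *dev)
{
        struct dma_pool *pool;
        dma_addr_t dma;
        void *desc;

        /* 64-byte blocks, 64-byte aligned, no boundary constraint (0) */
        pool = dma_pool_create("demo_desc", dev, 64, 64, 0);
        if (!pool)
                return -ENOMEM;

        desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);  /* dma: address for the HW */
        if (desc)
                dma_pool_free(pool, desc, dma);

        dma_pool_destroy(pool);
        return 0;
}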
z3fold.c
223 slots->pool = (unsigned long)pool; in alloc_slots()
373 pool->inode->i_mapping->private_data = pool; in z3fold_register_migration()
409 zhdr->pool = pool; in init_z3fold_page()
522 queue_work(pool->release_wq, &pool->work); in __release_z3fold_page()
980 if (!pool) in z3fold_create_pool()
1424 ret = pool->ops->evict(pool, middle_handle); in z3fold_reclaim_page()
1429 ret = pool->ops->evict(pool, first_handle); in z3fold_reclaim_page()
1434 ret = pool->ops->evict(pool, last_handle); in z3fold_reclaim_page()
1702 if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict) in z3fold_zpool_evict()
1703 return pool->zpool_ops->evict(pool->zpool, handle); in z3fold_zpool_evict()
[all …]
zswap.c
511 return pool; in __zswap_pool_current()
603 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in zswap_pool_create()
604 if (!pool) in zswap_pool_create()
619 pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx); in zswap_pool_create()
705 if (!pool) in zswap_pool_get()
729 pool = container_of(kref, typeof(*pool), kref); in __zswap_pool_empty()
795 if (pool) { in __zswap_param_set()
803 if (!pool) in __zswap_param_set()
806 if (pool) in __zswap_param_set()
1115 if (pool) in zswap_frontswap_store()
[all …]
/linux/drivers/md/
dm-thin.c
626 struct pool *pool = tc->pool; in requeue_deferred_cells() local
679 struct pool *pool = tc->pool; in get_bio_block() local
696 struct pool *pool = tc->pool; in get_bio_block_range() local
720 struct pool *pool = tc->pool; in remap() local
757 struct pool *pool = tc->pool; in issue() local
885 struct pool *pool = tc->pool; in cell_defer_no_holder() local
963 struct pool *pool = tc->pool; in complete_overwrite_bio() local
996 struct pool *pool = tc->pool; in process_prepared_mapping() local
1087 struct pool *pool = tc->pool; in passdown_double_checking_shared_status() local
1151 struct pool *pool = tc->pool; in process_prepared_discard_passdown_pt1() local
[all …]
/linux/drivers/net/ethernet/ti/
k3-cppi-desc-pool.c
29 if (!pool) in k3_cppi_desc_pool_destroy()
37 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem, in k3_cppi_desc_pool_destroy()
52 pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL); in k3_cppi_desc_pool_create_name()
53 if (!pool) in k3_cppi_desc_pool_create_name()
59 pool->mem_size = pool->num_desc * pool->desc_size; in k3_cppi_desc_pool_create_name()
76 pool->cpumem = dma_alloc_coherent(pool->dev, pool->mem_size, in k3_cppi_desc_pool_create_name()
83 (phys_addr_t)pool->dma_addr, pool->mem_size, in k3_cppi_desc_pool_create_name()
90 return pool; in k3_cppi_desc_pool_create_name()
93 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem, in k3_cppi_desc_pool_create_name()
98 devm_kfree(pool->dev, pool); in k3_cppi_desc_pool_create_name()
[all …]
/linux/sound/core/seq/
seq_memory.c
24 return pool->total_elements - atomic_read(&pool->counter); in snd_seq_pool_available()
29 return snd_seq_pool_available(pool) >= pool->room; in snd_seq_output_ok()
180 pool = cell->pool; in snd_seq_cell_free()
231 while (pool->free == NULL && ! nonblock && ! pool->closing) { in snd_seq_cell_alloc()
397 cellptr->pool = pool; in snd_seq_pool_init()
401 pool->room = (pool->size + 1) / 2; in snd_seq_pool_init()
405 pool->total_elements = pool->size; in snd_seq_pool_init()
461 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in snd_seq_pool_new()
462 if (!pool) in snd_seq_pool_new()
476 return pool; in snd_seq_pool_new()
[all …]
/linux/drivers/infiniband/sw/rxe/
rxe_pool.c
136 memset(pool, 0, sizeof(*pool)); in rxe_pool_init()
252 struct rxe_pool *pool = elem->pool; in __rxe_add_key_locked() local
263 struct rxe_pool *pool = elem->pool; in __rxe_add_key() local
276 struct rxe_pool *pool = elem->pool; in __rxe_drop_key_locked() local
283 struct rxe_pool *pool = elem->pool; in __rxe_drop_key() local
293 struct rxe_pool *pool = elem->pool; in __rxe_add_index_locked() local
304 struct rxe_pool *pool = elem->pool; in __rxe_add_index() local
317 struct rxe_pool *pool = elem->pool; in __rxe_drop_index_locked() local
348 elem->pool = pool; in rxe_alloc_locked()
373 elem->pool = pool; in rxe_alloc()
[all …]
/linux/include/net/
xdp_sock_drv.h
33 return pool->chunk_size; in xsk_pool_get_chunk_size()
38 return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool); in xsk_pool_get_rx_frame_size()
44 xp_set_rxq_info(pool, rxq); in xsk_pool_set_rxq_info()
50 xp_dma_unmap(pool, attrs); in xsk_pool_dma_unmap()
56 struct xdp_umem *umem = pool->umem; in xsk_pool_dma_map()
77 return xp_alloc(pool); in xsk_buff_alloc()
83 return xp_alloc_batch(pool, xdp, max); in xsk_buff_alloc_batch()
88 return xp_can_alloc(pool, count); in xsk_buff_can_alloc()
108 return xp_raw_get_dma(pool, addr); in xsk_buff_raw_get_dma()
113 return xp_raw_get_data(pool, addr); in xsk_buff_raw_get_data()
[all …]
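
These inline wrappers are the driver-facing side of AF_XDP zero-copy: once a pool is bound to a queue (xp_assign_dev() under net/xdp above), RX buffers come from the pool instead of the page allocator. A hedged sketch of filling one hardware RX slot; the actual descriptor write is device-specific and left as a comment:

#include <net/xdp_sock_drv.h>

static int xsk_rx_slot_fill(struct xsk_buff_pool *pool)
{
        struct xdp_buff *xdp;
        dma_addr_t dma;

        if (!xsk_buff_can_alloc(pool, 1))       /* hit 88 */
                return -ENOMEM;

        xdp = xsk_buff_alloc(pool);             /* hit 77: xp_alloc() */
        if (!xdp)
                return -ENOMEM;

        dma = xsk_buff_xdp_get_dma(xdp);
        /* ... program dma plus xsk_pool_get_rx_frame_size(pool) into the
         * RX ring; on completion pass xdp up, or xsk_buff_free() it ... */
        return 0;
}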
page_pool.h
143 return page_pool_alloc_pages(pool, gfp); in page_pool_dev_alloc_pages()
164 return pool->p.dma_dir; in page_pool_get_dma_dir()
216 page_pool_put_full_page(pool, page, true); in page_pool_recycle_direct()
281 if (unlikely(pool->p.nid != new_nid)) in page_pool_nid_changed()
282 page_pool_update_nid(pool, new_nid); in page_pool_nid_changed()
286 __acquires(&pool->ring.producer_lock) in page_pool_ring_lock()
289 spin_lock(&pool->ring.producer_lock); in page_pool_ring_lock()
291 spin_lock_bh(&pool->ring.producer_lock); in page_pool_ring_lock()
295 __releases(&pool->ring.producer_lock) in page_pool_ring_unlock()
298 spin_unlock(&pool->ring.producer_lock); in page_pool_ring_unlock()
[all …]
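
Two of these helpers encode usage rules rather than mechanism: page_pool_recycle_direct() (hit 216) is only safe from the pool's own NAPI context, where the lockless per-CPU cache cannot race, and page_pool_nid_changed() (hits 281–282) lets the poll loop re-home the pool after its IRQ migrates across NUMA nodes. A sketch under those assumptions; struct my_ring and both helpers' call sites are hypothetical:

#include <linux/topology.h>
#include <net/page_pool.h>

struct my_ring {                        /* hypothetical driver state */
        struct page_pool *pool;
};

static void rx_poll_prologue(struct my_ring *ring)
{
        /* Follow the CPU: re-home the pool if our IRQ moved nodes. */
        page_pool_nid_changed(ring->pool, numa_mem_id());
}

static void rx_drop_frame(struct my_ring *ring, struct page *page)
{
        /* Valid only in this pool's NAPI context (softirq, no producer
         * concurrency): the page goes straight into the lockless cache. */
        page_pool_recycle_direct(ring->pool, page);
}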
xsk_buff_pool.h
26 struct xsk_buff_pool *pool; member
113 xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom; in xp_init_xskb_addr()
155 if (!pool->dma_need_sync) in xp_dma_sync_for_device()
176 if (pool->dma_pages_cnt) { in xp_desc_crosses_non_contig_pg()
182 return addr + len > pool->addrs_cnt; in xp_desc_crosses_non_contig_pg()
187 return addr & pool->chunk_mask; in xp_aligned_extract_addr()
208 return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift; in xp_aligned_extract_idx()
213 if (xskb->pool->unaligned) in xp_release()
214 xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb; in xp_release()
221 offset += xskb->pool->headroom; in xp_get_handle()
[all …]
/linux/net/ceph/
msgpool.c
17 msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items, in msgpool_alloc()
23 msg->pool = pool; in msgpool_alloc()
34 msg->pool = NULL; in msgpool_free()
43 pool->type = type; in ceph_msgpool_init()
46 pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool); in ceph_msgpool_init()
47 if (!pool->pool) in ceph_msgpool_init()
49 pool->name = name; in ceph_msgpool_init()
56 mempool_destroy(pool->pool); in ceph_msgpool_destroy()
68 pool->front_len, pool->max_data_items); in ceph_msgpool_get()
76 msg = mempool_alloc(pool->pool, GFP_NOFS); in ceph_msgpool_get()
[all …]
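
ceph_msgpool is a thin layer: mempool_create() at hit 46 receives msgpool_alloc/msgpool_free plus the msgpool itself as pool_data, so each reserved element is a fully constructed ceph_msg. The same pattern generalizes to any type; a sketch with hypothetical callbacks that pass the element size through pool_data:

#include <linux/mempool.h>
#include <linux/slab.h>

static void *elem_alloc(gfp_t gfp, void *pool_data)
{
        return kmalloc((uintptr_t)pool_data, gfp);      /* size smuggled in pool_data */
}

static void elem_free(void *element, void *pool_data)
{
        kfree(element);
}

static mempool_t *sized_pool_create(size_t size, int min_nr)
{
        return mempool_create(min_nr, elem_alloc, elem_free,
                              (void *)(uintptr_t)size);
}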
/linux/drivers/gpu/drm/amd/display/dc/dce80/
dce80_resource.c
817 pool->base.mis[i] = NULL; in dce80_resource_destruct()
931 *pool = NULL; in dce80_destroy_resource_pool()
1041 if (!pool->base.irqs) in dce80_construct()
1135 if (!pool) in dce80_create_resource_pool()
1139 return &pool->base; in dce80_create_resource_pool()
1240 if (!pool->base.irqs) in dce81_construct()
1334 if (!pool) in dce81_create_resource_pool()
1338 return &pool->base; in dce81_create_resource_pool()
1435 if (!pool->base.irqs) in dce83_construct()
1529 if (!pool) in dce83_create_resource_pool()
[all …]
/linux/drivers/gpu/drm/amd/display/dc/dce60/
dce60_resource.c
812 pool->base.mis[i] = NULL; in dce60_resource_destruct()
926 *pool = NULL; in dce60_destroy_resource_pool()
1030 if (!pool->base.irqs) in dce60_construct()
1124 if (!pool) in dce60_create_resource_pool()
1128 return &pool->base; in dce60_create_resource_pool()
1227 if (!pool->base.irqs) in dce61_construct()
1321 if (!pool) in dce61_create_resource_pool()
1325 return &pool->base; in dce61_create_resource_pool()
1420 if (!pool->base.irqs) in dce64_construct()
1514 if (!pool) in dce64_create_resource_pool()
[all …]
/linux/drivers/staging/media/atomisp/pci/runtime/rmgr/src/
rmgr_vbuf.c
134 assert(pool); in ia_css_rmgr_init_vbuf()
135 if (!pool) in ia_css_rmgr_init_vbuf()
138 if (pool->recycle && pool->size) { in ia_css_rmgr_init_vbuf()
142 pool->size; in ia_css_rmgr_init_vbuf()
144 if (pool->handles) in ia_css_rmgr_init_vbuf()
150 pool->size = 0; in ia_css_rmgr_init_vbuf()
151 pool->handles = NULL; in ia_css_rmgr_init_vbuf()
166 if (!pool) { in ia_css_rmgr_uninit_vbuf()
170 if (pool->handles) { in ia_css_rmgr_uninit_vbuf()
203 assert(pool); in rmgr_push_handle()
[all …]
/linux/net/rds/
ib_rdma.c
275 struct rds_ib_mr_pool *pool = ibmr->pool; in rds_ib_teardown_mr() local
423 &pool->clean_list); in rds_ib_flush_mr_pool()
450 if (atomic_inc_return(&pool->item_count) <= pool->max_items) in rds_ib_try_reuse_ibmr()
487 struct rds_ib_mr_pool *pool = ibmr->pool; in rds_ib_free_mr() local
509 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned || in rds_ib_free_mr()
510 atomic_read(&pool->dirty_count) >= pool->max_items / 5) in rds_ib_free_mr()
640 kfree(pool); in rds_ib_destroy_mr_pool()
648 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in rds_ib_create_mr_pool()
649 if (!pool) in rds_ib_create_mr_pool()
671 pool->max_free_pinned = pool->max_items * pool->max_pages / 4; in rds_ib_create_mr_pool()
[all …]
/linux/drivers/net/ethernet/mellanox/mlxsw/
spectrum_cnt.c
127 pool = kzalloc(struct_size(pool, sub_pools, sub_pools_count), in mlxsw_sp_counter_pool_init()
129 if (!pool) in mlxsw_sp_counter_pool_init()
134 flex_array_size(pool, sub_pools, pool->sub_pools_count)); in mlxsw_sp_counter_pool_init()
139 &pool->pool_size); in mlxsw_sp_counter_pool_init()
145 pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL); in mlxsw_sp_counter_pool_init()
146 if (!pool->usage) { in mlxsw_sp_counter_pool_init()
158 bitmap_free(pool->usage); in mlxsw_sp_counter_pool_init()
163 kfree(pool); in mlxsw_sp_counter_pool_init()
173 WARN_ON(find_first_bit(pool->usage, pool->pool_size) != in mlxsw_sp_counter_pool_fini()
176 bitmap_free(pool->usage); in mlxsw_sp_counter_pool_fini()
[all …]
/linux/drivers/gpu/drm/i915/gt/
intel_gt_buffer_pool.c
28 if (n >= ARRAY_SIZE(pool->cache_list)) in bucket_for_size()
31 return &pool->cache_list[n]; in bucket_for_size()
77 spin_unlock_irq(&pool->lock); in pool_free_older_than()
93 struct intel_gt_buffer_pool *pool = in pool_free_work() local
96 if (pool_free_older_than(pool, HZ)) in pool_free_work()
105 struct intel_gt_buffer_pool *pool = node->pool; in pool_retire() local
123 schedule_delayed_work(&pool->work, in pool_retire()
154 node->pool = pool; in node_create()
199 spin_lock_irq(&pool->lock); in intel_gt_get_buffer_pool()
201 spin_unlock_irq(&pool->lock); in intel_gt_get_buffer_pool()
[all …]
/linux/kernel/
workqueue.c
897 pool = worker->pool; in wq_worker_sleeping()
969 struct worker_pool *pool = worker->pool; in worker_set_flags() local
994 struct worker_pool *pool = worker->pool; in worker_clr_flags() local
1298 if (pwq && pwq->pool == pool) { in try_to_grab_pending()
1351 struct worker_pool *pool = pwq->pool; in insert_work() local
1888 worker->pool = pool; in worker_attach_to_pool()
2524 struct worker_pool *pool = pwq->pool; in rescuer_thread() local
3032 if (unlikely(pwq->pool != pool)) in start_flush_work()
3709 struct worker_pool *pool = pwq->pool; in pwq_unbound_release_workfn() local
3805 pwq->pool = pool; in init_pwq()
[all …]
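
In workqueue.c the pool is a worker_pool: every pool_workqueue bridges one workqueue to one such pool of kworkers (hits 1298 and 3805), and rescuer_thread() keeps pools making progress under memory pressure. None of that is visible at the API surface; a minimal sketch:

#include <linux/printk.h>
#include <linux/workqueue.h>

static void demo_fn(struct work_struct *work)
{
        pr_info("running on a kworker from some worker_pool\n");
}
static DECLARE_WORK(demo_work, demo_fn);

static int demo_submit(void)
{
        /* WQ_UNBOUND: served by per-node unbound worker_pools instead
         * of the per-CPU ones. */
        struct workqueue_struct *wq = alloc_workqueue("demo", WQ_UNBOUND, 0);

        if (!wq)
                return -ENOMEM;
        queue_work(wq, &demo_work);
        flush_workqueue(wq);            /* wait for demo_fn to complete */
        destroy_workqueue(wq);
        return 0;
}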
/linux/lib/
genalloc.c
158 if (pool != NULL) { in gen_pool_create()
166 return pool; in gen_pool_create()
260 kfree(pool); in gen_pool_destroy()
343 return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data); in gen_pool_dma_alloc()
367 if (!pool) in gen_pool_dma_alloc_algo()
421 return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data); in gen_pool_dma_zalloc()
627 if (!pool->algo) in gen_pool_set_algo()
850 if (!pool) in devm_gen_pool_create()
853 *ptr = pool; in devm_gen_pool_create()
857 return pool; in devm_gen_pool_create()
[all …]
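
gen_pool manages a caller-provided address range (SRAM, on-chip buffers, DMA windows) instead of pages from the buddy allocator, which is why the caller must add memory to the pool before allocating from it. A minimal sketch; the mapping, granule size, and lengths are assumptions:

#include <linux/genalloc.h>
#include <linux/log2.h>

static int sram_pool_demo(phys_addr_t phys, void *virt, size_t size)
{
        struct gen_pool *pool;
        unsigned long chunk;

        pool = gen_pool_create(ilog2(32), -1);  /* 32-byte granules, any node */
        if (!pool)
                return -ENOMEM;

        /* Hand the region to the pool: kernel VA, backing PA, length. */
        if (gen_pool_add_virt(pool, (unsigned long)virt, phys, size, -1)) {
                gen_pool_destroy(pool);
                return -ENOMEM;
        }

        chunk = gen_pool_alloc(pool, 128);      /* returns 0 on failure */
        if (chunk)
                gen_pool_free(pool, chunk, 128);

        gen_pool_destroy(pool);                 /* BUGs if allocations remain */
        return 0;
}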
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_icm_pool.c
74 icm_mr->dmn = pool->dmn; in dr_icm_pool_mr_create()
209 buddy->pool = pool; in dr_icm_buddy_create()
266 mlx5dr_err(pool->dmn, in dr_icm_chunk_create()
341 mlx5dr_err(pool->dmn, in dr_icm_handle_buddies_get_mem()
351 mlx5dr_err(pool->dmn, in dr_icm_handle_buddies_get_mem()
404 struct mlx5dr_icm_pool *pool = buddy->pool; in mlx5dr_icm_free_chunk() local
429 pool = kvzalloc(sizeof(*pool), GFP_KERNEL); in mlx5dr_icm_pool_create()
430 if (!pool) in mlx5dr_icm_pool_create()
433 pool->dmn = dmn; in mlx5dr_icm_pool_create()
441 return pool; in mlx5dr_icm_pool_create()
[all …]
/linux/arch/arm64/kvm/hyp/nvhe/
page_alloc.c
45 if (addr < pool->range_start || addr >= pool->range_end) in __find_buddy_nocheck()
169 __hyp_attach_page(pool, p); in __hyp_put_page()
183 hyp_spin_lock(&pool->lock); in hyp_put_page()
184 __hyp_put_page(pool, p); in hyp_put_page()
185 hyp_spin_unlock(&pool->lock); in hyp_put_page()
192 hyp_spin_lock(&pool->lock); in hyp_get_page()
194 hyp_spin_unlock(&pool->lock); in hyp_get_page()
216 hyp_spin_lock(&pool->lock); in hyp_alloc_pages()
219 while (i < pool->max_order && list_empty(&pool->free_area[i])) in hyp_alloc_pages()
221 if (i >= pool->max_order) { in hyp_alloc_pages()
[all …]

Completed in 162 milliseconds
