Searched refs:workers (Results 1 – 13 of 13) sorted by relevance
/linux/tools/testing/selftests/bpf/
test_progs.c
   687  env->workers = atoi(arg);                              in parse_arg()
   688  if (!env->workers) {                                   in parse_arg()
   693  env->workers = get_nprocs();                           in parse_arg()
   838  for (i = 0; i < env.workers; i++)                      in sigint_handler()
  1111  for (i = 0; i < env.workers; i++) {                    in server_main()
  1124  for (i = 0; i < env.workers; i++) {                    in server_main()
  1207  for (i = 0; i < env.workers; i++) {                    in server_main()
  1388  env.workers = 0;                                       in main()
  1392  if (env.workers) {                                     in main()
  1394  env.worker_socks = calloc(sizeof(int), env.workers);   in main()
  [all …]

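The test_progs.c hits above show the selftest runner's pattern: parse an optional worker count, fall back to get_nprocs() when it is zero or missing, then allocate per-worker state. A minimal userspace sketch of that pattern follows; parse_workers() is a hypothetical helper, not the selftest code itself.

    /* Sketch of the worker-count parsing pattern: take an optional
     * numeric argument and default to the number of online CPUs.
     * parse_workers() is illustrative, not from test_progs.c. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/sysinfo.h>   /* get_nprocs() */

    static int parse_workers(const char *arg)
    {
        int workers = arg ? atoi(arg) : 0;

        if (workers <= 0)          /* no or invalid count: use all CPUs */
            workers = get_nprocs();
        return workers;
    }

    int main(int argc, char **argv)
    {
        int workers = parse_workers(argc > 1 ? argv[1] : NULL);
        int *worker_socks = calloc(workers, sizeof(int));

        if (!worker_socks)
            return 1;
        printf("spawning %d workers\n", workers);
        free(worker_socks);
        return 0;
    }
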
test_progs.h
    87  int workers; /* number of worker process */  member

/linux/Documentation/core-api/
workqueue.rst
    34  number of workers as the number of CPUs. The kernel grew a lot of MT
   118  number of the currently runnable workers. Generally, work items are
   122  workers on the CPU, the worker-pool doesn't start execution of a new
   125  are pending work items. This allows using a minimal number of workers
   128  Keeping idle workers around doesn't cost other than the memory space
   140  Forward progress guarantee relies on that workers can be created when
   142  through the use of rescue workers. All work items which might be used
   169  worker-pools which host workers which are not bound to any
   178  of mostly unused workers across different CPUs as the issuer
   200  each other. Each maintains its separate pool of workers and
   [all …]

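The workqueue.rst lines above describe concurrency-managed worker-pools and the unbound pools used when per-CPU affinity is not wanted. As a rough illustration of the API that document covers, here is a minimal sketch (not taken from the tree; the "example" names are made up) that allocates an unbound workqueue, queues one work item onto it, and tears it down.

    /* Minimal workqueue usage sketch: one unbound queue, one work item. */
    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;

    static void example_fn(struct work_struct *work)
    {
        pr_info("running on one of the pool's workers\n");
    }

    static DECLARE_WORK(example_work, example_fn);

    static int __init example_init(void)
    {
        /* WQ_UNBOUND: serviced by the unbound worker-pools mentioned above */
        example_wq = alloc_workqueue("example", WQ_UNBOUND, 0);
        if (!example_wq)
            return -ENOMEM;
        queue_work(example_wq, &example_work);
        return 0;
    }

    static void __exit example_exit(void)
    {
        destroy_workqueue(example_wq);   /* drains pending work first */
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
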
/linux/drivers/md/
raid5.h
   516  struct r5worker *workers;  member

raid5.c
   196  group->workers[0].working = true;                in raid5_wakeup_stripe_thread()
   203  if (group->workers[i].working == false) {        in raid5_wakeup_stripe_thread()
   204  group->workers[i].working = true;                in raid5_wakeup_stripe_thread()
   206  &group->workers[i].work);                        in raid5_wakeup_stripe_thread()
  6928  kfree(old_groups[0].workers);                    in raid5_store_group_thread_cnt()
  6965  struct r5worker *workers;                        in alloc_thread_groups()  local
  6974  workers = kcalloc(size, *group_cnt, GFP_NOIO);   in alloc_thread_groups()
  6977  if (!*worker_groups || !workers) {               in alloc_thread_groups()
  6978  kfree(workers);                                  in alloc_thread_groups()
  6990  group->workers = workers + i * cnt;              in alloc_thread_groups()
  [all …]

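The raid5.c hits show two related patterns: alloc_thread_groups() makes one flat kcalloc'd array of r5worker structs and carves it into per-group slices (group->workers = workers + i * cnt), and raid5_wakeup_stripe_thread() picks the first idle worker and marks it working. A userspace sketch of both patterns, with illustrative struct and function names, follows; note that, as the kfree(old_groups[0].workers) hit suggests, the flat array is later freed through the first group's pointer.

    /* Sketch: one flat worker array shared out to groups, plus
     * first-idle-worker selection.  Names are illustrative only. */
    #include <stdbool.h>
    #include <stdlib.h>

    struct worker { bool working; };
    struct worker_group { struct worker *workers; };

    static struct worker_group *alloc_groups(int group_cnt, int per_group)
    {
        struct worker_group *groups = calloc(group_cnt, sizeof(*groups));
        /* one allocation backs every group's slice */
        struct worker *workers = calloc((size_t)group_cnt * per_group,
                                        sizeof(*workers));
        int i;

        if (!groups || !workers) {
            free(groups);
            free(workers);
            return NULL;
        }
        for (i = 0; i < group_cnt; i++)
            groups[i].workers = workers + i * per_group;
        return groups;
    }

    /* Mark the first idle worker busy; returns it, or NULL if all busy. */
    static struct worker *wake_idle_worker(struct worker_group *group,
                                           int per_group)
    {
        int i;

        for (i = 0; i < per_group; i++) {
            if (!group->workers[i].working) {
                group->workers[i].working = true;
                return &group->workers[i];
            }
        }
        return NULL;
    }
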
/linux/net/l2tp/
Kconfig
    23  with home workers to connect to their offices.

/linux/kernel/
workqueue.c
   171  struct list_head workers;  /* A: attached workers */  member
   427  list_for_each_entry((worker), &(pool)->workers, node) \
  1887  list_add_tail(&worker->node, &pool->workers);   in worker_attach_to_pool()
  1912  if (list_empty(&pool->workers))                 in worker_detach_from_pool()
  3470  INIT_LIST_HEAD(&pool->workers);                 in init_worker_pool()
  3605  if (!list_empty(&pool->workers))                in put_unbound_pool()

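These workqueue.c hits show the bookkeeping behind the pools: each pool keeps its attached workers on a list_head, attach adds the worker's node with list_add_tail(), detach removes it and checks list_empty(). The sketch below mirrors that shape with the kernel list API; the structs are trimmed down, and a plain mutex stands in for whatever locking the real code uses around attach/detach.

    /* Sketch of pool->workers bookkeeping with the kernel list API. */
    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/printk.h>

    struct pool {
        struct list_head workers;   /* all workers attached to this pool */
        struct mutex attach_mutex;
    };

    struct worker {
        struct list_head node;      /* linked on pool->workers */
    };

    static void pool_init(struct pool *pool)
    {
        INIT_LIST_HEAD(&pool->workers);
        mutex_init(&pool->attach_mutex);
    }

    static void worker_attach(struct pool *pool, struct worker *worker)
    {
        mutex_lock(&pool->attach_mutex);
        list_add_tail(&worker->node, &pool->workers);
        mutex_unlock(&pool->attach_mutex);
    }

    static void worker_detach(struct pool *pool, struct worker *worker)
    {
        mutex_lock(&pool->attach_mutex);
        list_del(&worker->node);
        if (list_empty(&pool->workers))
            pr_debug("last worker detached from pool\n");
        mutex_unlock(&pool->attach_mutex);
    }
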
/linux/drivers/block/mtip32xx/
mtip32xx.c
   745  int do_irq_enable = 1, i, workers;                     in mtip_handle_irq()  local
   766  for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;     in mtip_handle_irq()
   771  workers++;                                             in mtip_handle_irq()
   774  atomic_set(&dd->irq_workers_active, workers);          in mtip_handle_irq()
   775  if (workers) {                                         in mtip_handle_irq()

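Here the IRQ handler counts how many slot groups have completions pending and publishes that count atomically before kicking any per-group workers. A compressed sketch of that counting step follows; MAX_SLOT_GROUPS and the completed[] array are stand-ins for the driver's real state, not its actual fields.

    /* Sketch: count slot groups with pending completions, publish the
     * count, then the caller would dispatch one worker per active group. */
    #include <linux/atomic.h>
    #include <linux/types.h>

    #define MAX_SLOT_GROUPS 8

    struct device_data {
        u32 completed[MAX_SLOT_GROUPS];  /* per-group completion bitmaps */
        atomic_t irq_workers_active;
    };

    static int count_active_groups(struct device_data *dd)
    {
        int i, workers = 0;

        for (i = 0; i < MAX_SLOT_GROUPS; i++)
            if (dd->completed[i])
                workers++;

        /* workers read this count back as they finish their group */
        atomic_set(&dd->irq_workers_active, workers);
        return workers;
    }
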
/linux/Documentation/dev-tools/
kcov.rst
   223  some kernel interface (e.g. vhost workers); as well as from soft

/linux/fs/btrfs/
disk-io.c
   872  btrfs_queue_work(fs_info->workers, &async->work);        in btrfs_wq_submit_bio()
  2130  btrfs_destroy_workqueue(fs_info->workers);               in btrfs_stop_all_workers()
  2300  fs_info->workers =                                       in btrfs_init_workqueues()
  2352  if (!(fs_info->workers && fs_info->delalloc_workers &&   in btrfs_init_workqueues()

super.c
  1835  btrfs_workqueue_set_max(fs_info->workers, new_pool_size);   in btrfs_resize_thread_pool()

ctree.h
   807  struct btrfs_workqueue *workers;  member

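Taken together, the btrfs hits trace one thread-pool lifecycle: the queue is created in btrfs_init_workqueues(), work is queued through btrfs_queue_work(), the active-worker limit is resized on remount (btrfs_resize_thread_pool()), and everything is torn down in btrfs_stop_all_workers(). The sketch below shows that lifecycle with the generic workqueue API as an analogue; btrfs actually goes through its own btrfs_workqueue wrappers seen above, and the ctx_* names here are made up.

    /* Lifecycle sketch: create, resize, destroy a worker pool. */
    #include <linux/errno.h>
    #include <linux/workqueue.h>

    struct fs_ctx {
        struct workqueue_struct *workers;
    };

    static int ctx_init_workqueues(struct fs_ctx *ctx, int thread_pool_size)
    {
        ctx->workers = alloc_workqueue("ctx-workers", WQ_UNBOUND,
                                       thread_pool_size);
        return ctx->workers ? 0 : -ENOMEM;
    }

    static void ctx_resize_thread_pool(struct fs_ctx *ctx, int new_pool_size)
    {
        /* analogue of btrfs_workqueue_set_max() on remount */
        workqueue_set_max_active(ctx->workers, new_pool_size);
    }

    static void ctx_stop_all_workers(struct fs_ctx *ctx)
    {
        destroy_workqueue(ctx->workers);
    }
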
/linux/Documentation/admin-guide/
kernel-per-CPU-kthreads.rst
   262  d. As of v3.18, Christoph Lameter's on-demand vmstat workers

Completed in 69 milliseconds