
Searched refs:workers (Results 1 - 17 of 17) sorted by relevance

/kernel/linux/linux-6.6/tools/testing/selftests/bpf/
test_progs.c
420 if (verbose() && !env.workers) in test__end_subtest()
696 { "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
697 "Number of workers to run in parallel, default to number of cpus." },
830 env->workers = atoi(arg); in parse_arg()
831 if (!env->workers) { in parse_arg()
836 env->workers = get_nprocs(); in parse_arg()
1008 for (i = 0; i < env.workers; i++) in sigint_handler()
1370 dispatcher_threads = calloc(sizeof(pthread_t), env.workers); in server_main()
1371 data = calloc(sizeof(struct dispatch_data), env.workers); in server_main()
1373 env.worker_current_test = calloc(sizeof(int), env.workers); in server_main()
[all...]
test_progs.h
125 int workers; /* number of worker process */ member
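
The test_progs.c hits above show the selftest runner accepting an optional worker count and falling back to the number of CPUs when none is given. A minimal standalone sketch of that parsing pattern follows; it is not the selftest code itself, and parse_workers() is an illustrative name:

/*
 * Hedged sketch (not the selftest code): parse an optional worker-count
 * argument and fall back to the number of online CPUs, mirroring the
 * atoi()/get_nprocs() pattern in the test_progs.c hits above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/sysinfo.h>

static int parse_workers(const char *arg)
{
	int workers = arg ? atoi(arg) : 0;

	if (!workers)			/* missing or "0": one worker per CPU */
		workers = get_nprocs();
	return workers;
}

int main(int argc, char **argv)
{
	int workers = parse_workers(argc > 1 ? argv[1] : NULL);

	printf("running with %d workers\n", workers);
	return 0;
}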
/kernel/linux/linux-5.10/drivers/md/
raid5.h
516 struct r5worker *workers; member
raid5.c
196 group->workers[0].working = true; in raid5_wakeup_stripe_thread()
198 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); in raid5_wakeup_stripe_thread()
201 /* wakeup more workers */ in raid5_wakeup_stripe_thread()
203 if (group->workers[i].working == false) { in raid5_wakeup_stripe_thread()
204 group->workers[i].working = true; in raid5_wakeup_stripe_thread()
206 &group->workers[i].work); in raid5_wakeup_stripe_thread()
6922 kfree(old_groups[0].workers); in raid5_store_group_thread_cnt()
6959 struct r5worker *workers; in alloc_thread_groups() local
6968 workers = kcalloc(size, *group_cnt, GFP_NOIO); in alloc_thread_groups()
6971 if (!*worker_groups || !workers) { in alloc_thread_groups()
[all...]
/kernel/linux/linux-6.6/drivers/md/
raid5.h
518 struct r5worker *workers; member
raid5.c
204 group->workers[0].working = true;
206 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);
209 /* wakeup more workers */
211 if (group->workers[i].working == false) {
212 group->workers[i].working = true;
214 &group->workers[i].work);
7257 kfree(old_groups[0].workers); in raid5_store_group_thread_cnt()
7294 struct r5worker *workers; in alloc_thread_groups() local
7303 workers = kcalloc(size, *group_cnt, GFP_NOIO); in alloc_thread_groups()
7306 if (!*worker_groups || !workers) { in alloc_thread_groups()
[all...]
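
Both raid5.c versions above follow the same pattern: kcalloc() a zero-initialised array of r5worker structures for each thread group, then on wakeup mark worker 0 as working and scan for further idle workers to queue. A hedged userspace sketch of that allocate-and-wake-idle pattern, with illustrative names (sketch_worker, wake_one_worker) rather than the kernel's:

/*
 * Sketch only: a zero-initialised worker array plus a scan that wakes
 * the first worker not already marked working, loosely modelled on
 * alloc_thread_groups() and raid5_wakeup_stripe_thread() above.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_worker {
	bool working;		/* stands in for r5worker.working */
};

static struct sketch_worker *alloc_workers(size_t cnt)
{
	/* kcalloc(size, cnt, GFP_NOIO) in the kernel; plain calloc() here */
	return calloc(cnt, sizeof(struct sketch_worker));
}

static void wake_one_worker(struct sketch_worker *workers, size_t cnt)
{
	for (size_t i = 0; i < cnt; i++) {
		if (!workers[i].working) {
			workers[i].working = true;	/* queue_work_on() would go here */
			printf("woke worker %zu\n", i);
			return;
		}
	}
}

int main(void)
{
	struct sketch_worker *workers = alloc_workers(4);

	if (!workers)
		return 1;
	wake_one_worker(workers, 4);
	wake_one_worker(workers, 4);
	free(workers);
	return 0;
}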
/kernel/linux/linux-5.10/kernel/
workqueue.c
62 * While associated (!DISASSOCIATED), all workers are bound to the
66 * While DISASSOCIATED, the cpu may be offline and all workers have
75 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
103 * Rescue workers are used only on emergencies and shared by
159 int nr_workers; /* L: total number of workers */
160 int nr_idle; /* L: currently idle workers */
162 struct list_head idle_list; /* X: list of idle workers */
164 struct timer_list mayday_timer; /* L: SOS timer for workers */
166 /* a workers is either on busy_hash or idle_list, or the manager */
168 /* L: hash of busy workers */
171 struct list_head workers; /* A: attached workers */ member
[all...]
/kernel/linux/linux-6.6/fs/btrfs/
fs.h
543 struct btrfs_workqueue *workers; member
555 * Fixup workers take dirty pages that didn't properly go through the
bio.c
641 btrfs_queue_work(fs_info->workers, &async->work); in btrfs_wq_submit_bio()
disk-io.c
1754 /* helper to cleanup workers */
1759 btrfs_destroy_workqueue(fs_info->workers); in btrfs_stop_all_workers()
1952 fs_info->workers = in btrfs_init_workqueues()
1991 if (!(fs_info->workers && in btrfs_init_workqueues()
4354 * There might be existing delayed inode workers still running in close_ctree()
4404 * submit after we stopping all workers. in close_ctree()
super.c
1637 btrfs_workqueue_set_max(fs_info->workers, new_pool_size); in btrfs_resize_thread_pool()
/kernel/linux/linux-5.10/drivers/block/mtip32xx/
mtip32xx.c
745 int do_irq_enable = 1, i, workers; in mtip_handle_irq() local
766 for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS; in mtip_handle_irq()
771 workers++; in mtip_handle_irq()
774 atomic_set(&dd->irq_workers_active, workers); in mtip_handle_irq()
775 if (workers) { in mtip_handle_irq()
2681 "Completion workers still active!"); in mtip_service_thread()
4202 /* Spin until workers are done */ in mtip_pci_remove()
4214 "Completion workers still active!\n"); in mtip_pci_remove()
/kernel/linux/linux-6.6/drivers/block/mtip32xx/
mtip32xx.c
733 int do_irq_enable = 1, i, workers; in mtip_handle_irq() local
754 for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS; in mtip_handle_irq()
759 workers++; in mtip_handle_irq()
762 atomic_set(&dd->irq_workers_active, workers); in mtip_handle_irq()
763 if (workers) { in mtip_handle_irq()
2525 "Completion workers still active!"); in mtip_service_thread()
3878 /* Spin until workers are done */ in mtip_pci_remove()
3887 "Completion workers still active!\n"); in mtip_pci_remove()
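
The mtip_handle_irq() hits above count how many slot groups have completions pending and publish that count with atomic_set(&dd->irq_workers_active, workers); mtip_pci_remove() later spins until those completion workers are done. A rough userspace sketch of that counting pattern using C11 atomics (MAX_GROUPS and the pending[] array are illustrative, not the driver's types):

/*
 * Sketch only: publish the number of busy groups in an atomic counter
 * and let each completion worker decrement it, loosely modelled on the
 * irq_workers_active handling quoted above.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_GROUPS 8

static atomic_int irq_workers_active;

static void handle_irq(const int pending[MAX_GROUPS])
{
	int workers = 0;

	for (int i = 0; i < MAX_GROUPS; i++)
		if (pending[i])
			workers++;	/* one completion worker per busy group */

	atomic_store(&irq_workers_active, workers);
}

static void completion_worker_done(void)
{
	atomic_fetch_sub(&irq_workers_active, 1);
}

int main(void)
{
	int pending[MAX_GROUPS] = { 1, 0, 1, 0, 0, 0, 0, 0 };

	handle_irq(pending);
	completion_worker_done();
	printf("workers still active: %d\n", atomic_load(&irq_workers_active));
	return 0;
}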
/kernel/linux/linux-6.6/kernel/
workqueue.c
64 * While associated (!DISASSOCIATED), all workers are bound to the
68 * While DISASSOCIATED, the cpu may be offline and all workers have
77 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
105 * Rescue workers are used only on emergencies and shared by
179 int nr_workers; /* L: total number of workers */
180 int nr_idle; /* L: currently idle workers */
182 struct list_head idle_list; /* L: list of idle workers */
186 struct timer_list mayday_timer; /* L: SOS timer for workers */
188 /* a workers is either on busy_hash or idle_list, or the manager */
190 /* L: hash of busy workers */
193 struct list_head workers; /* A: attached workers */ member
[all...]
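
The worker_pool comments quoted above describe the bookkeeping a pool keeps: a total worker count, an idle count, an idle_list, a hash of busy workers, and a list of all attached workers. A simplified userspace struct sketching just the counted and listed fields; this is not the kernel's struct worker_pool, and sketch_list/sketch_pool are illustrative names:

/*
 * Sketch only: a stripped-down pool with the counters and list heads
 * named in the workqueue.c hits above.
 */
#include <stdio.h>

struct sketch_list { struct sketch_list *next, *prev; };

struct sketch_pool {
	int nr_workers;			/* total number of workers */
	int nr_idle;			/* currently idle workers */
	struct sketch_list idle_list;	/* idle workers wait here */
	struct sketch_list workers;	/* every attached worker */
};

int main(void)
{
	struct sketch_pool pool = { .nr_workers = 4, .nr_idle = 4 };

	pool.idle_list.next = pool.idle_list.prev = &pool.idle_list;
	pool.workers.next = pool.workers.prev = &pool.workers;
	printf("%d workers, %d idle\n", pool.nr_workers, pool.nr_idle);
	return 0;
}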
/kernel/linux/linux-5.10/fs/btrfs/
disk-io.c
774 btrfs_queue_work(fs_info->workers, &async->work); in btrfs_wq_submit_bio()
1999 /* helper to cleanup workers */
2004 btrfs_destroy_workqueue(fs_info->workers); in btrfs_stop_all_workers()
2175 fs_info->workers = in btrfs_init_workqueues()
2227 if (!(fs_info->workers && fs_info->delalloc_workers && in btrfs_init_workqueues()
4173 * There might be existing delayed inode workers still running in close_ctree()
4224 * submit after we stopping all workers. in close_ctree()
super.c
1804 btrfs_workqueue_set_max(fs_info->workers, new_pool_size); in btrfs_resize_thread_pool()
ctree.h
766 struct btrfs_workqueue *workers; member
780 * fixup workers take dirty pages that didn't properly go through
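
Across both btrfs trees, fs_info->workers is a btrfs_workqueue that is created in btrfs_init_workqueues(), fed by btrfs_queue_work(), resized by btrfs_workqueue_set_max() on thread-pool remount, and torn down in btrfs_stop_all_workers(). A hedged sketch of that create/queue/resize/destroy lifecycle around a toy queue type; toy_wq and its helpers are illustrative stand-ins, not btrfs API:

/*
 * Sketch only: a toy workqueue facade following the lifecycle of
 * fs_info->workers shown in the hits above.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_wq {
	const char *name;
	int max_active;		/* stands in for the btrfs_workqueue limit */
};

static struct toy_wq *toy_wq_create(const char *name, int max_active)
{
	struct toy_wq *wq = calloc(1, sizeof(*wq));

	if (wq) {
		wq->name = name;
		wq->max_active = max_active;
	}
	return wq;
}

static void toy_wq_queue(struct toy_wq *wq, void (*fn)(void))
{
	(void)wq;
	fn();	/* a real workqueue would defer this to a worker thread */
}

static void toy_wq_set_max(struct toy_wq *wq, int max_active)
{
	wq->max_active = max_active;	/* btrfs_workqueue_set_max() analogue */
}

static void do_async_work(void)
{
	puts("async work ran");
}

int main(void)
{
	struct toy_wq *workers = toy_wq_create("workers", 4);

	if (!workers)
		return 1;
	toy_wq_queue(workers, do_async_work);	/* btrfs_queue_work() analogue */
	toy_wq_set_max(workers, 8);		/* resize, as super.c does on remount */
	free(workers);				/* btrfs_destroy_workqueue() analogue */
	return 0;
}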

Completed in 61 milliseconds