/third_party/node/test/sequential/

test-inspector-port-cluster.js
   23  workers: [{ expectedPort: 9230 }]
   28  workers: [
   40  workers: [
   51  workers: [{ expectedPort: port + 1 }]
   58  workers: [{ expectedPort: port + 1 }]
   65  workers: [{ expectedPort: port + 1, expectedHost: '0.0.0.0' }]
   72  workers: [{ expectedPort: port + 1, expectedHost: '127.0.0.1' }]
   80  workers: [{ expectedPort: port + 1, expectedHost: '::' }]
   87  workers: [{ expectedPort: port + 1, expectedHost: '::1' }]
   99  workers [all...]

/third_party/node/benchmark/cluster/

echo.js
    7  workers: [1],
   16  workers,
   21  const expectedPerBroadcast = sendsPerBroadcast * workers;
   40  for (let i = 0; i < workers; ++i)
   44  if (++readies === workers) {
   53  for (const id in cluster.workers)
   54  cluster.workers[id].disconnect();
   57  for (const id in cluster.workers) {
   58  const worker = cluster.workers[id];

/third_party/ltp/testcases/kernel/io/direct_io/

dma_thread_diotest.c
  134  static int workers;  [variable]
  231  workers = sysconf(_SC_NPROCESSORS_ONLN);  [in main()]
  232  if (workers > MAX_WORKERS)  [in main()]
  233  workers = MAX_WORKERS;  [in main()]
  260  for (j = 0; j < workers; j++) {  [in dma_thread_diotest_verify()]
  270  for (j = 0; j < workers; j++) {  [in dma_thread_diotest_verify()]
  277  worker[workers - 1].length =  [in dma_thread_diotest_verify()]
  278  READSIZE - PAGE_SIZE * (workers - 1);  [in dma_thread_diotest_verify()]
  288  for (j = 0; j < workers; j++) {  [in dma_thread_diotest_verify()]
  298  for (j = 0; j < workers;  [in dma_thread_diotest_verify()] [all...]
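
These hits show a sizing pattern worth noting: the worker count is taken from the online CPU count, clamped to a maximum, the buffer is split page by page, and the last worker absorbs the remainder. A minimal C++ sketch of that pattern follows; it is an illustration only, and MAX_WORKERS, READSIZE, and PAGE_SIZE_ are stand-ins for the test's constants.

```cpp
#include <unistd.h>
#include <algorithm>
#include <cstddef>
#include <vector>

constexpr int MAX_WORKERS = 256;     // assumed cap, standing in for the test's
constexpr size_t PAGE_SIZE_ = 4096;  // assumed page size
constexpr size_t READSIZE = 64 * PAGE_SIZE_;

struct Worker { size_t offset, length; };

std::vector<Worker> plan_workers() {
    // Size the pool to the online CPUs, clamped to the maximum.
    int workers = static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
    workers = std::min(workers, MAX_WORKERS);

    // Even page-sized split across all workers...
    std::vector<Worker> plan(workers);
    for (int j = 0; j < workers; j++) {
        plan[j].offset = PAGE_SIZE_ * j;
        plan[j].length = PAGE_SIZE_;
    }
    // ...except the last worker, which takes everything that remains.
    plan[workers - 1].length = READSIZE - PAGE_SIZE_ * (workers - 1);
    return plan;
}
```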

/third_party/node/lib/internal/cluster/

shared_handle.js
   11  this.workers = new SafeMap();
   28  assert(!this.workers.has(worker.id));
   29  this.workers.set(worker.id, worker);
   34  if (!this.workers.has(worker.id))
   37  this.workers.delete(worker.id);
   39  if (this.workers.size !== 0)

primary.js
   39  cluster.workers = {};
   99  for (const worker of ObjectValues(cluster.workers)) {
  144  delete cluster.workers[worker.id];
  146  if (ObjectKeys(cluster.workers).length === 0) {
  176  * Remove the worker from the workers list only
  200  * Remove the worker from the workers list only
  215  cluster.workers[worker.id] = worker;
  224  const workers = ObjectKeys(cluster.workers);
  226  if (workers [all...]
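
Both cluster files keep a registry of live workers keyed by id: insertion asserts the id is new, and removal checks whether the registry just became empty so the shared resource can be torn down. A minimal C++ sketch of that bookkeeping, with a hypothetical Worker type and on_all_gone() hook:

```cpp
#include <cassert>
#include <map>

struct Worker { int id; };

class WorkerRegistry {
public:
    void add(Worker* w) {
        assert(workers_.find(w->id) == workers_.end());  // ids must be unique
        workers_[w->id] = w;
    }
    void remove(const Worker* w) {
        if (workers_.erase(w->id) == 0)
            return;          // not registered; nothing to do
        if (workers_.empty())
            on_all_gone();   // e.g. close a shared handle, as in shared_handle.js
    }
private:
    void on_all_gone() {}
    std::map<int, Worker*> workers_;
};
```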

/third_party/ltp/testcases/kernel/fs/read_all/

read_all.c
   90  static struct worker *workers;  [variable]
  213  tst_atomic_store(atomic_timestamp(), &workers[worker].last_seen);  [in worker_heartbeat()]
  218  struct worker *const w = workers + worker;  [in worker_elapsed()]
  233  const pid_t pid = workers[worker].pid;  [in read_test()]
  303  struct worker *const self = workers + worker;  [in worker_run()]
  333  struct worker *wa = workers;  [in spawn_workers()]
  335  memset(workers, 0, worker_count * sizeof(*workers));  [in spawn_workers()]
  349  struct worker *const w = workers + worker;  [in restart_worker()]
  432  struct worker *const w = workers  [in try_push_work()] [all...]
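
The worker_heartbeat/worker_elapsed/restart_worker hits trace a watchdog pattern: each worker periodically stores a timestamp, and a supervisor measures how long since the worker was last seen so a hung one can be restarted. A sketch of the idea with std::atomic; this illustrates the pattern, not LTP's tst_atomic_* API, and the 10-second threshold is assumed.

```cpp
#include <atomic>
#include <chrono>

struct WorkerState {
    std::atomic<long long> last_seen_ms{0};
};

static long long now_ms() {
    using namespace std::chrono;
    return duration_cast<milliseconds>(
        steady_clock::now().time_since_epoch()).count();
}

// Worker side: record "I'm alive" on every unit of progress.
void worker_heartbeat(WorkerState& w) {
    w.last_seen_ms.store(now_ms(), std::memory_order_relaxed);
}

// Supervisor side: how long has this worker been silent?
long long worker_elapsed_ms(const WorkerState& w) {
    return now_ms() - w.last_seen_ms.load(std::memory_order_relaxed);
}

bool should_restart(const WorkerState& w) {
    return worker_elapsed_ms(w) > 10'000;  // assumed stall threshold
}
```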

/third_party/ltp/testcases/kernel/fs/fs_fill/

fs_fill.c
   26  static struct worker *workers;  [variable]
   75  workers[i].pattern = n;  [in testrun()]
   76  SAFE_PTHREAD_CREATE(&threads[i], NULL, worker, &workers[i]);  [in testrun()]
  101  workers = SAFE_MALLOC(sizeof(struct worker) * nthreads);  [in setup()]
  110  snprintf(workers[i].dir, sizeof(workers[i].dir),  [in setup()]
  112  SAFE_MKDIR(workers[i].dir, 0700);  [in setup()]
  120  free(workers);  [in cleanup()]

/third_party/libabigail/tests/

test-read-write.cc
   20  #include "abg-workers.h"
   40  using abigail::workers::queue;
   41  using abigail::workers::task;
   42  using abigail::workers::task_sptr;
   43  using abigail::workers::get_number_of_threads;
  250  struct test_task : public abigail::workers::task
  352  using abigail::workers::queue;  [in main()]
  353  using abigail::workers::task;  [in main()]
  354  using abigail::workers::task_sptr;  [in main()]
  355  using abigail::workers  [in main()] [all...]

test-types-stability.cc
   33  #include "abg-workers.h"
   61  struct test_task : public abigail::workers::task
  112  using abigail::workers::queue;  [in main()]
  113  using abigail::workers::task;  [in main()]
  114  using abigail::workers::task_sptr;  [in main()]
  115  using abigail::workers::get_number_of_threads;  [in main()]

test-diff-filter.cc
   26  #include "abg-workers.h"
  833  struct test_task : public abigail::workers::task
  926  using abigail::workers::queue;  [in main()]
  927  using abigail::workers::task;  [in main()]
  928  using abigail::workers::task_sptr;  [in main()]
  929  using abigail::workers::get_number_of_threads;  [in main()]

/third_party/libabigail/src/

abg-workers.cc
   26  #include "abg-workers.h"
   34  namespace workers  [namespace]
  120  // used to wait for tasks completed when bringing the workers down.
  137  std::vector<worker> workers;  [member]
  170  workers.push_back(w);  [in create_workers()]
  188  if (workers.empty() || !t)  [in schedule_task()]
  231  if (workers.empty())  [in do_bring_workers_down()]
  242  // Now that the task queue is empty, drain the workers by waking them up,  [in do_bring_workers_down()]
  246  for (std::vector<worker>::const_iterator i = workers.begin();  [in do_bring_workers_down()]
  247  i != workers  [in do_bring_workers_down()] [all...]
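
The create_workers/schedule_task/do_bring_workers_down hits outline a task-queue pool: tasks go onto a shared queue, worker threads pop and run them, and shutdown drains the queue before waking and joining the workers. A self-contained sketch of that shape; it illustrates the pattern, not libabigail's actual abg-workers implementation.

```cpp
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

class TaskQueue {
public:
    explicit TaskQueue(unsigned n) {
        for (unsigned i = 0; i < n; ++i)
            workers.emplace_back([this] { run(); });
    }
    bool schedule_task(std::function<void()> t) {
        if (workers.empty() || !t)   // mirrors the check at line 188
            return false;
        { std::lock_guard<std::mutex> l(m); tasks.push(std::move(t)); }
        cv.notify_one();
        return true;
    }
    ~TaskQueue() {                   // bring the workers down
        { std::lock_guard<std::mutex> l(m); down = true; }
        cv.notify_all();             // wake everyone so they can drain and exit
        for (std::thread& w : workers) w.join();
    }
private:
    void run() {
        for (;;) {
            std::unique_lock<std::mutex> l(m);
            cv.wait(l, [this] { return down || !tasks.empty(); });
            if (tasks.empty()) return;  // queue drained and shutting down
            auto t = std::move(tasks.front());
            tasks.pop();
            l.unlock();
            t();
        }
    }
    std::mutex m;
    std::condition_variable cv;
    std::queue<std::function<void()>> tasks;
    std::vector<std::thread> workers;
    bool down = false;
};
```

The design choice visible in both the hits and the sketch is that shutdown never discards pending tasks: workers keep popping until the queue is empty and only then exit.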

/third_party/node/benchmark/worker/

echo.js
    7  workers: [1],
   15  function main({ n, workers, sendsPerBroadcast: sends, payload: payloadType }) {
   16  const expectedPerBroadcast = sends * workers;
   35  for (let i = 0; i < workers; ++i) {
   43  if (++readies === workers) {

/third_party/node/test/internet/

test-dgram-broadcast-multi-process.js
   59  const workers = {};
   73  killSubprocesses(workers);
   82  workers[worker.pid] = worker;
   86  // Handle the death of workers
  104  console.error('[PARENT] All workers have died.');
  107  killSubprocesses(workers);
  133  console.error('[PARENT] All workers have received the ' +
  137  Object.keys(workers).forEach((pid) => {
  138  const worker = workers[pid];
  160  killSubprocesses(workers); [all...]

test-dgram-multicast-multi-process.js
   40  const workers = {};
   47  workers[worker.pid] = worker;
   51  // Handle the death of workers.
   66  console.error('[PARENT] All workers have died.');
   94  console.error('[PARENT] All workers have received the ' +
   97  Object.keys(workers).forEach(function(pid) {
   98  const worker = workers[pid];
  119  killSubprocesses(workers);
  144  killSubprocesses(workers);

test-dgram-multicast-ssmv6-multi-process.js
   19  const workers = {};
   41  workers[worker.pid] = worker;
   45  // Handle the death of workers.
   60  console.error('[PARENT] All workers have died.');
   88  console.error('[PARENT] All workers have received the ' +
   91  Object.keys(workers).forEach(function(pid) {
   92  const worker = workers[pid];
  113  killChildren(workers);
  138  killChildren(workers);

test-dgram-multicast-ssm-multi-process.js
   19  const workers = {};
   41  workers[worker.pid] = worker;
   45  // Handle the death of workers.
   60  console.error('[PARENT] All workers have died.');
   88  console.error('[PARENT] All workers have received the ' +
   91  Object.keys(workers).forEach(function(pid) {
   92  const worker = workers[pid];
  113  killChildren(workers);
  138  killChildren(workers);

test-dgram-multicast-set-interface-lo.js
   79  const workers = {};
   98  killSubprocesses(workers);
  118  workers[worker.pid] = worker;
  123  // Handle the death of workers.
  138  console.error('[PARENT] All workers have died.');
  141  killSubprocesses(workers);
  167  console.error('[PARENT] All workers have received the ' +
  171  Object.keys(workers).forEach((pid) => {
  172  const worker = workers[pid];
  196  killSubprocesses(workers); [all...]

/third_party/ffmpeg/libavutil/

slicethread.c
   38  WorkerContext *workers;  [member]
  121  if (nb_workers && !(ctx->workers = av_calloc(nb_workers, sizeof(*ctx->workers)))) {  [in avpriv_slicethread_create()]
  141  WorkerContext *w = &ctx->workers[i];  [in avpriv_slicethread_create()]
  180  WorkerContext *w = &ctx->workers[i];  [in avpriv_slicethread_execute()]
  216  WorkerContext *w = &ctx->workers[i];  [in avpriv_slicethread_free()]
  224  WorkerContext *w = &ctx->workers[i];  [in avpriv_slicethread_free()]
  232  av_freep(&ctx->workers);  [in avpriv_slicethread_free()]
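
These hits trace the create/execute/free lifecycle of FFmpeg's slice-thread pool. A sketch of the underlying idea, dividing the slices of one call across a set of workers; for simplicity it spawns threads per call with std::thread, whereas FFmpeg keeps its workers alive between calls and signals them with condition variables.

```cpp
#include <functional>
#include <thread>
#include <vector>

// Run worker_fn once per slice, with nb_workers threads sharing the slices
// in an interleaved pattern (worker i takes slices i, i+nb_workers, ...).
void execute_slices(unsigned nb_workers, int nb_slices,
                    const std::function<void(int /*slice*/)>& worker_fn) {
    std::vector<std::thread> workers;
    workers.reserve(nb_workers);
    for (unsigned i = 0; i < nb_workers; ++i) {
        workers.emplace_back([=, &worker_fn] {
            for (int s = static_cast<int>(i); s < nb_slices;
                 s += static_cast<int>(nb_workers))
                worker_fn(s);
        });
    }
    for (std::thread& w : workers)
        w.join();  // all slices done before execute returns
}
```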

/third_party/skia/third_party/externals/abseil-cpp/absl/synchronization/

blocking_counter_test.cc
   42  std::vector<std::thread> workers;  [in TEST(), local]
   47  workers.reserve(num_workers);  [in TEST()]
   49  workers.emplace_back(  [in TEST()]
   56  // Check that all the workers have completed.  [in TEST()]
   61  for (std::thread& w : workers) {  [in TEST()]
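
The test exercises absl::BlockingCounter, which lets one thread block until N workers have each decremented the counter. A small usage sketch; the per-worker work is a trivial stand-in.

```cpp
#include <thread>
#include <vector>
#include "absl/synchronization/blocking_counter.h"

void run_workers(int num_workers) {
    absl::BlockingCounter counter(num_workers);
    std::vector<std::thread> workers;
    workers.reserve(num_workers);
    for (int i = 0; i < num_workers; ++i) {
        workers.emplace_back([&counter, i] {
            // ... per-worker work for index i goes here ...
            counter.DecrementCount();  // signal completion, exactly once
        });
    }
    counter.Wait();  // returns once all workers have decremented
    // Wait() does not join the threads; that is still the caller's job,
    // which is why the test loops over the workers and joins each one.
    for (std::thread& w : workers) w.join();
}
```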

/third_party/skia/third_party/externals/spirv-cross/include/spirv_cross/

thread_group.hpp
   34  workers[i].start(&impl[i]);  [in ThreadGroup()]
   39  for (auto &worker : workers)  [in run()]
   45  for (auto &worker : workers)  [in wait()]
  110  Thread workers[Size];  [member in spirv_cross::ThreadGroup]
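
The hits show a fixed-size group of workers started in the constructor, with run() fanning work out and wait() fanning back in. A simplified C++ sketch of that shape; the real spirv_cross::ThreadGroup keeps reusable workers behind its own Thread type, while this stand-in collapses run/wait into thread construction and join.

```cpp
#include <cstddef>
#include <thread>

// Impl must provide run(); Size is fixed at compile time, as in the hits.
template <typename Impl, size_t Size>
class ThreadGroup {
public:
    explicit ThreadGroup(Impl *impl_array) {
        for (size_t i = 0; i < Size; i++)
            workers[i] = std::thread([impl = &impl_array[i]] { impl->run(); });
    }
    void wait() {
        for (auto &worker : workers)
            if (worker.joinable())
                worker.join();
    }
    ~ThreadGroup() { wait(); }
private:
    std::thread workers[Size];  // fixed-size array, as at line 110
};
```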

/third_party/node/benchmark/http/

cluster.js
   22  let workers = 0;
   27  workers++;
   28  if (workers < 2)

/third_party/python/Lib/

compileall.py
   49  rx=None, quiet=0, legacy=False, optimize=-1, workers=1,
   67  workers: maximum number of parallel workers
   84  if workers < 0:
   85  raise ValueError('workers must be greater or equal to 0')
   86  if workers != 1:
   92  workers = 1
   99  if workers != 1 and ProcessPoolExecutor is not None:
  100  # If workers == 0, let ProcessPoolExecutor choose
  101  workers [all...]
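
The compileall hits show a common convention for a workers parameter: a negative count is rejected, 0 means "choose automatically" (here delegated to ProcessPoolExecutor), and 1 means run serially. A sketch of the same validation transposed to C++ for illustration; resolve_workers() is a hypothetical helper, not part of compileall.

```cpp
#include <stdexcept>
#include <thread>

unsigned resolve_workers(int workers) {
    if (workers < 0)
        throw std::invalid_argument("workers must be greater or equal to 0");
    if (workers == 0) {
        // Like ProcessPoolExecutor with no explicit count: use the CPU count.
        unsigned n = std::thread::hardware_concurrency();
        return n ? n : 1;  // hardware_concurrency() may report 0
    }
    return static_cast<unsigned>(workers);  // includes the serial case, 1
}
```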

/third_party/python/Lib/test/libregrtest/

runtest_mp.py
   34  # buildbot workers.
  358  def get_running(workers: list[TestWorkerProcess]) -> list[TestWorkerProcess]:
  360  for worker in workers:
  386  self.workers = None
  389  self.workers = [TestWorkerProcess(index, self)
  391  msg = f"Run tests in parallel using {len(self.workers)} child processes"
  397  for worker in self.workers:
  402  for worker in self.workers:
  404  for worker in self.workers:
  411  # bpo-46205: check the status of workers ever [all...]

/third_party/icu/ohos_icu4j/src/main/tests/ohos/global/icu/dev/test/util/

VersionInfoTest.java
  373  GetInstanceWorker[] workers = new GetInstanceWorker[numThreads];  [in TestMultiThread()]
  376  // Create workers  [in TestMultiThread()]
  377  for (int i = 0; i < workers.length; i++) {  [in TestMultiThread()]
  378  workers[i] = new GetInstanceWorker(i, results[i]);  [in TestMultiThread()]
  381  // Start workers  [in TestMultiThread()]
  382  for (int i = 0; i < workers.length; i++) {  [in TestMultiThread()]
  383  workers[i].start();  [in TestMultiThread()]
  387  for (int i = 0; i < workers.length; i++) {  [in TestMultiThread()]
  389  workers[i].join();  [in TestMultiThread()]

/third_party/icu/icu4j/main/tests/core/src/com/ibm/icu/dev/test/util/

VersionInfoTest.java
  382  GetInstanceWorker[] workers = new GetInstanceWorker[numThreads];  [in TestMultiThread()]
  385  // Create workers  [in TestMultiThread()]
  386  for (int i = 0; i < workers.length; i++) {  [in TestMultiThread()]
  387  workers[i] = new GetInstanceWorker(i, results[i]);  [in TestMultiThread()]
  390  // Start workers  [in TestMultiThread()]
  391  for (int i = 0; i < workers.length; i++) {  [in TestMultiThread()]
  392  workers[i].start();  [in TestMultiThread()]
  396  for (int i = 0; i < workers.length; i++) {  [in TestMultiThread()]
  398  workers[i].join();  [in TestMultiThread()]
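
Both copies of this ICU test (the ohos_icu4j and icu4j trees carry the same file) follow the classic create-all, start-all, join-all sequence so every thread races the same code path at once. A C++ transcription of that shape; the per-thread body is a stand-in.

```cpp
#include <thread>
#include <vector>

void test_multi_thread(int num_threads) {
    std::vector<std::thread> workers;
    workers.reserve(num_threads);
    // Create and start workers. Unlike Java's separate new/start steps,
    // std::thread begins running as soon as it is constructed.
    for (int i = 0; i < num_threads; i++) {
        workers.emplace_back([i] {
            // ... exercise the API under test from thread i ...
        });
    }
    // Wait for all workers to finish before checking their results.
    for (std::thread& w : workers)
        w.join();
}
```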