/kernel/linux/linux-5.10/scripts/
generate_initcall_order.pl
  18   my $jobs = {}; # child process pid -> file handle
  169  if (!exists($jobs->{$pid})) {
  173  my $fh = $jobs->{$pid};
  181  delete($jobs->{$pid});
  202  $jobs->{$pid} = $fh;
  213  if (scalar(keys(%{$jobs})) >= $njobs) {
  219  while (scalar(keys(%{$jobs})) > 0) {
/kernel/linux/linux-6.6/scripts/
generate_initcall_order.pl
  18   my $jobs = {}; # child process pid -> file handle
  169  if (!exists($jobs->{$pid})) {
  173  my $fh = $jobs->{$pid};
  181  delete($jobs->{$pid});
  202  $jobs->{$pid} = $fh;
  213  if (scalar(keys(%{$jobs})) >= $njobs) {
  219  while (scalar(keys(%{$jobs})) > 0) {
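Both trees carry the same script: it keeps a bounded pool of child processes, with a hash mapping each child's pid to the read handle of its pipe; new children are forked only while fewer than $njobs are outstanding, and the hash is drained with waitpid at the end. A minimal C sketch of that pool pattern; the names and the job payload here are hypothetical, not taken from the script:

/*
 * Minimal sketch of the worker-pool pattern: a table plays the role of
 * the perl hash mapping child pid -> file handle.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

#define NJOBS   4       /* like $njobs */
#define MAXJOBS 256

static pid_t job_pid[MAXJOBS];  /* pid -> ... */
static int   job_fd[MAXJOBS];   /* ... -> read end of the child's pipe */
static int   nlive;

static void spawn_job(int id)
{
    int pipefd[2];

    if (nlive >= MAXJOBS || pipe(pipefd) < 0)
        exit(1);
    pid_t pid = fork();
    if (pid < 0)
        exit(1);
    if (pid == 0) {                         /* child: do work, report */
        close(pipefd[0]);
        dprintf(pipefd[1], "result of job %d\n", id);
        _exit(0);
    }
    close(pipefd[1]);
    job_pid[nlive] = pid;                   /* $jobs->{$pid} = $fh */
    job_fd[nlive] = pipefd[0];
    nlive++;
}

static void reap_one(void)
{
    int status;
    pid_t pid = waitpid(-1, &status, 0);

    for (int i = 0; i < nlive; i++) {
        if (job_pid[i] != pid)
            continue;
        char buf[128];
        ssize_t n = read(job_fd[i], buf, sizeof(buf) - 1);
        if (n > 0) {
            buf[n] = '\0';
            fputs(buf, stdout);
        }
        close(job_fd[i]);
        nlive--;                            /* delete($jobs->{$pid}) */
        job_pid[i] = job_pid[nlive];
        job_fd[i] = job_fd[nlive];
        return;
    }
}

int main(void)
{
    for (int id = 0; id < 10; id++) {
        if (nlive >= NJOBS)                 /* keys(%{$jobs}) >= $njobs */
            reap_one();
        spawn_job(id);
    }
    while (nlive > 0)                       /* keys(%{$jobs}) > 0 */
        reap_one();
    return 0;
}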
/kernel/linux/linux-6.6/drivers/gpu/drm/panfrost/
panfrost_job.c
  108  /* JS0: fragment jobs.  in panfrost_job_get_slot()
  109   * JS1: vertex/tiler jobs  in panfrost_job_get_slot()
  110   * JS2: compute jobs  in panfrost_job_get_slot()
  136   * Eventually we may need to support tiler only jobs and h/w with  in panfrost_job_write_affinity()
  159  struct panfrost_job *job = pfdev->jobs[slot][0];  in panfrost_dequeue_job()
  162  pfdev->jobs[slot][0] = pfdev->jobs[slot][1];  in panfrost_dequeue_job()
  163  pfdev->jobs[slot][1] = NULL;  in panfrost_dequeue_job()
  175  if (!pfdev->jobs[slot][0]) {  in panfrost_enqueue_job()
  176  pfdev->jobs[slo  in panfrost_enqueue_job()
  [all...]
panfrost_device.h
  106  struct panfrost_job *jobs[NUM_JOB_SLOTS][2];  member
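In 6.6 the driver double-buffers each hardware job slot: jobs[slot][0] is the job the GPU is executing, jobs[slot][1] is queued behind it, and dequeuing shifts entry 1 down to entry 0 (compare the 5.10 entry further down, which keeps a single job pointer per slot). A simplified sketch of that per-slot queue, loosely mirroring the functions above; the driver's job_lock and hardware submission are omitted:

/* Simplified sketch of the 6.6 per-slot double buffer. */
#define NUM_JOB_SLOTS 3

struct panfrost_job;                        /* opaque for the sketch */

static struct panfrost_job *jobs[NUM_JOB_SLOTS][2];

/* Return the finished head and promote the queued job, as in
 * panfrost_dequeue_job(). */
static struct panfrost_job *dequeue_job(int slot)
{
    struct panfrost_job *job = jobs[slot][0];

    jobs[slot][0] = jobs[slot][1];
    jobs[slot][1] = NULL;
    return job;
}

/* Fill the first free entry, loosely after panfrost_enqueue_job();
 * returns -1 if both entries are busy. */
static int enqueue_job(int slot, struct panfrost_job *job)
{
    if (!jobs[slot][0]) {
        jobs[slot][0] = job;                /* slot idle: runs immediately */
        return 0;
    }
    if (!jobs[slot][1]) {
        jobs[slot][1] = job;                /* queued behind the running job */
        return 0;
    }
    return -1;
}

The second entry lets the driver hand the next job to the hardware before the current one retires, so the slot stays busy across the completion interrupt.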
/kernel/linux/linux-5.10/drivers/md/
dm-kcopyd.c
  77   * We maintain four lists of jobs:
  79   * i) jobs waiting for pages
  80   * ii) jobs that have pages, and are waiting for the io to be issued.
  81   * iii) jobs that don't need to do any IO and just run a callback
  82   * iv) jobs that have completed.
  411  static struct kcopyd_job *pop_io_job(struct list_head *jobs,  in pop_io_job() argument
  417  * For I/O jobs, pop any read, any write without sequential write  in pop_io_job()
  420  list_for_each_entry(job, jobs, list) {  in pop_io_job()
  436  static struct kcopyd_job *pop(struct list_head *jobs,  in pop() argument
  444  if (!list_empty(jobs)) {  in pop()
  457  push(struct list_head *jobs, struct kcopyd_job *job)  push() argument
  468  push_head(struct list_head *jobs, struct kcopyd_job *job)  push_head() argument
  607  process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc, int (*fn) (struct kcopyd_job *))  process_jobs() argument
  [all...]
/kernel/linux/linux-6.6/drivers/md/
dm-kcopyd.c
  80   * We maintain four lists of jobs:
  82   * i) jobs waiting for pages
  83   * ii) jobs that have pages, and are waiting for the io to be issued.
  84   * iii) jobs that don't need to do any IO and just run a callback
  85   * iv) jobs that have completed.
  417  static struct kcopyd_job *pop_io_job(struct list_head *jobs,  in pop_io_job() argument
  423  * For I/O jobs, pop any read, any write without sequential write  in pop_io_job()
  426  list_for_each_entry(job, jobs, list) {  in pop_io_job()
  443  static struct kcopyd_job *pop(struct list_head *jobs,  in pop() argument
  450  if (!list_empty(jobs)) {  in pop()
  463  push(struct list_head *jobs, struct kcopyd_job *job)  push() argument
  474  push_head(struct list_head *jobs, struct kcopyd_job *job)  push_head() argument
  611  process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc, int (*fn)(struct kcopyd_job *))  process_jobs() argument
  [all...]
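The kcopyd comment block describes a small state machine: every job sits on exactly one of four lists (waiting for pages, waiting for io, callback only, complete), and process_jobs() walks a list applying the handler for that state. A self-contained userspace sketch of the pop/push/process_jobs machinery, with stand-ins for <linux/list.h> and none of the driver's locking or actual I/O:

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's list_head so the sketch compiles
 * on its own; the real driver uses <linux/list.h>. */
struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_del(struct list_head *e)
{
    e->prev->next = e->next;
    e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
    e->prev = h->prev; e->next = h;
    h->prev->next = e; h->prev = e;
}

struct kcopyd_job_sketch {
    struct list_head list;
    int id;
};

/* The four states a job moves through, mirroring the comment above:
 * waiting for pages -> io pending -> callback only -> complete. */
static struct list_head pages_jobs, io_jobs, callback_jobs, complete_jobs;

/* pop(): take the first job off a list; the driver does this under
 * its job_lock, omitted here. */
static struct kcopyd_job_sketch *pop(struct list_head *jobs)
{
    if (list_empty(jobs))
        return NULL;
    struct list_head *e = jobs->next;
    list_del(e);
    return (struct kcopyd_job_sketch *)((char *)e -
            offsetof(struct kcopyd_job_sketch, list));
}

static void push(struct list_head *jobs, struct kcopyd_job_sketch *job)
{
    list_add_tail(&job->list, jobs);
}

/* process_jobs(): run fn on every queued job, shaped after the
 * driver's process_jobs(jobs, kc, fn); here fn just reports. */
static void process_jobs(struct list_head *jobs,
                         void (*fn)(struct kcopyd_job_sketch *))
{
    struct kcopyd_job_sketch *job;

    while ((job = pop(jobs)))
        fn(job);
}

static void report(struct kcopyd_job_sketch *job)
{
    printf("job %d done\n", job->id);
}

int main(void)
{
    struct kcopyd_job_sketch j = { .id = 1 };

    list_init(&complete_jobs);
    push(&complete_jobs, &j);
    process_jobs(&complete_jobs, report);
    return 0;
}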
/kernel/linux/linux-5.10/tools/testing/kunit/
kunit_kernel.py
  80   def make(self, jobs, build_dir, make_options):
  81   command = ['make', 'ARCH=um', '--jobs=' + str(jobs)]
  195  def build_um_kernel(self, alltests, jobs, build_dir, make_options):
  200  self._ops.make(jobs, build_dir, make_options)
kunit.py
  28   ['jobs', 'build_dir', 'alltests',
  34   KunitRequest = namedtuple('KunitRequest', ['raw_output','timeout', 'jobs',
  74   request.jobs,
  144  build_request = KunitBuildRequest(request.jobs, request.build_dir,
  187  parser.add_argument('--jobs',
  189  'jobs (commands) to run simultaneously."',
  190  type=int, default=8, metavar='jobs')
  266  cli_args.jobs,
  300  request = KunitBuildRequest(cli_args.jobs,
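In both trees the flag is wired straight through to make as --jobs=N (the 6.6 version below also passes O=<build_dir> and a configurable ARCH). For illustration only, a C sketch of assembling and exec'ing such a command; the um arch and the default of 8 are taken from the listing above, everything else is made up:

/* Illustration only: building `make ARCH=um --jobs=N` the way
 * kunit_kernel.py's make() does. */
#include <stdio.h>
#include <unistd.h>

static int run_make(int jobs)
{
    char jobs_arg[32];

    snprintf(jobs_arg, sizeof(jobs_arg), "--jobs=%d", jobs);
    /* execlp replaces the process; a real caller would fork first */
    return execlp("make", "make", "ARCH=um", jobs_arg, (char *)NULL);
}

int main(void)
{
    return run_make(8);     /* kunit.py argparse default: 8 */
}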
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdgpu/
amdgpu_cs.c
  292  num_ibs[i], &p->jobs[i]);  in amdgpu_cs_pass1()
  296  p->gang_leader = p->jobs[p->gang_leader_idx];  in amdgpu_cs_pass1()
  343  job = p->jobs[r];  in amdgpu_cs_p2_ib()
  578  p->jobs[i]->shadow_va = shadow->shadow_va;  in amdgpu_cs_p2_shadow()
  579  p->jobs[i]->csa_va = shadow->csa_va;  in amdgpu_cs_p2_shadow()
  580  p->jobs[i]->gds_va = shadow->gds_va;  in amdgpu_cs_p2_shadow()
  581  p->jobs[i]->init_shadow =  in amdgpu_cs_p2_shadow()
  980  amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,  in amdgpu_cs_parser_bos()
  1008 struct amdgpu_job *job = p->jobs[i];  in trace_amdgpu_cs_ibs()
  1078 r = amdgpu_cs_patch_ibs(p, p->jobs[  in amdgpu_cs_patch_jobs()
  [all...]
amdgpu_cs.h
  62   struct amdgpu_job *jobs[AMDGPU_CS_GANG_SIZE];  member
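amdgpu's gang submit carries up to AMDGPU_CS_GANG_SIZE jobs per command submission and caches a pointer to one of them as gang leader (p->gang_leader = p->jobs[p->gang_leader_idx]). A schematic C sketch of that container shape; everything except the array and the leader assignment is stubbed, and this is not the driver's actual parser state:

/* Schematic only: the gang-submit container shape.  AMDGPU_CS_GANG_SIZE
 * is 4 in the 6.6 tree; the struct name here is hypothetical. */
#define AMDGPU_CS_GANG_SIZE 4

struct amdgpu_job;                  /* opaque for the sketch */

struct cs_parser_sketch {
    struct amdgpu_job *jobs[AMDGPU_CS_GANG_SIZE];
    unsigned int gang_size;         /* how many entries are populated */
    unsigned int gang_leader_idx;
    struct amdgpu_job *gang_leader;
};

/* Shaped after amdgpu_cs_pass1(): once the per-IB jobs exist, cache a
 * direct pointer to the leader so later passes need no index. */
static void pick_gang_leader(struct cs_parser_sketch *p)
{
    p->gang_leader = p->jobs[p->gang_leader_idx];
}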
/kernel/linux/linux-6.6/tools/testing/kunit/
kunit.py
  46   jobs: int
  92   success = linux.build_kernel(request.jobs,
  360  parser.add_argument('--jobs',
  362  'jobs (commands) to run simultaneously."',
  448  jobs=cli_args.jobs,
  484  jobs=cli_args.jobs)
kunit_kernel.py
  74   def make(self, jobs: int, build_dir: str, make_options: Optional[List[str]]) -> None:
  75   command = ['make', 'ARCH=' + self._linux_arch, 'O=' + build_dir, '--jobs=' + str(jobs)]
  324  def build_kernel(self, jobs: int, build_dir: str, make_options: Optional[List[str]]) -> bool:
  327  self._ops.make(jobs, build_dir, make_options)
/kernel/linux/linux-5.10/drivers/gpu/drm/panfrost/
panfrost_job.c
  114  /* JS0: fragment jobs.  in panfrost_job_get_slot()
  115   * JS1: vertex/tiler jobs  in panfrost_job_get_slot()
  116   * JS2: compute jobs  in panfrost_job_get_slot()
  142   * Eventually we may need to support tiler only jobs and h/w with  in panfrost_job_write_affinity()
  354  pfdev->jobs[slot] = job;  in panfrost_job_run()
  442  * If the GPU managed to complete this jobs fence, the timeout is  in panfrost_job_timedout()
  522  job = pfdev->jobs[j];  in panfrost_job_irq_handler()
  525  pfdev->jobs[j] = NULL;  in panfrost_job_irq_handler()
  579  if (pfdev->jobs[i]) {  in panfrost_reset()
  582  pfdev->jobs[  in panfrost_reset()
  [all...]
panfrost_device.h
  103  struct panfrost_job *jobs[NUM_JOB_SLOTS];  member
/kernel/linux/linux-5.10/tools/testing/selftests/net/
udpgso_bench.sh
  50   local -r jobs="$(jobs -p)"
  52   if [[ "${jobs}" != "" ]]; then
  53   kill -1 ${jobs} 2>/dev/null
udpgro_bench.sh
  9    local -r jobs="$(jobs -p)"
  12   [ -n "${jobs}" ] && kill -INT ${jobs} 2>/dev/null
udpgro.sh
  17   local -r jobs="$(jobs -p)"
  20   [ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null
  56   wait $(jobs -p)
  102  wait $(jobs -p)
  126  wait $(jobs -p)
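These selftests (and their 6.6 copies below) share one cleanup idiom: collect the PIDs of the script's background children with jobs -p, signal them, then wait. The scripts are not uniform about the signal: udpgso_bench.sh and udpgro.sh send SIGHUP (kill -1), the others SIGINT (kill -INT). A hedged C analogue of the same pattern, tracking forked children and signalling the survivors from an exit handler; the worker payload is a placeholder:

/* Minimal analogue of the scripts' `kill -INT $(jobs -p)` cleanup. */
#include <signal.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

#define MAX_CHILDREN 16

static pid_t children[MAX_CHILDREN];
static int nchildren;

static void cleanup(void)
{
    for (int i = 0; i < nchildren; i++)
        kill(children[i], SIGINT);  /* scripts use -INT or -1 (SIGHUP) */
    while (wait(NULL) > 0)
        ;                           /* reap, like wait $(jobs -p) */
}

static void spawn_worker(void)
{
    pid_t pid = fork();

    if (pid == 0) {
        pause();                    /* placeholder for the benchmark child */
        _exit(0);
    }
    if (pid > 0 && nchildren < MAX_CHILDREN)
        children[nchildren++] = pid;
}

int main(void)
{
    atexit(cleanup);
    spawn_worker();
    spawn_worker();
    return 0;   /* the atexit handler signals and reaps the children */
}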
/kernel/linux/linux-6.6/tools/testing/selftests/net/
udpgso_bench.sh
  50   local -r jobs="$(jobs -p)"
  52   if [[ "${jobs}" != "" ]]; then
  53   kill -1 ${jobs} 2>/dev/null
udpgro_frglist.sh
  11   local -r jobs="$(jobs -p)"
  14   [ -n "${jobs}" ] && kill -INT ${jobs} 2>/dev/null
udpgro_bench.sh
  11   local -r jobs="$(jobs -p)"
  14   [ -n "${jobs}" ] && kill -INT ${jobs} 2>/dev/null
udpgro.sh
  19   local -r jobs="$(jobs -p)"
  22   [ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null
  58   wait $(jobs -p)
  104  wait $(jobs -p)
  128  wait $(jobs -p)
/kernel/linux/linux-5.10/drivers/net/wireless/cisco/
airo.c
  1205 unsigned long jobs;  member
  1335 clear_bit(JOB_MIC, &ai->jobs);  in micinit()
  1893 clear_bit(JOB_DIE, &ai->jobs);  in airo_open()
  1905 set_bit(JOB_DIE, &ai->jobs);  in airo_open()
  2110 clear_bit(JOB_XMIT, &priv->jobs);  in airo_end_xmit()
  2166 set_bit(JOB_XMIT, &priv->jobs);  in airo_start_xmit()
  2182 clear_bit(JOB_XMIT11, &priv->jobs);  in airo_end_xmit11()
  2245 set_bit(JOB_XMIT11, &priv->jobs);  in airo_start_xmit11()
  2258 clear_bit(JOB_STATS, &ai->jobs);  in airo_read_stats()
  2290 if (!test_bit(JOB_STATS, &local->jobs)) {  in airo_get_stats()
  [all...]
/kernel/linux/linux-6.6/drivers/net/wireless/cisco/
airo.c
  1205 unsigned long jobs;  member
  1335 clear_bit(JOB_MIC, &ai->jobs);  in micinit()
  1893 clear_bit(JOB_DIE, &ai->jobs);  in airo_open()
  1905 set_bit(JOB_DIE, &ai->jobs);  in airo_open()
  2110 clear_bit(JOB_XMIT, &priv->jobs);  in airo_end_xmit()
  2166 set_bit(JOB_XMIT, &priv->jobs);  in airo_start_xmit()
  2182 clear_bit(JOB_XMIT11, &priv->jobs);  in airo_end_xmit11()
  2245 set_bit(JOB_XMIT11, &priv->jobs);  in airo_start_xmit11()
  2258 clear_bit(JOB_STATS, &ai->jobs);  in airo_read_stats()
  2290 if (!test_bit(JOB_STATS, &local->jobs)) {  in airo_get_stats()
  [all...]
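airo.c funnels all of its deferred work through a single unsigned long bitmask: producers flag work with set_bit(JOB_*, &ai->jobs), the driver thread tests and clears bits to decide what to run, and JOB_DIE tears the thread down. A userspace sketch of the same flag protocol using C11 atomics in place of the kernel's bitops; the bit numbering is illustrative, not the driver's:

/* Sketch of the job-bitmask protocol with <stdatomic.h> standing in
 * for set_bit()/clear_bit()/test_bit(). */
#include <stdatomic.h>
#include <stdbool.h>

enum { JOB_XMIT, JOB_STATS, JOB_MIC, JOB_DIE };  /* bit positions */

static atomic_ulong jobs;

static void job_set(int bit)            /* like set_bit(bit, &ai->jobs) */
{
    atomic_fetch_or(&jobs, 1UL << bit);
}

static bool job_test_and_clear(int bit) /* like test_and_clear_bit() */
{
    unsigned long mask = 1UL << bit;

    return atomic_fetch_and(&jobs, ~mask) & mask;
}

/* The driver-thread shape: run whatever work is flagged, exit on
 * JOB_DIE.  The real thread sleeps between passes; this sketch spins. */
static void job_thread(void)
{
    for (;;) {
        if (job_test_and_clear(JOB_DIE))
            break;
        if (job_test_and_clear(JOB_XMIT))
            ;   /* transmit path would run here */
        if (job_test_and_clear(JOB_STATS))
            ;   /* stats refresh would run here */
    }
}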
/kernel/linux/linux-5.10/tools/cgroup/
iocost_coef_gen.py
  47   help='Number of parallel fio jobs to run (default: %(default)s)')
  89   def run_fio(testfile, duration, iotype, iodepth, blocksize, jobs):
  98   f'--time_based --numjobs={jobs}')
  104  return sum(j['read']['bw_bytes'] + j['write']['bw_bytes'] for j in d['jobs'])
/kernel/linux/linux-6.6/tools/cgroup/
iocost_coef_gen.py
  47   help='Number of parallel fio jobs to run (default: %(default)s)')
  89   def run_fio(testfile, duration, iotype, iodepth, blocksize, jobs):
  98   f'--time_based --numjobs={jobs}')
  104  return sum(j['read']['bw_bytes'] + j['write']['bw_bytes'] for j in d['jobs'])
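run_fio() launches fio with --numjobs=<jobs> and reduces the JSON result by summing read and write bw_bytes across the per-job entries. The same reduction sketched in C over a hypothetical pre-parsed array; fio's actual JSON parsing is out of scope here:

/* Sketch of iocost_coef_gen.py's bandwidth sum; assumes the fio JSON
 * has already been parsed into this hypothetical struct array. */
struct fio_job_result {
    unsigned long long read_bw_bytes;
    unsigned long long write_bw_bytes;
};

static unsigned long long total_bw_bytes(const struct fio_job_result *jobs,
                                         unsigned int njobs)
{
    unsigned long long sum = 0;

    for (unsigned int i = 0; i < njobs; i++)
        sum += jobs[i].read_bw_bytes + jobs[i].write_bw_bytes;
    return sum;
}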