/linux/scripts/

jobserver-exec
    15  jobs = b""  variable
    37  jobs += slot
    43  if len(jobs):
    44  os.write(writer, jobs)
    48  claim = len(jobs) + 1
    63  if len(jobs):
    64  os.write(writer, jobs)

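These hits are the client side of the GNU make jobserver protocol: a process claims extra parallelism by reading one-byte tokens from an inherited pipe and must write every token back when it finishes, and the `claim = len(jobs) + 1` line counts the implicit slot each child already owns. A minimal C sketch of the same claim/release cycle (function names are illustrative; it assumes the reader/writer descriptors were already parsed out of MAKEFLAGS and that the reader is non-blocking, as in the script):

    /*
     * Jobserver client sketch: claim up to "wanted" extra slots by reading
     * one-byte tokens from the jobserver pipe, then return every token
     * verbatim when the work is done.  Assumes `reader` is non-blocking
     * and both fds came from MAKEFLAGS (--jobserver-auth=R,W).
     */
    #include <unistd.h>

    static int claim_tokens(int reader, char *tokens, int wanted)
    {
        int held = 0;

        while (held < wanted) {
            if (read(reader, &tokens[held], 1) != 1)
                break;          /* pipe drained or closed */
            held++;
        }
        /* Every child owns one implicit slot, so it may run held + 1
         * jobs, mirroring jobserver-exec's claim = len(jobs) + 1. */
        return held;
    }

    static void release_tokens(int writer, const char *tokens, int held)
    {
        /* Tokens must go back byte-for-byte: os.write(writer, jobs). */
        if (held > 0)
            (void)!write(writer, tokens, held);  /* robust code retries short writes */
    }
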
generate_initcall_order.pl
    18  my $jobs = {};  # child process pid -> file handle
    169  if (!exists($jobs->{$pid})) {
    173  my $fh = $jobs->{$pid};
    181  delete($jobs->{$pid});
    202  $jobs->{$pid} = $fh;
    213  if (scalar(keys(%{$jobs})) >= $njobs) {
    219  while (scalar(keys(%{$jobs})) > 0) {

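The script keeps a hash of child pids so it never has more than $njobs children in flight, reaping one before forking the next and draining the rest at the end. A compact C sketch of the same bounded fork/waitpid pool (run_pool() and do_one_file() are illustrative names; error handling omitted):

    /*
     * Bounded parallel job pool in the style of the script's pid
     * bookkeeping: fork until njobs children are in flight, reap one
     * before starting the next, then drain.
     */
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    extern void do_one_file(const char *path);  /* assumed worker */

    void run_pool(char **files, int nfiles, int njobs)
    {
        int in_flight = 0;

        for (int i = 0; i < nfiles; i++) {
            if (in_flight >= njobs) {       /* keys(%{$jobs}) >= $njobs */
                waitpid(-1, NULL, 0);       /* reap any finished child */
                in_flight--;
            }
            pid_t pid = fork();
            if (pid == 0) {
                do_one_file(files[i]);
                _exit(0);
            }
            if (pid > 0)
                in_flight++;
        }
        while (in_flight-- > 0)             /* keys(%{$jobs}) > 0 */
            waitpid(-1, NULL, 0);
    }
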
/linux/drivers/gpu/drm/panfrost/

panfrost_job.c
    158  struct panfrost_job *job = pfdev->jobs[slot][0];  in panfrost_dequeue_job()
    161  pfdev->jobs[slot][0] = pfdev->jobs[slot][1];  in panfrost_dequeue_job()
    162  pfdev->jobs[slot][1] = NULL;  in panfrost_dequeue_job()
    174  if (!pfdev->jobs[slot][0]) {  in panfrost_enqueue_job()
    175  pfdev->jobs[slot][0] = job;  in panfrost_enqueue_job()
    179  WARN_ON(pfdev->jobs[slot][1]);  in panfrost_enqueue_job()
    180  pfdev->jobs[slot][1] = job;  in panfrost_enqueue_job()
    547  if (!failed[j] || !pfdev->jobs[j][0])  in panfrost_job_handle_irq()
    550  if (pfdev->jobs[j][0]->jc == 0) {  in panfrost_job_handle_irq()
    661  for (j = 0; j < ARRAY_SIZE(pfdev->jobs[0]) && pfdev->jobs[i][j]; j++) {  in panfrost_reset()
    [all …]

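pfdev->jobs[slot] is a two-entry queue per hardware job slot: entry [0] is the job currently on the hardware, entry [1] the one queued behind it, and dequeue promotes [1] to [0]. A standalone sketch of that discipline (simplified names, and without the spinlock the driver actually holds):

    #include <stddef.h>

    struct job;                             /* opaque here */

    /* Two-deep queue: q[0] is on the hardware, q[1] waits behind it. */
    struct slot_queue {
        struct job *q[2];
    };

    /* Returns 0 on success, -1 if both entries are already occupied
     * (the point where panfrost WARN_ONs).  Locking omitted. */
    static int enqueue_job(struct slot_queue *s, struct job *job)
    {
        if (!s->q[0]) {
            s->q[0] = job;                  /* slot idle: runs immediately */
            return 0;
        }
        if (s->q[1])
            return -1;
        s->q[1] = job;                      /* queued behind the running job */
        return 0;
    }

    /* Pop the finished head job and promote the queued one. */
    static struct job *dequeue_job(struct slot_queue *s)
    {
        struct job *done = s->q[0];

        s->q[0] = s->q[1];
        s->q[1] = NULL;
        return done;
    }
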
TODO
    11  - Compute job support. So called 'compute only' jobs need to be plumbed up to

panfrost_device.h
    105  struct panfrost_job *jobs[NUM_JOB_SLOTS][2];  member

/linux/drivers/md/

dm-kcopyd.c
    411  static struct kcopyd_job *pop_io_job(struct list_head *jobs,  in pop_io_job() argument
    420  list_for_each_entry(job, jobs, list) {  in pop_io_job()
    436  static struct kcopyd_job *pop(struct list_head *jobs,  in pop() argument
    443  if (!list_empty(jobs)) {  in pop()
    444  if (jobs == &kc->io_jobs)  in pop()
    445  job = pop_io_job(jobs, kc);  in pop()
    447  job = list_entry(jobs->next, struct kcopyd_job, list);  in pop()
    462  list_add_tail(&job->list, jobs);  in push()
    472  list_add(&job->list, jobs);  in push_head()
    611  while ((job = pop(jobs, kc))) {  in process_jobs()
    [all …]

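kcopyd keeps pending work on intrusive linked lists: push() appends a job at the tail, push_head() requeues one at the head for retry, and pop() takes from the head until the list is empty. A userspace sketch of the same queue shape, using a minimal circular doubly linked list in place of <linux/list.h> (types and helper names are illustrative):

    #include <stddef.h>

    struct list_node { struct list_node *prev, *next; };

    struct job {
        struct list_node list;   /* embedded node, as in struct kcopyd_job */
        /* ... payload ... */
    };

    static void list_init(struct list_node *h) { h->prev = h->next = h; }

    static void insert_between(struct list_node *n, struct list_node *p,
                               struct list_node *s)
    {
        n->prev = p; n->next = s; p->next = n; s->prev = n;
    }

    static void push(struct list_node *jobs, struct job *job)      /* tail */
    {
        insert_between(&job->list, jobs->prev, jobs);
    }

    static void push_head(struct list_node *jobs, struct job *job) /* retry */
    {
        insert_between(&job->list, jobs, jobs->next);
    }

    static struct job *pop(struct list_node *jobs)                 /* head */
    {
        struct list_node *n = jobs->next;

        if (n == jobs)
            return NULL;         /* list empty */
        n->prev->next = n->next;
        n->next->prev = n->prev;
        return (struct job *)((char *)n - offsetof(struct job, list));
    }
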
/linux/Documentation/core-api/

padata.rst
    9  Padata is a mechanism by which the kernel can farm jobs out to be done in
    16  Padata also supports multithreaded jobs, splitting up the job evenly while load
    25  The first step in using padata to run serialized jobs is to set up a
    26  padata_instance structure for overall control of how jobs are to be run::
    39  jobs to be serialized independently. A padata_instance may have one or more
    40  padata_shells associated with it, each allowing a separate series of jobs.
    45  The CPUs used to run jobs can be changed in two ways, programatically with
    52  parallel cpumask describes which processors will be used to execute jobs
    116  true parallelism is achieved by submitting multiple jobs. parallel() runs with
    141  pains to ensure that jobs are completed in the order in which they were
    [all …]

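The document describes padata's split between a parallel() callback, run on some CPU of the parallel cpumask with software interrupts disabled, and a serial() callback that completes jobs in submission order. A hedged sketch of job submission along those lines; signatures follow recent kernels but vary across versions, and the padata_shell is assumed to have been set up beforehand with padata_alloc()/padata_alloc_shell():

    /* Sketch of padata job submission per Documentation/core-api/padata.rst. */
    #include <linux/padata.h>

    struct my_job {
        struct padata_priv padata;   /* must be embedded in the job */
        /* ... job payload ... */
    };

    static void my_parallel(struct padata_priv *padata)
    {
        /* runs on a CPU of the parallel cpumask, softirqs disabled */
        /* ... do the heavy work, then hand on for serialization ... */
        padata_do_serial(padata);
    }

    static void my_serial(struct padata_priv *padata)
    {
        /* runs in submission order on the callback CPU */
    }

    static int submit(struct padata_shell *ps, struct my_job *job, int cb_cpu)
    {
        job->padata.parallel = my_parallel;
        job->padata.serial = my_serial;
        return padata_do_parallel(ps, &job->padata, &cb_cpu);
    }
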
/linux/tools/testing/kunit/

kunit_kernel.py
    80  def make(self, jobs, build_dir, make_options) -> None:  argument
    81  command = ['make', 'ARCH=' + self._linux_arch, '--jobs=' + str(jobs)]
    307  def build_kernel(self, alltests, jobs, build_dir, make_options) -> bool:  argument
    312  self._ops.make(jobs, build_dir, make_options)

kunit.py
    78  request.jobs,
    220  build_request = KunitBuildRequest(request.jobs, request.build_dir,
    407  cli_args.jobs,
    446  request = KunitBuildRequest(cli_args.jobs,

/linux/Documentation/admin-guide/device-mapper/

kcopyd.rst
    10  to set aside for their copy jobs. This is done with a call to
    43  When a user is done with all their copy jobs, they should call

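The documented lifecycle is: create a client to set resources aside, submit copy jobs with a completion callback, and destroy the client once every job has completed. A hedged kernel-side sketch of that flow (signatures believed current but version-dependent; the throttle macro defines a static dm_kcopyd_throttle; error handling abbreviated):

    #include <linux/dm-kcopyd.h>
    #include <linux/dm-io.h>

    static struct dm_kcopyd_client *kc;

    DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(copy_throttle,
            "A percentage of time allocated for copying");

    static int copy_setup(void)
    {
        kc = dm_kcopyd_client_create(&dm_kcopyd_throttle);
        return IS_ERR(kc) ? PTR_ERR(kc) : 0;
    }

    static void copy_done(int read_err, unsigned long write_err, void *context)
    {
        /* completion callback: nonzero arguments report I/O errors */
    }

    static void copy_one(struct block_device *src, struct block_device *dst,
                         sector_t sector, sector_t count, void *ctx)
    {
        struct dm_io_region from = { .bdev = src, .sector = sector, .count = count };
        struct dm_io_region to   = { .bdev = dst, .sector = sector, .count = count };

        dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_done, ctx);
    }

    static void copy_teardown(void)
    {
        /* only after all submitted jobs have called copy_done() */
        dm_kcopyd_client_destroy(kc);
    }
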
unstriped.rst
    105  has read and write jobs that are independent of each other. Compared to

/linux/tools/testing/selftests/net/

udpgso_bench.sh
    51  if [[ "${jobs}" != "" ]]; then

udpgro_bench.sh
    12  [ -n "${jobs}" ] && kill -INT ${jobs} 2>/dev/null

udpgro.sh
    20  [ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null

udpgro_fwd.sh
    23  [ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null

veth.sh
    23  [ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null

/linux/drivers/net/wireless/cisco/

airo.c
    1205  unsigned long jobs;  member
    1335  clear_bit(JOB_MIC, &ai->jobs);  in micinit()
    1893  clear_bit(JOB_DIE, &ai->jobs);  in airo_open()
    1905  set_bit(JOB_DIE, &ai->jobs);  in airo_open()
    2166  set_bit(JOB_XMIT, &priv->jobs);  in airo_start_xmit()
    2382  set_bit(JOB_DIE, &ai->jobs);  in airo_close()
    2802  ai->jobs = 0;  in _init_airo_card()
    3089  if (ai->jobs) {  in airo_thread()
    3098  if (ai->jobs)  in airo_thread()
    3189  set_bit(JOB_MIC, &ai->jobs);  in airo_handle_cisco_mic()
    [all …]

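airo funnels deferred work through a single unsigned long jobs bitmask: producers set a bit atomically and wake the driver thread, which tests and clears one bit at a time, and JOB_DIE asks the thread to exit. A kernel-style sketch of the pattern (struct and function names other than the bitops and waitqueue calls are illustrative; the thread would be started with kthread_run() after init_waitqueue_head()):

    #include <linux/bitops.h>
    #include <linux/wait.h>

    #define JOB_XMIT 0               /* illustrative bit numbers */
    #define JOB_MIC  1
    #define JOB_DIE  2

    struct dev_priv {
        unsigned long jobs;          /* pending-work bitmask */
        wait_queue_head_t thr_wait;
    };

    static int worker_thread(void *data)
    {
        struct dev_priv *ai = data;

        for (;;) {
            /* sleep until some job bit is set */
            wait_event_interruptible(ai->thr_wait, ai->jobs);

            if (test_bit(JOB_DIE, &ai->jobs))
                break;               /* shutdown requested */

            if (test_and_clear_bit(JOB_XMIT, &ai->jobs))
                ;                    /* handle transmit work */
            if (test_and_clear_bit(JOB_MIC, &ai->jobs))
                ;                    /* handle MIC rekeying work */
        }
        return 0;
    }

    /* Producer side, e.g. from the xmit path or an interrupt handler: */
    static void kick(struct dev_priv *ai, int job)
    {
        set_bit(job, &ai->jobs);     /* atomic: safe against the thread */
        wake_up_interruptible(&ai->thr_wait);
    }
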
/linux/Documentation/admin-guide/cgroup-v1/

cpusets.rst
    87  can benefit from explicitly placing jobs on properly sized subsets of
    100  executing jobs. The location of the running jobs pages may also be moved
    252  jobs can share common kernel data, such as file system pages, while
    254  construct a large mem_exclusive cpuset to hold all the jobs, and
    268  This enables batch managers monitoring jobs running in dedicated
    273  submitted jobs, which may choose to terminate or re-prioritize jobs that
    381  This policy can provide substantial improvements for jobs that need
    384  the several nodes in the jobs cpuset in order to fit. Without this
    385  policy, especially for jobs that might have one thread reading in the
    386  data set, the memory allocation across the nodes in the jobs cpuset
    [all …]

memcg_test.rst
    223  run jobs under child_a and child_b
    225  create/delete following groups at random while jobs are running::
    231  running new jobs in new group is also good.

/linux/Documentation/dev-tools/kunit/

kunit-tool.rst
    49  ./tools/testing/kunit/kunit.py run --timeout=30 --jobs=`nproc --all`
    52  - ``--jobs`` sets the number of threads to use to build the kernel.
    196  --jobs=12 \

/linux/Documentation/accounting/

psi.rst
    27  dynamically using techniques such as load shedding, migrating jobs to
    29  priority or restartable batch jobs.

/linux/tools/cgroup/

iocost_coef_gen.py
    89  def run_fio(testfile, duration, iotype, iodepth, blocksize, jobs):  argument

/linux/drivers/crypto/caam/

Kconfig
    121  & receiving crypto jobs to/from CAAM. This gives better performance

/linux/Documentation/admin-guide/

efi-stub.rst
    23  elilo. Since the EFI boot stub performs the jobs of a boot loader, in

/linux/Documentation/driver-api/

dma-buf.rst
    326  all workloads must be flushed from the GPU when switching between jobs
    327  requiring DMA fences or jobs requiring page fault handling: This means all DMA