Lines matching refs:job in drivers/md/dm-kcopyd.c (kcopyd job handling)
40 MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
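The declaration that precedes this descriptor is not part of the match set; a minimal sketch of how such a tunable is typically declared (the default value and permissions here are assumptions, not taken from this listing):

    /* Sketch: sub-job size tunable, overridable at module load time. */
    static unsigned int kcopyd_subjob_size_kb = 512;  /* assumed default */
    module_param(kcopyd_subjob_size_kb, uint, 0644);  /* assumed perms */
    MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");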
347 * Error state of the job.
367 * Set this to ensure you are notified when the job has completed.
374 * These fields are only used if the job has been split into more manageable parts.
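These three comments annotate fields of struct kcopyd_job. A condensed sketch of the fields the matches below keep touching, grouped under those comments (field list inferred from this listing, not a verbatim copy of the struct):

    struct kcopyd_job {
        struct dm_kcopyd_client *kc;
        struct list_head list;
        unsigned long flags;            /* DM_KCOPYD_WRITE_SEQ, IGNORE_ERROR */

        /* Error state of the job. */
        int read_err;
        unsigned long write_err;

        int rw;                         /* READ, WRITE or REQ_OP_WRITE_ZEROES */
        struct dm_io_region source;
        unsigned int num_dests;
        struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];
        struct page_list *pages;

        /* Set this to be notified when the job has completed. */
        dm_kcopyd_notify_fn fn;
        void *context;

        /* Only used if the job has been split into more manageable parts. */
        struct mutex lock;
        atomic_t sub_jobs;
        sector_t progress;
        sector_t write_offset;

        struct kcopyd_job *master_job;  /* points to itself on the master */
    };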
408 * Functions to push and pop a job onto the head of a given job list.
414 struct kcopyd_job *job;
420 list_for_each_entry(job, jobs, list) {
421 if (job->rw == READ || !test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
422 list_del(&job->list);
423 return job;
426 if (job->write_offset == job->master_job->write_offset) {
427 job->master_job->write_offset += job->source.count;
428 list_del(&job->list);
429 return job;
439 struct kcopyd_job *job = NULL;
446 job = pop_io_job(jobs, kc);
448 job = list_entry(jobs->next, struct kcopyd_job, list);
449 list_del(&job->list);
454 return job;
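pop_io_job() releases any read, and any write without a sequential-write constraint; a DM_KCOPYD_WRITE_SEQ sub-job is released only when its write_offset equals the master job's current write_offset, which is then advanced by the sub-job's length. A minimal user-space model of that gating (hypothetical types; standalone, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    struct sub { long off, count; bool done; };

    /* Release only the sub-job whose offset matches the write frontier. */
    static struct sub *pop_seq(struct sub *jobs, int n, long *master_off)
    {
        for (int i = 0; i < n; i++) {
            if (!jobs[i].done && jobs[i].off == *master_off) {
                *master_off += jobs[i].count;  /* advance the frontier */
                jobs[i].done = true;
                return &jobs[i];
            }
        }
        return NULL;  /* nothing is in order yet; callers retry later */
    }

    int main(void)
    {
        /* Reads finished out of order; writes must still go 0, 512, 1024. */
        struct sub jobs[] = { {1024, 512, false}, {0, 512, false}, {512, 512, false} };
        long master_off = 0;
        struct sub *j;

        while ((j = pop_seq(jobs, 3, &master_off)))
            printf("write sub-job at offset %ld\n", j->off);
        return 0;
    }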
457 static void push(struct list_head *jobs, struct kcopyd_job *job)
460 struct dm_kcopyd_client *kc = job->kc;
463 list_add_tail(&job->list, jobs);
468 static void push_head(struct list_head *jobs, struct kcopyd_job *job)
471 struct dm_kcopyd_client *kc = job->kc;
474 list_add(&job->list, jobs);
480 * These three functions process 1 item from the corresponding job list.
487 static int run_complete_job(struct kcopyd_job *job)
489 void *context = job->context;
490 int read_err = job->read_err;
491 unsigned long write_err = job->write_err;
492 dm_kcopyd_notify_fn fn = job->fn;
493 struct dm_kcopyd_client *kc = job->kc;
495 if (job->pages && job->pages != &zero_page_list)
496 kcopyd_put_pages(kc, job->pages);
498 * If this is the master job, the sub jobs have already completed so we can free everything.
501 if (job->master_job == job) {
502 mutex_destroy(&job->lock);
503 mempool_free(job, &kc->job_pool);
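read_err, write_err, fn and context are copied to locals above precisely because this mempool_free() may release the job before the callback runs. The elided tail of run_complete_job() plausibly continues by invoking the saved callback and waking anyone waiting for the client to drain (a sketch, not matched lines):

    fn(read_err, write_err, context);

    if (atomic_dec_and_test(&kc->nr_jobs))
        wake_up(&kc->destroyq);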
517 struct kcopyd_job *job = (struct kcopyd_job *) context;
518 struct dm_kcopyd_client *kc = job->kc;
523 if (op_is_write(job->rw))
524 job->write_err |= error;
526 job->read_err = 1;
528 if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
529 push(&kc->complete_jobs, job);
535 if (op_is_write(job->rw))
536 push(&kc->complete_jobs, job);
539 job->rw = WRITE;
540 push(&kc->io_jobs, job);
548 * Request io on as many buffer heads as we can currently get for a particular job.
550 static int run_io_job(struct kcopyd_job *job)
554 .bi_op = job->rw,
557 .mem.ptr.pl = job->pages,
560 .notify.context = job,
561 .client = job->kc->io_client,
568 if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
569 job->master_job->write_err) {
570 job->write_err = job->master_job->write_err;
574 io_job_start(job->kc->throttle);
576 if (job->rw == READ)
577 r = dm_io(&io_req, 1, &job->source, NULL);
579 r = dm_io(&io_req, job->num_dests, job->dests, NULL);
584 static int run_pages_job(struct kcopyd_job *job)
587 unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
589 r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
591 /* this job is ready for io */
592 push(&job->kc->io_jobs, job);
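The elided failure branch matters for the queueing scheme: a hard error propagates, but -ENOMEM is translated into a positive return so that process_jobs() requeues the job until completed jobs free some pages. A sketch of that branch (consistent with the push_head() requeue shown in the process_jobs() lines below):

    if (r == -ENOMEM)
        return 1;   /* can't get pages now; retried after completions */
    return r;       /* hard error: the job will be failed */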
610 struct kcopyd_job *job;
613 while ((job = pop(jobs, kc))) {
615 r = fn(job);
618 /* error this rogue job */
619 if (op_is_write(job->rw))
620 job->write_err = (unsigned long) -1L;
622 job->read_err = 1;
623 push(&kc->complete_jobs, job);
630 * We couldn't service this job ATM, so
631 * push this job back onto the list.
633 push_head(jobs, job);
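process_jobs() runs one handler per queue entry: a negative return fails the job and completes it, zero consumes it, and a positive return triggers this push_head() requeue. The worker drains the queues in a deliberate order, because completions free pages for pages jobs and successful pages jobs jump onto the io list; an abridged sketch of that driver (plugging and the callback_jobs splice omitted):

    static void do_work(struct work_struct *work)
    {
        struct dm_kcopyd_client *kc = container_of(work,
                        struct dm_kcopyd_client, kcopyd_work);

        /* Order matters: completions release pages for pages jobs,
         * and pages jobs feed the io jobs list. */
        process_jobs(&kc->complete_jobs, kc, run_complete_job);
        process_jobs(&kc->pages_jobs, kc, run_pages_job);
        process_jobs(&kc->io_jobs, kc, run_io_job);
    }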
672 * If we are copying a small region we just dispatch a single job to do the copy, otherwise the io has to be split up into many jobs.
676 static void dispatch_job(struct kcopyd_job *job)
678 struct dm_kcopyd_client *kc = job->kc;
680 if (unlikely(!job->source.count))
681 push(&kc->callback_jobs, job);
682 else if (job->pages == &zero_page_list)
683 push(&kc->io_jobs, job);
685 push(&kc->pages_jobs, job);
696 struct kcopyd_job *job = sub_job->master_job;
697 struct dm_kcopyd_client *kc = job->kc;
699 mutex_lock(&job->lock);
703 job->read_err = 1;
706 job->write_err |= write_err;
711 if ((!job->read_err && !job->write_err) ||
712 test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
714 progress = job->progress;
715 count = job->source.count - progress;
720 job->progress += count;
723 mutex_unlock(&job->lock);
728 *sub_job = *job;
733 for (i = 0; i < job->num_dests; i++) {
742 } else if (atomic_dec_and_test(&job->sub_jobs)) {
753 push(&kc->complete_jobs, job);
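segment_complete() carves the next sub-job by clamping the sectors remaining after job->progress to the client's sub-job size, then advancing progress; once the last outstanding sub-job drops its reference (the atomic_dec_and_test above), the master job is queued for completion. A tiny user-space model of the chunking arithmetic (values assumed for illustration):

    #include <stdio.h>

    int main(void)
    {
        long total = 2200, sub_job_size = 512;  /* sectors; assumed */
        long progress = 0;

        while (progress < total) {
            long count = total - progress;
            if (count > sub_job_size)
                count = sub_job_size;           /* clamp to one sub-job */
            printf("sub-job: offset %ld, count %ld\n", progress, count);
            progress += count;                  /* 512 x4, then 152 */
        }
        return 0;
    }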
778 struct kcopyd_job *job;
782 * Allocate an array of jobs consisting of one master job followed by SPLIT_COUNT sub jobs.
785 job = mempool_alloc(&kc->job_pool, GFP_NOIO);
786 mutex_init(&job->lock);
791 job->kc = kc;
792 job->flags = flags;
793 job->read_err = 0;
794 job->write_err = 0;
796 job->num_dests = num_dests;
797 memcpy(&job->dests, dests, sizeof(*dests) * num_dests);
804 if (!test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
805 for (i = 0; i < job->num_dests; i++) {
807 set_bit(DM_KCOPYD_WRITE_SEQ, &job->flags);
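The condition elided at line 806 decides when sequential writes are forced. In the kernels this code comes from, that is a zoned-model check on each destination; a sketch, flagged as recalled rather than matched here:

    if (!test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
        for (i = 0; i < job->num_dests; i++) {
            /* Host-managed zoned devices only accept sequential writes. */
            if (bdev_zoned_model(dests[i].bdev) == BLK_ZONED_HM) {
                set_bit(DM_KCOPYD_WRITE_SEQ, &job->flags);
                break;
            }
        }
    }

The stanza at lines 816-818 then drops DM_KCOPYD_IGNORE_ERROR, since a sequential write stream cannot skip over failed sub-jobs.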
816 if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
817 test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags))
818 clear_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags);
821 job->source = *from;
822 job->pages = NULL;
823 job->rw = READ;
825 memset(&job->source, 0, sizeof job->source);
826 job->source.count = job->dests[0].count;
827 job->pages = &zero_page_list;
832 job->rw = REQ_OP_WRITE_ZEROES;
833 for (i = 0; i < job->num_dests; i++)
834 if (!bdev_write_zeroes_sectors(job->dests[i].bdev)) {
835 job->rw = WRITE;
840 job->fn = fn;
841 job->context = context;
842 job->master_job = job;
843 job->write_offset = 0;
845 if (job->source.count <= kc->sub_job_size)
846 dispatch_job(job);
848 job->progress = 0;
849 split_job(job);
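Everything above is driven through the public entry point. A hedged usage sketch of how a device-mapper target might start a copy (region setup and the callback body are illustrative, not from this file):

    #include <linux/dm-kcopyd.h>

    static void copy_done(int read_err, unsigned long write_err, void *context)
    {
        /* Receives the master job's read_err/write_err seen above. */
    }

    static void start_copy(struct dm_kcopyd_client *kc,
                           struct dm_io_region *from, struct dm_io_region *to)
    {
        /* One source, one destination, no flags; fn/context end up in
         * job->fn and job->context on the master job. */
        dm_kcopyd_copy(kc, from, 1, to, 0, copy_done, NULL);
    }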
865 struct kcopyd_job *job;
867 job = mempool_alloc(&kc->job_pool, GFP_NOIO);
869 memset(job, 0, sizeof(struct kcopyd_job));
870 job->kc = kc;
871 job->fn = fn;
872 job->context = context;
873 job->master_job = job;
877 return job;
883 struct kcopyd_job *job = j;
884 struct dm_kcopyd_client *kc = job->kc;
886 job->read_err = read_err;
887 job->write_err = write_err;
889 push(&kc->callback_jobs, job);
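The prepare/do pair splits allocation from firing: the job is allocated with GFP_NOIO up front, and dm_kcopyd_do_callback() later queues it on kc->callback_jobs with the final error state. A hedged sketch of client usage (my_done_fn and my_ctx are hypothetical names):

    /* Allocate the callback job early, outside the completion path. */
    void *j = dm_kcopyd_prepare_callback(kc, my_done_fn, my_ctx);

    /* Later, e.g. from an endio path: hand over the error state; the
     * worker then invokes my_done_fn(read_err, write_err, my_ctx). */
    dm_kcopyd_do_callback(j, read_err, write_err);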
895 * Cancels a kcopyd job, e.g. someone might be deactivating a device.
899 int kcopyd_cancel(struct kcopyd_job *job, int block)