Lines matching refs: job (drivers/md/dm-kcopyd.c)
41 MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
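
This module parameter caps how large each sub-job can be, so one big copy request cannot monopolize a client. A sketch of the surrounding declaration, assuming the usual module_param() pattern (the 512 KiB default matches mainline's DEFAULT_SUB_JOB_SIZE_KB, but treat it as an assumption for other kernel versions):

static unsigned int kcopyd_subjob_size_kb = 512;	/* assumed default, KiB */
module_param(kcopyd_subjob_size_kb, uint, 0644);
MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
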
353 * Error state of the job.
373 * Set this to ensure you are notified when the job has
380 * These fields are only used if the job has been split
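
The three comment fragments above (lines 353, 373, 380) all come from the struct kcopyd_job definition. An abridged sketch of the fields they annotate, reconstructed from mainline (exact types and field order vary by kernel version):

struct kcopyd_job {
	struct dm_kcopyd_client *kc;
	struct list_head list;
	unsigned int flags;

	/* Error state of the job. */
	int read_err;
	unsigned long write_err;

	enum req_op op;			/* REQ_OP_READ, then REQ_OP_WRITE */
	struct dm_io_region source;
	unsigned int num_dests;
	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];
	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed.  'context' is for the callback to use.
	 */
	dm_kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct mutex lock;
	atomic_t sub_jobs;
	sector_t progress;
	sector_t write_offset;

	struct kcopyd_job *master_job;
};
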
414 * Functions to push and pop a job onto the head of a given job
420 struct kcopyd_job *job;
426 list_for_each_entry(job, jobs, list) {
427 if (job->op == REQ_OP_READ ||
428 !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
429 list_del(&job->list);
430 return job;
433 if (job->write_offset == job->master_job->write_offset) {
434 job->master_job->write_offset += job->source.count;
435 list_del(&job->list);
436 return job;
446 struct kcopyd_job *job = NULL;
452 job = pop_io_job(jobs, kc);
454 job = list_entry(jobs->next, struct kcopyd_job, list);
455 list_del(&job->list);
460 return job;
463 static void push(struct list_head *jobs, struct kcopyd_job *job)
466 struct dm_kcopyd_client *kc = job->kc;
469 list_add_tail(&job->list, jobs);
474 static void push_head(struct list_head *jobs, struct kcopyd_job *job)
476 struct dm_kcopyd_client *kc = job->kc;
479 list_add(&job->list, jobs);
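
pop_io_job() (lines 426-436) enforces write ordering: reads and non-sequential writes may be taken from anywhere on the list, but a DM_KCOPYD_WRITE_SEQ write is only eligible when its write_offset matches the master job's next expected offset. The match filter drops the locking around these helpers; the elided context of pop() presumably looks like the sketch below, and push()/push_head() (lines 463-479) are the same pattern with list_add_tail()/list_add() under the same lock:

static struct kcopyd_job *pop(struct list_head *jobs,
			      struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job = NULL;

	spin_lock_irq(&kc->job_lock);
	if (!list_empty(jobs)) {
		/* only the io_jobs list needs the ordering-aware pop */
		if (jobs == &kc->io_jobs)
			job = pop_io_job(jobs, kc);
		else {
			job = list_entry(jobs->next, struct kcopyd_job, list);
			list_del(&job->list);
		}
	}
	spin_unlock_irq(&kc->job_lock);

	return job;
}
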
485 * job list.
492 static int run_complete_job(struct kcopyd_job *job)
494 void *context = job->context;
495 int read_err = job->read_err;
496 unsigned long write_err = job->write_err;
497 dm_kcopyd_notify_fn fn = job->fn;
498 struct dm_kcopyd_client *kc = job->kc;
500 if (job->pages && job->pages != &zero_page_list)
501 kcopyd_put_pages(kc, job->pages);
503 * If this is the master job, the sub jobs have already
506 if (job->master_job == job) {
507 mutex_destroy(&job->lock);
508 mempool_free(job, &kc->job_pool);
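
run_complete_job() snapshots fn/context/errors into locals first because the job may be freed before the callback runs: page lists are returned unless they are the shared zero_page_list, and only the master job owns the mutex and the mempool entry. The tail the filter drops presumably invokes the notify function and then wakes anyone waiting in client destruction:

	/* presumed tail of run_complete_job() */
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);
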
522 struct kcopyd_job *job = context;
523 struct dm_kcopyd_client *kc = job->kc;
528 if (op_is_write(job->op))
529 job->write_err |= error;
531 job->read_err = 1;
533 if (!(job->flags & BIT(DM_KCOPYD_IGNORE_ERROR))) {
534 push(&kc->complete_jobs, job);
540 if (op_is_write(job->op))
541 push(&kc->complete_jobs, job);
544 job->op = REQ_OP_WRITE;
545 push(&kc->io_jobs, job);
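
complete_io() (lines 522-545) is the dm_io notify callback and the heart of kcopyd's two-phase state machine: a failed I/O records read_err/write_err and completes early unless DM_KCOPYD_IGNORE_ERROR is set; a finished write completes the job; a finished read flips the same job into its write phase. The glue elided between lines 541 and 544 presumably reads:

	if (op_is_write(job->op))
		push(&kc->complete_jobs, job);
	else {
		/* read phase done: reuse the same job for the write phase */
		job->op = REQ_OP_WRITE;
		push(&kc->io_jobs, job);
	}

	wake(kc);
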
553 * a particular job.
555 static int run_io_job(struct kcopyd_job *job)
559 .bi_opf = job->op,
561 .mem.ptr.pl = job->pages,
564 .notify.context = job,
565 .client = job->kc->io_client,
572 if (job->flags & BIT(DM_KCOPYD_WRITE_SEQ) &&
573 job->master_job->write_err) {
574 job->write_err = job->master_job->write_err;
578 io_job_start(job->kc->throttle);
580 if (job->op == REQ_OP_READ)
581 r = dm_io(&io_req, 1, &job->source, NULL, IOPRIO_DEFAULT);
583 r = dm_io(&io_req, job->num_dests, job->dests, NULL, IOPRIO_DEFAULT);
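
The dm_io_request initializer in run_io_job() (lines 555-565), with the fields the match filter omitted, presumably reads:

	struct dm_io_request io_req = {
		.bi_opf = job->op,
		.mem.type = DM_IO_PAGE_LIST,	/* pages from run_pages_job() */
		.mem.ptr.pl = job->pages,
		.mem.offset = 0,
		.notify.fn = complete_io,	/* async completion, above */
		.notify.context = job,
		.client = job->kc->io_client,
	};
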
588 static int run_pages_job(struct kcopyd_job *job)
591 unsigned int nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
593 r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
595 /* this job is ready for io */
596 push(&job->kc->io_jobs, job);
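
The page arithmetic: dm_io_region counts are in 512-byte sectors, so PAGE_SIZE >> 9 is sectors per page (8 with 4 KiB pages); a default 512 KiB sub-job is 1024 sectors, giving nr_pages = dm_div_up(1024, 8) = 128. The presumed tail of run_pages_job() turns allocation pressure into a retry rather than a failure:

	if (r == -ENOMEM)
		/* can't complete now; process_jobs() re-queues the job */
		return 1;

	return r;
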
614 struct kcopyd_job *job;
617 while ((job = pop(jobs, kc))) {
619 r = fn(job);
622 /* error this rogue job */
623 if (op_is_write(job->op))
624 job->write_err = (unsigned long) -1L;
626 job->read_err = 1;
627 push(&kc->complete_jobs, job);
634 * We couldn't service this job ATM, so
635 * push this job back onto the list.
637 push_head(jobs, job);
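
process_jobs() (lines 614-637) gives the per-queue handlers a three-way return convention: r < 0 fails the job onto complete_jobs, r > 0 means "cannot service yet" and re-queues it at the head, and 0 counts as progress. Its presumed caller is the client's workqueue function, which drains the queues in dependency order so resources freed by one stage feed the next; a sketch under that assumption:

static void do_work(struct work_struct *work)
{
	struct dm_kcopyd_client *kc = container_of(work,
					struct dm_kcopyd_client, kcopyd_work);
	struct blk_plug plug;

	/*
	 * Order matters: completed jobs free pages for pages_jobs,
	 * and successful pages_jobs jump onto io_jobs.
	 */
	spin_lock_irq(&kc->job_lock);
	list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
	spin_unlock_irq(&kc->job_lock);

	blk_start_plug(&plug);
	process_jobs(&kc->complete_jobs, kc, run_complete_job);
	process_jobs(&kc->pages_jobs, kc, run_pages_job);
	process_jobs(&kc->io_jobs, kc, run_io_job);
	blk_finish_plug(&plug);
}
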
675 * If we are copying a small region we just dispatch a single job
679 static void dispatch_job(struct kcopyd_job *job)
681 struct dm_kcopyd_client *kc = job->kc;
684 if (unlikely(!job->source.count))
685 push(&kc->callback_jobs, job);
686 else if (job->pages == &zero_page_list)
687 push(&kc->io_jobs, job);
689 push(&kc->pages_jobs, job);
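
dispatch_job() routes by what the job still needs: an empty source goes straight to the callback queue, a zero-fill job (pages == &zero_page_list) skips page allocation, and everything else must get pages first. Reconstructed with the accounting the filter drops:

static void dispatch_job(struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;

	atomic_inc(&kc->nr_jobs);
	if (unlikely(!job->source.count))
		push(&kc->callback_jobs, job);
	else if (job->pages == &zero_page_list)
		push(&kc->io_jobs, job);
	else
		push(&kc->pages_jobs, job);
	wake(kc);
}
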
700 struct kcopyd_job *job = sub_job->master_job;
701 struct dm_kcopyd_client *kc = job->kc;
703 mutex_lock(&job->lock);
707 job->read_err = 1;
710 job->write_err |= write_err;
715 if ((!job->read_err && !job->write_err) ||
716 job->flags & BIT(DM_KCOPYD_IGNORE_ERROR)) {
718 progress = job->progress;
719 count = job->source.count - progress;
724 job->progress += count;
727 mutex_unlock(&job->lock);
732 *sub_job = *job;
737 for (i = 0; i < job->num_dests; i++) {
746 } else if (atomic_dec_and_test(&job->sub_jobs)) {
757 push(&kc->complete_jobs, job);
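
segment_complete() (lines 700-757) is both the sub-job completion callback and the engine that carves the next chunk off the master job: under job->lock it folds sub-job errors into the master and claims up to kc->sub_job_size sectors starting at job->progress; outside the lock it clones the master into the sub-job, rebases source and dests by 'progress' sectors, and redispatches. When the final sub-job finishes, the master is pushed to complete_jobs. The cloning step around lines 732-737 presumably reads:

	if (count) {
		int i;

		*sub_job = *job;	/* clone the master job */
		sub_job->write_offset = progress;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = sub_job;
		dispatch_job(sub_job);
	}
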
782 struct kcopyd_job *job;
786 * Allocate an array of jobs consisting of one master job
789 job = mempool_alloc(&kc->job_pool, GFP_NOIO);
790 mutex_init(&job->lock);
795 job->kc = kc;
796 job->flags = flags;
797 job->read_err = 0;
798 job->write_err = 0;
800 job->num_dests = num_dests;
801 memcpy(&job->dests, dests, sizeof(*dests) * num_dests);
808 if (!(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
809 for (i = 0; i < job->num_dests; i++) {
811 job->flags |= BIT(DM_KCOPYD_WRITE_SEQ);
820 if (job->flags & BIT(DM_KCOPYD_WRITE_SEQ) &&
821 job->flags & BIT(DM_KCOPYD_IGNORE_ERROR))
822 job->flags &= ~BIT(DM_KCOPYD_IGNORE_ERROR);
825 job->source = *from;
826 job->pages = NULL;
827 job->op = REQ_OP_READ;
829 memset(&job->source, 0, sizeof(job->source));
830 job->source.count = job->dests[0].count;
831 job->pages = &zero_page_list;
836 job->op = REQ_OP_WRITE_ZEROES;
837 for (i = 0; i < job->num_dests; i++)
838 if (!bdev_write_zeroes_sectors(job->dests[i].bdev)) {
839 job->op = REQ_OP_WRITE;
844 job->fn = fn;
845 job->context = context;
846 job->master_job = job;
847 job->write_offset = 0;
849 if (job->source.count <= kc->sub_job_size)
850 dispatch_job(job);
852 job->progress = 0;
853 split_job(job);
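
dm_kcopyd_copy() (lines 782-853) is the public entry point matched above; a NULL 'from' takes the zero-fill path at lines 829-838, which is what dm_kcopyd_zero() uses. A minimal caller-side sketch with hypothetical my_copy()/my_copy_done() names, assuming a kernel where dm_kcopyd_copy() returns void (as the IOPRIO_DEFAULT calls above imply); the region values are illustrative:

#include <linux/dm-kcopyd.h>
#include <linux/dm-io.h>
#include <linux/completion.h>

/* Hypothetical callback: read_err is a boolean, write_err is a bitset
 * of failed destinations. */
static void my_copy_done(int read_err, unsigned long write_err, void *context)
{
	struct completion *done = context;

	if (read_err || write_err)
		pr_err("kcopyd: copy failed\n");
	complete(done);
}

static int my_copy(struct block_device *src, struct block_device *dst)
{
	struct dm_kcopyd_client *kc;
	struct dm_io_region from, to;
	DECLARE_COMPLETION_ONSTACK(done);

	kc = dm_kcopyd_client_create(NULL);	/* NULL: unthrottled */
	if (IS_ERR(kc))
		return PTR_ERR(kc);

	from.bdev = src;
	from.sector = 0;
	from.count = 1024;		/* 512 KiB in 512-byte sectors */

	to.bdev = dst;
	to.sector = 0;
	to.count = from.count;

	dm_kcopyd_copy(kc, &from, 1, &to, 0 /* flags */, my_copy_done, &done);
	wait_for_completion(&done);

	dm_kcopyd_client_destroy(kc);
	return 0;
}

Note the splitting threshold visible at lines 849-853: sources no larger than kc->sub_job_size dispatch as a single job, anything bigger goes through split_job().
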
869 struct kcopyd_job *job;
871 job = mempool_alloc(&kc->job_pool, GFP_NOIO);
873 memset(job, 0, sizeof(struct kcopyd_job));
874 job->kc = kc;
875 job->fn = fn;
876 job->context = context;
877 job->master_job = job;
881 return job;
887 struct kcopyd_job *job = j;
888 struct dm_kcopyd_client *kc = job->kc;
890 job->read_err = read_err;
891 job->write_err = write_err;
893 push(&kc->callback_jobs, job);
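
dm_kcopyd_prepare_callback() and dm_kcopyd_do_callback() (lines 869-893) let a target preallocate a callback-only job from the GFP_NOIO mempool, so the allocation cannot fail at an awkward moment, and later complete it through kcopyd's worker rather than from its own completion context. A hypothetical usage sketch:

	/* during setup, while allocation may still block */
	void *cb_job = dm_kcopyd_prepare_callback(kc, my_copy_done, ctx);

	/* later, e.g. from an endio path: hand the result to kcopyd's
	 * worker, which queues it on callback_jobs */
	dm_kcopyd_do_callback(cb_job, 0 /* read_err */, 0 /* write_err */);
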
899 * Cancels a kcopyd job, e.g. someone might be deactivating a
903 int kcopyd_cancel(struct kcopyd_job *job, int block)
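
In mainline, kcopyd_cancel() sits inside a compiled-out '#if 0' block and was never implemented, so jobs cannot be cancelled once dispatched; the presumed body (worth verifying against your tree):

#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif
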