Lines Matching defs:kc
(All matches below are from drivers/md/dm-kcopyd.c; kc is the per-client state, a struct dm_kcopyd_client *, threaded through the page reserve, the four job lists, and client create/destroy.)
206 static void wake(struct dm_kcopyd_client *kc)
208 queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
241 static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
248 if (kc->nr_free_pages >= kc->nr_reserved_pages)
251 pl->next = kc->pages;
252 kc->pages = pl;
253 kc->nr_free_pages++;
260 static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
271 pl = kc->pages;
274 kc->pages = pl->next;
275 kc->nr_free_pages--;
285 kcopyd_put_pages(kc, *pages);
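The two functions above manage a per-client reserve of pages: kcopyd_put_pages pushes a page_list node onto the head of kc->pages (freeing outright once the reserve is full, per the check at 248), while kcopyd_get_pages prefers a fresh opportunistic allocation and dips into the reserve only when that fails, returning everything via kcopyd_put_pages at 285 if it runs dry. A minimal userspace sketch of the same intrusive LIFO free list; page_pool and the pool_* names are illustrative, not from the source:

#include <stdlib.h>

struct page_list {			/* one node per cached page */
	struct page_list *next;
	void *page;			/* stands in for struct page * */
};

struct page_pool {			/* the pages fields of dm_kcopyd_client */
	struct page_list *pages;	/* head of the LIFO free list */
	unsigned nr_free_pages;
	unsigned nr_reserved_pages;
};

/* mirrors kcopyd_put_pages: push a node back onto the free list
 * (the kernel version also frees any page beyond the reserve) */
static void pool_put(struct page_pool *pool, struct page_list *pl)
{
	pl->next = pool->pages;
	pool->pages = pl;
	pool->nr_free_pages++;
}

/* mirrors the reserve fallback of kcopyd_get_pages: pop from the head */
static struct page_list *pool_get(struct page_pool *pool)
{
	struct page_list *pl = pool->pages;

	if (!pl)
		return NULL;		/* reserve exhausted */
	pool->pages = pl->next;
	pool->nr_free_pages--;
	return pl;
}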
306 static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
322 kc->nr_reserved_pages += nr_pages;
323 kcopyd_put_pages(kc, pl);
328 static void client_free_pages(struct dm_kcopyd_client *kc)
330 BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
331 drop_pages(kc->pages);
332 kc->pages = NULL;
333 kc->nr_free_pages = kc->nr_reserved_pages = 0;
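client_reserve_pages and client_free_pages bracket the pool's lifetime: reserve allocates nr_pages up front, bumps nr_reserved_pages, and feeds the chain through kcopyd_put_pages so it lands on the free list; free asserts that every page has come back (the BUG_ON at 330) before dropping the list. Continuing the sketch above, with malloc standing in for the kernel page allocator:

#include <assert.h>

static int pool_reserve(struct page_pool *pool, unsigned nr_pages)
{
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		struct page_list *pl = malloc(sizeof(*pl));

		if (!pl)
			return -1;	/* the kernel frees the partial
					 * chain and returns -ENOMEM */
		pool->nr_reserved_pages++;
		pool_put(pool, pl);	/* lands on the free list */
	}
	return 0;
}

static void pool_free_all(struct page_pool *pool)
{
	/* same invariant as the BUG_ON at 330: every page must be back */
	assert(pool->nr_free_pages == pool->nr_reserved_pages);
	while (pool->pages) {
		struct page_list *pl = pool->pages;

		pool->pages = pl->next;
		free(pl);
	}
	pool->nr_free_pages = pool->nr_reserved_pages = 0;
}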
342 struct dm_kcopyd_client *kc;
412 struct dm_kcopyd_client *kc)
437 struct dm_kcopyd_client *kc)
442 spin_lock_irqsave(&kc->job_lock, flags);
445 if (jobs == &kc->io_jobs)
446 job = pop_io_job(jobs, kc);
452 spin_unlock_irqrestore(&kc->job_lock, flags);
460 struct dm_kcopyd_client *kc = job->kc;
462 spin_lock_irqsave(&kc->job_lock, flags);
464 spin_unlock_irqrestore(&kc->job_lock, flags);
471 struct dm_kcopyd_client *kc = job->kc;
473 spin_lock_irqsave(&kc->job_lock, flags);
475 spin_unlock_irqrestore(&kc->job_lock, flags);
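Lines 412-475 are the job-list plumbing: pop takes the oldest job off a list under kc->job_lock (pop_io_job at 446 adds extra ordering rules for io_jobs), push appends to the tail, and push_head requeues at the front so a deferred job keeps its place. A userspace sketch of the same locked-queue discipline; a pthread mutex stands in for the irq-safe spinlock, and one mutex per list replaces the single shared job_lock to keep the sketch short:

#include <pthread.h>
#include <stddef.h>

struct job {
	struct job *next;	/* singly linked for brevity; the kernel
				 * chains jobs with struct list_head */
};

struct job_queue {
	pthread_mutex_t lock;	/* spin_lock_irqsave(&kc->job_lock, ...) */
	struct job *head, *tail;
};

/* mirrors pop(): take the oldest job, or NULL if the list is empty */
static struct job *queue_pop(struct job_queue *q)
{
	struct job *j;

	pthread_mutex_lock(&q->lock);
	j = q->head;
	if (j) {
		q->head = j->next;
		if (!q->head)
			q->tail = NULL;
	}
	pthread_mutex_unlock(&q->lock);
	return j;
}

/* mirrors push(): append, so jobs are serviced in submission order */
static void queue_push(struct job_queue *q, struct job *j)
{
	pthread_mutex_lock(&q->lock);
	j->next = NULL;
	if (q->tail)
		q->tail->next = j;
	else
		q->head = j;
	q->tail = j;
	pthread_mutex_unlock(&q->lock);
}

/* mirrors push_head(): requeue at the front so a deferred job keeps
 * its place in line */
static void queue_push_head(struct job_queue *q, struct job *j)
{
	pthread_mutex_lock(&q->lock);
	j->next = q->head;
	q->head = j;
	if (!q->tail)
		q->tail = j;
	pthread_mutex_unlock(&q->lock);
}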
493 struct dm_kcopyd_client *kc = job->kc;
496 kcopyd_put_pages(kc, job->pages);
503 mempool_free(job, &kc->job_pool);
507 if (atomic_dec_and_test(&kc->nr_jobs))
508 wake_up(&kc->destroyq);
518 struct dm_kcopyd_client *kc = job->kc;
520 io_job_finish(kc->throttle);
529 push(&kc->complete_jobs, job);
530 wake(kc);
536 push(&kc->complete_jobs, job);
540 push(&kc->io_jobs, job);
543 wake(kc);
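complete_io (518-543) makes each copy a two-phase state machine: a successfully read job is flipped into a write and requeued on io_jobs; a finished write, or an error the caller didn't ask to ignore, goes to complete_jobs. Continuing the sketch, with an illustrative client type bundling the lists the way struct dm_kcopyd_client does:

struct client {
	struct job_queue callback_jobs, complete_jobs, io_jobs, pages_jobs;
	int nr_jobs;		/* atomic_t in the kernel */
};

static void wake(struct client *kc)
{
	/* kick the worker; the kernel does queue_work(kc->kcopyd_wq, ...) */
	(void)kc;
}

enum job_phase { PHASE_READ, PHASE_WRITE };

/* the shape of complete_io: flip a finished read into a write,
 * finish everything else */
static void io_complete(struct client *kc, struct job *j,
			enum job_phase phase, int error)
{
	if (error || phase == PHASE_WRITE)
		queue_push(&kc->complete_jobs, j);	/* done, or failed */
	else
		queue_push(&kc->io_jobs, j);	/* read data sits in the
						 * job's pages; reuse the
						 * job as a write */
	wake(kc);
}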
561 .client = job->kc->io_client,
574 io_job_start(job->kc->throttle);
589 r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
592 push(&job->kc->io_jobs, job);
607 static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
613 while ((job = pop(jobs, kc))) {
623 push(&kc->complete_jobs, job);
624 wake(kc);
648 struct dm_kcopyd_client *kc = container_of(work,
660 spin_lock_irqsave(&kc->job_lock, flags);
661 list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
662 spin_unlock_irqrestore(&kc->job_lock, flags);
665 process_jobs(&kc->complete_jobs, kc, run_complete_job);
666 process_jobs(&kc->pages_jobs, kc, run_pages_job);
667 process_jobs(&kc->io_jobs, kc, run_io_job);
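process_jobs (607-624) pops and runs jobs until its handler defers, and the handler's return value is a tri-state contract: 0 for done, positive when the job can't run yet (run_pages_job finding no free pages), negative on hard error. do_work (648-667) first splices callback_jobs onto complete_jobs (661), then drains the lists in dependency order, completes first because they recycle the pages and job structs the other lists are waiting on. A sketch of that shape, continuing from above:

/* fn returns 0 on success, >0 when the job can't run yet,
 * <0 on hard error */
static int process_jobs(struct job_queue *q, struct client *kc,
			int (*fn)(struct client *, struct job *))
{
	struct job *j;
	int r, done = 0;

	while ((j = queue_pop(q))) {
		r = fn(kc, j);
		if (r < 0) {			/* error: finish the job */
			queue_push(&kc->complete_jobs, j);
			continue;
		}
		if (r > 0) {			/* can't run yet: requeue at
						 * the head, keep ordering */
			queue_push_head(q, j);
			break;
		}
		done++;
	}
	return done;
}

static void do_work(struct client *kc,
		    int (*complete_fn)(struct client *, struct job *),
		    int (*pages_fn)(struct client *, struct job *),
		    int (*io_fn)(struct client *, struct job *))
{
	struct job *j;

	/* splice freshly queued callback jobs onto complete_jobs
	 * (the list_splice_tail_init at 661) */
	while ((j = queue_pop(&kc->callback_jobs)))
		queue_push(&kc->complete_jobs, j);

	/* completes first: they recycle pages and jobs the others need */
	process_jobs(&kc->complete_jobs, kc, complete_fn);
	process_jobs(&kc->pages_jobs, kc, pages_fn);
	process_jobs(&kc->io_jobs, kc, io_fn);
}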
678 struct dm_kcopyd_client *kc = job->kc;
679 atomic_inc(&kc->nr_jobs);
681 push(&kc->callback_jobs, job);
683 push(&kc->io_jobs, job);
685 push(&kc->pages_jobs, job);
686 wake(kc);
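dispatch_job (678-686) decides where a new job starts: a job with no source I/O goes straight to callback_jobs, a zero-fill job (which writes a shared zero page list and needs no page allocation) to io_jobs, and an ordinary copy to pages_jobs to get pages first. The sketch below reduces the kernel's pointer checks to an explicit kind, which is an illustrative simplification:

enum job_kind { JOB_CALLBACK_ONLY, JOB_ZERO_FILL, JOB_COPY };

static void dispatch(struct client *kc, struct job *j, enum job_kind kind)
{
	kc->nr_jobs++;			/* atomic_inc in the kernel */
	if (kind == JOB_CALLBACK_ONLY)
		queue_push(&kc->callback_jobs, j);	/* no I/O at all */
	else if (kind == JOB_ZERO_FILL)
		queue_push(&kc->io_jobs, j);	/* zero jobs need no pages */
	else
		queue_push(&kc->pages_jobs, j);	/* must get pages first */
	wake(kc);
}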
697 struct dm_kcopyd_client *kc = job->kc;
717 if (count > kc->sub_job_size)
718 count = kc->sub_job_size;
753 push(&kc->complete_jobs, job);
754 wake(kc);
765 atomic_inc(&master_job->kc->nr_jobs);
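segment_complete and split_job (697-765) carve a large copy into sub-jobs of at most kc->sub_job_size sectors, the clamp at 717-718; the master job completes only after the last sub-job finishes, and 765 pins an extra nr_jobs reference for the master while splitting is in flight. The splitting arithmetic alone, as a runnable sketch with made-up sizes:

#include <stdio.h>

int main(void)
{
	unsigned long long offset = 0, count = 1000000;	/* sectors */
	unsigned sub_job_size = 16384;			/* kc->sub_job_size */

	while (count) {
		unsigned long long chunk = count;

		if (chunk > sub_job_size)
			chunk = sub_job_size;		/* the clamp at 717 */
		printf("sub-job: offset=%llu count=%llu\n", offset, chunk);
		offset += chunk;
		count -= chunk;
	}
	return 0;
}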
774 void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
785 job = mempool_alloc(&kc->job_pool, GFP_NOIO);
791 job->kc = kc;
845 if (job->source.count <= kc->sub_job_size)
854 void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
858 dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
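dm_kcopyd_copy is the public entry point, and dm_kcopyd_zero (854-858) is literally a copy with a NULL source. A hedged kernel-module sketch of calling them; copy_done, start_copy, and start_zero are hypothetical names, but the signatures match the listing at 774 and 854:

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>

/* called once per submitted copy when every sub-job has finished;
 * both error arguments are zero on success */
static void copy_done(int read_err, unsigned long write_err, void *context)
{
	/* resume whatever was waiting on the copy */
}

static void start_copy(struct dm_kcopyd_client *kc,
		       struct dm_io_region *from, struct dm_io_region *to)
{
	dm_kcopyd_copy(kc, from, 1, to, 0, copy_done, NULL);
}

static void start_zero(struct dm_kcopyd_client *kc, struct dm_io_region *to)
{
	/* dm_kcopyd_zero is dm_kcopyd_copy with a NULL source (858) */
	dm_kcopyd_zero(kc, 1, to, 0, copy_done, NULL);
}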
862 void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
867 job = mempool_alloc(&kc->job_pool, GFP_NOIO);
870 job->kc = kc;
875 atomic_inc(&kc->nr_jobs);
884 struct dm_kcopyd_client *kc = job->kc;
889 push(&kc->callback_jobs, job);
890 wake(kc);
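dm_kcopyd_prepare_callback and dm_kcopyd_do_callback (862-890) split callback delivery in two: the job is allocated up front with GFP_NOIO where blocking allocation is safe, and fired later, possibly from a context that cannot allocate at all. A kernel-style usage sketch with hypothetical names:

static void event_done(int read_err, unsigned long write_err, void *context)
{
	/* runs from the kcopyd worker once the callback job is fired */
}

static void *arm_callback(struct dm_kcopyd_client *kc)
{
	/* allocate the callback job now, while GFP_NOIO is allowed */
	return dm_kcopyd_prepare_callback(kc, event_done, NULL);
}

static void fire_callback(void *job)
{
	/* queues the job on callback_jobs and wakes the worker */
	dm_kcopyd_do_callback(job, 0 /* read_err */, 0 /* write_err */);
}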
913 struct dm_kcopyd_client *kc;
915 kc = kzalloc(sizeof(*kc), GFP_KERNEL);
916 if (!kc)
919 spin_lock_init(&kc->job_lock);
920 INIT_LIST_HEAD(&kc->callback_jobs);
921 INIT_LIST_HEAD(&kc->complete_jobs);
922 INIT_LIST_HEAD(&kc->io_jobs);
923 INIT_LIST_HEAD(&kc->pages_jobs);
924 kc->throttle = throttle;
926 r = mempool_init_slab_pool(&kc->job_pool, MIN_JOBS, _job_cache);
930 INIT_WORK(&kc->kcopyd_work, do_work);
931 kc->kcopyd_wq = alloc_workqueue("kcopyd", WQ_MEM_RECLAIM, 0);
932 if (!kc->kcopyd_wq) {
937 kc->sub_job_size = dm_get_kcopyd_subjob_size();
938 reserve_pages = DIV_ROUND_UP(kc->sub_job_size << SECTOR_SHIFT, PAGE_SIZE);
940 kc->pages = NULL;
941 kc->nr_reserved_pages = kc->nr_free_pages = 0;
942 r = client_reserve_pages(kc, reserve_pages);
946 kc->io_client = dm_io_client_create();
947 if (IS_ERR(kc->io_client)) {
948 r = PTR_ERR(kc->io_client);
952 init_waitqueue_head(&kc->destroyq);
953 atomic_set(&kc->nr_jobs, 0);
955 return kc;
958 client_free_pages(kc);
960 destroy_workqueue(kc->kcopyd_wq);
962 mempool_exit(&kc->job_pool);
964 kfree(kc);
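Lines 958-964 are the unwind ladder of dm_kcopyd_client_create: each failing step jumps to a label that releases exactly what the earlier steps acquired, in reverse order (pages, then workqueue, then mempool, then the struct itself). The shape of the pattern, reduced to a runnable sketch with hypothetical resources:

#include <stdlib.h>

/* hypothetical stand-ins for the page reserve, workqueue, mempool... */
struct thing { int a_held, b_held; };

static int acquire_a(struct thing *t) { t->a_held = 1; return 0; }
static int acquire_b(struct thing *t) { t->b_held = 1; return 0; }
static void release_a(struct thing *t) { t->a_held = 0; }

static struct thing *thing_create(void)
{
	struct thing *t = calloc(1, sizeof(*t));

	if (!t)
		return NULL;
	if (acquire_a(t))
		goto bad_a;	/* nothing held yet: just free the struct */
	if (acquire_b(t))
		goto bad_b;	/* undo a, then free */
	return t;

bad_b:
	release_a(t);
bad_a:
	free(t);
	return NULL;
}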
970 void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
973 wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
975 BUG_ON(!list_empty(&kc->callback_jobs));
976 BUG_ON(!list_empty(&kc->complete_jobs));
977 BUG_ON(!list_empty(&kc->io_jobs));
978 BUG_ON(!list_empty(&kc->pages_jobs));
979 destroy_workqueue(kc->kcopyd_wq);
980 dm_io_client_destroy(kc->io_client);
981 client_free_pages(kc);
982 mempool_exit(&kc->job_pool);
983 kfree(kc);
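dm_kcopyd_client_destroy (970-983) is the drain-then-teardown half of the lifecycle: wait on destroyq until nr_jobs hits zero (the wake_up at 508 fires when the last job completes), assert all four lists are empty, then release resources in reverse order of creation. A userspace analogue of the counter-plus-waitqueue drain, using a pthread condition variable:

#include <pthread.h>

struct drain {
	pthread_mutex_t lock;
	pthread_cond_t idle;	/* stands in for the destroyq waitqueue */
	int nr_jobs;		/* atomic_t nr_jobs in the kernel */
};

/* mirrors the atomic_dec_and_test + wake_up in run_complete_job */
static void drain_job_done(struct drain *d)
{
	pthread_mutex_lock(&d->lock);
	if (--d->nr_jobs == 0)
		pthread_cond_broadcast(&d->idle);
	pthread_mutex_unlock(&d->lock);
}

/* mirrors wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs)) */
static void drain_wait(struct drain *d)
{
	pthread_mutex_lock(&d->lock);
	while (d->nr_jobs)
		pthread_cond_wait(&d->idle, &d->lock);
	pthread_mutex_unlock(&d->lock);
}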