Lines matching references to lc (the struct log_c log context) in the device-mapper userspace dirty-log, drivers/md/dm-log-userspace-base.c; each entry below is prefixed with its line number in that file.

84 static int userspace_do_request(struct log_c *lc, const char *uuid,
96 r = dm_consult_userspace(uuid, lc->luid, request_type, data,
107 r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
108 lc->usr_argv_str,
109 strlen(lc->usr_argv_str) + 1,
115 r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
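
Lines 96-115 are the recovery path of userspace_do_request(): a request is forwarded to the userspace log server, and if the server has vanished (-ESRCH, e.g. a daemon restart), the kernel replays DM_ULOG_CTR and then DM_ULOG_RESUME to reconstitute it before retrying. A condensed sketch of that pattern, using only the fields visible above (the in-tree version retries the reconnect in a loop with a delay):

    static int userspace_do_request(struct log_c *lc, const char *uuid,
                                    int request_type, char *data, size_t data_size,
                                    char *rdata, size_t *rdata_size)
    {
            int r;

    retry:
            /* Forward the request to the userspace log server. */
            r = dm_consult_userspace(uuid, lc->luid, request_type, data,
                                     data_size, rdata, rdata_size);
            if (r != -ESRCH)
                    return r;

            /* Server is gone: rebuild it from the saved ctr string. */
            r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
                                     lc->usr_argv_str,
                                     strlen(lc->usr_argv_str) + 1,
                                     NULL, NULL);
            if (r)
                    return -ESRCH;

            r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
                                     0, NULL, NULL);
            if (!r)
                    goto retry;      /* reconnected; replay the request */

            return -ESRCH;
    }
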
159 struct log_c *lc = container_of(work, struct log_c, flush_log_work.work);
161 atomic_set(&lc->sched_flush, 0);
163 r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, NULL, 0, NULL, NULL);
166 dm_table_event(lc->ti->table);
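
Lines 159-166 belong to do_flush(), the delayed work handler behind integrated flush: container_of() recovers the owning log_c from the embedded delayed_work, the "flush scheduled" flag is dropped before the request goes out, and a failed flush is escalated via dm_table_event(). A sketch close to the in-tree handler:

    static void do_flush(struct work_struct *work)
    {
            int r;
            struct log_c *lc = container_of(work, struct log_c,
                                            flush_log_work.work);

            /* Allow a new deferred flush to be scheduled while this runs. */
            atomic_set(&lc->sched_flush, 0);

            r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
                                     NULL, 0, NULL, NULL);
            if (r)
                    dm_table_event(lc->ti->table);  /* surface the failure */
    }
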
196 struct log_c *lc = NULL;
207 lc = kzalloc(sizeof(*lc), GFP_KERNEL);
208 if (!lc) {
214 lc->luid = (unsigned long)lc;
216 lc->ti = ti;
220 kfree(lc);
224 lc->usr_argc = argc;
226 strncpy(lc->uuid, argv[0], DM_UUID_LEN);
229 spin_lock_init(&lc->flush_lock);
230 INIT_LIST_HEAD(&lc->mark_list);
231 INIT_LIST_HEAD(&lc->clear_list);
234 lc->integrated_flush = 1;
241 kfree(lc);
252 r = mempool_init_slab_pool(&lc->flush_entry_pool, FLUSH_ENTRY_POOL_SIZE,
262 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
276 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
284 lc->region_size = (uint32_t)rdata;
285 lc->region_count = dm_sector_div_up(ti->len, lc->region_size);
294 dm_table_get_mode(ti->table), &lc->log_dev);
300 if (lc->integrated_flush) {
301 lc->dmlog_wq = alloc_workqueue("dmlogd", WQ_MEM_RECLAIM, 0);
302 if (!lc->dmlog_wq) {
308 INIT_DELAYED_WORK(&lc->flush_log_work, do_flush);
309 atomic_set(&lc->sched_flush, 0);
315 mempool_exit(&lc->flush_entry_pool);
316 kfree(lc);
319 lc->usr_argv_str = ctr_str;
320 log->context = lc;
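
The constructor matches above (lines 196-320) build the log context: kzalloc the struct, use its kernel address as the local unique id (luid) so several logs can share one uuid, copy the uuid and argument string, initialize the flush lock and the mark/clear lists, set up the flush-entry mempool, send DM_ULOG_CTR, and negotiate the region size with userspace. A condensed sketch of the luid assignment and region-size negotiation (error paths dropped; rdata/rdata_size are locals as in the in-tree function):

            uint64_t rdata;
            size_t rdata_size = sizeof(rdata);

            /* The kernel address of lc is unique for this log's lifetime. */
            lc->luid = (unsigned long)lc;

            /* Ask the userspace server which region granularity it chose. */
            r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
                                     NULL, 0, (char *)&rdata, &rdata_size);
            if (!r) {
                    lc->region_size = (uint32_t)rdata;
                    lc->region_count = dm_sector_div_up(ti->len, lc->region_size);
            }
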
328 struct log_c *lc = log->context;
330 if (lc->integrated_flush) {
332 if (atomic_read(&lc->sched_flush))
333 flush_delayed_work(&lc->flush_log_work);
335 destroy_workqueue(lc->dmlog_wq);
338 (void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
341 if (lc->log_dev)
342 dm_put_device(lc->ti, lc->log_dev);
344 mempool_exit(&lc->flush_entry_pool);
346 kfree(lc->usr_argv_str);
347 kfree(lc);
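
The destructor matches (lines 328-347) undo construction in reverse; the key ordering is that a still-scheduled deferred flush runs via flush_delayed_work() before the workqueue is destroyed, and DM_ULOG_DTR is sent unconditionally with its result deliberately ignored. A sketch of that teardown:

    static void userspace_dtr(struct dm_dirty_log *log)
    {
            struct log_c *lc = log->context;

            if (lc->integrated_flush) {
                    /* Run any scheduled flush before the workqueue dies. */
                    if (atomic_read(&lc->sched_flush))
                            flush_delayed_work(&lc->flush_log_work);
                    destroy_workqueue(lc->dmlog_wq);
            }

            /* Tell the userspace server this log instance is going away. */
            (void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
                                        NULL, 0, NULL, NULL);

            if (lc->log_dev)
                    dm_put_device(lc->ti, lc->log_dev);

            mempool_exit(&lc->flush_entry_pool);
            kfree(lc->usr_argv_str);
            kfree(lc);
    }
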
355 struct log_c *lc = log->context;
357 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
366 struct log_c *lc = log->context;
371 if (lc->integrated_flush && atomic_read(&lc->sched_flush))
372 flush_delayed_work(&lc->flush_log_work);
374 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
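
Lines 366-374 (userspace_postsuspend) apply the same rule as the destructor: with integrated flush enabled, any deferred flush must complete before DM_ULOG_POSTSUSPEND goes out, so userspace suspends a consistent log. A sketch:

    static int userspace_postsuspend(struct dm_dirty_log *log)
    {
            struct log_c *lc = log->context;

            /* Run a pending deferred flush before suspending the log. */
            if (lc->integrated_flush && atomic_read(&lc->sched_flush))
                    flush_delayed_work(&lc->flush_log_work);

            return dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
                                        NULL, 0, NULL, NULL);
    }
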
383 struct log_c *lc = log->context;
385 lc->in_sync_hint = 0;
386 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
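
userspace_resume() (lines 383-386) resets in_sync_hint before contacting userspace: the cached "regions below this are in sync" watermark may be stale across a suspend/resume cycle, so it is relearned from fresh replies. A sketch:

    static int userspace_resume(struct dm_dirty_log *log)
    {
            struct log_c *lc = log->context;

            /* Invalidate the cached sync watermark; it is rebuilt lazily. */
            lc->in_sync_hint = 0;

            return dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
                                        NULL, 0, NULL, NULL);
    }
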
394 struct log_c *lc = log->context;
396 return lc->region_size;
413 struct log_c *lc = log->context;
416 r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_CLEAN,
441 struct log_c *lc = log->context;
460 r = userspace_do_request(lc, lc->uuid, DM_ULOG_IN_SYNC,
466 static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
472 r = userspace_do_request(lc, lc->uuid, fe->type,
483 static int flush_by_group(struct log_c *lc, struct list_head *flush_list,
511 r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
521 r = userspace_do_request(lc, lc->uuid, type,
530 r = flush_one_by_one(lc, flush_list);
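
flush_by_group() (lines 483-530) batches region numbers into a uint64_t array, up to MAX_FLUSH_GROUP_COUNT per round-trip. With flush_with_payload set, the batch rides on a DM_ULOG_FLUSH request; otherwise it is sent as the entries' own type, and a failed grouped send falls back to flush_one_by_one() (lines 466-472). A condensed sketch of the batching loop (entry freeing and final cleanup omitted):

            uint64_t group[MAX_FLUSH_GROUP_COUNT];
            struct dm_dirty_log_flush_entry *fe, *tmp_fe;
            LIST_HEAD(tmp_list);
            uint32_t type = 0;
            int r = 0, count;

            while (!list_empty(flush_list)) {
                    count = 0;
                    list_for_each_entry_safe(fe, tmp_fe, flush_list, list) {
                            group[count++] = fe->region;
                            list_move(&fe->list, &tmp_list);
                            type = fe->type;
                            if (count >= MAX_FLUSH_GROUP_COUNT)
                                    break;
                    }

                    if (flush_with_payload) {
                            /* Marks travel as the flush's own payload. */
                            r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
                                                     (char *)(group),
                                                     count * sizeof(uint64_t),
                                                     NULL, NULL);
                            if (r)
                                    break;
                    } else {
                            r = userspace_do_request(lc, lc->uuid, type,
                                                     (char *)(group),
                                                     count * sizeof(uint64_t),
                                                     NULL, NULL);
                            if (r) {
                                    /* Grouped send failed; retry singly. */
                                    list_splice_init(&tmp_list, flush_list);
                                    r = flush_one_by_one(lc, flush_list);
                                    break;
                            }
                    }
            }
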
566 struct log_c *lc = log->context;
572 mempool_t *flush_entry_pool = &lc->flush_entry_pool;
574 spin_lock_irqsave(&lc->flush_lock, flags);
575 list_splice_init(&lc->mark_list, &mark_list);
576 list_splice_init(&lc->clear_list, &clear_list);
577 spin_unlock_irqrestore(&lc->flush_lock, flags);
585 r = flush_by_group(lc, &clear_list, 0);
589 if (!lc->integrated_flush) {
590 r = flush_by_group(lc, &mark_list, 0);
593 r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
601 r = flush_by_group(lc, &mark_list, 1);
605 if (mark_list_is_empty && !atomic_read(&lc->sched_flush)) {
610 queue_delayed_work(lc->dmlog_wq, &lc->flush_log_work, 3 * HZ);
611 atomic_set(&lc->sched_flush, 1);
617 cancel_delayed_work(&lc->flush_log_work);
618 atomic_set(&lc->sched_flush, 0);
637 dm_table_event(lc->ti->table);
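
userspace_flush() (lines 566-637) is the consumer of the mark/clear lists: it takes ownership of both atomically by splicing them onto stack-local heads under flush_lock, then sends clears, then marks. Without integrated flush, a synchronous DM_ULOG_FLUSH follows the marks; with it, marks ride the flush itself, and a clear-only batch merely schedules the deferred do_flush() (lines 605-618), which a real flush later cancels. A condensed sketch of that control flow (recycling entries back to the mempool is omitted):

            LIST_HEAD(mark_list);
            LIST_HEAD(clear_list);
            int mark_list_is_empty;
            unsigned long flags;
            int r = 0;

            /* Atomically take ownership of everything queued so far. */
            spin_lock_irqsave(&lc->flush_lock, flags);
            list_splice_init(&lc->mark_list, &mark_list);
            list_splice_init(&lc->clear_list, &clear_list);
            spin_unlock_irqrestore(&lc->flush_lock, flags);

            mark_list_is_empty = list_empty(&mark_list);
            if (mark_list_is_empty && list_empty(&clear_list))
                    return 0;

            r = flush_by_group(lc, &clear_list, 0);
            if (r)
                    goto out;

            if (!lc->integrated_flush) {
                    /* Classic path: send marks, then flush synchronously. */
                    r = flush_by_group(lc, &mark_list, 0);
                    if (!r)
                            r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
                                                     NULL, 0, NULL, NULL);
                    goto out;
            }

            /* Integrated path: marks are the payload of the flush itself. */
            r = flush_by_group(lc, &mark_list, 1);
            if (r)
                    goto out;

            if (mark_list_is_empty && !atomic_read(&lc->sched_flush)) {
                    /* Clear-only batch: defer the real flush a few seconds. */
                    queue_delayed_work(lc->dmlog_wq, &lc->flush_log_work, 3 * HZ);
                    atomic_set(&lc->sched_flush, 1);
            } else {
                    /* A flush just went out; drop any deferred one. */
                    cancel_delayed_work(&lc->flush_log_work);
                    atomic_set(&lc->sched_flush, 0);
            }
    out:
            /* Entries from both lists are returned to the mempool here. */
            if (r)
                    dm_table_event(lc->ti->table);
            return r;
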
651 struct log_c *lc = log->context;
655 fe = mempool_alloc(&lc->flush_entry_pool, GFP_NOIO);
658 spin_lock_irqsave(&lc->flush_lock, flags);
661 list_add(&fe->list, &lc->mark_list);
662 spin_unlock_irqrestore(&lc->flush_lock, flags);
680 struct log_c *lc = log->context;
689 fe = mempool_alloc(&lc->flush_entry_pool, GFP_ATOMIC);
695 spin_lock_irqsave(&lc->flush_lock, flags);
698 list_add(&fe->list, &lc->clear_list);
699 spin_unlock_irqrestore(&lc->flush_lock, flags);
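
The mark (lines 651-662) and clear (680-699) hooks share one pattern: allocate a flush entry from the mempool, fill in its type and region, and queue it under flush_lock for the next userspace_flush() to drain. The allocation flags on lines 655 and 689 differ deliberately: mark_region runs in process context and may sleep (GFP_NOIO), while clear_region can be called from interrupt context, so it must use GFP_ATOMIC and cope with allocation failure. A sketch of the mark side, assuming the in-tree flush-entry fields type, region and list:

    static int userspace_mark_region(struct dm_dirty_log *log, region_t region)
    {
            unsigned long flags;
            struct log_c *lc = log->context;
            struct dm_dirty_log_flush_entry *fe;

            /* Process context: wait for an allocation rather than fail. */
            fe = mempool_alloc(&lc->flush_entry_pool, GFP_NOIO);

            spin_lock_irqsave(&lc->flush_lock, flags);
            fe->type = DM_ULOG_MARK_REGION;
            fe->region = region;
            list_add(&fe->list, &lc->mark_list);
            spin_unlock_irqrestore(&lc->flush_lock, flags);

            return 0;
    }
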
716 struct log_c *lc = log->context;
722 if (lc->in_sync_hint >= lc->region_count)
726 r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK,
742 struct log_c *lc = log->context;
751 (void) userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
774 struct log_c *lc = log->context;
777 r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT,
783 if (sync_count >= lc->region_count)
784 lc->in_sync_hint = lc->region_count;
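
userspace_get_sync_count() (lines 774-784) maintains the cache: once userspace reports every region in sync, in_sync_hint is pinned at region_count so later calls such as get_resync_work (line 722) and is_remote_recovering (line 852) can answer without a round-trip. A sketch:

    static region_t userspace_get_sync_count(struct dm_dirty_log *log)
    {
            int r;
            uint64_t sync_count;
            size_t sync_count_size = sizeof(sync_count);
            struct log_c *lc = log->context;

            r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT,
                                     NULL, 0, (char *)&sync_count,
                                     &sync_count_size);
            if (r)
                    return 0;

            /* Fully synced: remember it so later queries short-circuit. */
            if (sync_count >= lc->region_count)
                    lc->in_sync_hint = lc->region_count;

            return (region_t)sync_count;
    }
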
800 struct log_c *lc = log->context;
804 r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO,
814 table_args = strchr(lc->usr_argv_str, ' ');
818 DMEMIT("%s %u %s ", log->type->name, lc->usr_argc, lc->uuid);
819 if (lc->integrated_flush)
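
userspace_status() (lines 800-819) has two halves: STATUSTYPE_INFO is answered by forwarding DM_ULOG_STATUS_INFO to userspace, while STATUSTYPE_TABLE is rebuilt locally from the saved constructor string; the strchr() on line 814 skips past the uuid (always argv[0]) so only the remaining table arguments are re-emitted. A condensed sketch of the table branch (DMEMIT assumes the usual result/maxlen/sz locals of a dm status method):

            char *table_args;

            /* Skip the uuid, which is always the first ctr argument. */
            table_args = strchr(lc->usr_argv_str, ' ');
            table_args++;

            DMEMIT("%s %u %s ", log->type->name, lc->usr_argc, lc->uuid);
            if (lc->integrated_flush)
                    DMEMIT("integrated_flush ");
            DMEMIT("%s ", table_args);
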
837 struct log_c *lc = log->context;
852 if (region < lc->in_sync_hint)
858 r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING,
864 lc->in_sync_hint = pkg.in_sync_hint;
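
The final cluster (lines 837-864, userspace_is_remote_recovering) shows the hint paying off: any region below in_sync_hint is known to be in sync and the userspace query is skipped entirely; otherwise the reply both answers the question and refreshes the hint. A condensed sketch (the in-tree version additionally rate-limits queries; the reply struct layout matches the pkg fields visible above):

    static int userspace_is_remote_recovering(struct dm_dirty_log *log,
                                              region_t region)
    {
            int r;
            uint64_t region64 = region;
            struct log_c *lc = log->context;
            struct {
                    int64_t is_recovering;
                    uint64_t in_sync_hint;
            } pkg;
            size_t rdata_size = sizeof(pkg);

            /* Fast path: everything below the watermark is in sync. */
            if (region < lc->in_sync_hint)
                    return 0;

            r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING,
                                     (char *)&region64, sizeof(region64),
                                     (char *)&pkg, &rdata_size);
            if (r)
                    return 1;       /* be conservative on error */

            lc->in_sync_hint = pkg.in_sync_hint;

            return (int)pkg.is_recovering;
    }
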