Lines matching references to lc (the struct log_c log context; the fragments below are apparently from the kernel's userspace dirty-log client, drivers/md/dm-log-userspace-base.c):
85 static int userspace_do_request(struct log_c *lc, const char *uuid,
97 r = dm_consult_userspace(uuid, lc->luid, request_type, data,
108 r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
109 lc->usr_argv_str,
110 strlen(lc->usr_argv_str) + 1,
116 r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
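
The matches at 85-116 all come from userspace_do_request(), the wrapper every log operation goes through. A sketch of the retry protocol these fragments imply; the sleep loop and error paths between the matched lines are reconstructed context, not shown in any match above:

static int userspace_do_request(struct log_c *lc, const char *uuid,
				int request_type, char *data, size_t data_size,
				char *rdata, size_t *rdata_size)
{
	int r;

retry:
	r = dm_consult_userspace(uuid, lc->luid, request_type, data,
				 data_size, rdata, rdata_size);
	if (r != -ESRCH)	/* -ESRCH means the userspace daemon is gone */
		return r;

	/* Poll until a restarted daemon accepts our constructor string. */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(2*HZ);
		r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
					 lc->usr_argv_str,
					 strlen(lc->usr_argv_str) + 1,
					 NULL, NULL);
		if (!r)
			break;
	}

	/* Restore the log state, then replay the original request. */
	r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
				 0, NULL, NULL);
	if (!r)
		goto retry;

	return -ESRCH;
}
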
160 struct log_c *lc = container_of(work, struct log_c, flush_log_work.work);
162 atomic_set(&lc->sched_flush, 0);
164 r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, NULL, 0, NULL, NULL);
167 dm_table_event(lc->ti->table);
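
Lines 160-167 are do_flush(), the delayed-work handler behind integrated flush. Essentially the whole handler appears in the matches; only the error check around dm_table_event() is filled in from context:

static void do_flush(struct work_struct *work)
{
	int r;
	struct log_c *lc = container_of(work, struct log_c, flush_log_work.work);

	atomic_set(&lc->sched_flush, 0);	/* the scheduled flush is running now */

	r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, NULL, 0, NULL, NULL);
	if (r)
		dm_table_event(lc->ti->table);	/* report log failure to the table */
}
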
197 struct log_c *lc = NULL;
208 lc = kzalloc(sizeof(*lc), GFP_KERNEL);
209 if (!lc) {
215 lc->luid = (unsigned long)lc;
217 lc->ti = ti;
221 kfree(lc);
225 lc->usr_argc = argc;
227 strncpy(lc->uuid, argv[0], DM_UUID_LEN);
230 spin_lock_init(&lc->flush_lock);
231 INIT_LIST_HEAD(&lc->mark_list);
232 INIT_LIST_HEAD(&lc->clear_list);
235 lc->integrated_flush = 1;
242 kfree(lc);
253 r = mempool_init_slab_pool(&lc->flush_entry_pool, FLUSH_ENTRY_POOL_SIZE,
263 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
277 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
285 lc->region_size = (uint32_t)rdata;
286 lc->region_count = dm_sector_div_up(ti->len, lc->region_size);
295 dm_table_get_mode(ti->table), &lc->log_dev);
301 if (lc->integrated_flush) {
302 lc->dmlog_wq = alloc_workqueue("dmlogd", WQ_MEM_RECLAIM, 0);
303 if (!lc->dmlog_wq) {
309 INIT_DELAYED_WORK(&lc->flush_log_work, do_flush);
310 atomic_set(&lc->sched_flush, 0);
316 mempool_exit(&lc->flush_entry_pool);
317 kfree(lc);
320 lc->usr_argv_str = ctr_str;
321 log->context = lc;
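
The block at 197-321 is the constructor. Condensing the matched lines into their order of operations; error unwinding is omitted, and ctr_str, str_size, devices_rdata, and _flush_entry_cache are names assumed from surrounding context rather than shown in any match:

lc = kzalloc(sizeof(*lc), GFP_KERNEL);
lc->luid = (unsigned long)lc;	/* local unique id: the object's own address */
lc->ti = ti;
lc->usr_argc = argc;
strncpy(lc->uuid, argv[0], DM_UUID_LEN);

spin_lock_init(&lc->flush_lock);
INIT_LIST_HEAD(&lc->mark_list);
INIT_LIST_HEAD(&lc->clear_list);
/* an "integrated_flush" table argument sets lc->integrated_flush = 1 */

r = mempool_init_slab_pool(&lc->flush_entry_pool, FLUSH_ENTRY_POOL_SIZE,
			   _flush_entry_cache);

/* Hand the constructor string to the daemon... */
r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
			 ctr_str, str_size,
			 devices_rdata, &devices_rdata_size);

/* ...then ask what region size it settled on. */
r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
			 NULL, 0, (char *)&rdata, &rdata_size);
lc->region_size = (uint32_t)rdata;
lc->region_count = dm_sector_div_up(ti->len, lc->region_size);

if (lc->integrated_flush) {
	lc->dmlog_wq = alloc_workqueue("dmlogd", WQ_MEM_RECLAIM, 0);
	INIT_DELAYED_WORK(&lc->flush_log_work, do_flush);
	atomic_set(&lc->sched_flush, 0);
}

lc->usr_argv_str = ctr_str;
log->context = lc;
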
329 struct log_c *lc = log->context;
331 if (lc->integrated_flush) {
333 if (atomic_read(&lc->sched_flush))
334 flush_delayed_work(&lc->flush_log_work);
336 destroy_workqueue(lc->dmlog_wq);
339 (void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
342 if (lc->log_dev)
343 dm_put_device(lc->ti, lc->log_dev);
345 mempool_exit(&lc->flush_entry_pool);
347 kfree(lc->usr_argv_str);
348 kfree(lc);
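
Lines 329-348 are the destructor, where ordering matters: run any still-scheduled flush before the workqueue is destroyed, tell the daemon, then release the device and the memory. Nearly every line appears in the matches:

static void userspace_dtr(struct dm_dirty_log *log)
{
	struct log_c *lc = log->context;

	if (lc->integrated_flush) {
		/* Run a pending delayed flush before the queue goes away. */
		if (atomic_read(&lc->sched_flush))
			flush_delayed_work(&lc->flush_log_work);
		destroy_workqueue(lc->dmlog_wq);
	}

	(void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
				    NULL, 0, NULL, NULL);

	if (lc->log_dev)
		dm_put_device(lc->ti, lc->log_dev);

	mempool_exit(&lc->flush_entry_pool);
	kfree(lc->usr_argv_str);
	kfree(lc);
}
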
354 struct log_c *lc = log->context;
356 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
365 struct log_c *lc = log->context;
370 if (lc->integrated_flush && atomic_read(&lc->sched_flush))
371 flush_delayed_work(&lc->flush_log_work);
373 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
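
Lines 354-373 are the suspend pair. Presuspend just forwards DM_ULOG_PRESUSPEND; postsuspend first forces any scheduled integrated flush to run so nothing is left queued once the log is suspended. The postsuspend side, reconstructed around the matches:

static int userspace_postsuspend(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	/* Run a scheduled flush now, before telling the daemon to suspend. */
	if (lc->integrated_flush && atomic_read(&lc->sched_flush))
		flush_delayed_work(&lc->flush_log_work);

	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
				 NULL, 0, NULL, NULL);
	return r;
}
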
382 struct log_c *lc = log->context;
384 lc->in_sync_hint = 0;
385 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
393 struct log_c *lc = log->context;
395 return lc->region_size;
412 struct log_c *lc = log->context;
415 r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_CLEAN,
440 struct log_c *lc = log->context;
459 r = userspace_do_request(lc, lc->uuid, DM_ULOG_IN_SYNC,
465 static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
471 r = userspace_do_request(lc, lc->uuid, fe->type,
482 static int flush_by_group(struct log_c *lc, struct list_head *flush_list,
510 r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
520 r = userspace_do_request(lc, lc->uuid, type,
529 r = flush_one_by_one(lc, flush_list);
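
Lines 465-529 are the two flush helpers. flush_by_group() batches region numbers (up to MAX_FLUSH_GROUP_COUNT, a per-file constant assumed from context) into one request and falls back to flush_one_by_one() when a grouped send fails. A sketch of the grouping loop; the tmp_list handling and the flush-entry struct are reconstructed:

uint64_t group[MAX_FLUSH_GROUP_COUNT];
LIST_HEAD(tmp_list);
struct dm_dirty_log_flush_entry *fe, *tmp_fe;
uint32_t type = 0;
int count, r = 0;

while (!list_empty(flush_list)) {
	count = 0;
	list_for_each_entry_safe(fe, tmp_fe, flush_list, list) {
		group[count++] = fe->region;
		list_move(&fe->list, &tmp_list);
		type = fe->type;
		if (count >= MAX_FLUSH_GROUP_COUNT)
			break;
	}

	if (flush_with_payload) {
		/* Integrated flush: the regions ride as the FLUSH payload. */
		r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
					 (char *)(group),
					 count * sizeof(uint64_t),
					 NULL, NULL);
		if (r)
			break;
	} else {
		r = userspace_do_request(lc, lc->uuid, type,
					 (char *)(group),
					 count * sizeof(uint64_t),
					 NULL, NULL);
		if (r) {
			/* Grouped send failed: put the entries back and
			 * retry them one region at a time. */
			list_splice_init(&tmp_list, flush_list);
			r = flush_one_by_one(lc, flush_list);
			break;
		}
	}
}
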
565 struct log_c *lc = log->context;
571 mempool_t *flush_entry_pool = &lc->flush_entry_pool;
573 spin_lock_irqsave(&lc->flush_lock, flags);
574 list_splice_init(&lc->mark_list, &mark_list);
575 list_splice_init(&lc->clear_list, &clear_list);
576 spin_unlock_irqrestore(&lc->flush_lock, flags);
584 r = flush_by_group(lc, &clear_list, 0);
588 if (!lc->integrated_flush) {
589 r = flush_by_group(lc, &mark_list, 0);
592 r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
600 r = flush_by_group(lc, &mark_list, 1);
604 if (mark_list_is_empty && !atomic_read(&lc->sched_flush)) {
609 queue_delayed_work(lc->dmlog_wq, &lc->flush_log_work, 3 * HZ);
610 atomic_set(&lc->sched_flush, 1);
616 cancel_delayed_work(&lc->flush_log_work);
617 atomic_set(&lc->sched_flush, 0);
636 dm_table_event(lc->ti->table);
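
Lines 565-636 are userspace_flush() itself. The pattern worth noting: both pending lists are spliced out under flush_lock so the spinlock covers only pointer swaps, and the blocking userspace round-trips happen afterwards. With integrated flush, a clear-only flush is deferred for a few seconds instead of sent immediately. A sketch; the trailing mempool cleanup loop is summarized in a comment:

static int userspace_flush(struct dm_dirty_log *log)
{
	int r = 0;
	unsigned long flags;
	struct log_c *lc = log->context;
	LIST_HEAD(mark_list);
	LIST_HEAD(clear_list);
	int mark_list_is_empty;

	/* Steal both pending lists; the lock covers only the splices. */
	spin_lock_irqsave(&lc->flush_lock, flags);
	list_splice_init(&lc->mark_list, &mark_list);
	list_splice_init(&lc->clear_list, &clear_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);

	mark_list_is_empty = list_empty(&mark_list);
	if (mark_list_is_empty && list_empty(&clear_list))
		return 0;

	/* The blocking userspace round-trips happen outside the lock. */
	r = flush_by_group(lc, &clear_list, 0);
	if (r)
		goto out;

	if (!lc->integrated_flush) {
		/* Classic path: send the marks, then a bare FLUSH. */
		r = flush_by_group(lc, &mark_list, 0);
		if (!r)
			r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
						 NULL, 0, NULL, NULL);
		goto out;
	}

	/* Integrated path: the marks travel as the FLUSH payload. */
	r = flush_by_group(lc, &mark_list, 1);
	if (r)
		goto out;

	if (mark_list_is_empty && !atomic_read(&lc->sched_flush)) {
		/* Clear-only flush: safe to defer for a few seconds. */
		queue_delayed_work(lc->dmlog_wq, &lc->flush_log_work, 3 * HZ);
		atomic_set(&lc->sched_flush, 1);
	} else {
		/* Marks were just flushed; any deferred flush is moot. */
		cancel_delayed_work(&lc->flush_log_work);
		atomic_set(&lc->sched_flush, 0);
	}
out:
	/* Entries from both lists go back to lc->flush_entry_pool here
	 * (omitted), and a failure raises dm_table_event(lc->ti->table). */
	return r;
}
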
650 struct log_c *lc = log->context;
654 fe = mempool_alloc(&lc->flush_entry_pool, GFP_NOIO);
657 spin_lock_irqsave(&lc->flush_lock, flags);
660 list_add(&fe->list, &lc->mark_list);
661 spin_unlock_irqrestore(&lc->flush_lock, flags);
677 struct log_c *lc = log->context;
686 fe = mempool_alloc(&lc->flush_entry_pool, GFP_ATOMIC);
692 spin_lock_irqsave(&lc->flush_lock, flags);
695 list_add(&fe->list, &lc->clear_list);
696 spin_unlock_irqrestore(&lc->flush_lock, flags);
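
Lines 650-696 are mark_region and clear_region; both only queue an entry for the next flush. The interesting difference is the allocation context: marking may block (GFP_NOIO), while clearing can run in atomic context, so it uses GFP_ATOMIC and silently drops the request when the pool is exhausted; a missed clear only costs an extra resync later, never correctness. The clear side, reconstructed:

static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
{
	unsigned long flags;
	struct log_c *lc = log->context;
	struct dm_dirty_log_flush_entry *fe;

	/* Atomic context: a sleeping allocation is not allowed here. */
	fe = mempool_alloc(&lc->flush_entry_pool, GFP_ATOMIC);
	if (!fe)
		return;	/* dropping a clear is safe, just less efficient */

	fe->type = DM_ULOG_CLEAR_REGION;
	fe->region = region;

	spin_lock_irqsave(&lc->flush_lock, flags);
	list_add(&fe->list, &lc->clear_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);
}
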
711 struct log_c *lc = log->context;
717 if (lc->in_sync_hint >= lc->region_count)
721 r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK,
737 struct log_c *lc = log->context;
746 (void) userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
768 struct log_c *lc = log->context;
771 r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT,
777 if (sync_count >= lc->region_count)
778 lc->in_sync_hint = lc->region_count;
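
Lines 711-778 are the resync path: get_resync_work, set_region_sync, get_sync_count. in_sync_hint is the optimization tying them together; once userspace reports every region in sync, the kernel stops making round-trips for resync work. The sync-count accessor, reconstructed around the matches:

static region_t userspace_get_sync_count(struct dm_dirty_log *log)
{
	int r;
	size_t rdata_size;
	uint64_t sync_count;
	struct log_c *lc = log->context;

	rdata_size = sizeof(sync_count);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT,
				 NULL, 0, (char *)&sync_count, &rdata_size);
	if (r)
		return 0;

	/* Fully in sync: remember it so later queries short-circuit. */
	if (sync_count >= lc->region_count)
		lc->in_sync_hint = lc->region_count;

	return (region_t)sync_count;
}
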
794 struct log_c *lc = log->context;
798 r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO,
808 table_args = strchr(lc->usr_argv_str, ' ');
812 DMEMIT("%s %u %s ", log->type->name, lc->usr_argc, lc->uuid);
813 if (lc->integrated_flush)
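
Lines 794-813 are the status hook. Info status is fetched from the daemon (DM_ULOG_STATUS_INFO); table status is rebuilt locally from the saved constructor string: everything after the first space, since the uuid is re-emitted explicitly. The table branch, with the BUG_ON and pointer bump assumed from context:

case STATUSTYPE_TABLE:
	sz = 0;
	table_args = strchr(lc->usr_argv_str, ' ');
	BUG_ON(!table_args);	/* there is always a space after the uuid */
	table_args++;		/* skip the uuid; it is printed explicitly */

	DMEMIT("%s %u %s ", log->type->name, lc->usr_argc, lc->uuid);
	if (lc->integrated_flush)
		DMEMIT("integrated_flush ");
	DMEMIT("%s", table_args);
	break;
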
834 struct log_c *lc = log->context;
849 if (region < lc->in_sync_hint)
855 r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING,
861 lc->in_sync_hint = pkg.in_sync_hint;
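
The last group of matches, 834-861, is userspace_is_remote_recovering(), which sits on the write path and therefore throttles its userspace queries. Below in_sync_hint it answers "not recovering" with no round-trip; while throttled it conservatively answers "recovering". A sketch; the quarter-second limit and the pkg struct layout are reconstructed from context:

static int userspace_is_remote_recovering(struct dm_dirty_log *log,
					  region_t region)
{
	int r;
	uint64_t region64 = region;
	struct log_c *lc = log->context;
	static unsigned long limit;
	struct {
		int64_t is_recovering;
		uint64_t in_sync_hint;
	} pkg;
	size_t rdata_size = sizeof(pkg);

	/* Regions below the hint are in sync; no remote recovery possible. */
	if (region < lc->in_sync_hint)
		return 0;
	else if (time_after(limit, jiffies))
		return 1;	/* throttled: err on the side of "recovering" */

	limit = jiffies + (HZ / 4);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING,
				 (char *)&region64, sizeof(region64),
				 (char *)&pkg, &rdata_size);
	if (r)
		return 1;

	lc->in_sync_hint = pkg.in_sync_hint;

	return (int)pkg.is_recovering;
}
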