Lines matching refs:lc — cross-reference hits for the local variable `lc` (a `struct log_c *`); the matches appear to come from the device-mapper dirty region log, drivers/md/dm-log.c. Each line is prefixed with its line number in that file, and non-matching lines are omitted, so some statements appear truncated mid-expression.

294 static int rw_header(struct log_c *lc, int op)
296 lc->io_req.bi_op = op;
297 lc->io_req.bi_op_flags = 0;
299 return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
302 static int flush_header(struct log_c *lc)
305 .bdev = lc->header_location.bdev,
310 lc->io_req.bi_op = REQ_OP_WRITE;
311 lc->io_req.bi_op_flags = REQ_PREFLUSH;
313 return dm_io(&lc->io_req, 1, &null_location, NULL);
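
The two helpers above (file lines 294–313) do all of the header I/O. The sketch below shows how the elided lines around these matches plausibly fit together, assuming the dm-io request fields of this kernel generation (bi_op/bi_op_flags); the initializer lines not shown in the match output are reconstructed, not verbatim.

/* Sketch: rw_header() reads or writes the on-disk log header;
 * flush_header() then issues a zero-length REQ_PREFLUSH write so the
 * header reaches stable media. */
static int rw_header(struct log_c *lc, int op)
{
	lc->io_req.bi_op = op;
	lc->io_req.bi_op_flags = 0;

	return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}

static int flush_header(struct log_c *lc)
{
	struct dm_io_region null_location = {
		.bdev = lc->header_location.bdev,
		.sector = 0,
		.count = 0,	/* zero-length: no data, cache flush only */
	};

	lc->io_req.bi_op = REQ_OP_WRITE;
	lc->io_req.bi_op_flags = REQ_PREFLUSH;

	return dm_io(&lc->io_req, 1, &null_location, NULL);
}

The zero-length region turns the request into a pure cache flush: nothing is transferred, but the device's volatile write cache is forced out before the call returns.
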
369 struct log_c *lc;
401 lc = kmalloc(sizeof(*lc), GFP_KERNEL);
402 if (!lc) {
407 lc->ti = ti;
408 lc->touched_dirtied = 0;
409 lc->touched_cleaned = 0;
410 lc->flush_failed = 0;
411 lc->region_size = region_size;
412 lc->region_count = region_count;
413 lc->sync = sync;
421 lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
427 lc->clean_bits = vmalloc(bitset_size);
428 if (!lc->clean_bits) {
430 kfree(lc);
433 lc->disk_header = NULL;
435 lc->log_dev = dev;
436 lc->log_dev_failed = 0;
437 lc->log_dev_flush_failed = 0;
438 lc->header_location.bdev = lc->log_dev->bdev;
439 lc->header_location.sector = 0;
446 bdev_logical_block_size(lc->header_location.
452 kfree(lc);
456 lc->header_location.count = buf_size >> SECTOR_SHIFT;
458 lc->io_req.mem.type = DM_IO_VMA;
459 lc->io_req.notify.fn = NULL;
460 lc->io_req.client = dm_io_client_create();
461 if (IS_ERR(lc->io_req.client)) {
462 r = PTR_ERR(lc->io_req.client);
464 kfree(lc);
468 lc->disk_header = vmalloc(buf_size);
469 if (!lc->disk_header) {
471 dm_io_client_destroy(lc->io_req.client);
472 kfree(lc);
476 lc->io_req.mem.ptr.vma = lc->disk_header;
477 lc->clean_bits = (void *)lc->disk_header +
481 memset(lc->clean_bits, -1, bitset_size);
483 lc->sync_bits = vmalloc(bitset_size);
484 if (!lc->sync_bits) {
487 vfree(lc->clean_bits);
489 dm_io_client_destroy(lc->io_req.client);
490 vfree(lc->disk_header);
491 kfree(lc);
494 memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
495 lc->sync_count = (sync == NOSYNC) ? region_count : 0;
497 lc->recovering_bits = vzalloc(bitset_size);
498 if (!lc->recovering_bits) {
500 vfree(lc->sync_bits);
502 vfree(lc->clean_bits);
504 dm_io_client_destroy(lc->io_req.client);
505 vfree(lc->disk_header);
506 kfree(lc);
509 lc->sync_search = 0;
510 log->context = lc;
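
The constructor matches above (file lines 369–510) show two layouts for clean_bits: a core log vmallocs its own bitmap (line 427), while a disk log points clean_bits just past the header inside the vmalloc'ed disk_header buffer (line 477). The word-count arithmetic feeding line 421 rounds the region count up to whole 32-bit words; the small userspace program below is an assumption-labelled model of that arithmetic, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Userspace model (assumption) of the bitmap sizing behind
 * lc->bitset_uint32_count: round region_count up to a whole number of
 * 32-bit words, then keep both the byte size and the word count. */
int main(void)
{
	uint64_t region_count = 1000;			/* example value */
	size_t bits_per_word = sizeof(uint32_t) * 8;	/* 32 */

	size_t bitset_bits = ((region_count + bits_per_word - 1) /
			      bits_per_word) * bits_per_word;
	size_t bitset_size = bitset_bits / 8;		/* bytes to allocate */
	size_t uint32_count = bitset_size / sizeof(uint32_t);

	printf("%llu regions -> %zu bytes, %zu uint32 words\n",
	       (unsigned long long)region_count, bitset_size, uint32_count);
	return 0;
}

For 1000 regions this rounds up to 1024 bits, i.e. 128 bytes and 32 words, which is the size used for the clean, sync and recovering bitmaps alike.
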
521 static void destroy_log_context(struct log_c *lc)
523 vfree(lc->sync_bits);
524 vfree(lc->recovering_bits);
525 kfree(lc);
530 struct log_c *lc = (struct log_c *) log->context;
532 vfree(lc->clean_bits);
533 destroy_log_context(lc);
567 struct log_c *lc = (struct log_c *) log->context;
569 dm_put_device(lc->ti, lc->log_dev);
570 vfree(lc->disk_header);
571 dm_io_client_destroy(lc->io_req.client);
572 destroy_log_context(lc);
575 static void fail_log_device(struct log_c *lc)
577 if (lc->log_dev_failed)
580 lc->log_dev_failed = 1;
581 dm_table_event(lc->ti->table);
588 struct log_c *lc = (struct log_c *) log->context;
589 size_t size = lc->bitset_uint32_count * sizeof(uint32_t);
592 r = read_header(lc);
595 lc->log_dev->name);
596 fail_log_device(lc);
604 lc->header.nr_regions = 0;
608 if (lc->sync == NOSYNC)
609 for (i = lc->header.nr_regions; i < lc->region_count; i++)
611 log_set_bit(lc, lc->clean_bits, i);
613 for (i = lc->header.nr_regions; i < lc->region_count; i++)
615 log_clear_bit(lc, lc->clean_bits, i);
618 for (i = lc->region_count; i % BITS_PER_LONG; i++)
619 log_clear_bit(lc, lc->clean_bits, i);
622 memcpy(lc->sync_bits, lc->clean_bits, size);
623 lc->sync_count = memweight(lc->clean_bits,
624 lc->bitset_uint32_count * sizeof(uint32_t));
625 lc->sync_search = 0;
628 lc->header.nr_regions = lc->region_count;
630 header_to_disk(&lc->header, lc->disk_header);
633 r = rw_header(lc, REQ_OP_WRITE);
635 r = flush_header(lc);
637 lc->log_dev_flush_failed = 1;
641 lc->log_dev->name);
642 fail_log_device(lc);
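
The block of matches from file lines 588–642 is the disk log's resume path. Below is a hedged reconstruction, meant to be read in the context of the same file (read_header, log_set_bit and friends are the file's own helpers); the control flow around the elided lines is inferred, and the real function is presumably just called disk_resume.

static int disk_resume_sketch(struct dm_dirty_log *log)
{
	int r;
	region_t i;
	struct log_c *lc = (struct log_c *) log->context;
	size_t size = lc->bitset_uint32_count * sizeof(uint32_t);

	/* Read the on-disk header; on failure the log device is marked
	 * failed and every region is treated as out-of-sync. */
	r = read_header(lc);
	if (r) {
		DMWARN("%s: Failed to read header on dirty region log device",
		       lc->log_dev->name);
		fail_log_device(lc);
		lc->header.nr_regions = 0;
	}

	/* Regions added since the last activation: born clean for a
	 * "nosync" log, otherwise they still need recovery. */
	if (lc->sync == NOSYNC)
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			log_set_bit(lc, lc->clean_bits, i);
	else
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			log_clear_bit(lc, lc->clean_bits, i);

	/* Clear any stale tail bits up to a BITS_PER_LONG boundary. */
	for (i = lc->region_count; i % BITS_PER_LONG; i++)
		log_clear_bit(lc, lc->clean_bits, i);

	/* Seed the in-core sync state from the clean bitmap. */
	memcpy(lc->sync_bits, lc->clean_bits, size);
	lc->sync_count = memweight(lc->clean_bits, size);
	lc->sync_search = 0;

	/* Write the updated header back and make it durable. */
	lc->header.nr_regions = lc->region_count;
	header_to_disk(&lc->header, lc->disk_header);
	r = rw_header(lc, REQ_OP_WRITE);
	if (!r) {
		r = flush_header(lc);
		if (r)
			lc->log_dev_flush_failed = 1;
	}
	if (r) {
		DMWARN("%s: Failed to write header on dirty region log device",
		       lc->log_dev->name);
		fail_log_device(lc);
	}

	return r;
}
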
650 struct log_c *lc = (struct log_c *) log->context;
651 return lc->region_size;
656 struct log_c *lc = (struct log_c *) log->context;
657 lc->sync_search = 0;
663 struct log_c *lc = (struct log_c *) log->context;
664 return log_test_bit(lc->clean_bits, region);
669 struct log_c *lc = (struct log_c *) log->context;
670 return log_test_bit(lc->sync_bits, region);
682 struct log_c *lc = log->context;
685 if (!lc->touched_cleaned && !lc->touched_dirtied)
688 if (lc->touched_cleaned && log->flush_callback_fn &&
689 log->flush_callback_fn(lc->ti)) {
696 lc->flush_failed = 1;
697 for (i = 0; i < lc->region_count; i++)
698 log_clear_bit(lc, lc->clean_bits, i);
701 r = rw_header(lc, REQ_OP_WRITE);
703 fail_log_device(lc);
705 if (lc->touched_dirtied) {
706 r = flush_header(lc);
708 lc->log_dev_flush_failed = 1;
709 fail_log_device(lc);
711 lc->touched_dirtied = 0;
713 lc->touched_cleaned = 0;
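
File lines 682–713 above are the disk log's flush method. A hedged reconstruction follows; the if/else structure of the elided lines is inferred and the real name is presumably disk_flush.

static int disk_flush_sketch(struct dm_dirty_log *log)
{
	int r, i;
	struct log_c *lc = log->context;

	/* Only touch the disk if something actually changed. */
	if (!lc->touched_cleaned && !lc->touched_dirtied)
		return 0;

	/* If the mirror's flush callback fails we can no longer tell
	 * which regions really reached the media, so mark them all
	 * dirty and remember that the flush failed. */
	if (lc->touched_cleaned && log->flush_callback_fn &&
	    log->flush_callback_fn(lc->ti)) {
		lc->flush_failed = 1;
		for (i = 0; i < lc->region_count; i++)
			log_clear_bit(lc, lc->clean_bits, i);
	}

	r = rw_header(lc, REQ_OP_WRITE);
	if (r)
		fail_log_device(lc);
	else {
		/* A newly dirtied bitmap must also be flushed through
		 * the device's write cache. */
		if (lc->touched_dirtied) {
			r = flush_header(lc);
			if (r) {
				lc->log_dev_flush_failed = 1;
				fail_log_device(lc);
			} else
				lc->touched_dirtied = 0;
		}
		lc->touched_cleaned = 0;
	}

	return r;
}
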
721 struct log_c *lc = (struct log_c *) log->context;
722 log_clear_bit(lc, lc->clean_bits, region);
727 struct log_c *lc = (struct log_c *) log->context;
728 if (likely(!lc->flush_failed))
729 log_set_bit(lc, lc->clean_bits, region);
734 struct log_c *lc = (struct log_c *) log->context;
736 if (lc->sync_search >= lc->region_count)
740 *region = find_next_zero_bit_le(lc->sync_bits,
741 lc->region_count,
742 lc->sync_search);
743 lc->sync_search = *region + 1;
745 if (*region >= lc->region_count)
748 } while (log_test_bit(lc->recovering_bits, *region));
750 log_set_bit(lc, lc->recovering_bits, *region);
757 struct log_c *lc = (struct log_c *) log->context;
759 log_clear_bit(lc, lc->recovering_bits, region);
761 log_set_bit(lc, lc->sync_bits, region);
762 lc->sync_count++;
763 } else if (log_test_bit(lc->sync_bits, region)) {
764 lc->sync_count--;
765 log_clear_bit(lc, lc->sync_bits, region);
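
File lines 734–765 cover the resync bookkeeping: handing out the next region to recover and recording the result. A hedged reconstruction of both methods follows; declarations and return values for the elided lines are inferred, and the real names are presumably core_get_resync_work and core_set_region_sync.

static int core_get_resync_work_sketch(struct dm_dirty_log *log,
					region_t *region)
{
	struct log_c *lc = (struct log_c *) log->context;

	if (lc->sync_search >= lc->region_count)
		return 0;	/* nothing left to recover */

	do {
		/* next region that is not yet in sync ... */
		*region = find_next_zero_bit_le(lc->sync_bits,
						lc->region_count,
						lc->sync_search);
		lc->sync_search = *region + 1;

		if (*region >= lc->region_count)
			return 0;

		/* ... and not already being recovered */
	} while (log_test_bit(lc->recovering_bits, *region));

	log_set_bit(lc, lc->recovering_bits, *region);
	return 1;
}

static void core_set_region_sync_sketch(struct dm_dirty_log *log,
					region_t region, int in_sync)
{
	struct log_c *lc = (struct log_c *) log->context;

	log_clear_bit(lc, lc->recovering_bits, region);
	if (in_sync) {
		log_set_bit(lc, lc->sync_bits, region);
		lc->sync_count++;
	} else if (log_test_bit(lc->sync_bits, region)) {
		/* recovery failed: the region falls back to out-of-sync */
		lc->sync_count--;
		log_clear_bit(lc, lc->sync_bits, region);
	}
}
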
771 struct log_c *lc = (struct log_c *) log->context;
773 return lc->sync_count;
777 if (lc->sync != DEFAULTSYNC) \
778 DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")
784 struct log_c *lc = log->context;
793 lc->sync == DEFAULTSYNC ? 1 : 2, lc->region_size);
804 struct log_c *lc = log->context;
808 DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
809 lc->log_dev_flush_failed ? 'F' :
810 lc->log_dev_failed ? 'D' :
816 lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
817 lc->region_size);
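
For readability, the comment below shows the kind of strings the DMEMIT calls above appear to build; the device name "254:3" and region size 1024 are made-up example values, not taken from the source.

/*
 * Assumed example status output:
 *
 *   core log, STATUSTYPE_TABLE:  "core 1 1024 "         (DEFAULTSYNC)
 *                                "core 2 1024 nosync "  (NOSYNC)
 *   disk log, STATUSTYPE_INFO:   "3 disk 254:3 A"       (healthy)
 *                                "3 disk 254:3 D"       (log device failed)
 *                                "3 disk 254:3 F"       (header flush failed)
 *   disk log, STATUSTYPE_TABLE:  "disk 2 254:3 1024 "   (DEFAULTSYNC)
 */
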