Lines matching refs:lc (all references to the struct log_writes_c context pointer, lc, in the device-mapper log-writes target, drivers/md/dm-log-writes.c; only matching source lines are listed, each prefixed with its line number in the file).
137 static inline sector_t bio_to_dev_sectors(struct log_writes_c *lc,
140 return sectors >> (lc->sectorshift - SECTOR_SHIFT);
143 static inline sector_t dev_to_bio_sectors(struct log_writes_c *lc,
146 return sectors << (lc->sectorshift - SECTOR_SHIFT);
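
The two conversion helpers (137-146) are shown nearly whole; filling in the elided signature lines, they presumably read as below. lc->sectorshift is ilog2(lc->sectorsize) (set at 572-573), so shifting by (sectorshift - SECTOR_SHIFT) converts between 512-byte bio sectors and logical-block-sized device sectors.

static inline sector_t bio_to_dev_sectors(struct log_writes_c *lc,
                                          sector_t sectors)
{
        /* 512-byte bio sectors -> device logical sectors */
        return sectors >> (lc->sectorshift - SECTOR_SHIFT);
}

static inline sector_t dev_to_bio_sectors(struct log_writes_c *lc,
                                          sector_t sectors)
{
        /* device logical sectors -> 512-byte bio sectors */
        return sectors << (lc->sectorshift - SECTOR_SHIFT);
}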
149 static void put_pending_block(struct log_writes_c *lc)
151 if (atomic_dec_and_test(&lc->pending_blocks)) {
153 if (waitqueue_active(&lc->wait))
154 wake_up(&lc->wait);
158 static void put_io_block(struct log_writes_c *lc)
160 if (atomic_dec_and_test(&lc->io_blocks)) {
162 if (waitqueue_active(&lc->wait))
163 wake_up(&lc->wait);
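
put_pending_block() (149) and put_io_block() (158) are two instances of one drain pattern: every queued block holds a reference on pending_blocks and every in-flight log bio holds one on io_blocks; whoever drops the last reference wakes lc->wait, which the destructor sleeps on (647-648). A sketch of the shared shape, assuming the usual barrier between the atomic op and the waitqueue check:

static void put_io_block(struct log_writes_c *lc)
{
        if (atomic_dec_and_test(&lc->io_blocks)) {
                /* order the decrement against the sleeper's re-check */
                smp_mb__after_atomic();
                if (waitqueue_active(&lc->wait))
                        wake_up(&lc->wait);
        }
}

put_pending_block() is identical but for lc->pending_blocks; free_pending_block() (197-208) frees a block's copied pages and data before dropping its pending reference.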
169 struct log_writes_c *lc = bio->bi_private;
175 spin_lock_irqsave(&lc->blocks_lock, flags);
176 lc->logging_enabled = false;
177 spin_unlock_irqrestore(&lc->blocks_lock, flags);
181 put_io_block(lc);
187 struct log_writes_c *lc = bio->bi_private;
189 complete(&lc->super_done);
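
Lines 169-190 are the log-bio completion handlers. An I/O error flips logging_enabled off under blocks_lock so no further entries are attempted; the superblock variant also completes lc->super_done, which log_super() waits on (442) so an older super write can never land after a newer one. Roughly, assuming the mainline structure:

static void log_end_io(struct bio *bio)
{
        struct log_writes_c *lc = bio->bi_private;

        if (bio->bi_status) {
                unsigned long flags;

                spin_lock_irqsave(&lc->blocks_lock, flags);
                lc->logging_enabled = false;    /* poison the log on error */
                spin_unlock_irqrestore(&lc->blocks_lock, flags);
        }

        bio_free_pages(bio);    /* log bios own their pages */
        put_io_block(lc);
        bio_put(bio);
}

static void log_end_super(struct bio *bio)
{
        struct log_writes_c *lc = bio->bi_private;

        complete(&lc->super_done);      /* unblocks log_super() */
        log_end_io(bio);
}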
197 static void free_pending_block(struct log_writes_c *lc,
208 put_pending_block(lc);
211 static int write_metadata(struct log_writes_c *lc, void *entry,
227 bio_set_dev(bio, lc->logdev->bdev);
230 bio->bi_private = lc;
245 lc->sectorsize - entrylen - datalen);
248 ret = bio_add_page(bio, page, lc->sectorsize, 0);
249 if (ret != lc->sectorsize) {
259 put_io_block(lc);
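
write_metadata() (211-259) emits exactly one log sector: allocate a bio plus a page aimed at the log device, copy in the entry and any inline payload, zero-fill the rest of the sector (245), and submit. A short bio_add_page() (248-249) or an allocation failure unwinds and drops the io_blocks reference the caller took. A condensed sketch against the pre-5.18 bio API this listing appears to use:

static int write_metadata(struct log_writes_c *lc, void *entry,
                          size_t entrylen, void *data, size_t datalen,
                          sector_t sector)
{
        struct bio *bio;
        struct page *page;
        void *ptr;

        bio = bio_alloc(GFP_KERNEL, 1);
        if (!bio)
                goto error;
        bio->bi_iter.bi_size = 0;
        bio->bi_iter.bi_sector = sector;
        bio_set_dev(bio, lc->logdev->bdev);
        bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
                         log_end_super : log_end_io;
        bio->bi_private = lc;
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        page = alloc_page(GFP_KERNEL);
        if (!page)
                goto error_bio;

        ptr = kmap_atomic(page);
        memcpy(ptr, entry, entrylen);
        if (datalen)
                memcpy(ptr + entrylen, data, datalen);
        memset(ptr + entrylen + datalen, 0,
               lc->sectorsize - entrylen - datalen);    /* zero-pad (245) */
        kunmap_atomic(ptr);

        if (bio_add_page(bio, page, lc->sectorsize, 0) != lc->sectorsize) {
                __free_page(page);
                goto error_bio;
        }
        submit_bio(bio);
        return 0;

error_bio:
        bio_put(bio);
error:
        put_io_block(lc);
        return -1;
}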
263 static int write_inline_data(struct log_writes_c *lc, void *entry,
277 atomic_inc(&lc->io_blocks);
287 bio_set_dev(bio, lc->logdev->bdev);
289 bio->bi_private = lc;
294 pg_sectorlen = ALIGN(pg_datalen, lc->sectorsize);
327 put_io_block(lc);
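
write_inline_data() (263-327) writes payload carried in block->data (marks and DAX writes) rather than in bio pages. Each emitted bio takes its own io_blocks reference (277), and each page's payload is rounded up to a whole log sector with a zeroed tail; with ptr the kmapped destination page, the core arithmetic around line 294 presumably is:

        pg_datalen = min_t(size_t, datalen, PAGE_SIZE);
        pg_sectorlen = ALIGN(pg_datalen, lc->sectorsize);        /* line 294 */
        memcpy(ptr, data, pg_datalen);
        memset(ptr + pg_datalen, 0, pg_sectorlen - pg_datalen);  /* zero tail */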
331 static int log_one_block(struct log_writes_c *lc,
345 if (write_metadata(lc, &entry, sizeof(entry), block->data,
347 free_pending_block(lc, block);
351 sector += dev_to_bio_sectors(lc, 1);
354 if (write_inline_data(lc, &entry, sizeof(entry), block->data,
356 free_pending_block(lc, block);
366 atomic_inc(&lc->io_blocks);
374 bio_set_dev(bio, lc->logdev->bdev);
376 bio->bi_private = lc;
387 atomic_inc(&lc->io_blocks);
396 bio_set_dev(bio, lc->logdev->bdev);
398 bio->bi_private = lc;
415 put_pending_block(lc);
418 free_pending_block(lc, block);
419 put_io_block(lc);
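
log_one_block() (331-419) writes the block's metadata entry at the reserved sector (mark text rides inline in that same sector), steps past it (351), then either writes block->data via write_inline_data() (354) or clones the pages copied at map time into log bios. The duplicated setup at 366-376 and 387-398 exists because a bio that fills up mid-loop is submitted and a fresh one chained, each holding its own io_blocks reference; on success the block's pending reference is dropped (415), while failures free the block and the io reference (418-419). A reconstruction of the data loop, hedged to the mainline shape:

        atomic_inc(&lc->io_blocks);
        bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
        if (!bio)
                goto error;
        bio->bi_iter.bi_size = 0;
        bio->bi_iter.bi_sector = sector;
        bio_set_dev(bio, lc->logdev->bdev);
        bio->bi_end_io = log_end_io;
        bio->bi_private = lc;
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        for (i = 0; i < block->vec_cnt; i++) {
                /* pages were copied at map time, so the original bio
                 * may long since have completed */
                ret = bio_add_page(bio, block->vecs[i].bv_page,
                                   block->vecs[i].bv_len, 0);
                if (ret != block->vecs[i].bv_len) {
                        /* bio full: submit and chain a new one (387-398) */
                        atomic_inc(&lc->io_blocks);
                        submit_bio(bio);
                        bio = bio_alloc(GFP_KERNEL,
                                        min(block->vec_cnt - i, BIO_MAX_PAGES));
                        if (!bio)
                                goto error;
                        bio->bi_iter.bi_size = 0;
                        bio->bi_iter.bi_sector = sector;
                        bio_set_dev(bio, lc->logdev->bdev);
                        bio->bi_end_io = log_end_io;
                        bio->bi_private = lc;
                        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                        ret = bio_add_page(bio, block->vecs[i].bv_page,
                                           block->vecs[i].bv_len, 0);
                        if (ret != block->vecs[i].bv_len)
                                goto error_bio;
                }
                sector += block->vecs[i].bv_len >> SECTOR_SHIFT;
        }
        submit_bio(bio);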
423 static int log_super(struct log_writes_c *lc)
429 super.nr_entries = cpu_to_le64(lc->logged_entries);
430 super.sectorsize = cpu_to_le32(lc->sectorsize);
432 if (write_metadata(lc, &super, sizeof(super), NULL, 0,
442 wait_for_completion_io(&lc->super_done);
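
log_super() (423-442) rewrites log sector 0 with the running entry count and blocks until that write completes, so nr_entries on disk always reflects the newest super bio. Presumably:

static int log_super(struct log_writes_c *lc)
{
        struct log_write_super super;

        super.magic = cpu_to_le64(WRITE_LOG_MAGIC);
        super.version = cpu_to_le64(WRITE_LOG_VERSION);
        super.nr_entries = cpu_to_le64(lc->logged_entries);
        super.sectorsize = cpu_to_le32(lc->sectorsize);

        if (write_metadata(lc, &super, sizeof(super), NULL, 0,
                           WRITE_LOG_SUPER_SECTOR)) {
                DMERR("Couldn't write super");
                return -1;
        }

        /* super writes must land in order; log_end_super() signals this */
        wait_for_completion_io(&lc->super_done);

        return 0;
}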
447 static inline sector_t logdev_last_sector(struct log_writes_c *lc)
449 return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT;
454 struct log_writes_c *lc = (struct log_writes_c *)arg;
463 spin_lock_irq(&lc->blocks_lock);
464 if (!list_empty(&lc->logging_blocks)) {
465 block = list_first_entry(&lc->logging_blocks,
468 if (!lc->logging_enabled)
471 sector = lc->next_sector;
473 lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors);
474 lc->next_sector += dev_to_bio_sectors(lc, 1);
480 if (!lc->end_sector)
481 lc->end_sector = logdev_last_sector(lc);
482 if (lc->end_sector &&
483 lc->next_sector >= lc->end_sector) {
485 lc->logging_enabled = false;
488 lc->logged_entries++;
489 atomic_inc(&lc->io_blocks);
493 atomic_inc(&lc->io_blocks);
496 logging_enabled = lc->logging_enabled;
497 spin_unlock_irq(&lc->blocks_lock);
500 ret = log_one_block(lc, block, sector);
502 ret = log_super(lc);
504 spin_lock_irq(&lc->blocks_lock);
505 lc->logging_enabled = false;
506 spin_unlock_irq(&lc->blocks_lock);
509 free_pending_block(lc, block);
516 list_empty(&lc->logging_blocks))
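
Lines 454-516 are the logging kthread's main loop. Under blocks_lock it pops the next block, reserves log space (data plus one metadata sector, 471-474, with discards consuming no data space), lazily discovers the log device's size (480-481), shuts logging down if the log would overflow (482-485), and takes io_blocks references for the block and, for FUA or mark blocks, the super update that follows (489-493). The I/O itself runs outside the lock. A condensed skeleton (freezer handling elided), hedged to the mainline structure:

static int log_writes_kthread(void *arg)
{
        struct log_writes_c *lc = (struct log_writes_c *)arg;
        sector_t sector = 0;

        while (!kthread_should_stop()) {
                bool super = false, logging_enabled;
                struct pending_block *block = NULL;

                spin_lock_irq(&lc->blocks_lock);
                if (!list_empty(&lc->logging_blocks)) {
                        block = list_first_entry(&lc->logging_blocks,
                                                 struct pending_block, list);
                        list_del_init(&block->list);
                        if (!lc->logging_enabled)
                                goto next;

                        sector = lc->next_sector;
                        if (!(block->flags & LOG_DISCARD_FLAG))
                                lc->next_sector +=
                                        dev_to_bio_sectors(lc, block->nr_sectors);
                        lc->next_sector += dev_to_bio_sectors(lc, 1);

                        if (!lc->end_sector)
                                lc->end_sector = logdev_last_sector(lc);
                        if (lc->end_sector &&
                            lc->next_sector >= lc->end_sector) {
                                DMERR("Ran out of space on the logdev");
                                lc->logging_enabled = false;
                                goto next;
                        }
                        lc->logged_entries++;
                        atomic_inc(&lc->io_blocks);

                        super = (block->flags & (LOG_FUA_FLAG | LOG_MARK_FLAG));
                        if (super)
                                atomic_inc(&lc->io_blocks);
                }
next:
                logging_enabled = lc->logging_enabled;
                spin_unlock_irq(&lc->blocks_lock);
                if (block) {
                        if (logging_enabled) {
                                if (log_one_block(lc, block, sector) ||
                                    (super && log_super(lc))) {
                                        spin_lock_irq(&lc->blocks_lock);
                                        lc->logging_enabled = false;
                                        spin_unlock_irq(&lc->blocks_lock);
                                }
                        } else
                                free_pending_block(lc, block);
                        continue;
                }

                /* nothing queued: sleep until a producer wakes us */
                set_current_state(TASK_INTERRUPTIBLE);
                if (!kthread_should_stop() &&
                    list_empty(&lc->logging_blocks))
                        schedule();
                __set_current_state(TASK_RUNNING);
        }
        return 0;
}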
530 struct log_writes_c *lc;
543 lc = kzalloc(sizeof(struct log_writes_c), GFP_KERNEL);
544 if (!lc) {
548 spin_lock_init(&lc->blocks_lock);
549 INIT_LIST_HEAD(&lc->unflushed_blocks);
550 INIT_LIST_HEAD(&lc->logging_blocks);
551 init_waitqueue_head(&lc->wait);
552 init_completion(&lc->super_done);
553 atomic_set(&lc->io_blocks, 0);
554 atomic_set(&lc->pending_blocks, 0);
557 ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev);
565 &lc->logdev);
568 dm_put_device(ti, lc->dev);
572 lc->sectorsize = bdev_logical_block_size(lc->dev->bdev);
573 lc->sectorshift = ilog2(lc->sectorsize);
574 lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
575 if (IS_ERR(lc->log_kthread)) {
576 ret = PTR_ERR(lc->log_kthread);
578 dm_put_device(ti, lc->dev);
579 dm_put_device(ti, lc->logdev);
588 lc->next_sector = lc->sectorsize >> SECTOR_SHIFT;
589 lc->logging_enabled = true;
590 lc->end_sector = logdev_last_sector(lc);
591 lc->device_supports_discard = true;
598 ti->private = lc;
602 kfree(lc);
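
The constructor fragments (530-602) show the expected flow: allocate the context, initialise locks, lists, waitqueue, completion and counters (548-554), grab the data and log devices, derive the sector geometry from the data device's logical block size (572-573), start the 'log-write' kthread (574), and point next_sector past the superblock sector (588). The table line it parses is <dev_path> <log_dev_path>; a hedged sketch of the argument handling, assuming the dm_arg_set helpers:

        struct dm_arg_set as;
        const char *devname, *logdevname;

        as.argc = argc;
        as.argv = argv;

        if (argc < 2) {
                ti->error = "Invalid argument count";
                return -EINVAL;
        }

        devname = dm_shift_arg(&as);     /* data device */
        logdevname = dm_shift_arg(&as);  /* log device */
        /* context allocation and device setup continue as at 543-598 */

Userspace would load such a table with something like dmsetup create lw --table "0 $(blockdev --getsz /dev/sdb) log-writes /dev/sdb /dev/sdc" (device names hypothetical).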
606 static int log_mark(struct log_writes_c *lc, char *data)
609 size_t maxsize = lc->sectorsize - sizeof(struct log_write_entry);
623 atomic_inc(&lc->pending_blocks);
626 spin_lock_irq(&lc->blocks_lock);
627 list_add_tail(&block->list, &lc->logging_blocks);
628 spin_unlock_irq(&lc->blocks_lock);
629 wake_up_process(lc->log_kthread);
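
log_mark() (606-629) queues a synthetic block carrying a caller-supplied string, capped so that entry plus string still fit in one log sector (609). Presumably, close to mainline:

static int log_mark(struct log_writes_c *lc, char *data)
{
        struct pending_block *block;
        size_t maxsize = lc->sectorsize - sizeof(struct log_write_entry);

        block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
        if (!block)
                return -ENOMEM;

        block->data = kstrndup(data, maxsize - 1, GFP_KERNEL);
        if (!block->data) {
                kfree(block);
                return -ENOMEM;
        }
        atomic_inc(&lc->pending_blocks);
        block->datalen = strlen(block->data);
        block->flags |= LOG_MARK_FLAG;
        spin_lock_irq(&lc->blocks_lock);
        list_add_tail(&block->list, &lc->logging_blocks);
        spin_unlock_irq(&lc->blocks_lock);
        wake_up_process(lc->log_kthread);
        return 0;
}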
635 struct log_writes_c *lc = ti->private;
637 spin_lock_irq(&lc->blocks_lock);
638 list_splice_init(&lc->unflushed_blocks, &lc->logging_blocks);
639 spin_unlock_irq(&lc->blocks_lock);
645 log_mark(lc, "dm-log-writes-end");
646 wake_up_process(lc->log_kthread);
647 wait_event(lc->wait, !atomic_read(&lc->io_blocks) &&
648 !atomic_read(&lc->pending_blocks));
649 kthread_stop(lc->log_kthread);
651 WARN_ON(!list_empty(&lc->logging_blocks));
652 WARN_ON(!list_empty(&lc->unflushed_blocks));
653 dm_put_device(ti, lc->dev);
654 dm_put_device(ti, lc->logdev);
655 kfree(lc);
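
The destructor fragments (635-655) are nearly complete; filled in, the teardown order is: push all unflushed blocks into the logging queue, log a final mark, then sleep on lc->wait until both reference counters drain before stopping the kthread and releasing the devices. Roughly:

static void log_writes_dtr(struct dm_target *ti)
{
        struct log_writes_c *lc = ti->private;

        spin_lock_irq(&lc->blocks_lock);
        list_splice_init(&lc->unflushed_blocks, &lc->logging_blocks);
        spin_unlock_irq(&lc->blocks_lock);

        /* a final mark so replay tools can see where the run ended */
        log_mark(lc, "dm-log-writes-end");
        wake_up_process(lc->log_kthread);
        wait_event(lc->wait, !atomic_read(&lc->io_blocks) &&
                   !atomic_read(&lc->pending_blocks));
        kthread_stop(lc->log_kthread);

        WARN_ON(!list_empty(&lc->logging_blocks));
        WARN_ON(!list_empty(&lc->unflushed_blocks));
        dm_put_device(ti, lc->dev);
        dm_put_device(ti, lc->logdev);
        kfree(lc);
}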
660 struct log_writes_c *lc = ti->private;
662 bio_set_dev(bio, lc->dev->bdev);
667 struct log_writes_c *lc = ti->private;
682 if (!lc->logging_enabled)
707 spin_lock_irq(&lc->blocks_lock);
708 lc->logging_enabled = false;
709 spin_unlock_irq(&lc->blocks_lock);
714 atomic_inc(&lc->pending_blocks);
725 block->sector = bio_to_dev_sectors(lc, bio->bi_iter.bi_sector);
726 block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));
731 if (lc->device_supports_discard)
739 spin_lock_irq(&lc->blocks_lock);
740 list_splice_init(&lc->unflushed_blocks, &block->list);
741 spin_unlock_irq(&lc->blocks_lock);
761 free_pending_block(lc, block);
762 spin_lock_irq(&lc->blocks_lock);
763 lc->logging_enabled = false;
764 spin_unlock_irq(&lc->blocks_lock);
781 spin_lock_irq(&lc->blocks_lock);
782 list_splice_init(&lc->unflushed_blocks, &block->list);
783 spin_unlock_irq(&lc->blocks_lock);
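
Lines 660-783 span normal_map_bio() and log_writes_map(). The map path allocates a pending_block per write, records flush/FUA/discard flags, converts the bio's position to device sectors (725-726), and copies every payload page into fresh pages so the block can be written to the log long after the original bio completes; a failed page copy disables logging (761-764). Discards carry no data and are only passed down when the device supports them (731); an empty flush splices all unflushed blocks onto itself (739-741), as does a data-carrying flush after the copy loop (781-783). The copy core, condensed from the fragments:

        bio_for_each_segment(bv, bio, iter) {
                struct page *page;
                void *src, *dst;

                page = alloc_page(GFP_NOIO);
                if (!page) {
                        free_pending_block(lc, block);
                        spin_lock_irq(&lc->blocks_lock);
                        lc->logging_enabled = false;    /* 761-764 */
                        spin_unlock_irq(&lc->blocks_lock);
                        return DM_MAPIO_KILL;
                }

                src = kmap_atomic(bv.bv_page);
                dst = kmap_atomic(page);
                memcpy(dst, src + bv.bv_offset, bv.bv_len);
                kunmap_atomic(dst);
                kunmap_atomic(src);
                block->vecs[i].bv_page = page;
                block->vecs[i].bv_len = bv.bv_len;
                block->vec_cnt++;
                i++;
        }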
793 struct log_writes_c *lc = ti->private;
800 spin_lock_irqsave(&lc->blocks_lock, flags);
802 list_splice_tail_init(&block->list, &lc->logging_blocks);
803 list_add_tail(&block->list, &lc->logging_blocks);
804 wake_up_process(lc->log_kthread);
806 list_add_tail(&block->list, &lc->logging_blocks);
807 wake_up_process(lc->log_kthread);
809 list_add_tail(&block->list, &lc->unflushed_blocks);
810 spin_unlock_irqrestore(&lc->blocks_lock, flags);
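
normal_end_io() (793-810) decides ordering only after the write has actually completed on the data device: a finished flush drags every unflushed block (then itself) onto logging_blocks and wakes the kthread, a FUA write queues just itself, and everything else parks on unflushed_blocks until some later flush completes. Presumably:

static int normal_end_io(struct dm_target *ti, struct bio *bio,
                         blk_status_t *error)
{
        struct log_writes_c *lc = ti->private;
        struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

        if (bio_data_dir(bio) == WRITE && pb->block) {
                struct pending_block *block = pb->block;
                unsigned long flags;

                spin_lock_irqsave(&lc->blocks_lock, flags);
                if (block->flags & LOG_FLUSH_FLAG) {
                        /* everything before this flush is now stable: log it */
                        list_splice_tail_init(&block->list, &lc->logging_blocks);
                        list_add_tail(&block->list, &lc->logging_blocks);
                        wake_up_process(lc->log_kthread);
                } else if (block->flags & LOG_FUA_FLAG) {
                        list_add_tail(&block->list, &lc->logging_blocks);
                        wake_up_process(lc->log_kthread);
                } else
                        list_add_tail(&block->list, &lc->unflushed_blocks);
                spin_unlock_irqrestore(&lc->blocks_lock, flags);
        }

        return DM_ENDIO_DONE;
}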
824 struct log_writes_c *lc = ti->private;
828 DMEMIT("%llu %llu", lc->logged_entries,
829 (unsigned long long)lc->next_sector - 1);
830 if (!lc->logging_enabled)
835 DMEMIT("%s %s", lc->dev->name, lc->logdev->name);
843 struct log_writes_c *lc = ti->private;
844 struct dm_dev *dev = lc->dev;
859 struct log_writes_c *lc = ti->private;
861 return fn(ti, lc->dev, 0, ti->len, data);
872 struct log_writes_c *lc = ti->private;
880 r = log_mark(lc, argv[1]);
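
The message handler (872-880) exposes log_mark() to userspace; 'mark' is the only verb the fragments show. A sketch:

static int log_writes_message(struct dm_target *ti, unsigned int argc,
                              char **argv, char *result, unsigned int maxlen)
{
        int r = -EINVAL;
        struct log_writes_c *lc = ti->private;

        if (argc != 2)
                return r;

        if (!strcasecmp(argv[0], "mark"))
                r = log_mark(lc, argv[1]);

        return r;
}

Userspace drops a mark with e.g. dmsetup message lw 0 mark my-checkpoint (device and mark names hypothetical); a replay tool can later stop at that label.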
889 struct log_writes_c *lc = ti->private;
890 struct request_queue *q = bdev_get_queue(lc->dev->bdev);
893 lc->device_supports_discard = false;
894 limits->discard_granularity = lc->sectorsize;
897 limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
898 limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
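
log_writes_io_hints() (889-898) mirrors the data device's block-size limits and, when the device cannot discard, clears device_supports_discard so the map path completes discards itself (731) while still logging them. Roughly, for the request_queue-era API the listing shows:

static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
        struct log_writes_c *lc = ti->private;
        struct request_queue *q = bdev_get_queue(lc->dev->bdev);

        if (!q || !blk_queue_discard(q)) {
                lc->device_supports_discard = false;
                limits->discard_granularity = lc->sectorsize;
                limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
        }
        limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
        limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
        limits->io_min = limits->physical_block_size;
}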
903 static int log_dax(struct log_writes_c *lc, sector_t sector, size_t bytes,
936 block->sector = bio_to_dev_sectors(lc, sector);
937 block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift;
939 atomic_inc(&lc->pending_blocks);
940 spin_lock_irq(&lc->blocks_lock);
941 list_add_tail(&block->list, &lc->unflushed_blocks);
942 spin_unlock_irq(&lc->blocks_lock);
943 wake_up_process(lc->log_kthread);
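
log_dax() (903-943) captures a DAX write by copying the iterator's bytes into a pending block, then rewinding the iterator so the real copy can consume the same bytes again; the block's length is rounded up to whole log sectors (937) and it is parked on unflushed_blocks like any other write. Presumably:

static int log_dax(struct log_writes_c *lc, sector_t sector, size_t bytes,
                   struct iov_iter *i)
{
        struct pending_block *block;

        if (!bytes)
                return 0;

        block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
        if (!block)
                return -ENOMEM;

        block->data = kzalloc(bytes, GFP_KERNEL);
        if (!block->data) {
                kfree(block);
                return -ENOMEM;
        }

        if (!copy_from_iter(block->data, bytes, i)) {
                kfree(block->data);
                kfree(block);
                return -EIO;
        }

        /* rewind so the actual DAX copy sees the same bytes */
        iov_iter_revert(i, bytes);

        block->datalen = bytes;
        block->sector = bio_to_dev_sectors(lc, sector);
        block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift;

        atomic_inc(&lc->pending_blocks);
        spin_lock_irq(&lc->blocks_lock);
        list_add_tail(&block->list, &lc->unflushed_blocks);
        spin_unlock_irq(&lc->blocks_lock);
        wake_up_process(lc->log_kthread);

        return 0;
}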
951 struct log_writes_c *lc = ti->private;
955 ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages * PAGE_SIZE, &pgoff);
958 return dax_direct_access(lc->dev->dax_dev, pgoff, nr_pages, kaddr, pfn);
965 struct log_writes_c *lc = ti->private;
969 if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
973 if (!lc->logging_enabled)
976 err = log_dax(lc, sector, bytes, i);
982 return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
989 struct log_writes_c *lc = ti->private;
992 if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
994 return dax_copy_to_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
1001 struct log_writes_c *lc = ti->private;
1004 ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages << PAGE_SHIFT,
1008 return dax_zero_page_range(lc->dev->dax_dev, pgoff,
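
Of the DAX ops (951-1008), only the write path is intercepted: copy_from_iter logs through log_dax() before forwarding, while direct_access, copy_to_iter and (at least in these fragments) zero_page_range just translate the offset with bdev_dax_pgoff() and pass through. The write wrapper, hedged (the PAGE_SECTORS sector derivation is an assumption):

static size_t log_writes_dax_copy_from_iter(struct dm_target *ti,
                                            pgoff_t pgoff, void *addr,
                                            size_t bytes, struct iov_iter *i)
{
        struct log_writes_c *lc = ti->private;
        sector_t sector = pgoff * PAGE_SECTORS;
        int err;

        if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
                return 0;

        /* nothing to log once logging has been shut off */
        if (!lc->logging_enabled)
                goto dax_copy;

        err = log_dax(lc, sector, bytes, i);
        if (err)
                return 0;
dax_copy:
        return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
}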