Lines matching refs: lc — drivers/md/dm-log-writes.c (Linux kernel device-mapper log-writes target)

138 static inline sector_t bio_to_dev_sectors(struct log_writes_c *lc,
141 return sectors >> (lc->sectorshift - SECTOR_SHIFT);
144 static inline sector_t dev_to_bio_sectors(struct log_writes_c *lc,
147 return sectors << (lc->sectorshift - SECTOR_SHIFT);
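
The two helpers above (lines 138-147) convert between 512-byte bio sectors and the log device's logical-block-sized sectors. A minimal userspace sketch of the same arithmetic, assuming SECTOR_SHIFT is 9 as in the kernel and a hypothetical 4096-byte logical block size:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9				/* 512-byte bio sectors */

struct log_writes_c {
	uint32_t sectorsize;			/* device logical block size */
	uint32_t sectorshift;			/* ilog2(sectorsize) */
};

/* bio (512 B) sectors -> device (sectorsize B) sectors */
static uint64_t bio_to_dev_sectors(struct log_writes_c *lc, uint64_t sectors)
{
	return sectors >> (lc->sectorshift - SECTOR_SHIFT);
}

/* device (sectorsize B) sectors -> bio (512 B) sectors */
static uint64_t dev_to_bio_sectors(struct log_writes_c *lc, uint64_t sectors)
{
	return sectors << (lc->sectorshift - SECTOR_SHIFT);
}

int main(void)
{
	struct log_writes_c lc = { .sectorsize = 4096, .sectorshift = 12 };

	printf("%llu\n", (unsigned long long)bio_to_dev_sectors(&lc, 8)); /* 1 */
	printf("%llu\n", (unsigned long long)dev_to_bio_sectors(&lc, 1)); /* 8 */
	return 0;
}
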
150 static void put_pending_block(struct log_writes_c *lc)
152 if (atomic_dec_and_test(&lc->pending_blocks)) {
154 if (waitqueue_active(&lc->wait))
155 wake_up(&lc->wait);
159 static void put_io_block(struct log_writes_c *lc)
161 if (atomic_dec_and_test(&lc->io_blocks)) {
163 if (waitqueue_active(&lc->wait))
164 wake_up(&lc->wait);
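
put_pending_block() and put_io_block() (lines 150-164) share one pattern: drop a count and wake anyone sleeping on lc->wait once it reaches zero, which the destructor relies on at lines 625-626. A hedged userspace analogue of that drop-and-wake pattern, substituting a mutex/condvar for the kernel waitqueue; all names here are illustrative (compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending_blocks = 1;
static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_cond = PTHREAD_COND_INITIALIZER;

/* analogue of put_pending_block(): drop one reference, wake waiters at zero */
static void put_pending_block(void)
{
	if (atomic_fetch_sub(&pending_blocks, 1) == 1) {
		pthread_mutex_lock(&wait_lock);
		pthread_cond_broadcast(&wait_cond);
		pthread_mutex_unlock(&wait_lock);
	}
}

/* analogue of the destructor's wait_event(): sleep until every block is put */
static void wait_for_blocks(void)
{
	pthread_mutex_lock(&wait_lock);
	while (atomic_load(&pending_blocks) != 0)
		pthread_cond_wait(&wait_cond, &wait_lock);
	pthread_mutex_unlock(&wait_lock);
}

int main(void)
{
	put_pending_block();	/* last reference dropped, waiters woken */
	wait_for_blocks();	/* returns immediately: the count is zero */
	printf("all blocks put\n");
	return 0;
}
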
170 struct log_writes_c *lc = bio->bi_private;
176 spin_lock_irqsave(&lc->blocks_lock, flags);
177 lc->logging_enabled = false;
178 spin_unlock_irqrestore(&lc->blocks_lock, flags);
182 put_io_block(lc);
188 struct log_writes_c *lc = bio->bi_private;
190 complete(&lc->super_done);
198 static void free_pending_block(struct log_writes_c *lc,
209 put_pending_block(lc);
212 static int write_metadata(struct log_writes_c *lc, void *entry,
221 bio = bio_alloc(lc->logdev->bdev, 1, REQ_OP_WRITE, GFP_KERNEL);
226 bio->bi_private = lc;
240 lc->sectorsize - entrylen - datalen);
243 ret = bio_add_page(bio, page, lc->sectorsize, 0);
244 if (ret != lc->sectorsize) {
254 put_io_block(lc);
258 static int write_inline_data(struct log_writes_c *lc, void *entry,
271 atomic_inc(&lc->io_blocks);
273 bio = bio_alloc(lc->logdev->bdev, bio_pages, REQ_OP_WRITE,
278 bio->bi_private = lc;
282 pg_sectorlen = ALIGN(pg_datalen, lc->sectorsize);
314 put_io_block(lc);
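
write_metadata() zero-pads every log entry out to a full device sector (line 240) and write_inline_data() rounds inline payloads up to sector multiples with ALIGN() (line 282). A small sketch of that padding arithmetic, assuming a power-of-two sector size (which the kernel's ALIGN() also requires) and made-up entry/payload lengths:

#include <stddef.h>
#include <stdio.h>

/* same rounding as the kernel's ALIGN(): 'a' must be a power of two */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t sectorsize = 4096;
	size_t entrylen   = 32;		/* hypothetical entry size */
	size_t datalen    = 100;	/* hypothetical inline payload size */

	/* zero padding appended after entry + data to fill the metadata sector */
	printf("pad bytes    = %zu\n", sectorsize - entrylen - datalen);    /* 3964 */

	/* inline data pages are written out in whole-sector chunks */
	printf("pg_sectorlen = %zu\n", ALIGN_UP((size_t)5000, sectorsize)); /* 8192 */
	return 0;
}
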
318 static int log_one_block(struct log_writes_c *lc,
332 if (write_metadata(lc, &entry, sizeof(entry), block->data,
334 free_pending_block(lc, block);
338 sector += dev_to_bio_sectors(lc, 1);
341 if (write_inline_data(lc, &entry, sizeof(entry), block->data,
343 free_pending_block(lc, block);
353 atomic_inc(&lc->io_blocks);
354 bio = bio_alloc(lc->logdev->bdev, bio_max_segs(block->vec_cnt),
359 bio->bi_private = lc;
369 atomic_inc(&lc->io_blocks);
371 bio = bio_alloc(lc->logdev->bdev,
377 bio->bi_private = lc;
393 put_pending_block(lc);
396 free_pending_block(lc, block);
397 put_io_block(lc);
401 static int log_super(struct log_writes_c *lc)
407 super.nr_entries = cpu_to_le64(lc->logged_entries);
408 super.sectorsize = cpu_to_le32(lc->sectorsize);
410 if (write_metadata(lc, &super, sizeof(super), NULL, 0,
420 wait_for_completion_io(&lc->super_done);
425 static inline sector_t logdev_last_sector(struct log_writes_c *lc)
427 return bdev_nr_sectors(lc->logdev->bdev);
432 struct log_writes_c *lc = arg;
441 spin_lock_irq(&lc->blocks_lock);
442 if (!list_empty(&lc->logging_blocks)) {
443 block = list_first_entry(&lc->logging_blocks,
446 if (!lc->logging_enabled)
449 sector = lc->next_sector;
451 lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors);
452 lc->next_sector += dev_to_bio_sectors(lc, 1);
458 if (!lc->end_sector)
459 lc->end_sector = logdev_last_sector(lc);
460 if (lc->end_sector &&
461 lc->next_sector >= lc->end_sector) {
463 lc->logging_enabled = false;
466 lc->logged_entries++;
467 atomic_inc(&lc->io_blocks);
471 atomic_inc(&lc->io_blocks);
474 logging_enabled = lc->logging_enabled;
475 spin_unlock_irq(&lc->blocks_lock);
478 ret = log_one_block(lc, block, sector);
480 ret = log_super(lc);
482 spin_lock_irq(&lc->blocks_lock);
483 lc->logging_enabled = false;
484 spin_unlock_irq(&lc->blocks_lock);
487 free_pending_block(lc, block);
494 list_empty(&lc->logging_blocks))
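
The kthread hands out log positions strictly sequentially: next_sector starts just past the on-disk superblock (line 566) and each logged block consumes one metadata sector plus its data sectors (lines 449-452), with log_one_block() placing the data right after the metadata entry (line 338). A sketch of that layout arithmetic in 512-byte bio sectors, assuming a 4096-byte log sector size and made-up block sizes:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
	uint32_t sectorsize   = 4096;
	uint32_t sectorshift  = 12;			/* ilog2(sectorsize) */
	uint32_t dev_to_bio   = 1u << (sectorshift - SECTOR_SHIFT);
	uint64_t next_sector  = sectorsize >> SECTOR_SHIFT;	/* skip the superblock */
	uint64_t nr_sectors[] = { 1, 4, 2 };	/* per-block data, in device sectors */

	for (int i = 0; i < 3; i++) {
		uint64_t entry = next_sector;		/* metadata entry sector */
		uint64_t data  = entry + dev_to_bio;	/* data follows the entry */

		next_sector += (nr_sectors[i] + 1) * dev_to_bio;
		printf("block %d: entry at bio sector %llu, data at %llu\n",
		       i, (unsigned long long)entry, (unsigned long long)data);
	}
	return 0;
}
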
508 struct log_writes_c *lc;
521 lc = kzalloc(sizeof(struct log_writes_c), GFP_KERNEL);
522 if (!lc) {
526 spin_lock_init(&lc->blocks_lock);
527 INIT_LIST_HEAD(&lc->unflushed_blocks);
528 INIT_LIST_HEAD(&lc->logging_blocks);
529 init_waitqueue_head(&lc->wait);
530 init_completion(&lc->super_done);
531 atomic_set(&lc->io_blocks, 0);
532 atomic_set(&lc->pending_blocks, 0);
535 ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev);
543 &lc->logdev);
546 dm_put_device(ti, lc->dev);
550 lc->sectorsize = bdev_logical_block_size(lc->dev->bdev);
551 lc->sectorshift = ilog2(lc->sectorsize);
552 lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
553 if (IS_ERR(lc->log_kthread)) {
554 ret = PTR_ERR(lc->log_kthread);
556 dm_put_device(ti, lc->dev);
557 dm_put_device(ti, lc->logdev);
566 lc->next_sector = lc->sectorsize >> SECTOR_SHIFT;
567 lc->logging_enabled = true;
568 lc->end_sector = logdev_last_sector(lc);
569 lc->device_supports_discard = true;
576 ti->private = lc;
580 kfree(lc);
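
The constructor derives sectorshift from the backing device's logical block size with ilog2() (lines 550-551); the conversion helpers at lines 138-147 assume that shift is at least SECTOR_SHIFT. A tiny sketch of that derivation for power-of-two block sizes; ilog2_pow2() is only a stand-in for the kernel's ilog2():

#include <stdint.h>
#include <stdio.h>

/* log2 of a power-of-two value, standing in for the kernel's ilog2() */
static unsigned int ilog2_pow2(uint32_t v)
{
	unsigned int shift = 0;

	while (v > 1) {
		v >>= 1;
		shift++;
	}
	return shift;
}

int main(void)
{
	printf("%u\n", ilog2_pow2(512));	/* 9  */
	printf("%u\n", ilog2_pow2(4096));	/* 12 */
	return 0;
}
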
584 static int log_mark(struct log_writes_c *lc, char *data)
587 size_t maxsize = lc->sectorsize - sizeof(struct log_write_entry);
601 atomic_inc(&lc->pending_blocks);
604 spin_lock_irq(&lc->blocks_lock);
605 list_add_tail(&block->list, &lc->logging_blocks);
606 spin_unlock_irq(&lc->blocks_lock);
607 wake_up_process(lc->log_kthread);
613 struct log_writes_c *lc = ti->private;
615 spin_lock_irq(&lc->blocks_lock);
616 list_splice_init(&lc->unflushed_blocks, &lc->logging_blocks);
617 spin_unlock_irq(&lc->blocks_lock);
623 log_mark(lc, "dm-log-writes-end");
624 wake_up_process(lc->log_kthread);
625 wait_event(lc->wait, !atomic_read(&lc->io_blocks) &&
626 !atomic_read(&lc->pending_blocks));
627 kthread_stop(lc->log_kthread);
629 WARN_ON(!list_empty(&lc->logging_blocks));
630 WARN_ON(!list_empty(&lc->unflushed_blocks));
631 dm_put_device(ti, lc->dev);
632 dm_put_device(ti, lc->logdev);
633 kfree(lc);
638 struct log_writes_c *lc = ti->private;
640 bio_set_dev(bio, lc->dev->bdev);
645 struct log_writes_c *lc = ti->private;
660 if (!lc->logging_enabled)
685 spin_lock_irq(&lc->blocks_lock);
686 lc->logging_enabled = false;
687 spin_unlock_irq(&lc->blocks_lock);
692 atomic_inc(&lc->pending_blocks);
703 block->sector = bio_to_dev_sectors(lc, bio->bi_iter.bi_sector);
704 block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));
709 if (lc->device_supports_discard)
717 spin_lock_irq(&lc->blocks_lock);
718 list_splice_init(&lc->unflushed_blocks, &block->list);
719 spin_unlock_irq(&lc->blocks_lock);
739 free_pending_block(lc, block);
740 spin_lock_irq(&lc->blocks_lock);
741 lc->logging_enabled = false;
742 spin_unlock_irq(&lc->blocks_lock);
757 spin_lock_irq(&lc->blocks_lock);
758 list_splice_init(&lc->unflushed_blocks, &block->list);
759 spin_unlock_irq(&lc->blocks_lock);
769 struct log_writes_c *lc = ti->private;
776 spin_lock_irqsave(&lc->blocks_lock, flags);
778 list_splice_tail_init(&block->list, &lc->logging_blocks);
779 list_add_tail(&block->list, &lc->logging_blocks);
780 wake_up_process(lc->log_kthread);
782 list_add_tail(&block->list, &lc->logging_blocks);
783 wake_up_process(lc->log_kthread);
785 list_add_tail(&block->list, &lc->unflushed_blocks);
786 spin_unlock_irqrestore(&lc->blocks_lock, flags);
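
A FLUSH bio steals the unflushed_blocks list onto its own block in the map path (lines 717-719 and 757-759); when that bio completes, the whole list is spliced onto logging_blocks and the kthread is woken (lines 776-780). A compact userspace sketch of the list splice itself, using a trivial doubly linked list in place of the kernel's list_head; everything here is illustrative:

#include <stdio.h>

struct node { struct node *prev, *next; int id; };

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct node *head, struct node *n)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* move every entry of 'src' to the tail of 'dst', leaving 'src' empty */
static void list_splice_tail_init(struct node *src, struct node *dst)
{
	if (src->next == src)
		return;
	src->next->prev = dst->prev;
	dst->prev->next = src->next;
	src->prev->next = dst;
	dst->prev = src->prev;
	list_init(src);
}

int main(void)
{
	struct node unflushed, logging;
	struct node a = { .id = 1 }, b = { .id = 2 };

	list_init(&unflushed);
	list_init(&logging);
	list_add_tail(&unflushed, &a);	/* completed writes awaiting a flush */
	list_add_tail(&unflushed, &b);

	/* a flush completion moves everything to the logging list in one go */
	list_splice_tail_init(&unflushed, &logging);
	for (struct node *n = logging.next; n != &logging; n = n->next)
		printf("logging block %d\n", n->id);
	return 0;
}
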
800 struct log_writes_c *lc = ti->private;
804 DMEMIT("%llu %llu", lc->logged_entries,
805 (unsigned long long)lc->next_sector - 1);
806 if (!lc->logging_enabled)
811 DMEMIT("%s %s", lc->dev->name, lc->logdev->name);
823 struct log_writes_c *lc = ti->private;
824 struct dm_dev *dev = lc->dev;
839 struct log_writes_c *lc = ti->private;
841 return fn(ti, lc->dev, 0, ti->len, data);
852 struct log_writes_c *lc = ti->private;
860 r = log_mark(lc, argv[1]);
869 struct log_writes_c *lc = ti->private;
871 if (!bdev_max_discard_sectors(lc->dev->bdev)) {
872 lc->device_supports_discard = false;
873 limits->discard_granularity = lc->sectorsize;
876 limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
877 limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
886 struct log_writes_c *lc = ti->private;
888 *pgoff += (get_start_sect(lc->dev->bdev) >> PAGE_SECTORS_SHIFT);
889 return lc->dev->dax_dev;
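
The DAX hook translates the caller's target-relative page offset into a whole-device page offset by adding the partition's start, converted from 512-byte sectors to pages (lines 888-889). A one-liner's worth of that arithmetic, assuming 4 KiB pages (so PAGE_SECTORS_SHIFT is 3) and a made-up partition start:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT		9
#define PAGE_SHIFT		12			/* assumes 4 KiB pages */
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)

int main(void)
{
	uint64_t start_sect = 2048;	/* hypothetical partition start: 1 MiB in */
	uint64_t pgoff      = 10;	/* target-relative offset, in pages */

	pgoff += start_sect >> PAGE_SECTORS_SHIFT;
	printf("device pgoff = %llu\n", (unsigned long long)pgoff);	/* 266 */
	return 0;
}
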