Lines Matching refs:bdev
34 struct block_device bdev;
45 return &BDEV_I(inode)->bdev;
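The two matches above are the glue of the bdev pseudo-filesystem: every block-device inode is allocated as part of a bdev_inode pair, so I_BDEV() is just a container_of() walk. A minimal sketch of that pattern, mirroring the definitions these lines come from:

#include <linux/blkdev.h>
#include <linux/fs.h>

/* Each inode on bdevfs embeds the block_device it describes. */
struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}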
49 static void bdev_write_inode(struct block_device *bdev)
51 struct inode *inode = bdev->bd_inode;
61 bdev, ret);
68 static void kill_bdev(struct block_device *bdev)
70 struct address_space *mapping = bdev->bd_inode->i_mapping;
80 void invalidate_bdev(struct block_device *bdev)
82 struct address_space *mapping = bdev->bd_inode->i_mapping;
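kill_bdev() and invalidate_bdev() differ in how destructive they are: the former truncates the whole mapping and silently discards dirty pages, while the latter only drops clean, reclaimable page-cache pages. A sketch paraphrasing the bodies these matches come from:

/* kill_bdev() loses dirty data, so callers sync_blockdev() first if they
 * care; invalidate_bdev() is safe but best-effort. */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping_empty(mapping))
		return;
	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);	/* drops dirty pages too */
}

void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* flush per-CPU LRU-add caches first */
		invalidate_mapping_pages(mapping, 0, -1);	/* clean pages only */
	}
}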
93 * Drop all buffers & page cache for the given bdev range. This function bails
94 * with an error if the bdev has another exclusive owner (such as a filesystem).
96 int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
105 int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL);
110 truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
112 bd_abort_claiming(bdev, truncate_bdev_range);
120 return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
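Reassembled, the truncate_bdev_range() matches show a claim-guarded truncate: a caller without an exclusive open first takes a provisional claim so it cannot race with an exclusive owner such as a mounted filesystem, and falls back to invalidating clean pages when the claim is refused. A sketch of the control flow implied by the lines above:

int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
			loff_t lstart, loff_t lend)
{
	/* Upgrade a non-exclusive opener to a temporary claim so newly
	 * written data cannot be discarded under an exclusive owner. */
	if (!(mode & BLK_OPEN_EXCL)) {
		int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL);
		if (err)
			goto invalidate;
	}

	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
	if (!(mode & BLK_OPEN_EXCL))
		bd_abort_claiming(bdev, truncate_bdev_range);
	return 0;

invalidate:
	/* Someone holds the device exclusively: only clean pages may go. */
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
					     lstart >> PAGE_SHIFT,
					     lend >> PAGE_SHIFT);
}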
125 static void set_init_blocksize(struct block_device *bdev)
127 unsigned int bsize = bdev_logical_block_size(bdev);
128 loff_t size = i_size_read(bdev->bd_inode);
135 bdev->bd_inode->i_blkbits = blksize_bits(bsize);
138 int set_blocksize(struct block_device *bdev, int size)
145 if (size < bdev_logical_block_size(bdev))
149 if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
150 sync_blockdev(bdev);
151 bdev->bd_inode->i_blkbits = blksize_bits(size);
152 kill_bdev(bdev);
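set_blocksize() refuses sizes below the device's logical block size or above PAGE_SIZE and, on an actual change, syncs and kills the old page cache because its buffers are sized for the previous block size. Filesystems usually reach it through sb_set_blocksize(); a sketch of that wrapper, modeled on the one defined next to these matches:

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;	/* invalid size for this device */
	/* set_blocksize() already flushed and dropped the stale cache,
	 * so just record the new geometry in the superblock. */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}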
182 int sync_blockdev_nowait(struct block_device *bdev)
184 if (!bdev)
186 return filemap_flush(bdev->bd_inode->i_mapping);
194 int sync_blockdev(struct block_device *bdev)
196 if (!bdev)
198 return filemap_write_and_wait(bdev->bd_inode->i_mapping);
202 int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
204 return filemap_write_and_wait_range(bdev->bd_inode->i_mapping,
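The three sync_blockdev*() variants matched here differ only in how much they wait: the _nowait form starts writeback via filemap_flush() and returns, while the other two block until the pages have reached the device. A hedged usage sketch (my_flush() is a hypothetical caller; SZ_1M is from <linux/sizes.h>):

static int my_flush(struct block_device *bdev)
{
	int ret;

	ret = sync_blockdev_nowait(bdev);	/* kick writeback, don't wait */
	if (ret)
		return ret;
	ret = sync_blockdev_range(bdev, 0, SZ_1M - 1);	/* wait, first 1 MiB only */
	if (ret)
		return ret;
	return sync_blockdev(bdev);		/* write out and wait on everything */
}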
211 * @bdev: block device to lock
221 int freeze_bdev(struct block_device *bdev)
226 mutex_lock(&bdev->bd_fsfreeze_mutex);
227 if (++bdev->bd_fsfreeze_count > 1)
230 sb = get_active_super(bdev);
240 bdev->bd_fsfreeze_count--;
243 bdev->bd_fsfreeze_sb = sb;
246 sync_blockdev(bdev);
248 mutex_unlock(&bdev->bd_fsfreeze_mutex);
255 * @bdev: block device to unlock
259 int thaw_bdev(struct block_device *bdev)
264 mutex_lock(&bdev->bd_fsfreeze_mutex);
265 if (!bdev->bd_fsfreeze_count)
269 if (--bdev->bd_fsfreeze_count > 0)
272 sb = bdev->bd_fsfreeze_sb;
281 bdev->bd_fsfreeze_count++;
283 bdev->bd_fsfreeze_sb = NULL;
285 mutex_unlock(&bdev->bd_fsfreeze_mutex);
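freeze_bdev() and thaw_bdev() are refcounted through bd_fsfreeze_count: only the first freeze actually freezes the active superblock (or, with no filesystem mounted, just syncs the device), and only the matching last thaw unfreezes it. A hedged caller sketch in the spirit of device-mapper's lock_fs()/unlock_fs(); take_snapshot() is hypothetical:

static int snapshot_quiesced(struct block_device *bdev)
{
	int ret;

	ret = freeze_bdev(bdev);	/* writes are drained after this */
	if (ret)
		return ret;

	take_snapshot(bdev);		/* hypothetical: device is quiescent */

	return thaw_bdev(bdev);		/* must pair with the freeze above */
}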
303 memset(&ei->bdev, 0, sizeof(ei->bdev));
309 struct block_device *bdev = I_BDEV(inode);
311 free_percpu(bdev->bd_stats);
312 kfree(bdev->bd_meta_info);
314 if (!bdev_is_partition(bdev)) {
315 if (bdev->bd_disk && bdev->bd_disk->bdi)
316 bdi_put(bdev->bd_disk->bdi);
317 kfree(bdev->bd_disk);
320 if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
321 blk_free_ext_minor(MINOR(bdev->bd_dev));
359 .name = "bdev",
378 panic("Cannot register bdev pseudo-fs");
381 panic("Cannot create bdev pseudo-fs");
387 struct block_device *bdev;
398 bdev = I_BDEV(inode);
399 mutex_init(&bdev->bd_fsfreeze_mutex);
400 spin_lock_init(&bdev->bd_size_lock);
401 mutex_init(&bdev->bd_holder_lock);
402 bdev->bd_partno = partno;
403 bdev->bd_inode = inode;
404 bdev->bd_queue = disk->queue;
406 bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio;
408 bdev->bd_has_submit_bio = false;
409 bdev->bd_stats = alloc_percpu(struct disk_stats);
410 if (!bdev->bd_stats) {
414 bdev->bd_disk = disk;
415 return bdev;
418 void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
420 spin_lock(&bdev->bd_size_lock);
421 i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
422 bdev->bd_nr_sectors = sectors;
423 spin_unlock(&bdev->bd_size_lock);
426 void bdev_add(struct block_device *bdev, dev_t dev)
428 if (bdev_stable_writes(bdev))
429 mapping_set_stable_writes(bdev->bd_inode->i_mapping);
430 bdev->bd_dev = dev;
431 bdev->bd_inode->i_rdev = dev;
432 bdev->bd_inode->i_ino = dev;
433 insert_inode_hash(bdev->bd_inode);
451 * @bdev: block device of interest
452 * @holder: holder trying to claim @bdev
455 * Test whether @bdev can be claimed by @holder.
458 * %true if @bdev can be claimed, %false otherwise.
460 static bool bd_may_claim(struct block_device *bdev, void *holder,
463 struct block_device *whole = bdev_whole(bdev);
467 if (bdev->bd_holder) {
471 if (bdev->bd_holder == holder) {
472 if (WARN_ON_ONCE(bdev->bd_holder_ops != hops))
483 if (whole != bdev &&
491 * @bdev: block device of interest
492 * @holder: holder trying to claim @bdev
495 * Claim @bdev. This function fails if @bdev is already claimed by another
500 * 0 if @bdev can be claimed, -EBUSY otherwise.
502 int bd_prepare_to_claim(struct block_device *bdev, void *holder,
505 struct block_device *whole = bdev_whole(bdev);
512 if (!bd_may_claim(bdev, holder, hops)) {
547 * @bdev: block device of interest
548 * @holder: holder that has claimed @bdev
554 static void bd_finish_claiming(struct block_device *bdev, void *holder,
557 struct block_device *whole = bdev_whole(bdev);
560 BUG_ON(!bd_may_claim(bdev, holder, hops));
567 bdev->bd_holders++;
568 mutex_lock(&bdev->bd_holder_lock);
569 bdev->bd_holder = holder;
570 bdev->bd_holder_ops = hops;
571 mutex_unlock(&bdev->bd_holder_lock);
578 * @bdev: block device of interest
579 * @holder: holder that has claimed @bdev
585 void bd_abort_claiming(struct block_device *bdev, void *holder)
588 bd_clear_claiming(bdev_whole(bdev), holder);
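Together these matches describe the two-phase exclusive-claim protocol: bd_prepare_to_claim() installs a provisional claim on the whole device and fails with -EBUSY if bd_may_claim() says another holder owns it; every successful prepare must then be resolved exactly once, either by the open path via bd_finish_claiming() (static here, called from blkdev_get_by_dev()) or by bd_abort_claiming(). truncate_bdev_range() above is an in-tree user; a condensed sketch of the pattern, using the function itself as the holder cookie the way that caller does:

static int do_guarded_work(struct block_device *bdev)
{
	int err = bd_prepare_to_claim(bdev, do_guarded_work, NULL);
	if (err)
		return err;	/* -EBUSY: an exclusive holder exists */

	/* ... work that must not race with exclusive openers ... */

	bd_abort_claiming(bdev, do_guarded_work);	/* release the claim */
	return 0;
}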
593 static void bd_end_claim(struct block_device *bdev, void *holder)
595 struct block_device *whole = bdev_whole(bdev);
603 WARN_ON_ONCE(bdev->bd_holder != holder);
604 WARN_ON_ONCE(--bdev->bd_holders < 0);
606 if (!bdev->bd_holders) {
607 mutex_lock(&bdev->bd_holder_lock);
608 bdev->bd_holder = NULL;
609 bdev->bd_holder_ops = NULL;
610 mutex_unlock(&bdev->bd_holder_lock);
611 if (bdev->bd_write_holder)
623 disk_unblock_events(bdev->bd_disk);
624 bdev->bd_write_holder = false;
628 static void blkdev_flush_mapping(struct block_device *bdev)
630 WARN_ON_ONCE(bdev->bd_holders);
631 sync_blockdev(bdev);
632 kill_bdev(bdev);
633 bdev_write_inode(bdev);
636 static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
638 struct gendisk *disk = bdev->bd_disk;
652 if (!atomic_read(&bdev->bd_openers))
653 set_init_blocksize(bdev);
656 atomic_inc(&bdev->bd_openers);
660 static void blkdev_put_whole(struct block_device *bdev)
662 if (atomic_dec_and_test(&bdev->bd_openers))
663 blkdev_flush_mapping(bdev);
664 if (bdev->bd_disk->fops->release)
665 bdev->bd_disk->fops->release(bdev->bd_disk);
706 struct block_device *bdev;
721 bdev = &BDEV_I(inode)->bdev;
722 if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
723 bdev = NULL;
725 return bdev;
728 void blkdev_put_no_open(struct block_device *bdev)
730 put_device(&bdev->bd_device);
758 struct block_device *bdev;
769 bdev = blkdev_get_no_open(dev);
770 if (!bdev)
772 disk = bdev->bd_disk;
776 ret = bd_prepare_to_claim(bdev, holder, hops);
794 if (bdev_is_partition(bdev))
795 ret = blkdev_get_part(bdev, mode);
797 ret = blkdev_get_whole(bdev, mode);
801 bd_finish_claiming(bdev, holder, hops);
810 if ((mode & BLK_OPEN_WRITE) && !bdev->bd_write_holder &&
812 bdev->bd_write_holder = true;
820 return bdev;
825 bd_abort_claiming(bdev, holder);
829 blkdev_put_no_open(bdev);
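Putting the open path together: blkdev_get_by_dev() looks the device up without opening it (blkdev_get_no_open()), takes the claim if a holder is given, opens the whole device or partition, and unwinds through bd_abort_claiming() and blkdev_put_no_open() on failure. A hedged sketch of the classic consumer, matching the signatures in the kernel version these lines come from:

static int my_attach(dev_t dev)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
				 my_attach /* holder cookie */, NULL);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/* ... submit I/O against bdev ... */

	blkdev_put(bdev, my_attach);	/* same holder as at open time */
	return 0;
}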
838 struct block_device *bdev;
842 bdev = blkdev_get_by_dev(dev, mode, holder, hops);
843 if (IS_ERR(bdev)) {
845 return ERR_CAST(bdev);
847 handle->bdev = bdev;
873 struct block_device *bdev;
881 bdev = blkdev_get_by_dev(dev, mode, holder, hops);
882 if (!IS_ERR(bdev) && (mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
883 blkdev_put(bdev, holder);
887 return bdev;
904 bdev_read_only(handle->bdev)) {
913 void blkdev_put(struct block_device *bdev, void *holder)
915 struct gendisk *disk = bdev->bd_disk;
924 if (atomic_read(&bdev->bd_openers) == 1)
925 sync_blockdev(bdev);
929 bd_end_claim(bdev, holder);
938 if (bdev_is_partition(bdev))
939 blkdev_put_part(bdev);
941 blkdev_put_whole(bdev);
945 blkdev_put_no_open(bdev);
951 blkdev_put(handle->bdev, handle->holder);
998 * @bdev: block device to operate on
1009 void bdev_mark_dead(struct block_device *bdev, bool surprise)
1011 mutex_lock(&bdev->bd_holder_lock);
1012 if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
1013 bdev->bd_holder_ops->mark_dead(bdev, surprise);
1015 sync_blockdev(bdev);
1016 mutex_unlock(&bdev->bd_holder_lock);
1018 invalidate_bdev(bdev);
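bdev_mark_dead() is why blk_holder_ops exists: when the device goes away, the block layer calls the holder back under bd_holder_lock so a filesystem or other owner can shut down before the caches are invalidated. A hedged sketch of a holder wiring this up; my_mark_dead() is hypothetical:

static void my_mark_dead(struct block_device *bdev, bool surprise)
{
	/* Runs with bd_holder_lock held; 'surprise' distinguishes
	 * hot-unplug from an orderly removal. */
	pr_warn("%pg went away (surprise=%d)\n", bdev, surprise);
}

static const struct blk_holder_ops my_holder_ops = {
	.mark_dead = my_mark_dead,
};

/* Passed as 'hops' when opening:
 *	blkdev_get_by_dev(dev, mode, holder, &my_holder_ops);
 */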
1036 struct block_device *bdev;
1057 bdev = I_BDEV(inode);
1059 mutex_lock(&bdev->bd_disk->open_mutex);
1060 if (!atomic_read(&bdev->bd_openers)) {
1073 mutex_unlock(&bdev->bd_disk->open_mutex);
1090 struct block_device *bdev;
1092 bdev = blkdev_get_no_open(inode->i_rdev);
1093 if (!bdev)
1096 stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
1097 stat->dio_offset_align = bdev_logical_block_size(bdev);
1100 blkdev_put_no_open(bdev);
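The final matches fill in the STATX_DIOALIGN fields for a raw block device: the memory alignment required for direct-I/O buffers comes from the queue's DMA alignment, and the file-offset alignment from the logical block size. Userspace reads these via statx(2); a hedged sketch (Linux 6.1+ headers assumed):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2 ||
	    statx(AT_FDCWD, argv[1], 0, STATX_DIOALIGN, &stx) != 0) {
		perror("statx");
		return 1;
	}
	printf("dio_mem_align=%u dio_offset_align=%u\n",
	       stx.stx_dio_mem_align, stx.stx_dio_offset_align);
	return 0;
}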