Lines Matching refs:bdev

40 	struct block_device bdev;
53 return &BDEV_I(inode)->bdev;
57 static void bdev_write_inode(struct block_device *bdev)
59 struct inode *inode = bdev->bd_inode;
70 bdevname(bdev, name), ret);
78 static void kill_bdev(struct block_device *bdev)
80 struct address_space *mapping = bdev->bd_inode->i_mapping;
90 void invalidate_bdev(struct block_device *bdev)
92 struct address_space *mapping = bdev->bd_inode->i_mapping;
99 /* 99% of the time, we don't need to flush the cleancache on the bdev.
107 * Drop all buffers & page cache for given bdev range. This function bails
108 * with error if bdev has other exclusive owner (such as filesystem).
110 int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
122 claimed_bdev = bdev->bd_contains;
123 err = bd_prepare_to_claim(bdev, claimed_bdev,
128 truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
130 bd_abort_claiming(bdev, claimed_bdev, truncate_bdev_range);
138 return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
144 static void set_init_blocksize(struct block_device *bdev)
146 unsigned int bsize = bdev_logical_block_size(bdev);
147 loff_t size = i_size_read(bdev->bd_inode);
154 bdev->bd_inode->i_blkbits = blksize_bits(bsize);
157 int set_blocksize(struct block_device *bdev, int size)
164 if (size < bdev_logical_block_size(bdev))
168 if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
169 sync_blockdev(bdev);
170 bdev->bd_inode->i_blkbits = blksize_bits(size);
171 kill_bdev(bdev);
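
The set_blocksize() matches above (lines 157-171) show the constraints: the new size must be at least the device's logical block size, and changing i_blkbits forces a sync_blockdev() plus kill_bdev() to drop stale buffers. A minimal sketch of a caller, assuming a hypothetical filesystem fill_super path; sb_set_blocksize() is the usual wrapper that ends up calling set_blocksize(sb->s_bdev, size):

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    /* sketch: a filesystem picking its block size at mount time */
    static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
    {
            if (!sb_set_blocksize(sb, 4096))
                    return -EINVAL; /* fails when the logical block size exceeds 4096 */
            /* ... read on-disk metadata with sb_bread() at the new block size ... */
            return 0;
    }
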
241 struct block_device *bdev = I_BDEV(bdev_file_inode(file));
250 (bdev_logical_block_size(bdev) - 1))
263 bio_set_dev(&bio, bdev);
294 !blk_poll(bdev_get_queue(bdev), qc, true))
329 struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
330 struct request_queue *q = bdev_get_queue(bdev);
379 struct block_device *bdev = I_BDEV(inode);
390 (bdev_logical_block_size(bdev) - 1))
416 bio_set_dev(bio, bdev);
490 !blk_poll(bdev_get_queue(bdev), qc, true))
524 int __sync_blockdev(struct block_device *bdev, int wait)
526 if (!bdev)
529 return filemap_flush(bdev->bd_inode->i_mapping);
530 return filemap_write_and_wait(bdev->bd_inode->i_mapping);
537 int sync_blockdev(struct block_device *bdev)
539 return __sync_blockdev(bdev, 1);
548 int fsync_bdev(struct block_device *bdev)
550 struct super_block *sb = get_super(bdev);
556 return sync_blockdev(bdev);
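
__sync_blockdev() (lines 524-530) is a filemap flush that optionally waits; sync_blockdev() (537-539) always waits, and fsync_bdev() (548-556) syncs the mounted filesystem when there is one, falling back to sync_blockdev(). A hedged sketch of the usual "flush before a destructive operation" pattern:

    #include <linux/blkdev.h>

    /* sketch: push dirty page-cache data for the device out before doing
     * something destructive; fsync_bdev() syncs the mounted filesystem if
     * there is one, else just the block device's own page cache */
    static int example_flush_before_destroy(struct block_device *bdev)
    {
            return fsync_bdev(bdev);
    }
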
562 * @bdev: blockdevice to lock
572 struct super_block *freeze_bdev(struct block_device *bdev)
577 mutex_lock(&bdev->bd_fsfreeze_mutex);
578 if (++bdev->bd_fsfreeze_count > 1) {
584 sb = get_super(bdev);
587 mutex_unlock(&bdev->bd_fsfreeze_mutex);
591 sb = get_active_super(bdev);
600 bdev->bd_fsfreeze_count--;
601 mutex_unlock(&bdev->bd_fsfreeze_mutex);
606 sync_blockdev(bdev);
607 mutex_unlock(&bdev->bd_fsfreeze_mutex);
614 * @bdev: blockdevice to unlock
619 int thaw_bdev(struct block_device *bdev, struct super_block *sb)
623 mutex_lock(&bdev->bd_fsfreeze_mutex);
624 if (!bdev->bd_fsfreeze_count)
628 if (--bdev->bd_fsfreeze_count > 0)
639 bdev->bd_fsfreeze_count++;
641 mutex_unlock(&bdev->bd_fsfreeze_mutex);
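
freeze_bdev() (lines 572-607) is reference counted through bd_fsfreeze_count, so nested freezes only freeze the filesystem once, and thaw_bdev() (619-641) only thaws on the final call. A snapshot-style caller, sketched; example_take_snapshot() is a hypothetical callback:

    #include <linux/blkdev.h>

    /* sketch: quiesce whatever filesystem sits on @bdev around a snapshot */
    static int example_snapshot(struct block_device *bdev)
    {
            struct super_block *sb;
            int err;

            sb = freeze_bdev(bdev);                 /* NULL if nothing is mounted */
            if (IS_ERR(sb))
                    return PTR_ERR(sb);

            err = example_take_snapshot(bdev);      /* device is stable here */

            thaw_bdev(bdev, sb);                    /* tolerates a NULL sb */
            return err;
    }
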
701 struct block_device *bdev = I_BDEV(bd_inode);
713 error = blkdev_issue_flush(bdev, GFP_KERNEL);
723 * @bdev: The device to read the page from
737 int bdev_read_page(struct block_device *bdev, sector_t sector,
740 const struct block_device_operations *ops = bdev->bd_disk->fops;
743 if (!ops->rw_page || bdev_get_integrity(bdev))
746 result = blk_queue_enter(bdev->bd_disk->queue, 0);
749 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
751 blk_queue_exit(bdev->bd_disk->queue);
757 * @bdev: The device to write the page to
774 int bdev_write_page(struct block_device *bdev, sector_t sector,
778 const struct block_device_operations *ops = bdev->bd_disk->fops;
780 if (!ops->rw_page || bdev_get_integrity(bdev))
782 result = blk_queue_enter(bdev->bd_disk->queue, 0);
787 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
795 blk_queue_exit(bdev->bd_disk->queue);
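
bdev_read_page() and bdev_write_page() (lines 737-795) only work when the driver provides ->rw_page and the device has no integrity profile, so callers must treat them as an optimization with a bio-based fallback. A sketch of that pattern; example_submit_read_bio() is a hypothetical fallback helper:

    #include <linux/blkdev.h>

    /* sketch: prefer the synchronous ->rw_page fast path, fall back to a bio */
    static int example_read_sector(struct block_device *bdev, sector_t sector,
                                   struct page *page)
    {
            int ret = bdev_read_page(bdev, sector, page);

            if (ret)        /* e.g. -EOPNOTSUPP when the driver has no ->rw_page */
                    ret = example_submit_read_bio(bdev, sector, page);
            return ret;
    }
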
822 struct block_device *bdev = &ei->bdev;
824 memset(bdev, 0, sizeof(*bdev));
825 mutex_init(&bdev->bd_mutex);
827 INIT_LIST_HEAD(&bdev->bd_holder_disks);
829 bdev->bd_bdi = &noop_backing_dev_info;
832 mutex_init(&bdev->bd_fsfreeze_mutex);
837 struct block_device *bdev = &BDEV_I(inode)->bdev;
843 if (bdev->bd_bdi != &noop_backing_dev_info) {
844 bdi_put(bdev->bd_bdi);
845 bdev->bd_bdi = &noop_backing_dev_info;
868 .name = "bdev",
887 panic("Cannot register bdev pseudo-fs");
890 panic("Cannot create bdev pseudo-fs");
906 return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
911 BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
917 struct block_device *bdev;
926 bdev = &BDEV_I(inode)->bdev;
929 spin_lock_init(&bdev->bd_size_lock);
930 bdev->bd_contains = NULL;
931 bdev->bd_super = NULL;
932 bdev->bd_inode = inode;
933 bdev->bd_part_count = 0;
936 inode->i_bdev = bdev;
941 return bdev;
946 * @bdev: Block device to grab a reference to.
948 struct block_device *bdgrab(struct block_device *bdev)
950 ihold(bdev->bd_inode);
951 return bdev;
973 void bdput(struct block_device *bdev)
975 iput(bdev->bd_inode);
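
bdgrab() (lines 948-951) takes an extra reference by pinning the backing bdev inode, and bdput() (973-975) drops it; they pair like igrab()/iput(). A sketch of keeping a block_device alive across a deferred work item (the example_work structure and helpers are illustrative):

    #include <linux/fs.h>
    #include <linux/blkdev.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    /* sketch: pin a block_device for as long as a deferred work item needs it */
    struct example_work {
            struct work_struct work;
            struct block_device *bdev;
    };

    static void example_work_fn(struct work_struct *work)
    {
            struct example_work *ew = container_of(work, struct example_work, work);

            /* ... use ew->bdev ... */
            bdput(ew->bdev);        /* drop the reference taken below */
            kfree(ew);
    }

    static void example_defer(struct example_work *ew, struct block_device *bdev)
    {
            ew->bdev = bdgrab(bdev);        /* extra reference via the bdev inode */
            INIT_WORK(&ew->work, example_work_fn);
            schedule_work(&ew->work);
    }
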
982 struct block_device *bdev;
985 bdev = inode->i_bdev;
986 if (bdev && !inode_unhashed(bdev->bd_inode)) {
987 bdgrab(bdev);
989 return bdev;
999 if (bdev)
1002 bdev = bdget(inode->i_rdev);
1003 if (bdev) {
1012 bdgrab(bdev);
1013 inode->i_bdev = bdev;
1014 inode->i_mapping = bdev->bd_inode->i_mapping;
1018 return bdev;
1025 struct block_device *bdev = NULL;
1029 bdev = inode->i_bdev;
1034 if (bdev)
1035 bdput(bdev);
1040 * @bdev: block device of interest
1041 * @whole: whole block device containing @bdev, may equal @bdev
1042 * @holder: holder trying to claim @bdev
1044 * Test whether @bdev can be claimed by @holder.
1050 * %true if @bdev can be claimed, %false otherwise.
1052 static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
1055 if (bdev->bd_holder == holder)
1057 else if (bdev->bd_holder != NULL)
1059 else if (whole == bdev)
1072 * @bdev: block device of interest
1073 * @whole: the whole device containing @bdev, may equal @bdev
1074 * @holder: holder trying to claim @bdev
1076 * Claim @bdev. This function fails if @bdev is already claimed by another
1081 * 0 if @bdev can be claimed, -EBUSY otherwise.
1083 int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole,
1089 if (!bd_may_claim(bdev, whole, holder)) {
1113 static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
1115 struct gendisk *disk = get_gendisk(bdev->bd_dev, partno);
1120 * Now that we hold gendisk reference we make sure bdev we looked up is
1123 * unhashed bdev with newly created gendisk could lead to two bdevs
1127 if (inode_unhashed(bdev->bd_inode)) {
1145 * @bdev: block device of interest
1147 * @holder: holder that has claimed @bdev
1152 static void bd_finish_claiming(struct block_device *bdev,
1156 BUG_ON(!bd_may_claim(bdev, whole, holder));
1163 bdev->bd_holders++;
1164 bdev->bd_holder = holder;
1171 * @bdev: block device of interest
1173 * @holder: holder that has claimed @bdev
1179 void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
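
The claiming helpers (lines 1052-1179) implement exclusive ownership: bd_prepare_to_claim() waits or fails with -EBUSY, bd_finish_claiming() records the holder, and bd_abort_claiming() backs a pending claim out. truncate_bdev_range() (lines 110-138) uses the prepare/abort pair as a "fail if somebody owns this exclusively" guard; a fragment sketching that same pattern (claim_token is any stable cookie, the real code uses the address of truncate_bdev_range itself):

            /* sketch, mirroring truncate_bdev_range(): claim the device briefly
             * so an existing exclusive holder (e.g. a mounted filesystem) makes
             * this operation bail out */
            if (!(mode & FMODE_EXCL)) {
                    err = bd_prepare_to_claim(bdev, bdev->bd_contains, claim_token);
                    if (err)
                            return err;             /* -EBUSY: owned exclusively */
            }

            /* ... operate on the device's page cache ... */

            if (!(mode & FMODE_EXCL))
                    bd_abort_claiming(bdev, bdev->bd_contains, claim_token);
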
1195 static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
1200 list_for_each_entry(holder, &bdev->bd_holder_disks, list)
1217 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
1218 * @bdev: the claimed slave bdev
1225 * - from "slaves" directory of the holder @disk to the claimed @bdev
1226 * - from "holders" directory of the @bdev to the holder @disk
1234 * The caller must have claimed @bdev before calling this function and
1235 * ensure that both @bdev and @disk are valid during the creation and
1244 int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
1249 mutex_lock(&bdev->bd_mutex);
1251 WARN_ON_ONCE(!bdev->bd_holder);
1254 if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
1257 holder = bd_find_holder_disk(bdev, disk);
1273 ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
1277 ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
1281 * bdev could be deleted beneath us which would implicitly destroy
1284 kobject_get(bdev->bd_part->holder_dir);
1286 list_add(&holder->list, &bdev->bd_holder_disks);
1290 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
1294 mutex_unlock(&bdev->bd_mutex);
1301 * @bdev: the claimed slave bdev
1309 void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
1313 mutex_lock(&bdev->bd_mutex);
1315 holder = bd_find_holder_disk(bdev, disk);
1318 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
1319 del_symlink(bdev->bd_part->holder_dir,
1321 kobject_put(bdev->bd_part->holder_dir);
1326 mutex_unlock(&bdev->bd_mutex);
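
bd_link_disk_holder() (lines 1244-1294) and bd_unlink_disk_holder() (1309-1326) maintain the sysfs slaves/ and holders/ symlinks for stacking drivers; the caller must already hold a claim on the slave bdev. Roughly how a dm/md-style driver adopts a component device, sketched (stack_disk and dev are assumptions):

    #include <linux/blkdev.h>
    #include <linux/genhd.h>

    /* sketch: a stacking driver adopting a component device */
    static struct block_device *example_add_component(struct gendisk *stack_disk,
                                                      dev_t dev)
    {
            struct block_device *bdev;
            int ret;

            bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
                                     stack_disk);
            if (IS_ERR(bdev))
                    return bdev;

            ret = bd_link_disk_holder(bdev, stack_disk);    /* slaves/ + holders/ links */
            if (ret) {
                    blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
                    return ERR_PTR(ret);
            }
            return bdev;
    }

Teardown mirrors this: bd_unlink_disk_holder() followed by blkdev_put() with the same mode.
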
1332 * check_disk_size_change - checks for disk size change and adjusts bdev size.
1334 * @bdev: struct bdev to adjust.
1337 * This routine checks to see if the bdev size does not match the disk size
1338 * and adjusts it if it differs. When shrinking the bdev size, all its caches
1342 struct block_device *bdev, bool verbose)
1346 spin_lock(&bdev->bd_size_lock);
1348 bdev_size = i_size_read(bdev->bd_inode);
1355 i_size_write(bdev->bd_inode, disk_size);
1357 spin_unlock(&bdev->bd_size_lock);
1360 if (__invalidate_device(bdev, false))
1367 * revalidate_disk_size - checks for disk size change and adjusts bdev size.
1371 * This routine checks to see if the bdev size does not match the disk size
1372 * and adjusts it if it differs. When shrinking the bdev size, all its caches
1377 struct block_device *bdev;
1380 * Hidden disks don't have associated bdev so there's no point in
1386 bdev = bdget_disk(disk, 0);
1387 if (bdev) {
1388 check_disk_size_change(disk, bdev, verbose);
1389 bdput(bdev);
1394 void bd_set_nr_sectors(struct block_device *bdev, sector_t sectors)
1396 spin_lock(&bdev->bd_size_lock);
1397 i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
1398 spin_unlock(&bdev->bd_size_lock);
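
check_disk_size_change() (lines 1342-1360) reconciles the bdev inode size with the disk capacity and invalidates caches on shrink; revalidate_disk_size() (1377-1389) is the driver-facing wrapper (skipping hidden disks), and bd_set_nr_sectors() (1394-1398) is the raw setter. A sketch of the driver-side resize pattern in this kernel generation:

    #include <linux/genhd.h>

    /* sketch: driver-side handling of a reported capacity change */
    static void example_capacity_changed(struct gendisk *disk, sector_t new_sectors)
    {
            set_capacity(disk, new_sectors);
            revalidate_disk_size(disk, true);   /* resizes bd_inode, logs the change */
    }
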
1402 static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
1404 int bdev_disk_changed(struct block_device *bdev, bool invalidate)
1406 struct gendisk *disk = bdev->bd_disk;
1409 lockdep_assert_held(&bdev->bd_mutex);
1415 ret = blk_drop_partitions(bdev);
1438 check_disk_size_change(disk, bdev, !invalidate);
1441 ret = blk_add_partitions(disk, bdev);
1467 static int __blkdev_get(struct block_device *bdev, fmode_t mode, void *holder,
1479 disk = bdev_get_gendisk(bdev, &partno);
1496 claiming = bdev;
1497 ret = bd_prepare_to_claim(bdev, claiming, holder);
1503 mutex_lock_nested(&bdev->bd_mutex, for_part);
1504 if (!bdev->bd_openers) {
1506 bdev->bd_disk = disk;
1507 bdev->bd_contains = bdev;
1508 bdev->bd_partno = partno;
1512 bdev->bd_part = disk_get_part(disk, partno);
1513 if (!bdev->bd_part)
1518 ret = disk->fops->open(bdev, mode);
1528 bd_set_nr_sectors(bdev, get_capacity(disk));
1529 set_init_blocksize(bdev);
1540 bdev_disk_changed(bdev, ret == -ENOMEDIUM);
1549 bdev->bd_contains = bdgrab(whole);
1550 bdev->bd_part = disk_get_part(disk, partno);
1552 !bdev->bd_part || !bdev->bd_part->nr_sects) {
1556 bd_set_nr_sectors(bdev, bdev->bd_part->nr_sects);
1557 set_init_blocksize(bdev);
1560 if (bdev->bd_bdi == &noop_backing_dev_info)
1561 bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
1563 if (bdev->bd_contains == bdev) {
1565 if (bdev->bd_disk->fops->open)
1566 ret = bdev->bd_disk->fops->open(bdev, mode);
1570 bdev_disk_changed(bdev, ret == -ENOMEDIUM);
1575 bdev->bd_openers++;
1577 bdev->bd_part_count++;
1579 bd_finish_claiming(bdev, claiming, holder);
1587 if (claiming && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
1589 bdev->bd_write_holder = true;
1592 mutex_unlock(&bdev->bd_mutex);
1605 disk_put_part(bdev->bd_part);
1606 bdev->bd_disk = NULL;
1607 bdev->bd_part = NULL;
1608 if (bdev != bdev->bd_contains)
1609 __blkdev_put(bdev->bd_contains, mode, 1);
1610 bdev->bd_contains = NULL;
1613 bd_abort_claiming(bdev, claiming, holder);
1614 mutex_unlock(&bdev->bd_mutex);
1629 * @bdev: block_device to open
1633 * Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is
1637 * On success, the reference count of @bdev is unchanged. On failure,
1638 * @bdev is put.
1646 static int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
1654 ret = devcgroup_inode_permission(bdev->bd_inode, perm);
1658 ret = __blkdev_get(bdev, mode, holder, 0);
1664 bdput(bdev);
1688 struct block_device *bdev;
1691 bdev = lookup_bdev(path);
1692 if (IS_ERR(bdev))
1693 return bdev;
1695 err = blkdev_get(bdev, mode, holder);
1699 if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
1700 blkdev_put(bdev, mode);
1704 return bdev;
1732 struct block_device *bdev;
1735 bdev = bdget(dev);
1736 if (!bdev)
1739 err = blkdev_get(bdev, mode, holder);
1743 return bdev;
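
blkdev_get() (lines 1646-1664) puts the bdev reference on failure, which is why the lookup helpers blkdev_get_by_path() (1688-1704) and blkdev_get_by_dev() (1732-1743) have such short error paths; by_path additionally rejects write opens of read-only devices. A hedged open/close sketch (the device path and holder cookie are illustrative):

    #include <linux/blkdev.h>

    /* sketch: exclusive open by path, paired with the matching blkdev_put() */
    static int example_use_device(void *holder)
    {
            const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
            struct block_device *bdev;

            bdev = blkdev_get_by_path("/dev/sdb1", mode, holder);
            if (IS_ERR(bdev))
                    return PTR_ERR(bdev);

            /* ... the FMODE_EXCL claim keeps other exclusive openers out ... */

            blkdev_put(bdev, mode);         /* must pass the same mode used to open */
            return 0;
    }
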
1749 struct block_device *bdev;
1768 bdev = bd_acquire(inode);
1769 if (bdev == NULL)
1772 filp->f_mapping = bdev->bd_inode->i_mapping;
1775 return blkdev_get(bdev, filp->f_mode, filp);
1778 static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
1780 struct gendisk *disk = bdev->bd_disk;
1790 if (bdev->bd_openers == 1)
1791 sync_blockdev(bdev);
1793 mutex_lock_nested(&bdev->bd_mutex, for_part);
1795 bdev->bd_part_count--;
1797 if (!--bdev->bd_openers) {
1798 WARN_ON_ONCE(bdev->bd_holders);
1799 sync_blockdev(bdev);
1800 kill_bdev(bdev);
1802 bdev_write_inode(bdev);
1804 if (bdev->bd_contains == bdev) {
1808 if (!bdev->bd_openers) {
1809 disk_put_part(bdev->bd_part);
1810 bdev->bd_part = NULL;
1811 bdev->bd_disk = NULL;
1812 if (bdev != bdev->bd_contains)
1813 victim = bdev->bd_contains;
1814 bdev->bd_contains = NULL;
1818 mutex_unlock(&bdev->bd_mutex);
1819 bdput(bdev);
1824 void blkdev_put(struct block_device *bdev, fmode_t mode)
1826 mutex_lock(&bdev->bd_mutex);
1838 WARN_ON_ONCE(--bdev->bd_holders < 0);
1839 WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);
1842 if ((bdev_free = !bdev->bd_holders))
1843 bdev->bd_holder = NULL;
1844 if (!bdev->bd_contains->bd_holders)
1845 bdev->bd_contains->bd_holder = NULL;
1853 if (bdev_free && bdev->bd_write_holder) {
1854 disk_unblock_events(bdev->bd_disk);
1855 bdev->bd_write_holder = false;
1864 disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);
1866 mutex_unlock(&bdev->bd_mutex);
1868 __blkdev_put(bdev, mode, 0);
1874 struct block_device *bdev = I_BDEV(bdev_file_inode(filp));
1875 blkdev_put(bdev, filp->f_mode);
1881 struct block_device *bdev = I_BDEV(bdev_file_inode(file));
1893 return blkdev_ioctl(bdev, mode, cmd, arg);
1992 struct block_device *bdev = I_BDEV(bdev_file_inode(file));
2002 isize = i_size_read(bdev->bd_inode);
2016 if ((start | len) & (bdev_logical_block_size(bdev) - 1))
2026 error = truncate_bdev_range(bdev, file->f_mode, start, end);
2030 error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
2034 error = truncate_bdev_range(bdev, file->f_mode, start, end);
2038 error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
2042 error = truncate_bdev_range(bdev, file->f_mode, start, end);
2046 error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
2060 return truncate_bdev_range(bdev, file->f_mode, start, end);
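
The fallocate handler (lines 1992-2060) maps zero-range and punch-hole requests onto blkdev_issue_zeroout()/blkdev_issue_discard() after dropping the affected page cache with truncate_bdev_range(), and it insists on logical-block alignment (line 2016). From userspace the discard case looks like this sketch, assuming a block-device fd opened read-write:

    /* sketch: userspace view of the blkdev_fallocate() discard path above */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/falloc.h>

    static int discard_range(int fd, off_t start, off_t len)
    {
            /* start and len must be multiples of the logical block size,
             * otherwise blkdev_fallocate() returns -EINVAL */
            return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                             start, len);
    }
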
2091 struct block_device *bdev;
2111 bdev = bd_acquire(inode);
2112 if (!bdev)
2116 return bdev;
2118 bdev = ERR_PTR(error);
2123 int __invalidate_device(struct block_device *bdev, bool kill_dirty)
2125 struct super_block *sb = get_super(bdev);
2139 invalidate_bdev(bdev);
2151 struct block_device *bdev;
2172 bdev = I_BDEV(inode);
2174 mutex_lock(&bdev->bd_mutex);
2175 if (bdev->bd_openers)
2176 func(bdev, arg);
2177 mutex_unlock(&bdev->bd_mutex);