Lines matching refs:sb in fs/super.c (each entry: source line number, then the matching line)

42 static int thaw_super_locked(struct super_block *sb, enum freeze_holder who);
53 static inline void __super_lock(struct super_block *sb, bool excl)
56 down_write(&sb->s_umount);
58 down_read(&sb->s_umount);
61 static inline void super_unlock(struct super_block *sb, bool excl)
64 up_write(&sb->s_umount);
66 up_read(&sb->s_umount);
69 static inline void __super_lock_excl(struct super_block *sb)
71 __super_lock(sb, true);
74 static inline void super_unlock_excl(struct super_block *sb)
76 super_unlock(sb, true);
79 static inline void super_unlock_shared(struct super_block *sb)
81 super_unlock(sb, false);
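
These helpers are thin wrappers around sb->s_umount. A caller that needs brief exclusive access pairs the excl variants around its critical section; a minimal sketch (the function and the flag update are hypothetical, real callers appear later in this listing):

        static void example_set_flag(struct super_block *sb, unsigned long flag)
        {
                __super_lock_excl(sb);          /* down_write(&sb->s_umount) */
                sb->s_flags |= flag;
                super_unlock_excl(sb);          /* up_write(&sb->s_umount) */
        }
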
84 static inline bool wait_born(struct super_block *sb)
92 flags = smp_load_acquire(&sb->s_flags);
98 * @sb: superblock to wait for
106 * The caller must have acquired a temporary reference on @sb->s_count.
111 static __must_check bool super_lock(struct super_block *sb, bool excl)
114 lockdep_assert_not_held(&sb->s_umount);
117 __super_lock(sb, excl);
121 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
124 if (sb->s_flags & SB_DYING)
128 if (sb->s_flags & SB_BORN)
131 super_unlock(sb, excl);
134 wait_var_event(&sb->s_flags, wait_born(sb));
138 * Just reacquire @sb->s_umount for the caller.
143 /* wait and acquire read-side of @sb->s_umount */
144 static inline bool super_lock_shared(struct super_block *sb)
146 return super_lock(sb, false);
149 /* wait and acquire write-side of @sb->s_umount */
150 static inline bool super_lock_excl(struct super_block *sb)
152 return super_lock(sb, true);
157 static void super_wake(struct super_block *sb, unsigned int flag)
167 smp_store_release(&sb->s_flags, sb->s_flags | flag);
174 wake_up_var(&sb->s_flags);
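
super_wake() and wait_born() form a store-release/load-acquire pair: the waker publishes the new flag before waking, and the waiter re-reads s_flags with acquire semantics inside wait_var_event(). Stripped to the bare protocol (hedged sketch; the real functions add sanity checks around these lines):

        /* Waker (super_wake): publish SB_BORN, then wake anyone in super_lock(). */
        smp_store_release(&sb->s_flags, sb->s_flags | SB_BORN);
        smp_mb();       /* pairs with the barrier in the wait_var_event() machinery */
        wake_up_var(&sb->s_flags);

        /* Waiter (wait_born, evaluated by wait_var_event() in super_lock()): */
        flags = smp_load_acquire(&sb->s_flags);
        return flags & (SB_BORN | SB_DYING);
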
178 * One thing we have to be careful of with a per-sb shrinker is that we don't
187 struct super_block *sb;
194 sb = container_of(shrink, struct super_block, s_shrink);
203 if (!super_trylock_shared(sb))
206 if (sb->s_op->nr_cached_objects)
207 fs_objects = sb->s_op->nr_cached_objects(sb, sc);
209 inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
210 dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
228 freed = prune_dcache_sb(sb, sc);
230 freed += prune_icache_sb(sb, sc);
234 freed += sb->s_op->free_cached_objects(sb, sc);
237 super_unlock_shared(sb);
244 struct super_block *sb;
247 sb = container_of(shrink, struct super_block, s_shrink);
263 if (!(sb->s_flags & SB_BORN))
267 if (sb->s_op && sb->s_op->nr_cached_objects)
268 total_objects = sb->s_op->nr_cached_objects(sb, sc);
270 total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
271 total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
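
super_cache_count() and super_cache_scan() are the two halves of the per-sb struct shrinker: report how many dentries and inodes are reclaimable, then free a requested share of them. A simplified sketch of the scan side (the nr_cached_objects/free_cached_objects hooks and the proportional split of sc->nr_to_scan are elided):

        static unsigned long scan_sketch(struct shrinker *shrink,
                                         struct shrink_control *sc)
        {
                struct super_block *sb =
                        container_of(shrink, struct super_block, s_shrink);
                unsigned long freed;

                /* Trylock: reclaim must never block behind umount. */
                if (!super_trylock_shared(sb))
                        return SHRINK_STOP;

                freed = prune_dcache_sb(sb, sc);        /* dentries first */
                freed += prune_icache_sb(sb, sc);       /* then the inodes they pinned */

                super_unlock_shared(sb);
                return freed;
        }
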
340 * When it cannot find a suitable sb, it allocates a new
391 if (prealloc_shrinker(&s->s_shrink, "sb-%s", type->name))
425 * @sb: superblock in question
430 void put_super(struct super_block *sb)
433 __put_super(sb);
437 static void kill_super_notify(struct super_block *sb)
439 lockdep_assert_not_held(&sb->s_umount);
442 if (sb->s_flags & SB_DEAD)
453 hlist_del_init(&sb->s_instances);
458 * We don't need @sb->s_umount here as every concurrent caller
462 super_wake(sb, SB_DEAD);
549 static inline bool wait_dead(struct super_block *sb)
557 flags = smp_load_acquire(&sb->s_flags);
563 * @sb: superblock to acquire
568 * sb->kill() and be marked as SB_DEAD.
573 static bool grab_super_dead(struct super_block *sb)
576 sb->s_count++;
577 if (grab_super(sb)) {
578 put_super(sb);
579 lockdep_assert_held(&sb->s_umount);
582 wait_var_event(&sb->s_flags, wait_dead(sb));
583 lockdep_assert_not_held(&sb->s_umount);
584 put_super(sb);
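
grab_super_dead() trades a temporary s_count reference for either an active reference or a wait until kill_super_notify() publishes SB_DEAD; the s_count bump keeps the struct super_block itself alive across the sleep. The pairing, reduced to its core (hedged; the real code holds sb_lock around the s_count and hlist manipulation):

        /* Waiter (grab_super_dead): */
        sb->s_count++;                          /* under sb_lock */
        if (!grab_super(sb)) {
                wait_var_event(&sb->s_flags, wait_dead(sb));
                put_super(sb);
        }

        /* Waker (kill_super_notify): unhash first, then publish and wake. */
        hlist_del_init(&sb->s_instances);       /* under sb_lock */
        super_wake(sb, SB_DEAD);
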
590 * @sb: reference we are trying to grab
605 bool super_trylock_shared(struct super_block *sb)
607 if (down_read_trylock(&sb->s_umount)) {
608 if (!(sb->s_flags & SB_DYING) && sb->s_root &&
609 (sb->s_flags & SB_BORN))
611 super_unlock_shared(sb);
619 * @sb: superblock to retire
633 void retire_super(struct super_block *sb)
635 WARN_ON(!sb->s_bdev);
636 __super_lock_excl(sb);
637 if (sb->s_iflags & SB_I_PERSB_BDI) {
638 bdi_unregister(sb->s_bdi);
639 sb->s_iflags &= ~SB_I_PERSB_BDI;
641 sb->s_iflags |= SB_I_RETIRED;
642 super_unlock_excl(sb);
648 * @sb: superblock to kill
660 void generic_shutdown_super(struct super_block *sb)
662 const struct super_operations *sop = sb->s_op;
664 if (sb->s_root) {
665 shrink_dcache_for_umount(sb);
666 sync_filesystem(sb);
667 sb->s_flags &= ~SB_ACTIVE;
672 evict_inodes(sb);
678 fsnotify_sb_delete(sb);
679 security_sb_delete(sb);
685 fscrypt_destroy_keyring(sb);
687 if (sb->s_dio_done_wq) {
688 destroy_workqueue(sb->s_dio_done_wq);
689 sb->s_dio_done_wq = NULL;
693 sop->put_super(sb);
695 if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes),
697 sb->s_id, sb->s_type->name)) {
705 spin_lock(&sb->s_inode_list_lock);
706 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
711 spin_unlock(&sb->s_inode_list_lock);
721 * sget{_fc}() until we passed sb->kill_sb().
723 super_wake(sb, SB_DYING);
724 super_unlock_excl(sb);
725 if (sb->s_bdi != &noop_backing_dev_info) {
726 if (sb->s_iflags & SB_I_PERSB_BDI)
727 bdi_unregister(sb->s_bdi);
728 bdi_put(sb->s_bdi);
729 sb->s_bdi = &noop_backing_dev_info;
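
Filesystems normally reach generic_shutdown_super() from their ->kill_sb() hook and free private state only afterwards. A minimal sketch, assuming a hypothetical myfs keeping its data in sb->s_fs_info:

        static void myfs_kill_sb(struct super_block *sb)
        {
                struct myfs_info *info = sb->s_fs_info;        /* hypothetical */

                generic_shutdown_super(sb);     /* evicts inodes, calls ->put_super() */
                kfree(info);                    /* safe: nothing references the sb now */
        }
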
909 void drop_super(struct super_block *sb)
911 super_unlock_shared(sb);
912 put_super(sb);
917 void drop_super_exclusive(struct super_block *sb)
919 super_unlock_excl(sb);
920 put_super(sb);
926 struct super_block *sb, *p = NULL;
929 list_for_each_entry(sb, &super_blocks, s_list) {
931 if (smp_load_acquire(&sb->s_flags) & SB_DYING)
933 sb->s_count++;
936 f(sb);
941 p = sb;
957 struct super_block *sb, *p = NULL;
960 list_for_each_entry(sb, &super_blocks, s_list) {
963 sb->s_count++;
966 born = super_lock_shared(sb);
967 if (born && sb->s_root)
968 f(sb, arg);
969 super_unlock_shared(sb);
974 p = sb;
993 struct super_block *sb, *p = NULL;
996 hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
999 sb->s_count++;
1002 born = super_lock_shared(sb);
1003 if (born && sb->s_root)
1004 f(sb, arg);
1005 super_unlock_shared(sb);
1010 p = sb;
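
iterate_supers() hands each live superblock to the callback with s_count pinned and s_umount held shared, so the callback may inspect the filesystem but must not take s_umount itself. This is roughly how fs/sync.c drives ->sync_fs() across all filesystems (hedged sketch):

        static void sync_fs_one_sb(struct super_block *sb, void *arg)
        {
                if (!sb_rdonly(sb) && sb->s_op->sync_fs)
                        sb->s_op->sync_fs(sb, *(int *)arg);
        }

        /* caller side: */
        int wait = 1;
        iterate_supers(sync_fs_one_sb, &wait);
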
1029 struct super_block *sb;
1035 list_for_each_entry(sb, &super_blocks, s_list) {
1036 if (sb->s_bdev == bdev) {
1037 if (!grab_super(sb))
1039 super_unlock_excl(sb);
1040 return sb;
1049 struct super_block *sb;
1052 list_for_each_entry(sb, &super_blocks, s_list) {
1053 if (sb->s_dev == dev) {
1056 sb->s_count++;
1059 born = super_lock(sb, excl);
1060 if (born && sb->s_root)
1061 return sb;
1062 super_unlock(sb, excl);
1065 __put_super(sb);
1081 struct super_block *sb = fc->root->d_sb;
1089 if (sb->s_writers.frozen != SB_UNFROZEN)
1092 retval = security_sb_remount(sb, fc->security);
1098 if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
1099 bdev_read_only(sb->s_bdev))
1102 remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
1103 remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
1107 if (!hlist_empty(&sb->s_pins)) {
1108 super_unlock_excl(sb);
1109 group_pin_kill(&sb->s_pins);
1110 __super_lock_excl(sb);
1111 if (!sb->s_root)
1113 if (sb->s_writers.frozen != SB_UNFROZEN)
1115 remount_ro = !sb_rdonly(sb);
1118 shrink_dcache_sb(sb);
1120 /* If we are reconfiguring to RDONLY and current sb is read/write,
1125 sb_start_ro_state_change(sb);
1127 retval = sb_prepare_remount_readonly(sb);
1136 sb_start_ro_state_change(sb);
1146 sb->s_type->name, retval);
1150 WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
1152 sb_end_ro_state_change(sb);
1162 if (remount_ro && sb->s_bdev)
1163 invalidate_bdev(sb->s_bdev);
1167 sb_end_ro_state_change(sb);
1171 static void do_emergency_remount_callback(struct super_block *sb)
1173 bool born = super_lock_excl(sb);
1175 if (born && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) {
1178 fc = fs_context_for_reconfigure(sb->s_root,
1186 super_unlock_excl(sb);
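
The fragments above belong to do_emergency_remount_callback(), which forces every writable block-backed filesystem read-only through the fs_context reconfigure path. Reassembled as a hedged reconstruction (flag details can differ between kernel versions):

        static void do_emergency_remount_callback(struct super_block *sb)
        {
                bool born = super_lock_excl(sb);

                if (born && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) {
                        struct fs_context *fc;

                        fc = fs_context_for_reconfigure(sb->s_root,
                                                SB_RDONLY | SB_FORCE, SB_RDONLY);
                        if (!IS_ERR(fc)) {
                                if (parse_monolithic_mount_data(fc, NULL) == 0)
                                        (void)reconfigure_super(fc);
                                put_fs_context(fc);
                        }
                }
                super_unlock_excl(sb);
        }
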
1207 static void do_thaw_all_callback(struct super_block *sb)
1209 bool born = super_lock_excl(sb);
1211 if (born && sb->s_root) {
1213 while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
1214 pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
1215 thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE);
1217 super_unlock_excl(sb);
1289 void kill_anon_super(struct super_block *sb)
1291 dev_t dev = sb->s_dev;
1292 generic_shutdown_super(sb);
1293 kill_super_notify(sb);
1298 void kill_litter_super(struct super_block *sb)
1300 if (sb->s_root)
1301 d_genocide(sb->s_root);
1302 kill_anon_super(sb);
1306 int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
1308 return set_anon_super(sb, NULL);
1312 static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
1314 return sb->s_fs_info == fc->s_fs_info;
1324 int (*fill_super)(struct super_block *sb,
1327 struct super_block *sb;
1330 sb = sget_fc(fc, test, set_anon_super_fc);
1331 if (IS_ERR(sb))
1332 return PTR_ERR(sb);
1334 if (!sb->s_root) {
1335 err = fill_super(sb, fc);
1339 sb->s_flags |= SB_ACTIVE;
1342 fc->root = dget(sb->s_root);
1346 deactivate_locked_super(sb);
1351 int (*fill_super)(struct super_block *sb,
1359 int (*fill_super)(struct super_block *sb,
1367 int (*fill_super)(struct super_block *sb,
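
These fill_super-taking helpers back get_tree_nodev() and its keyed variants; a filesystem supplies only the fill callback. Sketch with a hypothetical myfs_fill_super:

        static int myfs_get_tree(struct fs_context *fc)
        {
                /* sget_fc() an anonymous sb, run myfs_fill_super(), set SB_ACTIVE */
                return get_tree_nodev(fc, myfs_fill_super);
        }

        static const struct fs_context_operations myfs_context_ops = {
                .get_tree       = myfs_get_tree,
                /* other ops elided */
        };
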
1429 static bool super_lock_shared_active(struct super_block *sb)
1431 bool born = super_lock_shared(sb);
1433 if (!born || !sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
1434 super_unlock_shared(sb);
1442 struct super_block *sb = bdev->bd_holder;
1444 /* bd_holder_lock ensures that the sb isn't freed */
1447 if (!super_lock_shared_active(sb))
1451 sync_filesystem(sb);
1452 shrink_dcache_sb(sb);
1453 invalidate_inodes(sb);
1454 if (sb->s_op->shutdown)
1455 sb->s_op->shutdown(sb);
1457 super_unlock_shared(sb);
1462 struct super_block *sb = bdev->bd_holder;
1466 if (!super_lock_shared_active(sb))
1468 sync_filesystem(sb);
1469 super_unlock_shared(sb);
1478 int setup_bdev_super(struct super_block *sb, int sb_flags,
1484 bdev = blkdev_get_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
1497 blkdev_put(bdev, sb);
1513 blkdev_put(bdev, sb);
1517 sb->s_bdev = bdev;
1518 sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
1520 sb->s_iflags |= SB_I_STABLE_WRITES;
1524 snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
1525 shrinker_debugfs_rename(&sb->s_shrink, "sb-%s:%s", sb->s_type->name,
1526 sb->s_id);
1527 sb_set_blocksize(sb, block_size(bdev));
1570 * bdev_mark_dead()). It is safe because we have active sb
1622 * bdev_mark_dead()). It is safe because we have active sb
1642 void kill_block_super(struct super_block *sb)
1644 struct block_device *bdev = sb->s_bdev;
1646 generic_shutdown_super(sb);
1649 blkdev_put(bdev, sb);
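
setup_bdev_super() is what get_tree_bdev() uses to open and attach the block device, and kill_block_super() is its teardown counterpart. The usual pairing in a block filesystem (hedged; myfs and myfs_fill_super are hypothetical):

        static int myfs_get_tree(struct fs_context *fc)
        {
                return get_tree_bdev(fc, myfs_fill_super);
        }

        static struct file_system_type myfs_fs_type = {
                .name           = "myfs",
                .kill_sb        = kill_block_super,     /* generic_shutdown_super() + blkdev_put() */
                .fs_flags       = FS_REQUIRES_DEV,
                /* .init_fs_context etc. elided */
        };
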
1741 struct super_block *sb;
1763 sb = fc->root->d_sb;
1764 WARN_ON(!sb->s_bdi);
1773 super_wake(sb, SB_BORN);
1775 error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
1787 WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
1788 "negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);
1798 int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1815 WARN_ON(sb->s_bdi != &noop_backing_dev_info);
1816 sb->s_bdi = bdi;
1817 sb->s_iflags |= SB_I_PERSB_BDI;
1827 int super_setup_bdi(struct super_block *sb)
1831 return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
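
super_setup_bdi_name() attaches a per-instance backing_dev_info and sets SB_I_PERSB_BDI so retire/shutdown know to unregister it; filesystems call it early in fill_super. Sketch (the naming scheme is hypothetical):

        static int myfs_fill_super(struct super_block *sb, struct fs_context *fc)
        {
                int err = super_setup_bdi_name(sb, "myfs-%s", fc->source);

                if (err)
                        return err;
                /* ... the rest of fill_super ... */
                return 0;
        }
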
1838 * @sb: the super for which we wait
1844 static void sb_wait_write(struct super_block *sb, int level)
1846 percpu_down_write(sb->s_writers.rw_sem + level-1);
1853 static void lockdep_sb_freeze_release(struct super_block *sb)
1858 percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1862 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
1864 static void lockdep_sb_freeze_acquire(struct super_block *sb)
1869 percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1872 static void sb_freeze_unlock(struct super_block *sb, int level)
1875 percpu_up_write(sb->s_writers.rw_sem + level);
1878 static int wait_for_partially_frozen(struct super_block *sb)
1883 unsigned short old = sb->s_writers.frozen;
1885 up_write(&sb->s_umount);
1886 ret = wait_var_event_killable(&sb->s_writers.frozen,
1887 sb->s_writers.frozen != old);
1888 down_write(&sb->s_umount);
1890 sb->s_writers.frozen != SB_UNFROZEN &&
1891 sb->s_writers.frozen != SB_FREEZE_COMPLETE);
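
sb_wait_write() takes the write side of one s_writers.rw_sem level; ordinary write paths hold the read side via sb_start_write() and friends, which is why each freeze level waits for in-flight writers to drain. Consumer-side sketch:

        sb_start_write(sb);     /* read side of rw_sem[SB_FREEZE_WRITE - 1];
                                 * blocks once freeze_super() owns the level */
        /* ... modify the filesystem ... */
        sb_end_write(sb);

        /* sb_start_pagefault()/sb_end_pagefault() and sb_start_intwrite()/
         * sb_end_intwrite() guard the SB_FREEZE_PAGEFAULT and SB_FREEZE_FS
         * levels the same way. */
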
1898 * @sb: the super to lock
1915 * During this function, sb->s_writers.frozen goes through these values:
1938 * sb->s_writers.frozen is protected by sb->s_umount.
1940 int freeze_super(struct super_block *sb, enum freeze_holder who)
1944 atomic_inc(&sb->s_active);
1945 if (!super_lock_excl(sb))
1949 if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
1950 if (sb->s_writers.freeze_holders & who) {
1951 deactivate_locked_super(sb);
1955 WARN_ON(sb->s_writers.freeze_holders == 0);
1961 sb->s_writers.freeze_holders |= who;
1962 super_unlock_excl(sb);
1966 if (sb->s_writers.frozen != SB_UNFROZEN) {
1967 ret = wait_for_partially_frozen(sb);
1969 deactivate_locked_super(sb);
1976 if (!(sb->s_flags & SB_BORN)) {
1977 super_unlock_excl(sb);
1981 if (sb_rdonly(sb)) {
1983 sb->s_writers.freeze_holders |= who;
1984 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
1985 wake_up_var(&sb->s_writers.frozen);
1986 super_unlock_excl(sb);
1990 sb->s_writers.frozen = SB_FREEZE_WRITE;
1992 super_unlock_excl(sb);
1993 sb_wait_write(sb, SB_FREEZE_WRITE);
1994 if (!super_lock_excl(sb))
1998 sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
1999 sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
2002 ret = sync_filesystem(sb);
2004 sb->s_writers.frozen = SB_UNFROZEN;
2005 sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
2006 wake_up_var(&sb->s_writers.frozen);
2007 deactivate_locked_super(sb);
2012 sb->s_writers.frozen = SB_FREEZE_FS;
2013 sb_wait_write(sb, SB_FREEZE_FS);
2015 if (sb->s_op->freeze_fs) {
2016 ret = sb->s_op->freeze_fs(sb);
2020 sb->s_writers.frozen = SB_UNFROZEN;
2021 sb_freeze_unlock(sb, SB_FREEZE_FS);
2022 wake_up_var(&sb->s_writers.frozen);
2023 deactivate_locked_super(sb);
2031 sb->s_writers.freeze_holders |= who;
2032 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
2033 wake_up_var(&sb->s_writers.frozen);
2034 lockdep_sb_freeze_release(sb);
2035 super_unlock_excl(sb);
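
A caller freezes and thaws with a matching holder flag; the FIFREEZE/FITHAW ioctls use FREEZE_HOLDER_USERSPACE. Minimal usage sketch:

        int err = freeze_super(sb, FREEZE_HOLDER_USERSPACE);

        if (err)                /* e.g. -EBUSY: this holder already froze it */
                return err;

        /* on-disk state is now consistent and write-quiesced */

        err = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
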
2046 static int thaw_super_locked(struct super_block *sb, enum freeze_holder who)
2050 if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
2051 if (!(sb->s_writers.freeze_holders & who)) {
2052 super_unlock_excl(sb);
2061 if (sb->s_writers.freeze_holders & ~who) {
2062 sb->s_writers.freeze_holders &= ~who;
2063 deactivate_locked_super(sb);
2067 super_unlock_excl(sb);
2071 if (sb_rdonly(sb)) {
2072 sb->s_writers.freeze_holders &= ~who;
2073 sb->s_writers.frozen = SB_UNFROZEN;
2074 wake_up_var(&sb->s_writers.frozen);
2078 lockdep_sb_freeze_acquire(sb);
2080 if (sb->s_op->unfreeze_fs) {
2081 error = sb->s_op->unfreeze_fs(sb);
2084 lockdep_sb_freeze_release(sb);
2085 super_unlock_excl(sb);
2090 sb->s_writers.freeze_holders &= ~who;
2091 sb->s_writers.frozen = SB_UNFROZEN;
2092 wake_up_var(&sb->s_writers.frozen);
2093 sb_freeze_unlock(sb, SB_FREEZE_FS);
2095 deactivate_locked_super(sb);
2101 * @sb: the super to thaw
2111 int thaw_super(struct super_block *sb, enum freeze_holder who)
2113 if (!super_lock_excl(sb))
2115 return thaw_super_locked(sb, who);
2125 int sb_init_dio_done_wq(struct super_block *sb)
2130 sb->s_id);
2136 old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
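
The cmpxchg() on the final line resolves the race between concurrent direct-I/O submitters that each allocated a completion workqueue. The whole function reassembles roughly as (hedged reconstruction):

        int sb_init_dio_done_wq(struct super_block *sb)
        {
                struct workqueue_struct *old;
                struct workqueue_struct *wq = alloc_workqueue("dio/%s",
                                                              WQ_MEM_RECLAIM, 0,
                                                              sb->s_id);
                if (!wq)
                        return -ENOMEM;

                /* Atomic publish: exactly one racer installs its workqueue. */
                old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
                if (old)
                        destroy_workqueue(wq);  /* lost the race; free ours */
                return 0;
        }
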