Lines Matching refs:mnt

112 static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
114 unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
127 static int mnt_alloc_id(struct mount *mnt)
133 mnt->mnt_id = res;
137 static void mnt_free_id(struct mount *mnt)
139 ida_free(&mnt_id_ida, mnt->mnt_id);
145 static int mnt_alloc_group_id(struct mount *mnt)
151 mnt->mnt_group_id = res;
158 void mnt_release_group_id(struct mount *mnt)
160 ida_free(&mnt_group_ida, mnt->mnt_group_id);
161 mnt->mnt_group_id = 0;
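
For context, the mount-ID hits above (lines 127-161) come from two small ida-backed allocate/free pairs. Reconstructed as a sketch (exact details can differ between kernel versions):

static int mnt_alloc_id(struct mount *mnt)
{
	int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);	/* IDs handed out by an IDA */

	if (res < 0)
		return res;
	mnt->mnt_id = res;
	return 0;
}

static void mnt_free_id(struct mount *mnt)
{
	ida_free(&mnt_id_ida, mnt->mnt_id);
}

The group-ID pair (mnt_alloc_group_id()/mnt_release_group_id()) follows the same pattern against mnt_group_ida, with the extra step of clearing mnt_group_id on release (line 161).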
167 static inline void mnt_add_count(struct mount *mnt, int n)
170 this_cpu_add(mnt->mnt_pcp->mnt_count, n);
173 mnt->mnt_count += n;
181 int mnt_get_count(struct mount *mnt)
188 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
193 return mnt->mnt_count;
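
The mnt_get_count() hits at 181/188/193 straddle an SMP/UP split: on SMP the per-CPU counters are summed, otherwise a plain field is read. Reconstructed as a sketch:

int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	int count = 0;
	int cpu;

	/* sum the per-CPU contributions made by mnt_add_count() */
	for_each_possible_cpu(cpu)
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;

	return count;
#else
	return mnt->mnt_count;
#endif
}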
199 struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
200 if (mnt) {
203 err = mnt_alloc_id(mnt);
208 mnt->mnt_devname = kstrdup_const(name,
210 if (!mnt->mnt_devname)
215 mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
216 if (!mnt->mnt_pcp)
219 this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
221 mnt->mnt_count = 1;
222 mnt->mnt_writers = 0;
225 INIT_HLIST_NODE(&mnt->mnt_hash);
226 INIT_LIST_HEAD(&mnt->mnt_child);
227 INIT_LIST_HEAD(&mnt->mnt_mounts);
228 INIT_LIST_HEAD(&mnt->mnt_list);
229 INIT_LIST_HEAD(&mnt->mnt_expire);
230 INIT_LIST_HEAD(&mnt->mnt_share);
231 INIT_LIST_HEAD(&mnt->mnt_slave_list);
232 INIT_LIST_HEAD(&mnt->mnt_slave);
233 INIT_HLIST_NODE(&mnt->mnt_mp_list);
234 INIT_LIST_HEAD(&mnt->mnt_umounting);
235 INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
236 mnt->mnt.mnt_idmap = &nop_mnt_idmap;
238 return mnt;
242 kfree_const(mnt->mnt_devname);
245 mnt_free_id(mnt);
247 kmem_cache_free(mnt_cache, mnt);
261 * @mnt: the mount to check for its write status
270 bool __mnt_is_readonly(struct vfsmount *mnt)
272 return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
276 static inline void mnt_inc_writers(struct mount *mnt)
279 this_cpu_inc(mnt->mnt_pcp->mnt_writers);
281 mnt->mnt_writers++;
285 static inline void mnt_dec_writers(struct mount *mnt)
288 this_cpu_dec(mnt->mnt_pcp->mnt_writers);
290 mnt->mnt_writers--;
294 static unsigned int mnt_get_writers(struct mount *mnt)
301 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
306 return mnt->mnt_writers;
310 static int mnt_is_readonly(struct vfsmount *mnt)
312 if (READ_ONCE(mnt->mnt_sb->s_readonly_remount))
323 return __mnt_is_readonly(mnt);
337 * it, and makes sure that writes are allowed (mnt is read-write) before
344 struct mount *mnt = real_mount(m);
348 mnt_inc_writers(mnt);
356 while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
382 mnt_dec_writers(mnt);
427 if (__mnt_is_readonly(file->f_path.mnt))
431 return __mnt_want_write(file->f_path.mnt);
457 * @mnt: the mount on which to give up write access
463 void __mnt_drop_write(struct vfsmount *mnt)
466 mnt_dec_writers(real_mount(mnt));
472 * @mnt: the mount on which to give up write access
478 void mnt_drop_write(struct vfsmount *mnt)
480 __mnt_drop_write(mnt);
481 sb_end_write(mnt->mnt_sb);
488 __mnt_drop_write(file->f_path.mnt);
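
The want/drop helpers above bracket filesystem modifications: mnt_want_write() fails if the mount or superblock is read-only and otherwise pins write access until the matching mnt_drop_write(). A minimal sketch of the calling pattern (the helper name and body are illustrative, not from this file):

static int example_touch_object(const struct path *path)
{
	int err;

	err = mnt_want_write(path->mnt);	/* also enters sb_start_write() */
	if (err)
		return err;

	/* ... modify the filesystem object under path ... */

	mnt_drop_write(path->mnt);		/* pairs with mnt_want_write() */
	return 0;
}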
500 * @mnt: mnt to prevent write access to
502 * Prevents write access to @mnt if there are no active writers for @mnt.
504 * properties of @mnt that need to remain stable for callers with write access
505 * to @mnt.
509 * @mnt.
516 static inline int mnt_hold_writers(struct mount *mnt)
518 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
541 if (mnt_get_writers(mnt) > 0)
549 * @mnt: mnt to stop preventing write access to
551 * Stop preventing write access to @mnt, allowing callers to gain write access
552 * to @mnt again.
559 static inline void mnt_unhold_writers(struct mount *mnt)
566 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
569 static int mnt_make_readonly(struct mount *mnt)
573 ret = mnt_hold_writers(mnt);
575 mnt->mnt.mnt_flags |= MNT_READONLY;
576 mnt_unhold_writers(mnt);
582 struct mount *mnt;
590 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
591 if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
592 err = mnt_hold_writers(mnt);
602 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
603 if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
604 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
611 static void free_vfsmnt(struct mount *mnt)
613 mnt_idmap_put(mnt_idmap(&mnt->mnt));
614 kfree_const(mnt->mnt_devname);
616 free_percpu(mnt->mnt_pcp);
618 kmem_cache_free(mnt_cache, mnt);
629 struct mount *mnt;
634 mnt = real_mount(bastard);
635 mnt_add_count(mnt, 1);
640 mnt_add_count(mnt, -1);
645 mnt_add_count(mnt, -1);
670 * @mnt: parent mount
673 * If @mnt has a child mount @c mounted at @dentry, find and return it.
677 * propagation when a source mount @mnt whose root got overmounted by a
679 * acquired gets copied and propagated. So @mnt gets copied including
680 * @o. When @mnt is propagated to a destination mount @d that already
682 * mount @mnt will be tucked beneath @n, i.e., @n will be mounted on
683 * @mnt and @mnt mounted on @d. Now both @n and @o are mounted at @mnt
686 * Return: The first child of @mnt mounted at @dentry, or NULL.
688 struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
690 struct hlist_head *head = m_hash(mnt, dentry);
694 if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
705 * mount /dev/sda1 /mnt
706 * mount /dev/sda2 /mnt
707 * mount /dev/sda3 /mnt
709 * Then lookup_mnt() on the base /mnt dentry in the root mount will
724 child_mnt = __lookup_mnt(path->mnt, path->dentry);
725 m = child_mnt ? &child_mnt->mnt : NULL;
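
Given the stacking behaviour described in the 705-709 comment, a caller reaches the top of a mount stack by repeatedly looking up the child mounted on the current root. A minimal sketch, loosely modelled on what the path walker does when crossing mountpoints (the helper name is illustrative):

static void example_follow_stack(struct path *path)
{
	struct vfsmount *mounted;

	/* lookup_mnt() returns a referenced child mount, or NULL */
	while ((mounted = lookup_mnt(path)) != NULL) {
		dput(path->dentry);
		mntput(path->mnt);
		path->mnt = mounted;
		path->dentry = dget(mounted->mnt_root);
	}
}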
741 static inline bool mnt_is_cursor(struct mount *mnt)
743 return mnt->mnt.mnt_flags & MNT_CURSOR;
764 struct mount *mnt;
769 list_for_each_entry(mnt, &ns->list, mnt_list) {
770 if (mnt_is_cursor(mnt))
772 is_covered = (mnt->mnt_mountpoint == dentry);
870 static inline int check_mnt(struct mount *mnt)
872 return mnt->mnt_ns == current->nsproxy->mnt_ns;
900 static struct mountpoint *unhash_mnt(struct mount *mnt)
903 mnt->mnt_parent = mnt;
904 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
905 list_del_init(&mnt->mnt_child);
906 hlist_del_init_rcu(&mnt->mnt_hash);
907 hlist_del_init(&mnt->mnt_mp_list);
908 mp = mnt->mnt_mp;
909 mnt->mnt_mp = NULL;
916 static void umount_mnt(struct mount *mnt)
918 put_mountpoint(unhash_mnt(mnt));
924 void mnt_set_mountpoint(struct mount *mnt,
929 mnt_add_count(mnt, 1); /* essentially, that's mntget */
931 child_mnt->mnt_parent = mnt;
963 static void __attach_mnt(struct mount *mnt, struct mount *parent)
965 hlist_add_head_rcu(&mnt->mnt_hash,
966 m_hash(&parent->mnt, mnt->mnt_mountpoint));
967 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
974 * @mnt: the new mount
976 * @beneath: whether to mount @mnt beneath or on top of @parent
978 * If @beneath is false, mount @mnt at @mp on @parent. Then attach @mnt
981 * If @beneath is true, remove @mnt from its current parent and
983 * old parent and old mountpoint of @mnt. Finally, attach @parent to
986 * Note, when __attach_mnt() is called @mnt->mnt_parent already points
992 static void attach_mnt(struct mount *mnt, struct mount *parent,
996 mnt_set_mountpoint_beneath(mnt, parent, mp);
998 mnt_set_mountpoint(parent, mp, mnt);
1000 * Note, @mnt->mnt_parent has to be used. If @mnt was mounted
1001 * beneath @parent then @mnt will need to be attached to
1002 * @parent's old parent, not @parent. IOW, @mnt->mnt_parent
1005 __attach_mnt(mnt, mnt->mnt_parent);
1008 void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
1010 struct mountpoint *old_mp = mnt->mnt_mp;
1011 struct mount *old_parent = mnt->mnt_parent;
1013 list_del_init(&mnt->mnt_child);
1014 hlist_del_init(&mnt->mnt_mp_list);
1015 hlist_del_init_rcu(&mnt->mnt_hash);
1017 attach_mnt(mnt, parent, mp, false);
1026 static void commit_tree(struct mount *mnt)
1028 struct mount *parent = mnt->mnt_parent;
1033 BUG_ON(parent == mnt);
1035 list_add_tail(&head, &mnt->mnt_list);
1044 __attach_mnt(mnt, parent);
1085 struct mount *mnt;
1090 mnt = alloc_vfsmnt(fc->source ?: "none");
1091 if (!mnt)
1095 mnt->mnt.mnt_flags = MNT_INTERNAL;
1098 mnt->mnt.mnt_sb = fc->root->d_sb;
1099 mnt->mnt.mnt_root = dget(fc->root);
1100 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
1101 mnt->mnt_parent = mnt;
1104 list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
1106 return &mnt->mnt;
1126 struct vfsmount *mnt;
1142 mnt = fc_mount(fc);
1144 mnt = ERR_PTR(ret);
1147 return mnt;
1169 struct super_block *sb = old->mnt.mnt_sb;
1170 struct mount *mnt;
1173 mnt = alloc_vfsmnt(old->mnt_devname);
1174 if (!mnt)
1178 mnt->mnt_group_id = 0; /* not a peer of original */
1180 mnt->mnt_group_id = old->mnt_group_id;
1182 if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
1183 err = mnt_alloc_group_id(mnt);
1188 mnt->mnt.mnt_flags = old->mnt.mnt_flags;
1189 mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
1192 mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));
1194 mnt->mnt.mnt_sb = sb;
1195 mnt->mnt.mnt_root = dget(root);
1196 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
1197 mnt->mnt_parent = mnt;
1199 list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
1204 list_add(&mnt->mnt_slave, &old->mnt_slave_list);
1205 mnt->mnt_master = old;
1206 CLEAR_MNT_SHARED(mnt);
1209 list_add(&mnt->mnt_share, &old->mnt_share);
1211 list_add(&mnt->mnt_slave, &old->mnt_slave);
1212 mnt->mnt_master = old->mnt_master;
1214 CLEAR_MNT_SHARED(mnt);
1217 set_mnt_shared(mnt);
1223 list_add(&mnt->mnt_expire, &old->mnt_expire);
1226 return mnt;
1229 mnt_free_id(mnt);
1230 free_vfsmnt(mnt);
1234 static void cleanup_mnt(struct mount *mnt)
1245 WARN_ON(mnt_get_writers(mnt));
1246 if (unlikely(mnt->mnt_pins.first))
1247 mnt_pin_kill(mnt);
1248 hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
1250 mntput(&m->mnt);
1252 fsnotify_vfsmount_delete(&mnt->mnt);
1253 dput(mnt->mnt.mnt_root);
1254 deactivate_super(mnt->mnt.mnt_sb);
1255 mnt_free_id(mnt);
1256 call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
1275 static void mntput_no_expire(struct mount *mnt)
1281 if (likely(READ_ONCE(mnt->mnt_ns))) {
1291 mnt_add_count(mnt, -1);
1301 mnt_add_count(mnt, -1);
1302 count = mnt_get_count(mnt);
1309 if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
1314 mnt->mnt.mnt_flags |= MNT_DOOMED;
1317 list_del(&mnt->mnt_instance);
1319 if (unlikely(!list_empty(&mnt->mnt_mounts))) {
1321 list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
1323 hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
1329 if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
1332 init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
1333 if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
1336 if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
1340 cleanup_mnt(mnt);
1343 void mntput(struct vfsmount *mnt)
1345 if (mnt) {
1346 struct mount *m = real_mount(mnt);
1355 struct vfsmount *mntget(struct vfsmount *mnt)
1357 if (mnt)
1358 mnt_add_count(real_mount(mnt), 1);
1359 return mnt;
1368 void mnt_make_shortterm(struct vfsmount *mnt)
1370 if (mnt)
1371 real_mount(mnt)->mnt_ns = NULL;
1407 p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
1410 p->mnt.mnt_flags |= MNT_INTERNAL;
1411 return &p->mnt;
1418 struct mount *mnt, *ret = NULL;
1422 mnt = list_entry(p, typeof(*mnt), mnt_list);
1423 if (!mnt_is_cursor(mnt)) {
1424 ret = mnt;
1456 struct mount *mnt = v;
1459 return mnt_list_next(p->ns, &mnt->mnt_list);
1465 struct mount *mnt = v;
1468 if (mnt)
1469 list_move_tail(&p->cursor.mnt_list, &mnt->mnt_list);
1480 return p->show(m, &r->mnt);
1510 struct mount *mnt = real_mount(m);
1518 for (p = mnt; p; p = next_mnt(p, mnt)) {
1534 * @mnt: root of mount
1545 int may_umount(struct vfsmount *mnt)
1550 if (propagate_mount_busy(real_mount(mnt), 2))
1580 mntput(&m->mnt);
1595 static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
1602 if (!mnt_has_parent(mnt))
1609 if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
1617 if (IS_MNT_LOCKED(mnt))
1628 static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
1634 propagate_mount_unlock(mnt);
1637 for (p = mnt; p; p = next_mnt(p, mnt)) {
1638 p->mnt.mnt_flags |= MNT_UMOUNT;
1664 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
1682 static void shrink_submounts(struct mount *mnt);
1707 static int do_umount(struct mount *mnt, int flags)
1709 struct super_block *sb = mnt->mnt.mnt_sb;
1712 retval = security_sb_umount(&mnt->mnt, flags);
1723 if (&mnt->mnt == current->fs->root.mnt ||
1732 if (mnt_get_count(mnt) != 2) {
1738 if (!xchg(&mnt->mnt_expiry_mark, 1))
1765 if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
1780 if (mnt->mnt.mnt_flags & MNT_LOCKED)
1785 if (!list_empty(&mnt->mnt_list))
1786 umount_tree(mnt, UMOUNT_PROPAGATE);
1789 shrink_submounts(mnt);
1791 if (!propagate_mount_busy(mnt, 2)) {
1792 if (!list_empty(&mnt->mnt_list))
1793 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
1816 struct mount *mnt;
1826 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
1827 if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
1828 umount_mnt(mnt);
1829 hlist_add_head(&mnt->mnt_umount, &unmounted);
1831 else umount_tree(mnt, UMOUNT_CONNECTED);
1857 return path->mnt->mnt_root == path->dentry;
1871 struct mount *mnt = real_mount(path->mnt);
1877 if (!check_mnt(mnt))
1879 if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
1889 struct mount *mnt = real_mount(path->mnt);
1894 ret = do_umount(mnt, flags);
1898 mntput_no_expire(mnt);
1949 struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
1951 return &mnt->ns;
1967 struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
1972 if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
1978 res = q = clone_mnt(mnt, dentry, flag);
1982 q->mnt_mountpoint = mnt->mnt_mountpoint;
1984 p = mnt;
1985 list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
1993 if (s->mnt.mnt_flags & MNT_LOCKED) {
2003 is_mnt_ns_file(s->mnt.mnt_root)) {
2013 q = clone_mnt(p, p->mnt.mnt_root, flag);
2038 if (!check_mnt(real_mount(path->mnt)))
2041 tree = copy_tree(real_mount(path->mnt), path->dentry,
2046 return &tree->mnt;
2052 void dissolve_on_fput(struct vfsmount *mnt)
2057 ns = real_mount(mnt)->mnt_ns;
2060 umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
2070 void drop_collected_mounts(struct vfsmount *mnt)
2074 umount_tree(real_mount(mnt), 0);
2079 static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
2083 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
2087 if (child->mnt.mnt_flags & MNT_LOCKED)
2105 struct mount *old_mnt = real_mount(path->mnt);
2127 return &new_mnt->mnt;
2138 struct mount *mnt;
2142 list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
2143 res = f(&mnt->mnt, arg);
2150 static void lock_mnt_tree(struct mount *mnt)
2154 for (p = mnt; p; p = next_mnt(p, mnt)) {
2155 int flags = p->mnt.mnt_flags;
2173 p->mnt.mnt_flags = flags;
2177 static void cleanup_group_ids(struct mount *mnt, struct mount *end)
2181 for (p = mnt; p != end; p = next_mnt(p, mnt)) {
2187 static int invent_group_ids(struct mount *mnt, bool recurse)
2191 for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
2195 cleanup_group_ids(mnt, p);
2204 int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
2217 for (p = mnt; p; p = next_mnt(p, mnt))
2317 smp = get_mountpoint(source_mnt->mnt.mnt_root);
2369 q = __lookup_mnt(&child->mnt_parent->mnt,
2376 child->mnt.mnt_flags &= ~MNT_LOCKED;
2407 * Follow the mount stack on @path until the top mount @mnt is found. If
2408 * the initial @path->{mnt,dentry} is a mountpoint, look up the first
2409 * mount stacked on top of it. Then simply follow @{mnt,mnt->mnt_root}
2416 * If @beneath is requested, the inode_lock() on @mnt's mountpoint
2417 * @mp on @mnt->mnt_parent must be acquired. This protects against a
2419 * where @mnt doesn't have a child mount mounted at @mp. A concurrent
2420 * removal of @mnt->mnt_root doesn't matter as nothing will be mounted
2423 * In addition, @beneath needs to make sure that @mnt hasn't been
2425 * @mount_lock and acquiring @namespace_sem. For the !@beneath case @mnt
2427 * check_mnt(mnt) in the function it's called from. For the @beneath
2429 * If @mnt hasn't been unmounted then @mnt->mnt_mountpoint still points
2430 * to @mnt->mnt_mp->m_dentry. But if @mnt has been unmounted it will
2431 * point to @mnt->mnt_root and @mnt->mnt_mp will be NULL.
2438 struct vfsmount *mnt = path->mnt;
2446 m = real_mount(mnt);
2462 if (beneath && (!is_mounted(mnt) || m->mnt_mountpoint != dentry)) {
2468 mnt = lookup_mnt(path);
2469 if (likely(!mnt))
2477 path->mnt = mnt;
2478 path->dentry = dget(mnt->mnt_root);
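
The 2438-2478 hits come from do_lock_mount(); callers in this file go through the lock_mount()/unlock_mount() wrappers. Roughly the pattern used by do_loopback() (a sketch with error handling trimmed; mp, parent, mnt and err are assumed to be declared by the caller):

	mp = lock_mount(path);			/* may follow stacked mounts first */
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	parent = real_mount(path->mnt);
	err = graft_tree(mnt, parent, mp);	/* attach while namespace_sem is held */

	unlock_mount(mp);			/* drops the locks lock_mount() took */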
2511 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
2513 if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
2517 d_is_dir(mnt->mnt.mnt_root))
2520 return attach_recursive_mnt(mnt, p, mp, 0);
2546 struct mount *mnt = real_mount(path->mnt);
2560 err = invent_group_ids(mnt, recurse);
2566 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
2577 struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
2580 return mnt;
2583 return mnt;
2586 return mnt;
2589 mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE);
2591 mnt = clone_mnt(old, old_path->dentry, 0);
2593 if (!IS_ERR(mnt))
2594 mnt->mnt.mnt_flags &= ~MNT_LOCKED;
2596 return mnt;
2606 struct mount *mnt = NULL, *parent;
2625 parent = real_mount(path->mnt);
2629 mnt = __do_loopback(&old_path, recurse);
2630 if (IS_ERR(mnt)) {
2631 err = PTR_ERR(mnt);
2635 err = graft_tree(mnt, parent, mp);
2638 umount_tree(mnt, UMOUNT_SYNC);
2652 struct mount *mnt, *p;
2659 mnt = __do_loopback(path, recursive);
2660 if (IS_ERR(mnt)) {
2663 return ERR_CAST(mnt);
2667 for (p = mnt; p; p = next_mnt(p, mnt)) {
2671 ns->root = mnt;
2672 list_add_tail(&ns->list, &mnt->mnt_list);
2673 mntget(&mnt->mnt);
2677 mntput(path->mnt);
2678 path->mnt = &mnt->mnt;
2681 dissolve_on_fput(path->mnt);
2744 static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags)
2746 unsigned int fl = mnt->mnt.mnt_flags;
2771 static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
2775 if (readonly_request == __mnt_is_readonly(&mnt->mnt))
2779 return mnt_make_readonly(mnt);
2781 mnt->mnt.mnt_flags &= ~MNT_READONLY;
2785 static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
2787 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
2788 mnt->mnt.mnt_flags = mnt_flags;
2789 touch_mnt_namespace(mnt->mnt_ns);
2792 static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt)
2794 struct super_block *sb = mnt->mnt_sb;
2796 if (!__mnt_is_readonly(mnt) &&
2804 is_mounted(mnt) ? "remounted" : "mounted",
2820 struct super_block *sb = path->mnt->mnt_sb;
2821 struct mount *mnt = real_mount(path->mnt);
2824 if (!check_mnt(mnt))
2830 if (!can_change_locked_flags(mnt, mnt_flags))
2839 ret = change_mount_ro_state(mnt, mnt_flags);
2841 set_mount_attributes(mnt, mnt_flags);
2845 mnt_warn_timestamp_expiry(path, &mnt->mnt);
2859 struct super_block *sb = path->mnt->mnt_sb;
2860 struct mount *mnt = real_mount(path->mnt);
2863 if (!check_mnt(mnt))
2869 if (!can_change_locked_flags(mnt, mnt_flags))
2890 set_mount_attributes(mnt, mnt_flags);
2897 mnt_warn_timestamp_expiry(path, &mnt->mnt);
2903 static inline int tree_contains_unbindable(struct mount *mnt)
2906 for (p = mnt; p; p = next_mnt(p, mnt)) {
2926 if (mnt_ns_loop(p->mnt.mnt_root))
2940 from = real_mount(from_path->mnt);
2941 to = real_mount(to_path->mnt);
2947 if (!is_mounted(&from->mnt))
2949 if (!is_mounted(&to->mnt))
2967 if (from->mnt.mnt_sb != to->mnt.mnt_sb)
2971 if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root))
2975 if (has_locked_children(from, to->mnt.mnt_root))
3012 * @path->mnt with @path->dentry as mountpoint.
3020 if (unlikely(__lookup_mnt(path->mnt, path->dentry))) {
3052 struct mount *mnt_from = real_mount(from->mnt),
3053 *mnt_to = real_mount(to->mnt),
3073 if (&mnt_to->mnt == current->fs->root.mnt)
3096 * @mnt->mnt_parent and remounted on @mnt_from. But since @c is
3125 old = real_mount(old_path->mnt);
3126 p = real_mount(new_path->mnt);
3140 if (!is_mounted(&old->mnt))
3147 if (old->mnt.mnt_flags & MNT_LOCKED)
3185 err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp, flags);
3228 struct mount *parent = real_mount(path->mnt);
3242 if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && path_mounted(path))
3245 if (d_is_symlink(newmnt->mnt.mnt_root))
3248 newmnt->mnt.mnt_flags = mnt_flags;
3261 struct vfsmount *mnt;
3277 mnt = vfs_create_mount(fc);
3278 if (IS_ERR(mnt))
3279 return PTR_ERR(mnt);
3281 mnt_warn_timestamp_expiry(mountpoint, mnt);
3285 mntput(mnt);
3288 error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags);
3291 mntput(mnt);
3358 struct mount *mnt;
3366 mnt = real_mount(m);
3370 BUG_ON(mnt_get_count(mnt) < 2);
3372 if (m->mnt_sb == path->mnt->mnt_sb &&
3399 err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
3411 if (!list_empty(&mnt->mnt_expire)) {
3413 list_del_init(&mnt->mnt_expire);
3423 * @mnt: The mount to list.
3426 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
3430 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
3443 struct mount *mnt, *next;
3458 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
3459 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
3460 propagate_mount_busy(mnt, 1))
3462 list_move(&mnt->mnt_expire, &graveyard);
3465 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
3466 touch_mnt_namespace(mnt->mnt_ns);
3467 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
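
mnt_set_expiry() and mark_mounts_for_expiry() are the two halves of the automount expiry scheme: the filesystem puts each automounted mount on a private list, and a periodic job hands that list back for expiry. A sketch of how a filesystem might use them (all example_* names are illustrative):

static LIST_HEAD(example_automount_list);

static struct vfsmount *example_d_automount(struct path *path)
{
	struct vfsmount *newmnt;

	newmnt = example_build_mount(path);		/* hypothetical helper */
	if (IS_ERR(newmnt))
		return newmnt;

	mntget(newmnt);		/* prevent immediate expiration, as e.g. AFS does */
	mnt_set_expiry(newmnt, &example_automount_list);
	return newmnt;
}

static void example_expiry_worker(struct work_struct *work)
{
	/* unmounts list members that have not been used since the previous pass */
	mark_mounts_for_expiry(&example_automount_list);
}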
3492 struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
3495 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
3500 if (!list_empty(&mnt->mnt_mounts)) {
3501 this_parent = mnt;
3505 if (!propagate_mount_busy(mnt, 1)) {
3506 list_move_tail(&mnt->mnt_expire, graveyard);
3527 static void shrink_submounts(struct mount *mnt)
3533 while (select_submounts(mnt, &graveyard)) {
3650 mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK;
3783 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
3808 if (&p->mnt == new_fs->root.mnt) {
3809 new_fs->root.mnt = mntget(&q->mnt);
3810 rootmnt = &p->mnt;
3812 if (&p->mnt == new_fs->pwd.mnt) {
3813 new_fs->pwd.mnt = mntget(&q->mnt);
3814 pwdmnt = &p->mnt;
3822 while (p->mnt.mnt_root != q->mnt.mnt_root)
3837 struct mount *mnt = real_mount(m);
3848 mnt->mnt_ns = ns;
3849 ns->root = mnt;
3851 list_add(&mnt->mnt_list, &ns->list);
3862 s = path.mnt->mnt_sb;
3864 mntput(path.mnt);
3947 struct mount *mnt;
4008 newmount.mnt = vfs_create_mount(fc);
4009 if (IS_ERR(newmount.mnt)) {
4010 ret = PTR_ERR(newmount.mnt);
4014 newmount.mnt->mnt_flags = mnt_flags;
4028 mnt = real_mount(newmount.mnt);
4029 mnt->mnt_ns = ns;
4030 ns->root = mnt;
4032 list_add(&mnt->mnt_list, &ns->list);
4033 mntget(newmount.mnt);
4040 dissolve_on_fput(newmount.mnt);
4132 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
4135 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
4136 dentry = mnt->mnt_mountpoint;
4137 mnt = mnt->mnt_parent;
4139 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
4146 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
4209 new_mnt = real_mount(new.mnt);
4210 root_mnt = real_mount(root.mnt);
4211 old_mnt = real_mount(old.mnt);
4220 if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
4246 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
4247 new_mnt->mnt.mnt_flags |= MNT_LOCKED;
4248 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
4276 static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt)
4278 unsigned int flags = mnt->mnt.mnt_flags;
4288 static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4290 struct vfsmount *m = &mnt->mnt;
4320 if (!is_anon_ns(mnt->mnt_ns))
4329 * @mnt: the mount to which @kattr will be applied
4336 const struct mount *mnt)
4339 (mnt->mnt.mnt_flags & MNT_READONLY)) &&
4343 static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
4348 for (m = mnt; m; m = next_mnt(m, mnt)) {
4376 for (p = mnt; p; p = next_mnt(p, mnt)) {
4378 if (p->mnt.mnt_flags & MNT_WRITE_HOLD)
4392 static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4405 smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap));
4408 static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt)
4412 for (m = mnt; m; m = next_mnt(m, mnt)) {
4417 WRITE_ONCE(m->mnt.mnt_flags, flags);
4420 if (m->mnt.mnt_flags & MNT_WRITE_HOLD)
4428 touch_mnt_namespace(mnt->mnt_ns);
4433 struct mount *mnt = real_mount(path->mnt);
4455 err = invent_group_ids(mnt, kattr->recurse);
4467 if (!is_mounted(&mnt->mnt))
4481 if ((mnt_has_parent(mnt) || !is_anon_ns(mnt->mnt_ns)) && !check_mnt(mnt))
4489 err = mount_setattr_prepare(kattr, mnt);
4491 mount_setattr_commit(kattr, mnt);
4498 cleanup_group_ids(mnt, NULL);
4695 struct vfsmount *mnt;
4700 mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
4701 if (IS_ERR(mnt))
4707 m = real_mount(mnt);
4715 root.mnt = mnt;
4716 root.dentry = mnt->mnt_root;
4717 mnt->mnt_flags |= MNT_LOCKED;
4762 drop_collected_mounts(&ns->root->mnt);
4768 struct vfsmount *mnt;
4769 mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
4770 if (!IS_ERR(mnt)) {
4772 * it is a longterm mount, don't release mnt until
4775 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
4777 return mnt;
4781 void kern_unmount(struct vfsmount *mnt)
4784 if (!IS_ERR(mnt)) {
4785 mnt_make_shortterm(mnt);
4787 mntput(mnt);
4792 void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
4797 mnt_make_shortterm(mnt[i]);
4800 mntput(mnt[i]);
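
kern_mount() flags the result as a long-term internal mount (MNT_NS_INTERNAL, line 4775) and kern_unmount() clears that via mnt_make_shortterm() before the final mntput(). A sketch of the usual pairing in a subsystem that keeps a private internal mount (example_fs_type and the init/exit hooks are illustrative):

static struct vfsmount *example_mnt;

static int __init example_init(void)
{
	example_mnt = kern_mount(&example_fs_type);
	return PTR_ERR_OR_ZERO(example_mnt);
}

static void __exit example_exit(void)
{
	kern_unmount(example_mnt);	/* tolerates an ERR_PTR, per the !IS_ERR() check above */
}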
4804 bool our_mnt(struct vfsmount *mnt)
4806 return check_mnt(real_mount(mnt));
4817 ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
4818 ns_root.dentry = ns_root.mnt->mnt_root;
4838 struct mount *mnt;
4843 list_for_each_entry(mnt, &ns->list, mnt_list) {
4847 if (mnt_is_cursor(mnt))
4850 if (mnt->mnt.mnt_sb->s_type != sb->s_type)
4856 if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
4860 mnt_flags = mnt->mnt.mnt_flags;
4863 if (sb_rdonly(mnt->mnt.mnt_sb))
4880 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
4883 if (!(child->mnt.mnt_flags & MNT_LOCKED))
4925 bool mnt_may_suid(struct vfsmount *mnt)
4934 return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
4935 current_in_userns(mnt->mnt_sb->s_user_ns);
4984 err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
5009 .name = "mnt",