Lines matching refs: mnt (fs/namespace.c)
90 static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
92 unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
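Only the first line of the hash body survives in the listing; below is a minimal sketch of how a pointer-pair hash in this style completes, assuming kernel context (mount_hashtable, m_hash_shift and m_hash_mask are the usual companions of such a function, reconstructed here rather than quoted):

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
        /* Divide away the low bits, which are identical for all
         * cache-line-aligned allocations, then mix in the dentry. */
        unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);

        tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
        tmp = tmp + (tmp >> m_hash_shift);      /* fold the high bits down */
        return &mount_hashtable[tmp & m_hash_mask];
}

The point to notice is that the hash is taken over the (parent vfsmount, mountpoint dentry) pair, which is exactly the pair __lookup_mnt (line 609 below) matches against.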
105 static int mnt_alloc_id(struct mount *mnt)
111 mnt->mnt_id = res;
115 static void mnt_free_id(struct mount *mnt)
117 ida_free(&mnt_id_ida, mnt->mnt_id);
123 static int mnt_alloc_group_id(struct mount *mnt)
129 mnt->mnt_group_id = res;
136 void mnt_release_group_id(struct mount *mnt)
138 ida_free(&mnt_group_ida, mnt->mnt_group_id);
139 mnt->mnt_group_id = 0;
145 static inline void mnt_add_count(struct mount *mnt, int n)
148 this_cpu_add(mnt->mnt_pcp->mnt_count, n);
151 mnt->mnt_count += n;
159 int mnt_get_count(struct mount *mnt)
166 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
171 return mnt->mnt_count;
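Lines 145-171 are the two halves of a per-CPU reference counter: additions go to a cheap per-CPU slot, and the exact value exists only after summing every CPU. A sketch of the full pattern around the fragments shown, assuming kernel context (the non-SMP branches fall back to a plain field):

static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
        this_cpu_add(mnt->mnt_pcp->mnt_count, n);       /* no shared cacheline */
#else
        preempt_disable();
        mnt->mnt_count += n;
        preempt_enable();
#endif
}

int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
        int count = 0;
        int cpu;

        for_each_possible_cpu(cpu)      /* slow path: fold all the slots */
                count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
        return count;
#else
        return mnt->mnt_count;
#endif
}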
177 struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
178 if (mnt) {
181 err = mnt_alloc_id(mnt);
186 mnt->mnt_devname = kstrdup_const(name,
188 if (!mnt->mnt_devname)
193 mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
194 if (!mnt->mnt_pcp)
197 this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
199 mnt->mnt_count = 1;
200 mnt->mnt_writers = 0;
203 INIT_HLIST_NODE(&mnt->mnt_hash);
204 INIT_LIST_HEAD(&mnt->mnt_child);
205 INIT_LIST_HEAD(&mnt->mnt_mounts);
206 INIT_LIST_HEAD(&mnt->mnt_list);
207 INIT_LIST_HEAD(&mnt->mnt_expire);
208 INIT_LIST_HEAD(&mnt->mnt_share);
209 INIT_LIST_HEAD(&mnt->mnt_slave_list);
210 INIT_LIST_HEAD(&mnt->mnt_slave);
211 INIT_HLIST_NODE(&mnt->mnt_mp_list);
212 INIT_LIST_HEAD(&mnt->mnt_umounting);
213 INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
215 return mnt;
219 kfree_const(mnt->mnt_devname);
222 mnt_free_id(mnt);
224 kmem_cache_free(mnt_cache, mnt);
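The fragments at 219-224 are the tail of alloc_vfsmnt's goto-unwind chain: each error label releases exactly the resources acquired before the failing step, in reverse order of acquisition. The idiom in miniature, as self-contained userspace C (the widget names are illustrative, not kernel API):

#include <stdlib.h>
#include <string.h>

struct widget { int id; char *name; };

static int  widget_alloc_id(struct widget *w) { w->id = 1; return 0; }  /* stand-in */
static void widget_free_id(struct widget *w)  { w->id = 0; }            /* stand-in */

struct widget *widget_alloc(const char *name)
{
        struct widget *w = calloc(1, sizeof(*w));

        if (!w)
                return NULL;
        if (widget_alloc_id(w))         /* first resource */
                goto out_free_cache;
        w->name = strdup(name);         /* second resource */
        if (!w->name)
                goto out_free_id;
        return w;

out_free_id:                            /* unwind in reverse order */
        widget_free_id(w);
out_free_cache:
        free(w);
        return NULL;
}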
238 * @mnt: the mount to check for its write status
247 bool __mnt_is_readonly(struct vfsmount *mnt)
249 return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
253 static inline void mnt_inc_writers(struct mount *mnt)
256 this_cpu_inc(mnt->mnt_pcp->mnt_writers);
258 mnt->mnt_writers++;
262 static inline void mnt_dec_writers(struct mount *mnt)
265 this_cpu_dec(mnt->mnt_pcp->mnt_writers);
267 mnt->mnt_writers--;
271 static unsigned int mnt_get_writers(struct mount *mnt)
278 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
283 return mnt->mnt_writers;
287 static int mnt_is_readonly(struct vfsmount *mnt)
289 if (mnt->mnt_sb->s_readonly_remount)
293 return __mnt_is_readonly(mnt);
307 * it, and makes sure that writes are allowed (mnt is read-write) before
314 struct mount *mnt = real_mount(m);
318 mnt_inc_writers(mnt);
325 while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
334 mnt_dec_writers(mnt);
365 * @mnt: the mount on which to take a write
375 int mnt_clone_write(struct vfsmount *mnt)
378 if (__mnt_is_readonly(mnt))
381 mnt_inc_writers(real_mount(mnt));
397 return __mnt_want_write(file->f_path.mnt);
399 return mnt_clone_write(file->f_path.mnt);
423 * @mnt: the mount on which to give up write access
429 void __mnt_drop_write(struct vfsmount *mnt)
432 mnt_dec_writers(real_mount(mnt));
438 * @mnt: the mount on which to give up write access
444 void mnt_drop_write(struct vfsmount *mnt)
446 __mnt_drop_write(mnt);
447 sb_end_write(mnt->mnt_sb);
453 __mnt_drop_write(file->f_path.mnt);
463 static int mnt_make_readonly(struct mount *mnt)
468 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
491 if (mnt_get_writers(mnt) > 0)
494 mnt->mnt.mnt_flags |= MNT_READONLY;
500 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
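Lines 318-334 and 463-500 are the two sides of one handshake: a would-be writer optimistically bumps its counter, then waits out MNT_WRITE_HOLD and backs off if the mount went read-only meanwhile; remount-ro raises the hold flag and succeeds only if no writer got in first. A compressed userspace sketch of the protocol, with the real code's barriers, locking and cpu_relax() elided and illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int  writers;
static atomic_bool write_hold, readonly;

bool want_write(void)
{
        atomic_fetch_add(&writers, 1);          /* optimistic, like mnt_inc_writers() */
        while (atomic_load(&write_hold))        /* a remount-ro is deciding */
                ;
        if (atomic_load(&readonly)) {           /* it won: back out */
                atomic_fetch_sub(&writers, 1);
                return false;
        }
        return true;
}

bool make_readonly(void)
{
        atomic_store(&write_hold, true);        /* freeze new writers */
        if (atomic_load(&writers) > 0) {        /* existing writers win */
                atomic_store(&write_hold, false);
                return false;
        }
        atomic_store(&readonly, true);
        atomic_store(&write_hold, false);
        return true;
}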
505 static int __mnt_unmake_readonly(struct mount *mnt)
508 mnt->mnt.mnt_flags &= ~MNT_READONLY;
515 struct mount *mnt;
523 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
524 if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
525 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
527 if (mnt_get_writers(mnt) > 0) {
540 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
541 if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
542 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
549 static void free_vfsmnt(struct mount *mnt)
551 kfree_const(mnt->mnt_devname);
553 free_percpu(mnt->mnt_pcp);
555 kmem_cache_free(mnt_cache, mnt);
566 struct mount *mnt;
571 mnt = real_mount(bastard);
572 mnt_add_count(mnt, 1);
577 mnt_add_count(mnt, -1);
582 mnt_add_count(mnt, -1);
606 * find the first mount at @dentry on vfsmount @mnt.
609 struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
611 struct hlist_head *head = m_hash(mnt, dentry);
615 if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
626 * mount /dev/sda1 /mnt
627 * mount /dev/sda2 /mnt
628 * mount /dev/sda3 /mnt
630 * Then lookup_mnt() on the base /mnt dentry in the root mount will
645 child_mnt = __lookup_mnt(path->mnt, path->dentry);
646 m = child_mnt ? &child_mnt->mnt : NULL;
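The 626-630 comment describes mount stacking, and lookup_mnt returning the topmost mount is why the most recent mount "wins". The behaviour is easy to reproduce from userspace; a minimal demonstration (needs root, assumes /mnt exists, uses tmpfs in place of the comment's sda devices):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* Stack three tmpfs instances on the same mountpoint. */
        for (int i = 0; i < 3; i++)
                if (mount("none", "/mnt", "tmpfs", 0, NULL))
                        perror("mount");

        /* Path resolution through /mnt now reaches only the third
         * instance; each umount peels one layer off, top first. */
        for (int i = 0; i < 3; i++)
                if (umount("/mnt"))
                        perror("umount");
        return 0;
}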
662 static inline bool mnt_is_cursor(struct mount *mnt)
664 return mnt->mnt.mnt_flags & MNT_CURSOR;
685 struct mount *mnt;
690 list_for_each_entry(mnt, &ns->list, mnt_list) {
691 if (mnt_is_cursor(mnt))
693 is_covered = (mnt->mnt_mountpoint == dentry);
791 static inline int check_mnt(struct mount *mnt)
793 return mnt->mnt_ns == current->nsproxy->mnt_ns;
821 static struct mountpoint *unhash_mnt(struct mount *mnt)
824 mnt->mnt_parent = mnt;
825 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
826 list_del_init(&mnt->mnt_child);
827 hlist_del_init_rcu(&mnt->mnt_hash);
828 hlist_del_init(&mnt->mnt_mp_list);
829 mp = mnt->mnt_mp;
830 mnt->mnt_mp = NULL;
837 static void umount_mnt(struct mount *mnt)
839 put_mountpoint(unhash_mnt(mnt));
845 void mnt_set_mountpoint(struct mount *mnt,
850 mnt_add_count(mnt, 1); /* essentially, that's mntget */
852 child_mnt->mnt_parent = mnt;
857 static void __attach_mnt(struct mount *mnt, struct mount *parent)
859 hlist_add_head_rcu(&mnt->mnt_hash,
860 m_hash(&parent->mnt, mnt->mnt_mountpoint));
861 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
867 static void attach_mnt(struct mount *mnt,
871 mnt_set_mountpoint(parent, mp, mnt);
872 __attach_mnt(mnt, parent);
875 void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
877 struct mountpoint *old_mp = mnt->mnt_mp;
878 struct mount *old_parent = mnt->mnt_parent;
880 list_del_init(&mnt->mnt_child);
881 hlist_del_init(&mnt->mnt_mp_list);
882 hlist_del_init_rcu(&mnt->mnt_hash);
884 attach_mnt(mnt, parent, mp);
893 static void commit_tree(struct mount *mnt)
895 struct mount *parent = mnt->mnt_parent;
900 BUG_ON(parent == mnt);
902 list_add_tail(&head, &mnt->mnt_list);
911 __attach_mnt(mnt, parent);
952 struct mount *mnt;
957 mnt = alloc_vfsmnt(fc->source ?: "none");
958 if (!mnt)
962 mnt->mnt.mnt_flags = MNT_INTERNAL;
965 mnt->mnt.mnt_sb = fc->root->d_sb;
966 mnt->mnt.mnt_root = dget(fc->root);
967 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
968 mnt->mnt_parent = mnt;
971 list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
973 return &mnt->mnt;
993 struct vfsmount *mnt;
1009 mnt = fc_mount(fc);
1011 mnt = ERR_PTR(ret);
1014 return mnt;
1036 struct super_block *sb = old->mnt.mnt_sb;
1037 struct mount *mnt;
1040 mnt = alloc_vfsmnt(old->mnt_devname);
1041 if (!mnt)
1045 mnt->mnt_group_id = 0; /* not a peer of original */
1047 mnt->mnt_group_id = old->mnt_group_id;
1049 if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
1050 err = mnt_alloc_group_id(mnt);
1055 mnt->mnt.mnt_flags = old->mnt.mnt_flags;
1056 mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
1059 mnt->mnt.mnt_sb = sb;
1060 mnt->mnt.mnt_root = dget(root);
1061 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
1062 mnt->mnt_parent = mnt;
1064 list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
1069 list_add(&mnt->mnt_slave, &old->mnt_slave_list);
1070 mnt->mnt_master = old;
1071 CLEAR_MNT_SHARED(mnt);
1074 list_add(&mnt->mnt_share, &old->mnt_share);
1076 list_add(&mnt->mnt_slave, &old->mnt_slave);
1077 mnt->mnt_master = old->mnt_master;
1079 CLEAR_MNT_SHARED(mnt);
1082 set_mnt_shared(mnt);
1088 list_add(&mnt->mnt_expire, &old->mnt_expire);
1091 return mnt;
1094 mnt_free_id(mnt);
1095 free_vfsmnt(mnt);
1099 static void cleanup_mnt(struct mount *mnt)
1110 WARN_ON(mnt_get_writers(mnt));
1111 if (unlikely(mnt->mnt_pins.first))
1112 mnt_pin_kill(mnt);
1113 hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
1115 mntput(&m->mnt);
1117 fsnotify_vfsmount_delete(&mnt->mnt);
1118 dput(mnt->mnt.mnt_root);
1119 deactivate_super(mnt->mnt.mnt_sb);
1120 mnt_free_id(mnt);
1121 call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
1140 static void mntput_no_expire(struct mount *mnt)
1146 if (likely(READ_ONCE(mnt->mnt_ns))) {
1156 mnt_add_count(mnt, -1);
1166 mnt_add_count(mnt, -1);
1167 count = mnt_get_count(mnt);
1174 if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
1179 mnt->mnt.mnt_flags |= MNT_DOOMED;
1182 list_del(&mnt->mnt_instance);
1184 if (unlikely(!list_empty(&mnt->mnt_mounts))) {
1186 list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
1188 hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
1194 if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
1197 init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
1198 if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
1201 if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
1205 cleanup_mnt(mnt);
1208 void mntput(struct vfsmount *mnt)
1210 if (mnt) {
1211 struct mount *m = real_mount(mnt);
1220 struct vfsmount *mntget(struct vfsmount *mnt)
1222 if (mnt)
1223 mnt_add_count(real_mount(mnt), 1);
1224 return mnt;
1260 p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
1263 p->mnt.mnt_flags |= MNT_INTERNAL;
1264 return &p->mnt;
1271 struct mount *mnt, *ret = NULL;
1275 mnt = list_entry(p, typeof(*mnt), mnt_list);
1276 if (!mnt_is_cursor(mnt)) {
1277 ret = mnt;
1309 struct mount *mnt = v;
1312 return mnt_list_next(p->ns, &mnt->mnt_list);
1318 struct mount *mnt = v;
1321 if (mnt)
1322 list_move_tail(&p->cursor.mnt_list, &mnt->mnt_list);
1333 return p->show(m, &r->mnt);
1355 * @mnt: root of mount tree
1363 struct mount *mnt = real_mount(m);
1371 for (p = mnt; p; p = next_mnt(p, mnt)) {
1387 * @mnt: root of mount
1398 int may_umount(struct vfsmount *mnt)
1403 if (propagate_mount_busy(real_mount(mnt), 2))
1433 mntput(&m->mnt);
1448 static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
1455 if (!mnt_has_parent(mnt))
1462 if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
1470 if (IS_MNT_LOCKED(mnt))
1481 static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
1487 propagate_mount_unlock(mnt);
1490 for (p = mnt; p; p = next_mnt(p, mnt)) {
1491 p->mnt.mnt_flags |= MNT_UMOUNT;
1517 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
1535 static void shrink_submounts(struct mount *mnt);
1560 static int do_umount(struct mount *mnt, int flags)
1562 struct super_block *sb = mnt->mnt.mnt_sb;
1565 retval = security_sb_umount(&mnt->mnt, flags);
1576 if (&mnt->mnt == current->fs->root.mnt ||
1585 if (mnt_get_count(mnt) != 2) {
1591 if (!xchg(&mnt->mnt_expiry_mark, 1))
1618 if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
1633 if (mnt->mnt.mnt_flags & MNT_LOCKED)
1638 if (!list_empty(&mnt->mnt_list))
1639 umount_tree(mnt, UMOUNT_PROPAGATE);
1642 shrink_submounts(mnt);
1644 if (!propagate_mount_busy(mnt, 2)) {
1645 if (!list_empty(&mnt->mnt_list))
1646 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
1669 struct mount *mnt;
1679 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
1680 if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
1681 umount_mnt(mnt);
1682 hlist_add_head(&mnt->mnt_umount, &unmounted);
1684 else umount_tree(mnt, UMOUNT_CONNECTED);
1719 struct mount *mnt = real_mount(path->mnt);
1723 if (path->dentry != path->mnt->mnt_root)
1725 if (!check_mnt(mnt))
1727 if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
1737 struct mount *mnt = real_mount(path->mnt);
1742 ret = do_umount(mnt, flags);
1746 mntput_no_expire(mnt);
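do_umount (1560 onward) is reached from userspace via umount2(2): MNT_DETACH takes the lazy branch at 1638-1639 (UMOUNT_PROPAGATE, no sync), while a plain unmount must pass the busy check before the 1646 call adds UMOUNT_SYNC. A minimal sketch (needs root; /mnt assumed mounted):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* Lazy unmount: disconnect now, tear down when the last
         * user of the mount finally drops it. */
        if (umount2("/mnt", MNT_DETACH))
                perror("umount2");
        return 0;
}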
1797 struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
1799 return &mnt->ns;
1815 struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
1820 if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
1826 res = q = clone_mnt(mnt, dentry, flag);
1830 q->mnt_mountpoint = mnt->mnt_mountpoint;
1832 p = mnt;
1833 list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
1841 if (s->mnt.mnt_flags & MNT_LOCKED) {
1851 is_mnt_ns_file(s->mnt.mnt_root)) {
1861 q = clone_mnt(p, p->mnt.mnt_root, flag);
1886 if (!check_mnt(real_mount(path->mnt)))
1889 tree = copy_tree(real_mount(path->mnt), path->dentry,
1894 return &tree->mnt;
1900 void dissolve_on_fput(struct vfsmount *mnt)
1905 ns = real_mount(mnt)->mnt_ns;
1908 umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
1918 void drop_collected_mounts(struct vfsmount *mnt)
1922 umount_tree(real_mount(mnt), 0);
1927 static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
1931 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
1935 if (child->mnt.mnt_flags & MNT_LOCKED)
1952 struct mount *old_mnt = real_mount(path->mnt);
1974 return &new_mnt->mnt;
1985 struct mount *mnt;
1989 list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
1990 res = f(&mnt->mnt, arg);
1997 static void lock_mnt_tree(struct mount *mnt)
2001 for (p = mnt; p; p = next_mnt(p, mnt)) {
2002 int flags = p->mnt.mnt_flags;
2020 p->mnt.mnt_flags = flags;
2024 static void cleanup_group_ids(struct mount *mnt, struct mount *end)
2028 for (p = mnt; p != end; p = next_mnt(p, mnt)) {
2034 static int invent_group_ids(struct mount *mnt, bool recurse)
2038 for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
2042 cleanup_group_ids(mnt, p);
2051 int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
2057 for (p = mnt; p; p = next_mnt(p, mnt))
2152 smp = get_mountpoint(source_mnt->mnt.mnt_root);
2192 q = __lookup_mnt(&child->mnt_parent->mnt,
2199 child->mnt.mnt_flags &= ~MNT_LOCKED;
2227 struct vfsmount *mnt;
2236 mnt = lookup_mnt(path);
2237 if (likely(!mnt)) {
2249 path->mnt = mnt;
2250 dentry = path->dentry = dget(mnt->mnt_root);
2266 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
2268 if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
2272 d_is_dir(mnt->mnt.mnt_root))
2275 return attach_recursive_mnt(mnt, p, mp, false);
2301 struct mount *mnt = real_mount(path->mnt);
2306 if (path->dentry != path->mnt->mnt_root)
2315 err = invent_group_ids(mnt, recurse);
2321 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
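do_change_type (2301-2321) is the kernel side of mount(2) with MS_SHARED, MS_PRIVATE, MS_SLAVE or MS_UNBINDABLE and no source; invent_group_ids at 2315 is where newly shared mounts receive their peer-group ids. From userspace (needs root; /mnt assumed mounted):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* Make /mnt a shared peer: it gets a mnt_group_id. */
        if (mount("none", "/mnt", NULL, MS_SHARED, NULL))
                perror("MS_SHARED");
        /* Demote it to a slave: receives propagation, sends none. */
        if (mount("none", "/mnt", NULL, MS_SLAVE, NULL))
                perror("MS_SLAVE");
        return 0;
}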
2332 struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
2335 return mnt;
2338 return mnt;
2341 return mnt;
2344 mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE);
2346 mnt = clone_mnt(old, old_path->dentry, 0);
2348 if (!IS_ERR(mnt))
2349 mnt->mnt.mnt_flags &= ~MNT_LOCKED;
2351 return mnt;
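__do_loopback (2332-2351) implements bind mounts: copy_tree at 2344 for the recursive case, clone_mnt at 2346 for a single mount. The userspace spellings, with placeholder paths (needs root):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* Plain bind: one mount cloned (the clone_mnt path). */
        if (mount("/srv/data", "/mnt/a", NULL, MS_BIND, NULL))
                perror("bind");
        /* Recursive bind: the whole subtree copied (copy_tree). */
        if (mount("/srv/data", "/mnt/b", NULL, MS_BIND | MS_REC, NULL))
                perror("rbind");
        return 0;
}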
2361 struct mount *mnt = NULL, *parent;
2380 parent = real_mount(path->mnt);
2384 mnt = __do_loopback(&old_path, recurse);
2385 if (IS_ERR(mnt)) {
2386 err = PTR_ERR(mnt);
2390 err = graft_tree(mnt, parent, mp);
2393 umount_tree(mnt, UMOUNT_SYNC);
2407 struct mount *mnt, *p;
2414 mnt = __do_loopback(path, recursive);
2415 if (IS_ERR(mnt)) {
2418 return ERR_CAST(mnt);
2422 for (p = mnt; p; p = next_mnt(p, mnt)) {
2426 ns->root = mnt;
2427 list_add_tail(&ns->list, &mnt->mnt_list);
2428 mntget(&mnt->mnt);
2432 mntput(path->mnt);
2433 path->mnt = &mnt->mnt;
2436 dissolve_on_fput(path->mnt);
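open_detached_copy (2407-2436) is the kernel half of open_tree(2) with OPEN_TREE_CLONE: the caller receives a detached copy of a mount tree, and closing the fd without attaching it runs dissolve_on_fput (2436). A sketch via raw syscalls, since libc wrappers may be absent; /mnt and /mnt2 are placeholders, and AT_RECURSIVE is defined by hand in case the userspace headers lack it:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mount.h>

#ifndef AT_RECURSIVE
#define AT_RECURSIVE 0x8000     /* apply to the whole subtree */
#endif

int main(void)
{
        /* Detached, recursive copy of the tree mounted at /mnt. */
        int fd = syscall(SYS_open_tree, AT_FDCWD, "/mnt",
                         OPEN_TREE_CLONE | AT_RECURSIVE);
        if (fd < 0) {
                perror("open_tree");
                return 1;
        }
        /* Attach the copy at /mnt2; closing fd instead would let
         * dissolve_on_fput() take the whole detached tree down. */
        if (syscall(SYS_move_mount, fd, "", AT_FDCWD, "/mnt2",
                    MOVE_MOUNT_F_EMPTY_PATH))
                perror("move_mount");
        close(fd);
        return 0;
}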
2499 static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags)
2501 unsigned int fl = mnt->mnt.mnt_flags;
2526 static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
2530 if (readonly_request == __mnt_is_readonly(&mnt->mnt))
2534 return mnt_make_readonly(mnt);
2536 return __mnt_unmake_readonly(mnt);
2543 static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
2546 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
2547 mnt->mnt.mnt_flags = mnt_flags;
2548 touch_mnt_namespace(mnt->mnt_ns);
2552 static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt)
2554 struct super_block *sb = mnt->mnt_sb;
2556 if (!__mnt_is_readonly(mnt) &&
2566 is_mounted(mnt) ? "remounted" : "mounted",
2581 struct super_block *sb = path->mnt->mnt_sb;
2582 struct mount *mnt = real_mount(path->mnt);
2585 if (!check_mnt(mnt))
2588 if (path->dentry != mnt->mnt.mnt_root)
2591 if (!can_change_locked_flags(mnt, mnt_flags))
2595 ret = change_mount_ro_state(mnt, mnt_flags);
2597 set_mount_attributes(mnt, mnt_flags);
2600 mnt_warn_timestamp_expiry(path, &mnt->mnt);
2614 struct super_block *sb = path->mnt->mnt_sb;
2615 struct mount *mnt = real_mount(path->mnt);
2618 if (!check_mnt(mnt))
2621 if (path->dentry != path->mnt->mnt_root)
2624 if (!can_change_locked_flags(mnt, mnt_flags))
2644 set_mount_attributes(mnt, mnt_flags);
2649 mnt_warn_timestamp_expiry(path, &mnt->mnt);
2655 static inline int tree_contains_unbindable(struct mount *mnt)
2658 for (p = mnt; p; p = next_mnt(p, mnt)) {
2678 if (mnt_ns_loop(p->mnt.mnt_root))
2701 old = real_mount(old_path->mnt);
2702 p = real_mount(new_path->mnt);
2714 if (!is_mounted(&old->mnt))
2721 if (old->mnt.mnt_flags & MNT_LOCKED)
2724 if (old_path->dentry != old_path->mnt->mnt_root)
2748 err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp,
2792 struct mount *parent = real_mount(path->mnt);
2806 if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
2807 path->mnt->mnt_root == path->dentry)
2810 if (d_is_symlink(newmnt->mnt.mnt_root))
2813 newmnt->mnt.mnt_flags = mnt_flags;
2826 struct vfsmount *mnt;
2842 mnt = vfs_create_mount(fc);
2843 if (IS_ERR(mnt))
2844 return PTR_ERR(mnt);
2846 mnt_warn_timestamp_expiry(mountpoint, mnt);
2850 mntput(mnt);
2853 error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags);
2856 mntput(mnt);
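do_new_mount (2826-2856) drives an fs_context through creation, mount and attachment; the same pipeline is exposed to userspace by the new mount API, with fsmount(2) playing the role of vfs_create_mount (2842) and move_mount(2) that of do_add_mount (2853). A sketch with raw syscalls (the tmpfs size option and paths are assumptions):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mount.h>

int main(void)
{
        int fsfd, mfd;

        fsfd = syscall(SYS_fsopen, "tmpfs", 0);         /* new fs_context */
        if (fsfd < 0) { perror("fsopen"); return 1; }
        syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "16M", 0);
        syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

        mfd = syscall(SYS_fsmount, fsfd, 0, 0);         /* ~ vfs_create_mount() */
        if (mfd < 0) { perror("fsmount"); return 1; }
        if (syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt",
                    MOVE_MOUNT_F_EMPTY_PATH))           /* ~ do_add_mount() */
                perror("move_mount");
        close(fsfd);
        close(mfd);
        return 0;
}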
2923 struct mount *mnt;
2931 mnt = real_mount(m);
2935 BUG_ON(mnt_get_count(mnt) < 2);
2937 if (m->mnt_sb == path->mnt->mnt_sb &&
2955 if (unlikely(__lookup_mnt(path->mnt, dentry))) {
2967 err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
2979 if (!list_empty(&mnt->mnt_expire)) {
2981 list_del_init(&mnt->mnt_expire);
2991 * @mnt: The mount to list.
2994 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
2998 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
3011 struct mount *mnt, *next;
3026 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
3027 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
3028 propagate_mount_busy(mnt, 1))
3030 list_move(&mnt->mnt_expire, &graveyard);
3033 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
3034 touch_mnt_namespace(mnt->mnt_ns);
3035 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
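The xchg at 3027 makes expiry a two-strike scheme: the first pass over the list only sets the mark, and a mount is reaped on a later pass only if nothing used it in between, since mntput clears the mark on use. The idiom in miniature, with illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

struct expirable { atomic_int expiry_mark; };

/* Called on every use of the object (cf. mntput clearing mnt_expiry_mark). */
static void touch(struct expirable *e)
{
        atomic_store(&e->expiry_mark, 0);
}

/* Called by each periodic reaper pass; true means "reap now". */
static bool expire_pass(struct expirable *e)
{
        /* The old value decides: 0 = first strike, 1 = still marked, reap. */
        return atomic_exchange(&e->expiry_mark, 1) == 1;
}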
3060 struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
3063 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
3068 if (!list_empty(&mnt->mnt_mounts)) {
3069 this_parent = mnt;
3073 if (!propagate_mount_busy(mnt, 1)) {
3074 list_move_tail(&mnt->mnt_expire, graveyard);
3095 static void shrink_submounts(struct mount *mnt)
3101 while (select_submounts(mnt, &graveyard)) {
3218 mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK;
3351 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
3376 if (&p->mnt == new_fs->root.mnt) {
3377 new_fs->root.mnt = mntget(&q->mnt);
3378 rootmnt = &p->mnt;
3380 if (&p->mnt == new_fs->pwd.mnt) {
3381 new_fs->pwd.mnt = mntget(&q->mnt);
3382 pwdmnt = &p->mnt;
3389 while (p->mnt.mnt_root != q->mnt.mnt_root)
3404 struct mount *mnt = real_mount(m);
3415 mnt->mnt_ns = ns;
3416 ns->root = mnt;
3418 list_add(&mnt->mnt_list, &ns->list);
3429 s = path.mnt->mnt_sb;
3431 mntput(path.mnt);
3484 struct mount *mnt;
3560 newmount.mnt = vfs_create_mount(fc);
3561 if (IS_ERR(newmount.mnt)) {
3562 ret = PTR_ERR(newmount.mnt);
3566 newmount.mnt->mnt_flags = mnt_flags;
3580 mnt = real_mount(newmount.mnt);
3581 mnt->mnt_ns = ns;
3582 ns->root = mnt;
3584 list_add(&mnt->mnt_list, &ns->list);
3585 mntget(newmount.mnt);
3592 dissolve_on_fput(newmount.mnt);
3676 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
3679 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
3680 dentry = mnt->mnt_mountpoint;
3681 mnt = mnt->mnt_parent;
3683 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
3690 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
3753 new_mnt = real_mount(new.mnt);
3754 root_mnt = real_mount(root.mnt);
3755 old_mnt = real_mount(old.mnt);
3764 if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
3773 if (root.mnt->mnt_root != root.dentry)
3777 if (new.mnt->mnt_root != new.dentry)
3790 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
3791 new_mnt->mnt.mnt_flags |= MNT_LOCKED;
3792 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
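The checks at 3753-3792 guard pivot_root(2). The usual userspace sequence, as container setups run it inside a fresh mount namespace (/newroot is a placeholder; pointing new_root and put_old at the same directory is the trick documented in the pivot_root(2) man page):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mount.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        /* new_root must itself be a mount point: bind it over itself. */
        if (mount("/newroot", "/newroot", NULL, MS_BIND | MS_REC, NULL))
                perror("bind");
        if (chdir("/newroot"))
                perror("chdir");
        /* Stack put_old on new_root ("." for both), then detach the
         * old root, which now sits underneath us on the same point. */
        if (syscall(SYS_pivot_root, ".", "."))
                perror("pivot_root");
        if (umount2(".", MNT_DETACH))
                perror("umount2");
        return 0;
}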
3822 struct vfsmount *mnt;
3827 mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
3828 if (IS_ERR(mnt))
3834 m = real_mount(mnt);
3842 root.mnt = mnt;
3843 root.dentry = mnt->mnt_root;
3844 mnt->mnt_flags |= MNT_LOCKED;
3889 drop_collected_mounts(&ns->root->mnt);
3895 struct vfsmount *mnt;
3896 mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
3897 if (!IS_ERR(mnt)) {
3899 * it is a longterm mount, don't release mnt until
3902 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
3904 return mnt;
3908 void kern_unmount(struct vfsmount *mnt)
3911 if (!IS_ERR_OR_NULL(mnt)) {
3912 real_mount(mnt)->mnt_ns = NULL;
3914 mntput(mnt);
3919 void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
3924 if (mnt[i])
3925 real_mount(mnt[i])->mnt_ns = NULL;
3928 mntput(mnt[i]);
3932 bool our_mnt(struct vfsmount *mnt)
3934 return check_mnt(real_mount(mnt));
3945 ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
3946 ns_root.dentry = ns_root.mnt->mnt_root;
3966 struct mount *mnt;
3971 list_for_each_entry(mnt, &ns->list, mnt_list) {
3975 if (mnt_is_cursor(mnt))
3978 if (mnt->mnt.mnt_sb->s_type != sb->s_type)
3984 if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
3988 mnt_flags = mnt->mnt.mnt_flags;
3991 if (sb_rdonly(mnt->mnt.mnt_sb))
4008 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
4011 if (!(child->mnt.mnt_flags & MNT_LOCKED))
4053 bool mnt_may_suid(struct vfsmount *mnt)
4062 return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
4063 current_in_userns(mnt->mnt_sb->s_user_ns);
4112 err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
4137 .name = "mnt",