Lines Matching refs:nd

555 static bool nd_alloc_stack(struct nameidata *nd)
560 nd->flags & LOOKUP_RCU ? GFP_ATOMIC : GFP_KERNEL);
563 memcpy(p, nd->internal, sizeof(nd->internal));
564 nd->stack = p;
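
The nd_alloc_stack() lines above show the nameidata switching from its small embedded array (nd->internal) to a heap allocation, copying the embedded entries across and using GFP_ATOMIC while still in RCU mode. A minimal user-space sketch of that embedded-buffer-then-heap pattern (struct path_stack, grow_stack and the capacity are invented for illustration, not kernel API):

#include <stdlib.h>
#include <string.h>

#define EMBEDDED_LEVELS 2        /* small array kept inside the struct */

struct entry { const char *name; };

struct path_stack {
	struct entry internal[EMBEDDED_LEVELS];  /* embedded storage */
	struct entry *stack;                     /* internal[] or a heap copy */
	int depth;
};

static void stack_init(struct path_stack *s)
{
	s->stack = s->internal;
	s->depth = 0;
}

/* Switch to a larger heap buffer while preserving the embedded entries,
 * the way nd_alloc_stack() memcpy()s nd->internal into the new stack. */
static int grow_stack(struct path_stack *s, size_t new_cap)
{
	struct entry *p = malloc(new_cap * sizeof(*p));
	if (!p)
		return -1;
	memcpy(p, s->internal, sizeof(s->internal));
	s->stack = p;
	return 0;
}
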
585 static void drop_links(struct nameidata *nd)
587 int i = nd->depth;
589 struct saved *last = nd->stack + i;
595 static void terminate_walk(struct nameidata *nd)
597 drop_links(nd);
598 if (!(nd->flags & LOOKUP_RCU)) {
600 path_put(&nd->path);
601 for (i = 0; i < nd->depth; i++)
602 path_put(&nd->stack[i].link);
603 if (nd->state & ND_ROOT_GRABBED) {
604 path_put(&nd->root);
605 nd->state &= ~ND_ROOT_GRABBED;
608 nd->flags &= ~LOOKUP_RCU;
611 nd->depth = 0;
612 nd->path.mnt = NULL;
613 nd->path.dentry = NULL;
633 static inline bool legitimize_path(struct nameidata *nd,
636 return __legitimize_path(path, seq, nd->m_seq);
639 static bool legitimize_links(struct nameidata *nd)
642 if (unlikely(nd->flags & LOOKUP_CACHED)) {
643 drop_links(nd);
644 nd->depth = 0;
647 for (i = 0; i < nd->depth; i++) {
648 struct saved *last = nd->stack + i;
649 if (unlikely(!legitimize_path(nd, &last->link, last->seq))) {
650 drop_links(nd);
651 nd->depth = i + 1;
658 static bool legitimize_root(struct nameidata *nd)
661 * For scoped-lookups (where nd->root has been zeroed), we need to
663 * for these lookups (nd->dfd is the root, not the filesystem root).
665 if (!nd->root.mnt && (nd->flags & LOOKUP_IS_SCOPED))
667 /* Nothing to do if nd->root is zero or is managed by the VFS user. */
668 if (!nd->root.mnt || (nd->state & ND_ROOT_PRESET))
670 nd->state |= ND_ROOT_GRABBED;
671 return legitimize_path(nd, &nd->root, nd->root_seq);
687 * @nd: nameidata pathwalk data
690 * try_to_unlazy attempts to legitimize the current nd->path and nd->root
696 static bool try_to_unlazy(struct nameidata *nd)
698 struct dentry *parent = nd->path.dentry;
700 BUG_ON(!(nd->flags & LOOKUP_RCU));
702 nd->flags &= ~LOOKUP_RCU;
703 if (unlikely(!legitimize_links(nd)))
705 if (unlikely(!legitimize_path(nd, &nd->path, nd->seq)))
707 if (unlikely(!legitimize_root(nd)))
710 BUG_ON(nd->inode != parent->d_inode);
714 nd->path.mnt = NULL;
715 nd->path.dentry = NULL;
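
The try_to_unlazy() fragments above are the RCU-walk to ref-walk transition: everything observed locklessly (links, nd->path, nd->root) must be legitimized, i.e. turned into real references while verifying that the seqcounts under which it was observed have not moved; otherwise the walk drops out and restarts. A rough user-space model of that validate-then-keep-the-reference idea (this is not the kernel's lockref/d_seq machinery; the struct and function names are invented):

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_uint seq;        /* bumped by writers */
	atomic_int  refcount;
};

/* Take a reference, then confirm the object did not change since the
 * lockless phase observed sequence 'seen'; on mismatch undo and fail,
 * so the caller can restart in the slow, reference-counted mode. */
static bool try_to_legitimize(struct obj *o, unsigned seen)
{
	atomic_fetch_add_explicit(&o->refcount, 1, memory_order_acquire);
	if (atomic_load_explicit(&o->seq, memory_order_acquire) != seen) {
		atomic_fetch_sub_explicit(&o->refcount, 1, memory_order_release);
		return false;
	}
	return true;
}
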
723 * @nd: nameidata pathwalk data
730 * nd->path and nd->root for ref-walk mode. Must be called from rcu-walk context.
734 static bool try_to_unlazy_next(struct nameidata *nd, struct dentry *dentry, unsigned seq)
736 BUG_ON(!(nd->flags & LOOKUP_RCU));
738 nd->flags &= ~LOOKUP_RCU;
739 if (unlikely(!legitimize_links(nd)))
741 if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq)))
743 if (unlikely(!lockref_get_not_dead(&nd->path.dentry->d_lockref)))
761 if (unlikely(!legitimize_root(nd)))
767 nd->path.mnt = NULL;
769 nd->path.dentry = NULL;
789 * @nd: pointer to nameidata
791 * If we had been in RCU mode, drop out of it and legitimize nd->path.
795 * need to drop nd->path.
797 static int complete_walk(struct nameidata *nd)
799 struct dentry *dentry = nd->path.dentry;
802 if (nd->flags & LOOKUP_RCU) {
804 * We don't want to zero nd->root for scoped-lookups or
805 * externally-managed nd->root.
807 if (!(nd->state & ND_ROOT_PRESET))
808 if (!(nd->flags & LOOKUP_IS_SCOPED))
809 nd->root.mnt = NULL;
810 nd->flags &= ~LOOKUP_CACHED;
811 if (!try_to_unlazy(nd))
815 if (unlikely(nd->flags & LOOKUP_IS_SCOPED)) {
832 if (!path_is_under(&nd->path, &nd->root))
836 if (likely(!(nd->state & ND_JUMPED)))
842 status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
852 static int set_root(struct nameidata *nd)
861 if (WARN_ON(nd->flags & LOOKUP_IS_SCOPED))
864 if (nd->flags & LOOKUP_RCU) {
869 nd->root = fs->root;
870 nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
873 get_fs_root(fs, &nd->root);
874 nd->state |= ND_ROOT_GRABBED;
879 static int nd_jump_root(struct nameidata *nd)
881 if (unlikely(nd->flags & LOOKUP_BENEATH))
883 if (unlikely(nd->flags & LOOKUP_NO_XDEV)) {
885 if (nd->path.mnt != NULL && nd->path.mnt != nd->root.mnt)
888 if (!nd->root.mnt) {
889 int error = set_root(nd);
893 if (nd->flags & LOOKUP_RCU) {
895 nd->path = nd->root;
896 d = nd->path.dentry;
897 nd->inode = d->d_inode;
898 nd->seq = nd->root_seq;
899 if (unlikely(read_seqcount_retry(&d->d_seq, nd->seq)))
902 path_put(&nd->path);
903 nd->path = nd->root;
904 path_get(&nd->path);
905 nd->inode = nd->path.dentry->d_inode;
907 nd->state |= ND_JUMPED;
918 struct nameidata *nd = current->nameidata;
920 if (unlikely(nd->flags & LOOKUP_NO_MAGICLINKS))
924 if (unlikely(nd->flags & LOOKUP_NO_XDEV)) {
925 if (nd->path.mnt != path->mnt)
929 if (unlikely(nd->flags & LOOKUP_IS_SCOPED))
932 path_put(&nd->path);
933 nd->path = *path;
934 nd->inode = nd->path.dentry->d_inode;
935 nd->state |= ND_JUMPED;
943 static inline void put_link(struct nameidata *nd)
945 struct saved *last = nd->stack + --nd->depth;
947 if (!(nd->flags & LOOKUP_RCU))
958 * @nd: nameidata pathwalk data
971 static inline int may_follow_link(struct nameidata *nd, const struct inode *inode)
981 if ((nd->dir_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
985 if (uid_valid(nd->dir_uid) && uid_eq(nd->dir_uid, inode->i_uid))
988 if (nd->flags & LOOKUP_RCU)
991 audit_inode(nd->name, nd->stack[0].link.dentry, 0);
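
The may_follow_link() lines above carry the protected-symlinks policy: a link found in a sticky, world-writable directory is only followed when the directory owner matches the link owner (the full kernel check also accepts a match with the caller's fsuid and is skipped when the sysctl is off). A self-contained restatement of the predicate visible in those lines:

#include <stdbool.h>
#include <sys/stat.h>
#include <sys/types.h>

static bool may_follow(mode_t dir_mode, uid_t dir_uid, uid_t link_uid)
{
	/* not a sticky, world-writable directory: always allowed */
	if ((dir_mode & (S_ISVTX | S_IWOTH)) != (S_ISVTX | S_IWOTH))
		return true;
	/* link owned by the directory owner: allowed */
	if (dir_uid == link_uid)
		return true;
	/* otherwise refuse to follow the link */
	return false;
}
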
1328 static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
1337 if (unlikely(nd->flags & LOOKUP_NO_XDEV))
1357 nd->state |= ND_JUMPED;
1367 if (read_seqretry(&mount_lock, nd->m_seq))
1371 if (read_seqretry(&mount_lock, nd->m_seq))
1378 static inline int handle_mounts(struct nameidata *nd, struct dentry *dentry,
1385 path->mnt = nd->path.mnt;
1387 if (nd->flags & LOOKUP_RCU) {
1391 if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
1393 if (!try_to_unlazy_next(nd, dentry, seq))
1396 path->mnt = nd->path.mnt;
1399 ret = traverse_mounts(path, &jumped, &nd->total_link_count, nd->flags);
1401 if (unlikely(nd->flags & LOOKUP_NO_XDEV))
1404 nd->state |= ND_JUMPED;
1408 if (path->mnt != nd->path.mnt)
1471 static struct dentry *lookup_fast(struct nameidata *nd,
1475 struct dentry *dentry, *parent = nd->path.dentry;
1483 if (nd->flags & LOOKUP_RCU) {
1485 dentry = __d_lookup_rcu(parent, &nd->last, &seq);
1487 if (!try_to_unlazy(nd))
1507 if (unlikely(__read_seqcount_retry(&parent->d_seq, nd->seq)))
1511 status = d_revalidate(dentry, nd->flags);
1514 if (!try_to_unlazy_next(nd, dentry, seq))
1518 status = d_revalidate(dentry, nd->flags);
1520 dentry = __d_lookup(parent, &nd->last);
1523 status = d_revalidate(dentry, nd->flags);
1584 static inline int may_lookup(struct nameidata *nd)
1586 if (nd->flags & LOOKUP_RCU) {
1587 int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
1588 if (err != -ECHILD || !try_to_unlazy(nd))
1591 return inode_permission(nd->inode, MAY_EXEC);
1594 static int reserve_stack(struct nameidata *nd, struct path *link, unsigned seq)
1596 if (unlikely(nd->total_link_count++ >= MAXSYMLINKS))
1599 if (likely(nd->depth != EMBEDDED_LEVELS))
1601 if (likely(nd->stack != nd->internal))
1603 if (likely(nd_alloc_stack(nd)))
1606 if (nd->flags & LOOKUP_RCU) {
1609 bool grabbed_link = legitimize_path(nd, link, seq);
1611 if (!try_to_unlazy(nd) || !grabbed_link)
1614 if (nd_alloc_stack(nd))
1622 static const char *pick_link(struct nameidata *nd, struct path *link,
1627 int error = reserve_stack(nd, link, seq);
1630 if (!(nd->flags & LOOKUP_RCU))
1634 last = nd->stack + nd->depth++;
1640 error = may_follow_link(nd, inode);
1645 if (unlikely(nd->flags & LOOKUP_NO_SYMLINKS) ||
1649 if (!(nd->flags & LOOKUP_RCU)) {
1653 if (!try_to_unlazy(nd))
1659 nd->flags & LOOKUP_RCU);
1668 if (nd->flags & LOOKUP_RCU) {
1670 if (res == ERR_PTR(-ECHILD) && try_to_unlazy(nd))
1681 error = nd_jump_root(nd);
1690 put_link(nd);
1700 static const char *step_into(struct nameidata *nd, int flags,
1704 int err = handle_mounts(nd, dentry, &path, &inode, &seq);
1709 ((flags & WALK_TRAILING) && !(nd->flags & LOOKUP_FOLLOW)) ||
1712 if (!(nd->flags & LOOKUP_RCU)) {
1713 dput(nd->path.dentry);
1714 if (nd->path.mnt != path.mnt)
1715 mntput(nd->path.mnt);
1717 nd->path = path;
1718 nd->inode = inode;
1719 nd->seq = seq;
1722 if (nd->flags & LOOKUP_RCU) {
1727 if (path.mnt == nd->path.mnt)
1730 return pick_link(nd, &path, inode, seq, flags);
1733 static struct dentry *follow_dotdot_rcu(struct nameidata *nd,
1739 if (path_equal(&nd->path, &nd->root))
1741 if (unlikely(nd->path.dentry == nd->path.mnt->mnt_root)) {
1744 if (!choose_mountpoint_rcu(real_mount(nd->path.mnt),
1745 &nd->root, &path, &seq))
1747 if (unlikely(nd->flags & LOOKUP_NO_XDEV))
1749 nd->path = path;
1750 nd->inode = path.dentry->d_inode;
1751 nd->seq = seq;
1752 if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
1756 old = nd->path.dentry;
1760 if (unlikely(read_seqcount_retry(&old->d_seq, nd->seq)))
1762 if (unlikely(!path_connected(nd->path.mnt, parent)))
1766 if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
1768 if (unlikely(nd->flags & LOOKUP_BENEATH))
1773 static struct dentry *follow_dotdot(struct nameidata *nd,
1779 if (path_equal(&nd->path, &nd->root))
1781 if (unlikely(nd->path.dentry == nd->path.mnt->mnt_root)) {
1784 if (!choose_mountpoint(real_mount(nd->path.mnt),
1785 &nd->root, &path))
1787 path_put(&nd->path);
1788 nd->path = path;
1789 nd->inode = path.dentry->d_inode;
1790 if (unlikely(nd->flags & LOOKUP_NO_XDEV))
1794 parent = dget_parent(nd->path.dentry);
1795 if (unlikely(!path_connected(nd->path.mnt, parent))) {
1804 if (unlikely(nd->flags & LOOKUP_BENEATH))
1806 dget(nd->path.dentry);
1810 static const char *handle_dots(struct nameidata *nd, int type)
1818 if (!nd->root.mnt) {
1819 error = ERR_PTR(set_root(nd));
1823 if (nd->flags & LOOKUP_RCU)
1824 parent = follow_dotdot_rcu(nd, &inode, &seq);
1826 parent = follow_dotdot(nd, &inode, &seq);
1830 error = step_into(nd, WALK_NOFOLLOW,
1831 nd->path.dentry, nd->inode, nd->seq);
1833 error = step_into(nd, WALK_NOFOLLOW,
1838 if (unlikely(nd->flags & LOOKUP_IS_SCOPED)) {
1842 * above nd->root (and so userspace should retry or use
1846 if (unlikely(__read_seqcount_retry(&mount_lock.seqcount, nd->m_seq)))
1848 if (unlikely(__read_seqcount_retry(&rename_lock.seqcount, nd->r_seq)))
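
follow_dotdot{,_rcu}() above encode the two special rules for "..": the walk never climbs above nd->root, and when the current position is a mount root it first crosses to the mountpoint in the parent mount before taking the parent directory. A simplified user-space model of that rule, with invented types and with the LOOKUP_NO_XDEV/LOOKUP_BENEATH checks, locking and seqcount retries all omitted:

struct node { struct node *parent; };

struct mnt {
	struct node *mnt_root;      /* root dentry of this mount */
	struct node *mountpoint;    /* where it is attached in the parent mount */
	struct mnt  *parent_mnt;
};

/* Resolve ".." from (cur, *mntp) without ever leaving 'root'. */
static struct node *dotdot(struct node *cur, struct mnt **mntp, struct node *root)
{
	if (cur == root)
		return cur;                      /* ".." at the root stays put */
	while (cur == (*mntp)->mnt_root && (*mntp)->parent_mnt) {
		cur   = (*mntp)->mountpoint;     /* step onto the mountpoint ... */
		*mntp = (*mntp)->parent_mnt;     /* ... in the parent mount */
	}
	return cur->parent ? cur->parent : cur;
}
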
1855 static const char *walk_component(struct nameidata *nd, int flags)
1865 if (unlikely(nd->last_type != LAST_NORM)) {
1866 if (!(flags & WALK_MORE) && nd->depth)
1867 put_link(nd);
1868 return handle_dots(nd, nd->last_type);
1870 dentry = lookup_fast(nd, &inode, &seq);
1874 dentry = lookup_slow(&nd->last, nd->path.dentry, nd->flags);
1878 if (!(flags & WALK_MORE) && nd->depth)
1879 put_link(nd);
1880 return step_into(nd, flags, dentry, inode, seq);
2116 * Returns 0 and nd will have valid dentry and mnt on success.
2119 static int link_path_walk(const char *name, struct nameidata *nd)
2121 int depth = 0; // depth <= nd->depth
2124 nd->last_type = LAST_ROOT;
2125 nd->flags |= LOOKUP_PARENT;
2139 err = may_lookup(nd);
2143 hash_len = hash_name(nd->path.dentry, name);
2150 nd->state |= ND_JUMPED;
2157 struct dentry *parent = nd->path.dentry;
2158 nd->state &= ~ND_JUMPED;
2169 nd->last.hash_len = hash_len;
2170 nd->last.name = name;
2171 nd->last_type = type;
2187 nd->dir_uid = nd->inode->i_uid;
2188 nd->dir_mode = nd->inode->i_mode;
2189 nd->flags &= ~LOOKUP_PARENT;
2193 name = nd->stack[--depth].name;
2194 link = walk_component(nd, 0);
2197 link = walk_component(nd, WALK_MORE);
2203 nd->stack[depth++].name = name;
2207 if (unlikely(!d_can_lookup(nd->path.dentry))) {
2208 if (nd->flags & LOOKUP_RCU) {
2209 if (!try_to_unlazy(nd))
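
link_path_walk() above consumes the pathname one component at a time (hash_name(), nd->last, walk_component()), pushing the unfinished remainder of a name onto nd->stack[] when a symlink is met and resuming it afterwards; the local depth counter mirrors nd->depth. The component splitting itself, ignoring symlinks and "."/".." handling, amounts to something like this plain user-space sketch (names invented):

#include <stdio.h>
#include <string.h>

/* Print each component of 'name', skipping duplicate and trailing slashes,
 * the way link_path_walk() hands one component at a time to walk_component(). */
static void walk_components(const char *name)
{
	while (*name == '/')
		name++;                          /* skip leading slashes */
	while (*name) {
		size_t len = strcspn(name, "/");
		printf("component: %.*s\n", (int)len, name);
		name += len;
		while (*name == '/')
			name++;                  /* collapse "//" and drop a trailing "/" */
	}
}

int main(void)
{
	walk_components("/usr//local/bin/");
	return 0;
}
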
2218 static const char *path_init(struct nameidata *nd, unsigned flags)
2221 const char *s = nd->name->name;
2232 nd->flags = flags;
2233 nd->state |= ND_JUMPED;
2234 nd->depth = 0;
2236 nd->m_seq = __read_seqcount_begin(&mount_lock.seqcount);
2237 nd->r_seq = __read_seqcount_begin(&rename_lock.seqcount);
2240 if (nd->state & ND_ROOT_PRESET) {
2241 struct dentry *root = nd->root.dentry;
2245 nd->path = nd->root;
2246 nd->inode = inode;
2248 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
2249 nd->root_seq = nd->seq;
2251 path_get(&nd->path);
2256 nd->root.mnt = NULL;
2258 /* Absolute pathname -- fetch the root (LOOKUP_IN_ROOT uses nd->dfd). */
2260 error = nd_jump_root(nd);
2267 if (nd->dfd == AT_FDCWD) {
2274 nd->path = fs->pwd;
2275 nd->inode = nd->path.dentry->d_inode;
2276 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
2279 get_fs_pwd(current->fs, &nd->path);
2280 nd->inode = nd->path.dentry->d_inode;
2284 struct fd f = fdget_raw(nd->dfd);
2297 nd->path = f.file->f_path;
2299 nd->inode = nd->path.dentry->d_inode;
2300 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
2302 path_get(&nd->path);
2303 nd->inode = nd->path.dentry->d_inode;
2310 nd->root = nd->path;
2312 nd->root_seq = nd->seq;
2314 path_get(&nd->root);
2315 nd->state |= ND_ROOT_GRABBED;
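
path_init() above picks the walk's starting point in a fixed order: a caller-preset root (ND_ROOT_PRESET), the root for an absolute name (via nd_jump_root(); under LOOKUP_IN_ROOT the dfd directory serves as the root instead), the current working directory for AT_FDCWD, or the directory behind the passed-in descriptor. A compressed sketch of just that decision, leaving out the RCU/seqcount setup and the LOOKUP_IN_ROOT and scoped-lookup cases (names other than AT_FDCWD are invented):

#include <fcntl.h>      /* AT_FDCWD */

enum start_point { START_PRESET_ROOT, START_FS_ROOT, START_CWD, START_DFD };

static enum start_point choose_start(const char *name, int dfd, int root_preset)
{
	if (root_preset)            /* ND_ROOT_PRESET: caller supplied nd->root */
		return START_PRESET_ROOT;
	if (name[0] == '/')         /* absolute path: start at the root */
		return START_FS_ROOT;
	if (dfd == AT_FDCWD)        /* relative to the current working directory */
		return START_CWD;
	return START_DFD;           /* relative to the directory open at dfd */
}
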
2321 static inline const char *lookup_last(struct nameidata *nd)
2323 if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
2324 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
2326 return walk_component(nd, WALK_TRAILING);
2329 static int handle_lookup_down(struct nameidata *nd)
2331 if (!(nd->flags & LOOKUP_RCU))
2332 dget(nd->path.dentry);
2333 return PTR_ERR(step_into(nd, WALK_NOFOLLOW,
2334 nd->path.dentry, nd->inode, nd->seq));
2337 /* Returns 0 and nd will be valid on success; returns an error otherwise. */
2338 static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path)
2340 const char *s = path_init(nd, flags);
2344 err = handle_lookup_down(nd);
2349 while (!(err = link_path_walk(s, nd)) &&
2350 (s = lookup_last(nd)) != NULL)
2352 if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) {
2353 err = handle_lookup_down(nd);
2354 nd->state &= ~ND_JUMPED; // no d_weak_revalidate(), please...
2357 err = complete_walk(nd);
2359 if (!err && nd->flags & LOOKUP_DIRECTORY)
2360 if (!d_can_lookup(nd->path.dentry))
2363 *path = nd->path;
2364 nd->path.mnt = NULL;
2365 nd->path.dentry = NULL;
2367 terminate_walk(nd);
2375 struct nameidata nd;
2378 set_nameidata(&nd, dfd, name);
2380 nd.root = *root;
2381 nd.state = ND_ROOT_PRESET;
2383 retval = path_lookupat(&nd, flags | LOOKUP_RCU, path);
2385 retval = path_lookupat(&nd, flags, path);
2387 retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path);
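
filename_lookup() above (and the path_openat() callers further down) run the same three-step ladder: attempt the walk in RCU mode first, repeat it as a normal ref-walk if that fails with -ECHILD, and repeat once more with LOOKUP_REVAL if the filesystem reports -ESTALE. A self-contained model of that fallback ladder (the flag values and do_walk() are invented stand-ins for path_lookupat()):

#include <errno.h>
#include <stdio.h>

#define MY_LOOKUP_RCU   0x1
#define MY_LOOKUP_REVAL 0x2

/* Stand-in for path_lookupat(): pretend the lockless attempt always
 * loses a seqcount race and has to be redone with references. */
static int do_walk(unsigned flags)
{
	return (flags & MY_LOOKUP_RCU) ? -ECHILD : 0;
}

static int walk_with_fallback(unsigned flags)
{
	int err = do_walk(flags | MY_LOOKUP_RCU);
	if (err == -ECHILD)
		err = do_walk(flags);
	if (err == -ESTALE)
		err = do_walk(flags | MY_LOOKUP_REVAL);
	return err;
}

int main(void)
{
	printf("lookup result: %d\n", walk_with_fallback(0));
	return 0;
}
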
2397 /* Returns 0 and nd will be valid on success; returns an error otherwise. */
2398 static int path_parentat(struct nameidata *nd, unsigned flags,
2401 const char *s = path_init(nd, flags);
2402 int err = link_path_walk(s, nd);
2404 err = complete_walk(nd);
2406 *parent = nd->path;
2407 nd->path.mnt = NULL;
2408 nd->path.dentry = NULL;
2410 terminate_walk(nd);
2419 struct nameidata nd;
2423 set_nameidata(&nd, dfd, name);
2424 retval = path_parentat(&nd, flags | LOOKUP_RCU, parent);
2426 retval = path_parentat(&nd, flags, parent);
2428 retval = path_parentat(&nd, flags | LOOKUP_REVAL, parent);
2430 *last = nd.last;
2431 *type = nd.last_type;
3024 static struct dentry *atomic_open(struct nameidata *nd, struct dentry *dentry,
3029 struct inode *dir = nd->path.dentry->d_inode;
3032 if (nd->flags & LOOKUP_DIRECTORY)
3036 file->f_path.mnt = nd->path.mnt;
3079 static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
3083 struct dentry *dir = nd->path.dentry;
3095 dentry = d_lookup(dir, &nd->last);
3098 dentry = d_alloc_parallel(dir, &nd->last, &wq);
3105 error = d_revalidate(dentry, nd->flags);
3135 create_error = may_o_create(&nd->path, dentry, mode);
3142 dentry = atomic_open(nd, dentry, file, open_flag, mode);
3150 nd->flags);
3186 static const char *open_last_lookups(struct nameidata *nd,
3189 struct dentry *dir = nd->path.dentry;
3197 nd->flags |= op->intent;
3199 if (nd->last_type != LAST_NORM) {
3200 if (nd->depth)
3201 put_link(nd);
3202 return handle_dots(nd, nd->last_type);
3206 if (nd->last.name[nd->last.len])
3207 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
3209 dentry = lookup_fast(nd, &inode, &seq);
3215 BUG_ON(nd->flags & LOOKUP_RCU);
3218 if (nd->flags & LOOKUP_RCU) {
3219 if (!try_to_unlazy(nd))
3222 audit_inode(nd->name, dir, AUDIT_INODE_PARENT);
3224 if (unlikely(nd->last.name[nd->last.len]))
3229 got_write = !mnt_want_write(nd->path.mnt);
3240 dentry = lookup_open(nd, file, op, got_write);
3249 mnt_drop_write(nd->path.mnt);
3255 dput(nd->path.dentry);
3256 nd->path.dentry = dentry;
3261 if (nd->depth)
3262 put_link(nd);
3263 res = step_into(nd, WALK_TRAILING, dentry, inode, seq);
3265 nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
3272 static int do_open(struct nameidata *nd,
3281 error = complete_walk(nd);
3286 audit_inode(nd->name, nd->path.dentry, 0);
3290 if (d_is_dir(nd->path.dentry))
3292 error = may_create_in_sticky(nd->dir_mode, nd->dir_uid,
3293 d_backing_inode(nd->path.dentry));
3297 if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
3306 } else if (d_is_reg(nd->path.dentry) && open_flag & O_TRUNC) {
3307 error = mnt_want_write(nd->path.mnt);
3312 error = may_open(&nd->path, acc_mode, open_flag);
3314 error = vfs_open(&nd->path, file);
3324 mnt_drop_write(nd->path.mnt);
3368 static int do_tmpfile(struct nameidata *nd, unsigned flags,
3374 int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path);
3386 audit_inode(nd->name, child, 0);
3400 static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file)
3403 int error = path_lookupat(nd, flags, &path);
3405 audit_inode(nd->name, path.dentry, 0);
3412 static struct file *path_openat(struct nameidata *nd,
3423 error = do_tmpfile(nd, flags, op, file);
3425 error = do_o_path(nd, flags, file);
3427 const char *s = path_init(nd, flags);
3428 while (!(error = link_path_walk(s, nd)) &&
3429 (s = open_last_lookups(nd, file, op)) != NULL)
3432 error = do_open(nd, file, op);
3433 terminate_walk(nd);
3454 struct nameidata nd;
3458 set_nameidata(&nd, dfd, pathname);
3459 filp = path_openat(&nd, op, flags | LOOKUP_RCU);
3461 filp = path_openat(&nd, op, flags);
3463 filp = path_openat(&nd, op, flags | LOOKUP_REVAL);
3471 struct nameidata nd;
3483 set_nameidata(&nd, -1, filename);
3484 nd.root = *root;
3485 nd.state = ND_ROOT_PRESET;
3486 file = path_openat(&nd, op, flags | LOOKUP_RCU);
3488 file = path_openat(&nd, op, flags);
3490 file = path_openat(&nd, op, flags | LOOKUP_REVAL);