Lines matching refs:kn (each entry: source line number, then the matching line)

43 static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
45 int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);
50 static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn)
54 lock = kernfs_open_file_mutex_ptr(kn);
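
The matches at lines 43-54 show that the open-file paths do not share one global mutex: the kernfs_node pointer is hashed with hash_ptr() into a fixed table of mutexes, so two unrelated nodes usually lock different buckets. A minimal sketch of that pattern, with an assumed table name and an illustrative NR_KERNFS_LOCK_BITS value (neither appears in this listing):

#include <linux/hash.h>
#include <linux/mutex.h>

#define DEMO_LOCK_BITS  5                       /* illustrative; the real NR_KERNFS_LOCK_BITS is not shown */
#define DEMO_NR_LOCKS   (1 << DEMO_LOCK_BITS)

/* Hypothetical lock table; each entry must be mutex_init()'d at startup. */
static struct mutex demo_open_file_mutex[DEMO_NR_LOCKS];

static inline struct mutex *demo_open_file_mutex_ptr(const void *kn)
{
        int idx = hash_ptr(kn, DEMO_LOCK_BITS); /* same node -> same bucket, every time */

        return &demo_open_file_mutex[idx];
}

static inline struct mutex *demo_open_file_mutex_lock(const void *kn)
{
        struct mutex *lock = demo_open_file_mutex_ptr(kn);

        mutex_lock(lock);
        return lock;                            /* caller unlocks the mutex it was handed */
}

Returning the mutex from the _lock() helper spares callers from re-hashing the pointer just to unlock.
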
69 return rcu_dereference_protected(of->kn->attr.open,
74 * kernfs_deref_open_node_locked - Get kernfs_open_node corresponding to @kn
76 * @kn: target kernfs_node.
78 * Fetch and return ->attr.open of @kn when the caller holds the mutex
79 * returned by kernfs_open_file_mutex_ptr(kn).
81 * Update of ->attr.open happens under kernfs_open_file_mutex_ptr(kn). So when
88 * Return: @kn->attr.open when kernfs_open_file_mutex is held.
91 kernfs_deref_open_node_locked(struct kernfs_node *kn)
93 return rcu_dereference_protected(kn->attr.open,
94 lockdep_is_held(kernfs_open_file_mutex_ptr(kn)));
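
Lines 69-94 show the two disciplines for reading ->attr.open: rcu_dereference_protected() with a lockdep_is_held() condition when the caller already holds the serializing lock (of->mutex at line 69, the hashed mutex at lines 93-94), and plain rcu_dereference() under rcu_read_lock() elsewhere (lines 825 and 986 below). A sketch of both accessors over illustrative demo_ types, since the open-node structure itself is not part of this listing:

#include <linux/rcupdate.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

struct demo_open_node;                          /* opaque here; its layout is not in the listing */

struct demo_node {
        struct demo_open_node __rcu *open;      /* updated only with the serializing mutex held */
};

/* Locked readers and writers: no rcu_read_lock() needed, and lockdep can
 * verify at runtime that the claimed mutex really is held. */
static struct demo_open_node *demo_deref_open_locked(struct demo_node *dn,
                                                     struct mutex *lock)
{
        return rcu_dereference_protected(dn->open, lockdep_is_held(lock));
}

/* Lockless readers: must run inside rcu_read_lock()/rcu_read_unlock() and
 * must cope with the pointer having just been switched to NULL. */
static struct demo_open_node *demo_deref_open_rcu(struct demo_node *dn)
{
        return rcu_dereference(dn->open);
}
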
106 static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
108 if (kn->flags & KERNFS_LOCKDEP)
109 lockdep_assert_held(kn);
110 return kn->attr.ops;
138 const struct kernfs_ops *ops = kernfs_ops(of->kn);
142 kernfs_put_active(of->kn);
155 if (!kernfs_get_active(of->kn))
158 ops = kernfs_ops(of->kn);
172 const struct kernfs_ops *ops = kernfs_ops(of->kn);
205 return of->kn->attr.ops->seq_show(sf, v);
241 if (!kernfs_get_active(of->kn)) {
249 ops = kernfs_ops(of->kn);
255 kernfs_put_active(of->kn);
278 if (kernfs_of(iocb->ki_filp)->kn->flags & KERNFS_HAS_SEQ_SHOW)
326 if (!kernfs_get_active(of->kn)) {
332 ops = kernfs_ops(of->kn);
338 kernfs_put_active(of->kn);
360 if (!kernfs_get_active(of->kn))
366 kernfs_put_active(of->kn);
378 if (!kernfs_get_active(of->kn))
385 kernfs_put_active(of->kn);
398 if (!kernfs_get_active(of->kn))
407 kernfs_put_active(of->kn);
421 if (!kernfs_get_active(of->kn))
428 kernfs_put_active(of->kn);
443 if (!kernfs_get_active(of->kn))
450 kernfs_put_active(of->kn);
464 if (!kernfs_get_active(of->kn))
471 kernfs_put_active(of->kn);
501 if (!(of->kn->flags & KERNFS_HAS_MMAP))
507 if (!kernfs_get_active(of->kn))
510 ops = kernfs_ops(of->kn);
540 kernfs_put_active(of->kn);
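
Every handler between lines 241 and 540 follows the same bracket: pin the node with kernfs_get_active(of->kn), only then fetch kn->attr.ops through kernfs_ops() and call into it, and drop the pin with kernfs_put_active() on every exit path. A hedged sketch of that shape for a single read-style operation, assuming the helpers visible in this listing are in scope (they are local to the file being indexed) and using -ENODEV as an illustrative failure code:

#include <linux/kernfs.h>

static ssize_t demo_file_op(struct kernfs_open_file *of, char *buf, size_t len)
{
        const struct kernfs_ops *ops;
        ssize_t ret;

        /* Fails once the node is being deactivated/removed, so the handler
         * never runs against a half-torn-down node. */
        if (!kernfs_get_active(of->kn))
                return -ENODEV;                 /* illustrative error code */

        /* kn->attr.ops may only be dereferenced while the active ref is
         * held (see the comment fragment at line 1065). */
        ops = kernfs_ops(of->kn);
        if (ops->read)
                ret = ops->read(of, buf, len, 0);
        else
                ret = -EINVAL;

        kernfs_put_active(of->kn);              /* dropped on every path */
        return ret;
}

The pairs at lines 360-471 appear to be this same bracket wrapped around the mmap-related callbacks.
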
549 * @kn: target kernfs_node
552 * If @kn->attr.open exists, increment its reference count; otherwise,
561 static int kernfs_get_open_node(struct kernfs_node *kn,
567 mutex = kernfs_open_file_mutex_lock(kn);
568 on = kernfs_deref_open_node_locked(kn);
580 rcu_assign_pointer(kn->attr.open, on);
584 if (kn->flags & KERNFS_HAS_RELEASE)
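
Lines 549-584 show the attach side: with the hashed mutex held, either reuse the existing @kn->attr.open or allocate a fresh open node and publish it with rcu_assign_pointer(); for KERNFS_HAS_RELEASE nodes the open file is also linked into the node's file list (line 584). A self-contained reconstruction over illustrative demo_ types; the field names (poll, files, rcu_head, list) are assumptions, and the reference counting mentioned in the comment at line 552 is omitted for brevity:

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/lockdep.h>
#include <linux/wait.h>
#include <linux/list.h>

struct demo_open_node {
        wait_queue_head_t       poll;           /* assumed: where pollers sleep */
        struct list_head        files;          /* assumed: open files of this node */
        struct rcu_head         rcu_head;       /* assumed: for deferred freeing */
};

struct demo_node {
        struct mutex                    mutex;  /* stands in for the hashed mutex */
        struct demo_open_node __rcu     *open;  /* the ->attr.open analogue */
};

struct demo_open_file {
        struct list_head        list;           /* assumed: link into demo_open_node.files */
};

static int demo_get_open_node(struct demo_node *kn, struct demo_open_file *of,
                              bool has_release)
{
        struct demo_open_node *on;

        mutex_lock(&kn->mutex);
        on = rcu_dereference_protected(kn->open, lockdep_is_held(&kn->mutex));
        if (!on) {
                on = kzalloc(sizeof(*on), GFP_KERNEL);
                if (!on) {
                        mutex_unlock(&kn->mutex);
                        return -ENOMEM;
                }
                init_waitqueue_head(&on->poll);
                INIT_LIST_HEAD(&on->files);
                rcu_assign_pointer(kn->open, on);       /* publish for lockless readers */
        }
        if (has_release)                        /* cf. KERNFS_HAS_RELEASE check at line 584 */
                list_add_tail(&of->list, &on->files);
        mutex_unlock(&kn->mutex);
        return 0;
}
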
592 * kernfs_unlink_open_file - Unlink @of from @kn.
594 * @kn: target kernfs_node
598 * Unlink @of from the list of @kn's associated open files. If the list of
605 static void kernfs_unlink_open_file(struct kernfs_node *kn,
612 mutex = kernfs_open_file_mutex_lock(kn);
614 on = kernfs_deref_open_node_locked(kn);
621 if (kn->flags & KERNFS_HAS_RELEASE) {
632 rcu_assign_pointer(kn->attr.open, NULL);
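
Lines 592-632 are the inverse: under the same mutex, @of is unlinked from the node's file list, and once the open node is no longer needed, ->attr.open is reset to NULL with rcu_assign_pointer() (line 632) so lockless readers see either the old node or NULL, never a freed pointer. A sketch of that teardown, reusing the demo_ types and includes from the attach sketch above; the emptiness test and kfree_rcu() are assumptions about bookkeeping that the hidden lines handle more carefully:

static void demo_unlink_open_file(struct demo_node *kn, struct demo_open_file *of,
                                  bool has_release)
{
        struct demo_open_node *on;

        mutex_lock(&kn->mutex);
        on = rcu_dereference_protected(kn->open, lockdep_is_held(&kn->mutex));
        if (!on) {
                mutex_unlock(&kn->mutex);
                return;
        }

        if (has_release)                        /* cf. KERNFS_HAS_RELEASE check at line 621 */
                list_del(&of->list);

        /* Once no opener needs the node, unpublish it; readers that already
         * fetched the old pointer keep a valid object until the RCU grace
         * period ends, then kfree_rcu() reclaims it (assumed). */
        if (list_empty(&on->files)) {
                rcu_assign_pointer(kn->open, NULL);     /* cf. line 632 */
                kfree_rcu(on, rcu_head);
        }
        mutex_unlock(&kn->mutex);
}
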
641 struct kernfs_node *kn = inode->i_private;
642 struct kernfs_root *root = kernfs_root(kn);
648 if (!kernfs_get_active(kn))
651 ops = kernfs_ops(kn);
694 of->kn = kn;
740 error = kernfs_get_open_node(kn, of);
752 kernfs_put_active(kn);
756 kernfs_unlink_open_file(kn, of, true);
763 kernfs_put_active(kn);
768 static void kernfs_release_file(struct kernfs_node *kn,
774 * @kernfs_open_file_mutex_ptr(kn) is enough. @of->mutex can't be used
778 lockdep_assert_held(kernfs_open_file_mutex_ptr(kn));
786 kn->attr.ops->release(of);
794 struct kernfs_node *kn = inode->i_private;
797 if (kn->flags & KERNFS_HAS_RELEASE) {
800 mutex = kernfs_open_file_mutex_lock(kn);
801 kernfs_release_file(kn, of);
805 kernfs_unlink_open_file(kn, of, false);
813 bool kernfs_should_drain_open_files(struct kernfs_node *kn)
819 * @kn being deactivated guarantees that @kn->attr.open can't change
822 WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
825 on = rcu_dereference(kn->attr.open);
832 void kernfs_drain_open_files(struct kernfs_node *kn)
838 mutex = kernfs_open_file_mutex_lock(kn);
839 on = kernfs_deref_open_node_locked(kn);
854 if (kn->flags & KERNFS_HAS_RELEASE)
855 kernfs_release_file(kn, of);
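
Lines 813-855 handle a node being removed. kernfs_should_drain_open_files() can use plain rcu_dereference() at line 825 because the WARN_ON_ONCE at line 822 documents the precondition: the node is already deactivated (active count parked at KN_DEACTIVATED_BIAS), so no new open can install a different ->attr.open concurrently. kernfs_drain_open_files() then walks the remaining open files under the hashed mutex and, for KERNFS_HAS_RELEASE nodes, calls the release hook (lines 854-855). A sketch of that drain loop, again over the demo_ types above and with the release hook passed in explicitly:

/* Drain sketch: the node is already deactivated, so the set of open files
 * can only shrink while the mutex is held. */
static void demo_drain_open_files(struct demo_node *kn,
                                  void (*release)(struct demo_open_file *of),
                                  bool has_release)
{
        struct demo_open_node *on;
        struct demo_open_file *of;

        mutex_lock(&kn->mutex);
        on = rcu_dereference_protected(kn->open, lockdep_is_held(&kn->mutex));
        if (!on) {
                mutex_unlock(&kn->mutex);
                return;
        }

        list_for_each_entry(of, &on->files, list) {
                if (has_release)                /* cf. lines 854-855 */
                        release(of);            /* stand-in for kn->attr.ops->release(of) */
        }
        mutex_unlock(&kn->mutex);
}
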
891 struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
894 if (!kernfs_get_active(kn))
897 if (kn->attr.ops->poll)
898 ret = kn->attr.ops->poll(of, wait);
902 kernfs_put_active(kn);
908 struct kernfs_node *kn;
914 kn = kernfs_notify_list;
915 if (kn == KERNFS_NOTIFY_EOL) {
919 kernfs_notify_list = kn->attr.notify_next;
920 kn->attr.notify_next = NULL;
923 root = kernfs_root(kn);
927 list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
934 * We want fsnotify_modify() on @kn but as the
939 inode = ilookup(info->sb, kernfs_ino(kn));
943 name = (struct qstr)QSTR_INIT(kn->name, strlen(kn->name));
944 parent = kernfs_get_parent(kn);
964 kernfs_put(kn);
970 * @kn: file to notify
972 * Notify @kn such that poll(2) on @kn wakes up. May be called from any
975 void kernfs_notify(struct kernfs_node *kn)
981 if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
986 on = rcu_dereference(kn->attr.open);
995 if (!kn->attr.notify_next) {
996 kernfs_get(kn);
997 kn->attr.notify_next = kernfs_notify_list;
998 kernfs_notify_list = kn;
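
Lines 891-998 connect polling and notification. kernfs_notify() does the context-safe half immediately: an RCU read of ->attr.open (line 986) to wake any pollers. The fsnotify half needs process context, so the node is pushed onto the intrusive kernfs_notify_list via attr.notify_next (lines 995-998), with kernfs_get() pinning it until the worker at lines 908-964 pops it and drops the reference (line 964). A sketch of that split, with an assumed spinlock around the list and a self-referential end-of-list sentinel modelled on KERNFS_NOTIFY_EOL at line 915:

#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/rcupdate.h>

struct demo_open_node {
        wait_queue_head_t       poll;           /* assumed, as in the earlier sketches */
};

struct demo_node {
        struct demo_open_node __rcu *open;      /* ->attr.open analogue */
        struct demo_node        *notify_next;   /* ->attr.notify_next analogue */
};

void demo_get(struct demo_node *kn);            /* stand-in for kernfs_get() */

static DEFINE_SPINLOCK(demo_notify_lock);       /* assumed list lock */
#define DEMO_NOTIFY_EOL ((void *)&demo_notify_list)     /* sentinel, like KERNFS_NOTIFY_EOL */
static struct demo_node *demo_notify_list = DEMO_NOTIFY_EOL;

static void demo_notify(struct demo_node *kn)
{
        struct demo_open_node *on;
        unsigned long flags;

        /* Half 1: wake pollers now; safe from any context. */
        rcu_read_lock();
        on = rcu_dereference(kn->open);
        if (on)
                wake_up_interruptible(&on->poll);
        rcu_read_unlock();

        /* Half 2: push kn on an intrusive LIFO for the process-context
         * worker; notify_next doubles as the "already queued" flag. */
        spin_lock_irqsave(&demo_notify_lock, flags);
        if (!kn->notify_next) {
                demo_get(kn);                   /* pinned until the worker drops it */
                kn->notify_next = demo_notify_list;
                demo_notify_list = kn;
        }
        spin_unlock_irqrestore(&demo_notify_lock, flags);
}
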
1041 struct kernfs_node *kn;
1047 kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG,
1049 if (!kn)
1052 kn->attr.ops = ops;
1053 kn->attr.size = size;
1054 kn->ns = ns;
1055 kn->priv = priv;
1059 lockdep_init_map(&kn->dep_map, "kn->active", key, 0);
1060 kn->flags |= KERNFS_LOCKDEP;
1065 * kn->attr.ops is accessible only while holding an active ref. We
1070 kn->flags |= KERNFS_HAS_SEQ_SHOW;
1072 kn->flags |= KERNFS_HAS_MMAP;
1074 kn->flags |= KERNFS_HAS_RELEASE;
1076 rc = kernfs_add_one(kn);
1078 kernfs_put(kn);
1081 return kn;
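
Lines 1041-1081 explain why the earlier hot paths could test kn->flags instead of chasing kn->attr.ops: at creation time __kernfs_create_file() mirrors the capabilities of the supplied kernfs_ops into flag bits (lines 1070-1074), precisely because attr.ops may only be dereferenced while an active reference is held (comment at line 1065). A sketch of that mirroring step, using the public struct kernfs_ops members; the surrounding allocation and kernfs_add_one() error handling at lines 1047-1081 is left out:

#include <linux/kernfs.h>

/* Copy the ops' capabilities into cheap-to-test flag bits so that paths
 * like line 278 (has seq_show?) and line 501 (has mmap?) never need an
 * active reference just to ask "does this file support X?". */
static void demo_set_capability_flags(struct kernfs_node *kn,
                                      const struct kernfs_ops *ops)
{
        kn->attr.ops = ops;                     /* cf. line 1052 */

        if (ops->seq_show)
                kn->flags |= KERNFS_HAS_SEQ_SHOW;
        if (ops->mmap)
                kn->flags |= KERNFS_HAS_MMAP;
        if (ops->release)
                kn->flags |= KERNFS_HAS_RELEASE;
}
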