Lines Matching refs:mark (fs/notify/mark.c)
7 * fsnotify inode mark locking/lifetime/and refcnting
10 * The group->refcnt and mark->refcnt tell how many "things" in the kernel
13 * the reference a group and a mark hold to each other.
22 * mark->lock
23 * mark->connector->lock
26 * each mark is hooked via the g_list. It also protects the group's private
29 * mark->lock protects the mark's attributes like its masks and flags.
30 * Furthermore it protects the access to a reference of the group that the mark
32 * that is being watched by the mark.
34 * mark->connector->lock protects the list of marks anchored inside an
35 * inode / vfsmount and each mark is hooked via the i_list.
39 * marks in the list and is also protected by fsnotify_mark_srcu. A mark gets
40 * detached from fsnotify_mark_connector when the last reference to the mark is
41 * dropped. Thus holding a mark reference is enough to protect mark->connector
43 * because we remove the mark from g_list before dropping the mark reference associated
44 * with it, any mark found through g_list is guaranteed to have
45 * mark->connector set until we drop group->mark_mutex.
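
To make the documented nesting concrete, here is a minimal sketch (not a function from this file; it assumes the fsnotify_group_lock()/fsnotify_group_unlock() wrappers for group->mark_mutex that this file uses elsewhere):

static void example_lock_nesting(struct fsnotify_mark *mark)
{
	struct fsnotify_group *group = mark->group;
	struct fsnotify_mark_connector *conn;

	fsnotify_group_lock(group);		/* 1) group->mark_mutex */
	spin_lock(&mark->lock);			/* 2) mark->lock */
	conn = READ_ONCE(mark->connector);
	if (conn)
		spin_lock(&conn->lock);		/* 3) mark->connector->lock */

	/* ... operate on the mark and its connector ... */

	if (conn)
		spin_unlock(&conn->lock);
	spin_unlock(&mark->lock);
	fsnotify_group_unlock(group);
}
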
51 * The inode mark can be cleared for a number of different reasons including:
56 * - The fsnotify_group associated with the mark is going away and all such marks
94 void fsnotify_get_mark(struct fsnotify_mark *mark)
96 WARN_ON_ONCE(!refcount_read(&mark->refcnt));
97 refcount_inc(&mark->refcnt);
130 * iput() outside of spinlocks. This happens when the last mark that wanted iref is
144 /* Pin inode if any mark wants inode refcount held */
148 /* Unpin inode after detach of last mark that wanted iref */
160 struct fsnotify_mark *mark;
166 hlist_for_each_entry(mark, &conn->list, obj_list) {
167 if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED))
169 new_mask |= fsnotify_calc_mask(mark);
171 !(mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF))
182 * this by holding a mark->lock or mark->group->mark_mutex for a mark on this
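
The fragments above come from __fsnotify_recalc_mask()/fsnotify_recalc_mask(), which recompute the watched object's mask from all attached marks. As a hedged sketch, a backend that widens the mask of an already-attached mark would do roughly the following (FS_CREATE is only an example event bit; holding group->mark_mutex satisfies the "connector cannot disappear" requirement quoted at line 182):

static void example_add_events(struct fsnotify_group *group,
			       struct fsnotify_mark *mark)
{
	fsnotify_group_lock(group);		/* keeps mark->connector stable */
	spin_lock(&mark->lock);
	mark->mask |= FS_CREATE;		/* example: also watch creates */
	spin_unlock(&mark->lock);
	fsnotify_recalc_mask(mark->connector);	/* propagate to the watched object */
	fsnotify_group_unlock(group);
}
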
272 static void fsnotify_final_mark_destroy(struct fsnotify_mark *mark)
274 struct fsnotify_group *group = mark->group;
278 group->ops->free_mark(mark);
293 void fsnotify_put_mark(struct fsnotify_mark *mark)
295 struct fsnotify_mark_connector *conn = READ_ONCE(mark->connector);
302 if (refcount_dec_and_test(&mark->refcnt))
303 fsnotify_final_mark_destroy(mark);
309 * safely grab mark reference.
311 if (!refcount_dec_and_lock(&mark->refcnt, &conn->lock))
314 hlist_del_init_rcu(&mark->obj_list);
322 WRITE_ONCE(mark->connector, NULL);
341 list_add(&mark->g_list, &destroy_list);
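
For orientation, the pinning idiom that fsnotify_get_mark()/fsnotify_put_mark() support looks roughly like this; a sketch of the pattern (the same one fsnotify_destroy_marks() uses later in this file), not a quote:

	/* 'mark' was found on conn->list while conn->lock is held */
	fsnotify_get_mark(mark);	/* safe: marks still on the list have refcnt >= 1 */
	spin_unlock(&conn->lock);

	/* ... use 'mark' without holding any lock ... */

	fsnotify_put_mark(mark);	/* may remove it from conn->list and free it */
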
349 * Get mark reference when we found the mark via lockless traversal of object
355 static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
357 if (!mark)
360 if (refcount_inc_not_zero(&mark->refcnt)) {
361 spin_lock(&mark->lock);
362 if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) {
363 /* mark is attached, group is still alive then */
364 atomic_inc(&mark->group->user_waits);
365 spin_unlock(&mark->lock);
368 spin_unlock(&mark->lock);
369 fsnotify_put_mark(mark);
379 static void fsnotify_put_mark_wake(struct fsnotify_mark *mark)
381 if (mark) {
382 struct fsnotify_group *group = mark->group;
384 fsnotify_put_mark(mark);
400 /* This can fail if mark is being removed */
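
The get_mark_safe()/put_mark_wake() pair above backs fsnotify_prepare_user_wait() and fsnotify_finish_user_wait(); the comment at line 400 is from the former. A hedged sketch of how a permission-event backend such as fanotify uses them, with the actual sleep elided because it is backend specific:

static int example_permission_wait(struct fsnotify_iter_info *iter_info)
{
	/* Pin every mark of the event and leave the fsnotify_mark_srcu read side. */
	if (!fsnotify_prepare_user_wait(iter_info))
		return -ENOENT;		/* a mark was being removed; error code is illustrative */

	/* ... sleep here for the userspace verdict, outside of SRCU ... */

	/* Re-enter SRCU and drop the mark references (and group->user_waits) taken above. */
	fsnotify_finish_user_wait(iter_info);
	return 0;
}
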
433 * Mark the mark as detached and remove it from the group list. The mark still stays in the object
434 * list until its last reference is dropped. Note that we rely on the mark being
436 * particular we rely on mark->connector being valid while we hold
437 * group->mark_mutex if we found the mark through g_list.
440 * reference to the mark or be protected by fsnotify_mark_srcu.
442 void fsnotify_detach_mark(struct fsnotify_mark *mark)
444 fsnotify_group_assert_locked(mark->group);
446 refcount_read(&mark->refcnt) < 1 +
447 !!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED));
449 spin_lock(&mark->lock);
450 /* something else already called this function on this mark */
451 if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
452 spin_unlock(&mark->lock);
455 mark->flags &= ~FSNOTIFY_MARK_FLAG_ATTACHED;
456 list_del_init(&mark->g_list);
457 spin_unlock(&mark->lock);
459 /* Drop mark reference acquired in fsnotify_add_mark_locked() */
460 fsnotify_put_mark(mark);
464 * Free fsnotify mark. The mark is only marked as being freed here; the actual
465 * freeing happens only once the last reference to the mark is
468 * Caller must have a reference to the mark or be protected by
471 void fsnotify_free_mark(struct fsnotify_mark *mark)
473 struct fsnotify_group *group = mark->group;
475 spin_lock(&mark->lock);
476 /* something else already called this function on this mark */
477 if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
478 spin_unlock(&mark->lock);
481 mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
482 spin_unlock(&mark->lock);
486 * callback to the group function to let it know that this mark
490 group->ops->freeing_mark(mark, group);
493 void fsnotify_destroy_mark(struct fsnotify_mark *mark,
497 fsnotify_detach_mark(mark);
499 fsnotify_free_mark(mark);
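
fsnotify_destroy_mark() above is the detach + free combination a backend calls to tear down a single watch; the caller typically still holds its own reference and drops it afterwards. A sketch of that common pattern:

	/* remove the watch, then drop the caller's own reference */
	fsnotify_destroy_mark(mark, mark->group);
	fsnotify_put_mark(mark);
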
577 * Get mark connector, make sure it is alive and return with its lock held.
579 * hold reference to a mark on the list may directly lock connector->lock as
604 * Add mark into proper place in given list of marks. These marks may be used
609 static int fsnotify_add_mark_list(struct fsnotify_mark *mark,
627 spin_lock(&mark->lock);
630 spin_unlock(&mark->lock);
659 /* is mark the first mark? */
661 hlist_add_head_rcu(&mark->obj_list, &conn->list);
665 /* should mark be in the middle of the current list? */
669 if ((lmark->group == mark->group) &&
671 !(mark->group->flags & FSNOTIFY_GROUP_DUPS)) {
676 cmp = fsnotify_compare_groups(lmark->group, mark->group);
678 hlist_add_before_rcu(&mark->obj_list, &lmark->obj_list);
684 /* mark should be the last entry. last is the current last entry */
685 hlist_add_behind_rcu(&mark->obj_list, &last->obj_list);
690 * seeing mark->connector set.
692 WRITE_ONCE(mark->connector, conn);
695 spin_unlock(&mark->lock);
700 * Attach an initialized mark to a given group and fs object.
704 int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
708 struct fsnotify_group *group = mark->group;
716 * mark->lock
717 * mark->connector->lock
719 spin_lock(&mark->lock);
720 mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_ATTACHED;
722 list_add(&mark->g_list, &group->marks_list);
723 fsnotify_get_mark(mark); /* for g_list */
724 spin_unlock(&mark->lock);
726 ret = fsnotify_add_mark_list(mark, connp, obj_type, add_flags, fsid);
730 fsnotify_recalc_mask(mark->connector);
734 spin_lock(&mark->lock);
735 mark->flags &= ~(FSNOTIFY_MARK_FLAG_ALIVE |
737 list_del_init(&mark->g_list);
738 spin_unlock(&mark->lock);
740 fsnotify_put_mark(mark);
744 int fsnotify_add_mark(struct fsnotify_mark *mark, fsnotify_connp_t *connp,
749 struct fsnotify_group *group = mark->group;
752 ret = fsnotify_add_mark_locked(mark, connp, obj_type, add_flags, fsid);
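
fsnotify_add_mark() above and fsnotify_init_mark() further down are the two calls a backend combines when creating a watch. A minimal sketch of that lifecycle; my_mark_cache is an assumed backend slab cache, FS_MODIFY just an example mask, the backend's group->ops->free_mark is expected to free the allocation when the final reference goes away, and the exact fsnotify_add_inode_mark() arguments vary between kernel versions:

static int example_create_watch(struct fsnotify_group *group, struct inode *inode)
{
	struct fsnotify_mark *mark;
	int ret;

	mark = kmem_cache_zalloc(my_mark_cache, GFP_KERNEL);	/* hypothetical cache */
	if (!mark)
		return -ENOMEM;

	fsnotify_init_mark(mark, group);	/* refcnt = 1, takes a group reference */
	mark->mask = FS_MODIFY;			/* example event mask */

	ret = fsnotify_add_inode_mark(mark, inode, 0);	/* wraps fsnotify_add_mark() */

	/* Drop the initial reference; on success the g_list reference keeps the mark alive. */
	fsnotify_put_mark(mark);
	return ret;
}
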
759 * Given a list of marks, find the mark associated with a given group. If found
760 * take a reference to that mark and return it, else return NULL.
766 struct fsnotify_mark *mark;
772 hlist_for_each_entry(mark, &conn->list, obj_list) {
773 if (mark->group == group &&
774 (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
775 fsnotify_get_mark(mark);
777 return mark;
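
fsnotify_find_mark() above returns the mark with an extra reference, so the caller must drop it when done; roughly (a sketch assuming an inode object and this file's fsnotify_find_mark(connp, group) signature):

	struct fsnotify_mark *fsn_mark;

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (fsn_mark) {
		/* ... inspect or update the existing watch ... */
		fsnotify_put_mark(fsn_mark);	/* drop the reference from the lookup */
	}
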
789 struct fsnotify_mark *lmark, *mark;
802 * list. And freeing a mark requires us to drop mark_mutex. So we can
803 * reliably free only the first mark in the list. That's why we first
808 list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
809 if (mark->connector->type == obj_type)
810 list_move(&mark->g_list, &to_free);
821 mark = list_first_entry(head, struct fsnotify_mark, g_list);
822 fsnotify_get_mark(mark);
823 fsnotify_detach_mark(mark);
825 fsnotify_free_mark(mark);
826 fsnotify_put_mark(mark);
834 struct fsnotify_mark *mark, *old_mark = NULL;
844 * list can get modified. However we are holding a mark reference and
845 * thus our mark cannot be removed from obj_list, so we can continue
848 hlist_for_each_entry(mark, &conn->list, obj_list) {
849 fsnotify_get_mark(mark);
853 old_mark = mark;
854 fsnotify_destroy_mark(mark, mark->group);
859 * mark references get dropped. It would lead to strange results such
872 void fsnotify_init_mark(struct fsnotify_mark *mark,
875 memset(mark, 0, sizeof(*mark));
876 spin_lock_init(&mark->lock);
877 refcount_set(&mark->refcnt, 1);
879 mark->group = group;
880 WRITE_ONCE(mark->connector, NULL);
890 struct fsnotify_mark *mark, *next;
900 list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
901 list_del_init(&mark->g_list);
902 fsnotify_final_mark_destroy(mark);