Lines matching refs: group
187 static int fanotify_merge(struct fsnotify_group *group,
191 unsigned int bucket = fanotify_event_hash_bucket(group, new);
192 struct hlist_head *hlist = &group->fanotify_data.merge_hash[bucket];
195 pr_debug("%s: group=%p event=%p bucket=%u\n", __func__,
196 group, event, bucket);
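The lines above are fanotify's event merge path: a hash of the new event selects a bucket in group->fanotify_data.merge_hash, and only that bucket's list is scanned for an already-queued event to coalesce with. Below is a minimal, self-contained sketch of that bucket-and-scan pattern; the names and types (struct event, merge_hash, event_hash_bucket) are illustrative stand-ins, not the kernel's actual structures.

/* Simplified userspace model of hash-bucketed event merging. */
#include <stdbool.h>

#define MERGE_HASH_BITS		7
#define MERGE_HASH_BUCKETS	(1U << MERGE_HASH_BITS)

struct event {
	unsigned long object_id;	/* stands in for the object the event refers to */
	unsigned int mask;		/* event type bits */
	struct event *next;		/* bucket chain */
};

static struct event *merge_hash[MERGE_HASH_BUCKETS];

static unsigned int event_hash_bucket(const struct event *e)
{
	return (unsigned int)(e->object_id & (MERGE_HASH_BUCKETS - 1));
}

/* Returns true if @new was folded into an already-queued event. */
static bool event_merge(struct event *new)
{
	struct event *old;

	for (old = merge_hash[event_hash_bucket(new)]; old; old = old->next) {
		if (old->object_id == new->object_id) {
			old->mask |= new->mask;	/* coalesce rather than queue a duplicate */
			return true;
		}
	}
	return false;	/* caller queues @new and links it into its bucket */
}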
229 static int fanotify_get_response(struct fsnotify_group *group,
235 pr_debug("%s: group=%p event=%p\n", __func__, group, event);
237 ret = wait_event_killable(group->fanotify_data.access_waitq,
241 spin_lock(&group->notification_lock);
246 spin_unlock(&group->notification_lock);
251 fsnotify_remove_queued_event(group, &event->fae.fse);
260 spin_unlock(&group->notification_lock);
279 pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
280 group, event, ret);
282 fsnotify_destroy_event(group, &event->fae.fse);
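fanotify_get_response() is where the kernel sleeps on the group's access_waitq until userspace answers a permission event (or the event is abandoned). A hedged userspace sketch of the answering side follows, using only the documented fanotify API; the /tmp path and the blanket FAN_ALLOW policy are arbitrary choices for illustration.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/fanotify.h>

int main(void)
{
	struct fanotify_event_metadata buf[200], *md;
	ssize_t len;
	int fd;

	/* FAN_CLASS_CONTENT (or FAN_CLASS_PRE_CONTENT) is required for permission events. */
	fd = fanotify_init(FAN_CLASS_CONTENT, O_RDONLY);
	if (fd < 0) {
		perror("fanotify_init");
		return 1;
	}

	/* Ask for open-permission events on files directly under /tmp. */
	if (fanotify_mark(fd, FAN_MARK_ADD, FAN_OPEN_PERM | FAN_EVENT_ON_CHILD,
			  AT_FDCWD, "/tmp") < 0) {
		perror("fanotify_mark");
		return 1;
	}

	while ((len = read(fd, buf, sizeof(buf))) > 0) {
		for (md = buf; FAN_EVENT_OK(md, len); md = FAN_EVENT_NEXT(md, len)) {
			if (md->vers != FANOTIFY_METADATA_VERSION)
				return 1;
			if (md->mask & FAN_OPEN_PERM) {
				/* The kernel side is parked in fanotify_get_response()
				 * until this reply arrives or the group goes away. */
				struct fanotify_response resp = {
					.fd = md->fd,
					.response = FAN_ALLOW,	/* or FAN_DENY */
				};
				if (write(fd, &resp, sizeof(resp)) < 0)
					perror("write");
			}
			if (md->fd >= 0)
				close(md->fd);
		}
	}
	close(fd);
	return 0;
}

Running this requires CAP_SYS_ADMIN; denying instead of allowing is a one-constant change.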
293 static u32 fanotify_group_event_mask(struct fsnotify_group *group,
303 unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
339 /* Record the mark types of this group that matched the event */
355 * fanotify_alloc_event() when group is reporting fid as indication
493 * and the group flags.
495 * With the group flag FAN_REPORT_TARGET_FID, always report the child fid.
497 * Without the group flag FAN_REPORT_TARGET_FID, report the modified directory
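The comment lines above belong to the fid-reporting logic: which object an event identifies depends on FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS) and, on newer kernels, FAN_REPORT_TARGET_FID. As a sketch of how those modes are chosen from userspace at fanotify_init() time (the helper name open_fid_group() is made up for the example):

#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>

/*
 * FAN_REPORT_FID: events identify objects by file handle instead of an open
 * fd.  FAN_REPORT_DFID_NAME additionally reports the parent directory's
 * handle plus the entry name.  FAN_REPORT_TARGET_FID (newer kernels) makes
 * the child object itself be reported, as the comments above describe.
 */
int open_fid_group(void)
{
	int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_DFID_NAME, 0);

	if (fd < 0)
		perror("fanotify_init");
	/* Handles arrive as fanotify_event_info_fid records after the event
	 * metadata and can be resolved with open_by_handle_at(2). */
	return fd;
}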
673 struct fsnotify_group *group,
687 fee = mempool_alloc(&group->fanotify_data.error_events_pool, GFP_NOFS);
711 struct fsnotify_group *group,
718 unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
733 * For certain events and group flags, report the child fid
742 * We record file name only in a group with FAN_REPORT_NAME
800 if (group->max_events == UINT_MAX)
806 old_memcg = set_active_memcg(group->memcg);
811 event = fanotify_alloc_error_event(group, fsid, data,
825 if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
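Two of the group properties checked on this allocation path map directly to fanotify_init() flags: an unlimited event queue (the group->max_events == UINT_MAX test above) comes from FAN_UNLIMITED_QUEUE, and FAN_GROUP_FLAG(group, FAN_REPORT_TID) from FAN_REPORT_TID, which makes the reported pid field carry a thread id. A minimal sketch, with an illustrative helper name:

#include <fcntl.h>
#include <sys/fanotify.h>

int open_unlimited_group(void)
{
	/* Needs CAP_SYS_ADMIN.  Without FAN_UNLIMITED_QUEUE the queue is
	 * bounded and drops are reported as FAN_Q_OVERFLOW events. */
	return fanotify_init(FAN_CLASS_NOTIF | FAN_UNLIMITED_QUEUE |
			     FAN_REPORT_TID, O_RDONLY);
}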
874 static void fanotify_insert_event(struct fsnotify_group *group,
878 unsigned int bucket = fanotify_event_hash_bucket(group, event);
879 struct hlist_head *hlist = &group->fanotify_data.merge_hash[bucket];
881 assert_spin_locked(&group->notification_lock);
886 pr_debug("%s: group=%p event=%p bucket=%u\n", __func__,
887 group, event, bucket);
892 static int fanotify_handle_event(struct fsnotify_group *group, u32 mask,
928 mask = fanotify_group_event_mask(group, iter_info, &match_mask,
933 pr_debug("%s: group=%p mask=%x report_mask=%x\n", __func__,
934 group, mask, match_mask);
945 if (FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS)) {
952 event = fanotify_alloc_event(group, mask, data, data_type, dir,
961 fsnotify_queue_overflow(group);
966 ret = fsnotify_insert_event(group, fsn_event, fanotify_merge,
972 fsnotify_destroy_event(group, fsn_event);
976 ret = fanotify_get_response(group, FANOTIFY_PERM(event),
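When event allocation fails, fanotify_handle_event() falls back to fsnotify_queue_overflow(); userspace sees that as a FAN_Q_OVERFLOW event whose fd is FAN_NOFD. A small hedged check for it, meant to sit inside an event-reading loop like the one sketched earlier:

#include <stdio.h>
#include <sys/fanotify.h>

static void check_overflow(const struct fanotify_event_metadata *md)
{
	if ((md->mask & FAN_Q_OVERFLOW) && md->fd == FAN_NOFD)
		fprintf(stderr, "fanotify queue overflow: events were dropped\n");
}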
986 static void fanotify_free_group_priv(struct fsnotify_group *group)
988 kfree(group->fanotify_data.merge_hash);
989 if (group->fanotify_data.ucounts)
990 dec_ucount(group->fanotify_data.ucounts,
993 if (mempool_initialized(&group->fanotify_data.error_events_pool))
994 mempool_exit(&group->fanotify_data.error_events_pool);
1023 static void fanotify_free_error_event(struct fsnotify_group *group,
1028 mempool_free(fee, &group->fanotify_data.error_events_pool);
1031 static void fanotify_free_event(struct fsnotify_group *group,
1055 fanotify_free_error_event(group, event);
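The error-event mempool freed above backs FAN_FS_ERROR reporting. A hedged sketch of the userspace setup that produces such events, assuming a kernel and headers new enough to define FAN_FS_ERROR (5.16+); the helper name is illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/fanotify.h>

int watch_fs_errors(const char *mountpoint)
{
	/* FAN_FS_ERROR needs a notification-class group in fid mode and a
	 * filesystem-wide mark (CAP_SYS_ADMIN required). */
	int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_FID, 0);

	if (fd < 0)
		return -1;
	if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_FILESYSTEM,
			  FAN_FS_ERROR, AT_FDCWD, mountpoint) < 0) {
		perror("fanotify_mark");
		close(fd);
		return -1;
	}
	return fd;	/* events carry a fanotify_event_info_error record */
}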
1063 struct fsnotify_group *group)
1065 if (!FAN_GROUP_FLAG(group, FAN_UNLIMITED_MARKS))
1066 dec_ucount(group->fanotify_data.ucounts, UCOUNT_FANOTIFY_MARKS);
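The ucount released here is the per-user mark accounting (exposed as fs.fanotify.max_user_marks on recent kernels); groups created with FAN_UNLIMITED_MARKS skip it, which is exactly what the check above tests. Without that flag, fanotify_mark() fails with ENOSPC once the limit is exhausted, as in this illustrative helper:

#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>

int watch_path(int group_fd, const char *path)
{
	if (fanotify_mark(group_fd, FAN_MARK_ADD,
			  FAN_OPEN | FAN_CLOSE_WRITE, AT_FDCWD, path) < 0) {
		perror("fanotify_mark");	/* ENOSPC: per-user mark limit hit */
		return -1;
	}
	return 0;	/* this mark is charged against the calling user's limit */
}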