Lines Matching refs:user
44 * probes to print out to the user.
46 * These do not reflect the mapped bytes between the user and kernel space.
54 * ready to expose them out to the user ABI.
206 typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
214 static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
216 static int destroy_user_event(struct user_event *user);
223 static struct user_event *user_event_get(struct user_event *user)
225 refcount_inc(&user->refcnt);
227 return user;
232 struct user_event *user = container_of(
237 if (!refcount_dec_and_test(&user->refcnt))
240 if (destroy_user_event(user)) {
248 refcount_set(&user->refcnt, 1);
254 static void user_event_put(struct user_event *user, bool locked)
258 if (unlikely(!user))
274 delete = refcount_dec_and_mutex_lock(&user->refcnt, &event_mutex);
277 delete = refcount_dec_and_test(&user->refcnt);
290 if (user->reg_flags & USER_EVENT_REG_PERSIST) {
301 INIT_WORK(&user->put_work, delayed_destroy_user_event);
307 * needed because a user-process could register the same event in
311 * user process would fail a register because the trace_event_call
314 refcount_set(&user->refcnt, 1);
316 if (WARN_ON_ONCE(!schedule_work(&user->put_work))) {
399 * bad user processes to cause excessive looping.
437 struct user_event *user = enabler->event;
440 mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
548 /* Update bit atomically, user tracers must be atomic as well */
573 static void user_event_enabler_update(struct user_event *user)
593 mm = user_event_mm_get_all(user);
600 if (enabler->event == user) {
645 static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
655 * when user based events are most wanted for diagnostics.
667 * Each user mm returned has a ref inc to handle remove RCU races.
673 if (enabler->event == user) {
863 *user_event_enabler_create(struct user_reg *reg, struct user_event *user,
881 enabler->event = user;
909 user_event_get(user);
930 bool user_event_last_ref(struct user_event *user)
934 if (user->reg_flags & USER_EVENT_REG_PERSIST)
937 return refcount_read(&user->refcnt) == last;
956 struct user_event *user = (struct user_event *)call->data;
958 return &user->fields;
969 * NOTE: Offsets are from the user data perspective, they are not from the
971 * sizes to the offset for the user.
 1025 /* long is not allowed from a user, since it's ambiguous in size */
1067 static void user_event_destroy_validators(struct user_event *user)
1070 struct list_head *head = &user->validators;
1078 static void user_event_destroy_fields(struct user_event *user)
1081 struct list_head *head = &user->fields;
1089 static int user_event_add_field(struct user_event *user, const char *type,
1127 list_add_tail(&validator->user_event_link, &user->validators);
1140 list_add(&field->link, &user->fields);
1143 * Min size from user writes that are required, this does not include
1146 user->min_size = (offset + size) - sizeof(struct trace_entry);
1155 static int user_event_parse_field(char *field, struct user_event *user,
1242 return user_event_add_field(user, type, name, saved_offset, size,
1246 static int user_event_parse_fields(struct user_event *user, char *args)
1256 ret = user_event_parse_field(field, user, &offset);
1367 static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
1370 struct list_head *head = &user->fields;
1401 static int user_event_create_print_fmt(struct user_event *user)
1406 len = user_event_set_print_fmt(user, NULL, 0);
1413 user_event_set_print_fmt(user, print_fmt, len);
1415 user->call.print_fmt = print_fmt;
1431 static int user_event_set_call_visible(struct user_event *user, bool visible)
1456 ret = trace_add_event_call(&user->call);
1458 ret = trace_remove_event_call(&user->call);
1466 static int destroy_user_event(struct user_event *user)
1473 user_event_destroy_fields(user);
1475 ret = user_event_set_call_visible(user, false);
1480 dyn_event_remove(&user->devent);
1481 hash_del(&user->node);
1483 user_event_destroy_validators(user);
1484 kfree(user->call.print_fmt);
1485 kfree(EVENT_NAME(user));
1486 kfree(user);
1499 struct user_event *user;
1504 hash_for_each_possible(group->register_table, user, node, key)
1505 if (!strcmp(EVENT_NAME(user), name))
1506 return user_event_get(user);
1511 static int user_event_validate(struct user_event *user, void *data, int len)
1513 struct list_head *head = &user->validators;
1545 * Writes the user supplied payload out to a trace file.
1547 static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
1571 if (!list_empty(&user->validators) &&
1572 unlikely(user_event_validate(user, entry, size)))
1586 * Writes the user supplied payload out to perf ring buffer.
1588 static void user_event_perf(struct user_event *user, struct iov_iter *i,
1593 perf_head = this_cpu_ptr(user->call.perf_events);
1612 if (!list_empty(&user->validators) &&
1613 unlikely(user_event_validate(user, perf_entry, size)))
1617 user->call.event.type, 1, regs,
1629 * Update the enabled bit among all user processes.
1631 static void update_enable_bit_for(struct user_event *user)
1633 struct tracepoint *tp = &user->tracepoint;
1662 user->status = status;
1664 user_event_enabler_update(user);
1674 struct user_event *user = (struct user_event *)call->data;
1677 if (!user)
1720 user_event_get(user);
1721 update_enable_bit_for(user);
1724 update_enable_bit_for(user);
1725 user_event_put(user, true);
1732 struct user_event *user;
1757 ret = user_event_parse_cmd(group, name, &user, USER_EVENT_REG_PERSIST);
1760 user_event_put(user, false);
1772 struct user_event *user = container_of(ev, struct user_event, devent);
1777 seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));
1779 head = trace_get_fields(&user->call);
1802 struct user_event *user = container_of(ev, struct user_event, devent);
1804 return !user_event_last_ref(user);
1809 struct user_event *user = container_of(ev, struct user_event, devent);
1811 if (!user_event_last_ref(user))
1814 return destroy_user_event(user);
1854 static bool user_fields_match(struct user_event *user, int argc,
1858 struct list_head *head = &user->fields;
1875 struct user_event *user = container_of(ev, struct user_event, devent);
1878 match = strcmp(EVENT_NAME(user), event) == 0 &&
1882 match = user_fields_match(user, argc, argv);
1884 match = list_empty(&user->fields);
1897 static int user_event_trace_register(struct user_event *user)
1901 ret = register_trace_event(&user->call.event);
1906 ret = user_event_set_call_visible(user, true);
1909 unregister_trace_event(&user->call.event);
1925 struct user_event *user;
1935 user = find_user_event(group, name, &key);
1938 if (user) {
1946 ret = user_fields_match(user, argc, (const char **)argv);
1950 ret = list_empty(&user->fields);
1953 *newuser = user;
1966 user_event_put(user, false);
1970 user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);
1972 if (!user)
1975 INIT_LIST_HEAD(&user->class.fields);
1976 INIT_LIST_HEAD(&user->fields);
1977 INIT_LIST_HEAD(&user->validators);
1979 user->group = group;
1980 user->tracepoint.name = name;
1982 ret = user_event_parse_fields(user, args);
1987 ret = user_event_create_print_fmt(user);
1992 user->call.data = user;
1993 user->call.class = &user->class;
1994 user->call.name = name;
1995 user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
1996 user->call.tp = &user->tracepoint;
1997 user->call.event.funcs = &user_event_funcs;
1998 user->class.system = group->system_name;
2000 user->class.fields_array = user_event_fields_array;
2001 user->class.get_fields = user_event_get_fields;
2002 user->class.reg = user_event_reg;
2003 user->class.probe = user_event_ftrace;
2005 user->class.perf_probe = user_event_perf;
2015 ret = user_event_trace_register(user);
2020 user->reg_flags = reg_flags;
2022 if (user->reg_flags & USER_EVENT_REG_PERSIST) {
2024 refcount_set(&user->refcnt, 2);
2027 refcount_set(&user->refcnt, 1);
2030 dyn_event_init(&user->devent, &user_event_dops);
2031 dyn_event_add(&user->devent, &user->call);
2032 hash_add(group->register_table, &user->node, key);
2037 *newuser = user;
2042 user_event_destroy_fields(user);
2043 user_event_destroy_validators(user);
2044 kfree(user->call.print_fmt);
2045 kfree(user);
2055 struct user_event *user = find_user_event(group, name, &key);
2057 if (!user)
2060 user_event_put(user, true);
2062 if (!user_event_last_ref(user))
2065 return destroy_user_event(user);
2069 * Validates the user payload and writes via iterator.
2075 struct user_event *user = NULL;
2092 * added. But the user retrieved from indexing into the events array
2096 user = refs->events[idx];
2100 if (unlikely(user == NULL))
2103 if (unlikely(i->count < user->min_size))
2106 tp = &user->tracepoint;
 2133 probe_func(user, &copy, tpdata, &faulted);
2191 struct user_event *user)
2204 if (refs->events[i] == user)
2220 new_refs->events[i] = user_event_get(user);
2288 * Registers a user_event on behalf of a user process.
2295 struct user_event *user;
 2309 * for user processes that is far easier to debug if this is explicitly
2324 ret = user_event_parse_cmd(info->group, name, &user, reg.flags);
2331 ret = user_events_ref_add(info, user);
2334 user_event_put(user, false);
 2352 enabler = user_event_enabler_create(&reg, user, &write_result);
2367 * Deletes a user_event on behalf of a user process.
2449 * Unregisters an enablement address/bit within a task/user mm.
2498 /* Ensure bit is now cleared for user, regardless of event status */
2507 * Handles the ioctl from user mode to register or alter operations.
2540 * Handles the final close of the file from user mode.
2613 struct user_event *user;
2622 hash_for_each(group->register_table, i, user, node) {
2623 status = user->status;
2625 seq_printf(m, "%s", EVENT_NAME(user));
2691 * Creates a set of tracefs files to allow user mode interactions.
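
For context on the call sites listed above, below is a minimal user-space sketch of the register/enable/write path they implement, assuming the UAPI in <linux/user_events.h> (struct user_reg, DIAG_IOCSREG, and the write_index-prefixed writev payload) as described in Documentation/trace/user_events.rst; the tracefs path and the "example_event u32 count" definition are illustrative only.

/*
 * Hypothetical sketch, not part of the file referenced above: registers
 * one user_event, lets the kernel flip an enable bit in 'enabled', and
 * emits a single record only while a tracer has the event turned on.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <unistd.h>
#include <linux/user_events.h>

/* Word the kernel updates in place whenever the event is enabled. */
static __u32 enabled;

int main(void)
{
	int fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);
	struct user_reg reg = {0};
	struct iovec io[2];
	__u32 count = 42;

	if (fd < 0)
		return 1;

	reg.size = sizeof(reg);
	reg.enable_bit = 31;                     /* bit to test in 'enabled' */
	reg.enable_size = sizeof(enabled);
	reg.enable_addr = (__u64)(unsigned long)&enabled;
	reg.name_args = (__u64)(unsigned long)"example_event u32 count";

	if (ioctl(fd, DIAG_IOCSREG, &reg) < 0)
		return 1;

	/* First iovec entry is always the write index returned at register time. */
	io[0].iov_base = &reg.write_index;
	io[0].iov_len = sizeof(reg.write_index);
	io[1].iov_base = &count;
	io[1].iov_len = sizeof(count);

	/* Only pay the writev() cost while the event is enabled. */
	if (enabled & (1U << 31))
		writev(fd, io, 2);

	close(fd);
	return 0;
}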