Lines matching defs:event
134 * memcg which the event belongs to.
138 * eventfd to signal userspace about the event.
147 * waiter for changes related to this event. Use eventfd_signal()
160 * All fields below needed to unregister event when
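The comment fragments at lines 134-160 all belong to one per-event bookkeeping structure. Pieced together from the fields the rest of this listing touches (memcg, eventfd, list, register_event/unregister_event, pt, wqh, wait, remove), it plausibly looks like the sketch below; the exact comment text and field order are assumptions.

	struct mem_cgroup_event {
		/* memcg which the event belongs to */
		struct mem_cgroup *memcg;
		/* eventfd to signal userspace about the event */
		struct eventfd_ctx *eventfd;
		/* entry in memcg->event_list */
		struct list_head list;
		/* per-control-file callbacks; register_event() adds the
		 * userspace waiter, eventfd_signal() on eventfd notifies it */
		int (*register_event)(struct mem_cgroup *memcg,
				      struct eventfd_ctx *eventfd,
				      const char *args);
		void (*unregister_event)(struct mem_cgroup *memcg,
					 struct eventfd_ctx *eventfd);
		/* all fields below are needed to unregister the event when
		 * userspace closes the eventfd */
		poll_table pt;
		wait_queue_head_t *wqh;
		wait_queue_entry_t wait;
		struct work_struct remove;
	};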
699 * because their event counter is not touched.
913 * @idx: the event item
944 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
946 return atomic_long_read(&memcg->vmevents[event]);
949 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
955 x += per_cpu(memcg->vmstats_local->events[event], cpu);
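The per-cpu read at line 955 is the body of a loop over all possible CPUs; a sketch of how memcg_events_local() plausibly wraps it (local variable names are assumptions), next to the one-atomic read shown at line 946:

	static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
	{
		long x = 0;
		int cpu;

		/* the local (non-hierarchical) count is summed across CPUs,
		 * while memcg_events() at line 944 reads one shared atomic */
		for_each_possible_cpu(cpu)
			x += per_cpu(memcg->vmstats_local->events[event], cpu);
		return x;
	}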
963 /* pagein of a big page is an event. So, ignore page size */
968 nr_pages = -nr_pages; /* for event */
1005 /* threshold event is triggered in finer grain than soft limit */
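Lines 963-1005 concern event counting on charge/uncharge: each page charged or uncharged bumps a per-cpu event counter (a huge page still counts as one event, line 963, and uncharges count negatively, line 968), and that counter rate-limits the expensive checks. A hedged sketch of the shape of the check behind the comment at line 1005, assuming the usual MEM_CGROUP_TARGET_THRESH / MEM_CGROUP_TARGET_SOFTLIMIT targets:

	/* threshold events fire at a finer grain than soft-limit updates */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
	}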
1592 * well as cumulative event counters that show past behavior.
4523 struct mem_cgroup_eventfd_list *event;
4525 event = kmalloc(sizeof(*event), GFP_KERNEL);
4526 if (!event)
4531 event->eventfd = eventfd;
4532 list_add(&event->list, &memcg->oom_notify);
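The allocation at lines 4523-4532 is the body of the OOM notification registrar; a sketch of the likely surrounding function (the memcg_oom_lock spinlock and the immediate signal when already under OOM are from memory and should be read as assumptions):

	static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
		struct eventfd_ctx *eventfd, const char *args)
	{
		struct mem_cgroup_eventfd_list *event;

		event = kmalloc(sizeof(*event), GFP_KERNEL);
		if (!event)
			return -ENOMEM;

		spin_lock(&memcg_oom_lock);

		event->eventfd = eventfd;
		list_add(&event->list, &memcg->oom_notify);

		/* already under OOM: notify the new listener right away */
		if (memcg->under_oom)
			eventfd_signal(eventfd, 1);
		spin_unlock(&memcg_oom_lock);

		return 0;
	}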
4828 * Unregister event and free resources.
4834 struct mem_cgroup_event *event =
4836 struct mem_cgroup *memcg = event->memcg;
4838 remove_wait_queue(event->wqh, &event->wait);
4840 event->unregister_event(memcg, event->eventfd);
4842 /* Notify userspace the event is going away. */
4843 eventfd_signal(event->eventfd, 1);
4845 eventfd_ctx_put(event->eventfd);
4846 kfree(event);
4858 struct mem_cgroup_event *event =
4860 struct mem_cgroup *memcg = event->memcg;
4865 * If the event has been detached at cgroup removal, we
4869 * We can't race against event freeing since the other
4874 if (!list_empty(&event->list)) {
4875 list_del_init(&event->list);
4880 schedule_work(&event->remove);
4891 struct mem_cgroup_event *event =
4894 event->wqh = wqh;
4895 add_wait_queue(wqh, &event->wait);
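Lines 4858-4895 come from the wait-queue callback (memcg_event_wake) and the poll-table hook that wires it up; the listing omits the EPOLLHUP test and the event_list_lock that protect the list_del_init() at line 4875. A sketch of that missing context, with the lock name taken as an assumption:

	static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
				    int sync, void *key)
	{
		struct mem_cgroup_event *event =
			container_of(wait, struct mem_cgroup_event, wait);
		struct mem_cgroup *memcg = event->memcg;
		__poll_t flags = key_to_poll(key);

		if (flags & EPOLLHUP) {
			/* eventfd closed or cgroup removed: take the event off
			 * the list (unless the offline path already did) and
			 * defer the teardown, which can sleep, to a workqueue */
			spin_lock(&memcg->event_list_lock);
			if (!list_empty(&event->list)) {
				list_del_init(&event->list);
				schedule_work(&event->remove);
			}
			spin_unlock(&memcg->event_list_lock);
		}

		return 0;
	}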
4901 * Parse input and register new cgroup event handler.
4911 struct mem_cgroup_event *event;
4936 event = kzalloc(sizeof(*event), GFP_KERNEL);
4937 if (!event)
4940 event->memcg = memcg;
4941 INIT_LIST_HEAD(&event->list);
4942 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4943 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4944 INIT_WORK(&event->remove, memcg_event_remove);
4952 event->eventfd = eventfd_ctx_fileget(efile.file);
4953 if (IS_ERR(event->eventfd)) {
4954 ret = PTR_ERR(event->eventfd);
4981 * Determine the event callbacks and set them in @event. This used
4991 event->register_event = mem_cgroup_usage_register_event;
4992 event->unregister_event = mem_cgroup_usage_unregister_event;
4994 event->register_event = mem_cgroup_oom_register_event;
4995 event->unregister_event = mem_cgroup_oom_unregister_event;
4997 event->register_event = vmpressure_register_event;
4998 event->unregister_event = vmpressure_unregister_event;
5000 event->register_event = memsw_cgroup_usage_register_event;
5001 event->unregister_event = memsw_cgroup_usage_unregister_event;
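The four register/unregister pairs above (lines 4991-5001) are selected by comparing the name of the control file the caller handed in; a sketch of that dispatch, assuming the cgroup v1 file names and a hypothetical out_put_eventfd error label:

	/* name is the basename of the control file passed by userspace */
	if (!strcmp(name, "memory.usage_in_bytes")) {
		event->register_event = mem_cgroup_usage_register_event;
		event->unregister_event = mem_cgroup_usage_unregister_event;
	} else if (!strcmp(name, "memory.oom_control")) {
		event->register_event = mem_cgroup_oom_register_event;
		event->unregister_event = mem_cgroup_oom_unregister_event;
	} else if (!strcmp(name, "memory.pressure_level")) {
		event->register_event = vmpressure_register_event;
		event->unregister_event = vmpressure_unregister_event;
	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
		event->register_event = memsw_cgroup_usage_register_event;
		event->unregister_event = memsw_cgroup_usage_unregister_event;
	} else {
		ret = -EINVAL;
		goto out_put_eventfd;	/* hypothetical label */
	}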
5022 ret = event->register_event(memcg, event->eventfd, buf);
5026 vfs_poll(efile.file, &event->pt);
5029 list_add(&event->list, &memcg->event_list);
5042 eventfd_ctx_put(event->eventfd);
5046 kfree(event);
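Seen from userspace, the registration path at lines 4901-5029 is driven by writing "<event_fd> <fd of a memory control file> [args]" to cgroup.event_control; when the handler fires, or when the cgroup goes away (the eventfd_signal() at line 4843), the eventfd becomes readable. A minimal, self-contained sketch against the documented cgroup v1 interface; the paths and the 64 MiB threshold are just examples:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/eventfd.h>
	#include <unistd.h>

	int main(void)
	{
		uint64_t ticks;
		char buf[64];
		int efd = eventfd(0, 0);
		int ufd = open("/sys/fs/cgroup/memory/mygroup/memory.usage_in_bytes",
			       O_RDONLY);
		int cfd = open("/sys/fs/cgroup/memory/mygroup/cgroup.event_control",
			       O_WRONLY);

		if (efd < 0 || ufd < 0 || cfd < 0)
			return 1;

		/* "<event_fd> <fd of memory.usage_in_bytes> <threshold bytes>" */
		snprintf(buf, sizeof(buf), "%d %d %llu", efd, ufd, 64ULL << 20);
		if (write(cfd, buf, strlen(buf)) < 0)
			return 1;

		/* blocks until usage crosses 64 MiB, or until the cgroup is
		 * removed (the kernel signals the eventfd on removal too) */
		if (read(efd, &ticks, sizeof(ticks)) == sizeof(ticks))
			printf("notified %llu time(s)\n",
			       (unsigned long long)ticks);
		return 0;
	}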
5527 struct mem_cgroup_event *event, *tmp;
5544 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5545 list_del_init(&event->list);
5546 schedule_work(&event->remove);