Lines Matching refs:epi

255 	struct epitem *epi;
338 static inline int ep_is_linked(struct epitem *epi)
340 return !list_empty(&epi->rdllink);
357 return container_of(p, struct ep_pqueue, pt)->epi;
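Line 357 recovers the enclosing ep_pqueue (and from it the epitem) from a poll_table pointer with container_of. A minimal userspace sketch of that idiom, using hypothetical stand-in types (my_table, my_item) rather than the kernel definitions:

#include <stddef.h>
#include <stdio.h>

/* container_of: recover the enclosing struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct my_table { int dummy; };                    /* stands in for poll_table */
struct my_item  { int id; struct my_table pt; };   /* stands in for ep_pqueue */

static struct my_item *item_from_table(struct my_table *p)
{
        return container_of(p, struct my_item, pt);
}

int main(void)
{
        struct my_item it = { .id = 42 };
        printf("%d\n", item_from_table(&it.pt)->id);   /* prints 42 */
        return 0;
}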
412 static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
423 sock = sock_from_file(epi->ffd.file, &err);
432 ep = epi->ep;
455 static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
551 static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
564 * If we are not being called from ep_poll_callback(), epi is NULL and
573 if (epi) {
574 if ((is_file_epoll(epi->ffd.file))) {
575 ep_src = epi->ffd.file->private_data;
590 static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
620 static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
622 struct list_head *lsthead = &epi->pwqlist;
635 static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)
637 return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
641 static inline void ep_pm_stay_awake(struct epitem *epi)
643 struct wakeup_source *ws = ep_wakeup_source(epi);
649 static inline bool ep_has_wakeup_source(struct epitem *epi)
651 return rcu_access_pointer(epi->ws) ? true : false;
655 static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
660 ws = rcu_dereference(epi->ws);
685 struct epitem *epi, *nepi;
722 for (nepi = READ_ONCE(ep->ovflist); (epi = nepi) != NULL;
723 nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
730 if (!ep_is_linked(epi)) {
735 list_add(&epi->rdllink, &ep->rdllist);
736 ep_pm_stay_awake(epi);
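Lines 722-736 drain ep->ovflist after a ready-list scan: each item queued there in the meantime is marked inactive again and, if it is not already linked, moved onto ep->rdllist. A simplified single-threaded C sketch of that drain, with hypothetical item fields standing in for epitem and the kernel list API:

#include <stdbool.h>
#include <stddef.h>

#define UNACTIVE ((struct item *)-1)    /* stands in for EP_UNACTIVE_PTR */

struct item {
        bool          on_ready;         /* stands in for ep_is_linked() */
        struct item  *next;             /* overflow chaining, like epi->next */
        struct item  *rdnext;           /* crude singly linked "ready list" */
};

/* Move every overflowed item back onto the ready list, skipping items that
 * are already linked there, then mark the overflow list inactive again. */
static void drain_ovflist(struct item **ovflist, struct item **ready)
{
        struct item *epi, *nepi;

        for (nepi = *ovflist; (epi = nepi) != NULL;
             nepi = epi->next, epi->next = UNACTIVE) {
                if (!epi->on_ready) {
                        epi->rdnext = *ready;     /* kernel: list_add(&epi->rdllink, ...) */
                        *ready = epi;
                        epi->on_ready = true;
                }
        }
        *ovflist = UNACTIVE;                      /* back to the "not collecting" state */
}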
767 struct epitem *epi = container_of(head, struct epitem, rcu);
768 kmem_cache_free(epi_cache, epi);
775 static int ep_remove(struct eventpoll *ep, struct epitem *epi)
777 struct file *file = epi->ffd.file;
784 ep_unregister_pollwait(ep, epi);
788 list_del_rcu(&epi->fllink);
791 rb_erase_cached(&epi->rbn, &ep->rbr);
794 if (ep_is_linked(epi))
795 list_del_init(&epi->rdllink);
798 wakeup_source_unregister(ep_wakeup_source(epi));
801 * field epi->rcu, since we are trying to minimize the size of
806 call_rcu(&epi->rcu, epi_rcu_free);
816 struct epitem *epi;
836 epi = rb_entry(rbp, struct epitem, rbn);
838 ep_unregister_pollwait(ep, epi);
852 epi = rb_entry(rbp, struct epitem, rbn);
853 ep_remove(ep, epi);
885 static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
891 pt->_key = epi->event.events;
892 if (!is_file_epoll(epi->ffd.file))
893 return vfs_poll(epi->ffd.file, pt) & epi->event.events;
895 ep = epi->ffd.file->private_data;
896 poll_wait(epi->ffd.file, &ep->poll_wait, pt);
899 return ep_scan_ready_list(epi->ffd.file->private_data,
901 locked) & epi->event.events;
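Lines 885-901 show the two cases in ep_item_poll(): an ordinary file is polled through vfs_poll() and the result is masked with the item's interest set, while a nested epoll file recurses into its own ready-list scan with an incremented depth so nesting can be bounded. A hedged sketch of that shape, with hypothetical types (witem, poller) in place of the kernel ones:

#include <stdint.h>
#include <stdio.h>

#define EV_IN 0x1u                         /* stands in for EPOLLIN */

struct witem;

struct poller {                            /* a nested "epoll-like" object */
        struct witem *items;
        int           nitems;
};

struct witem {
        uint32_t       interest;           /* like epi->event.events */
        uint32_t       backend_state;      /* what a plain file would report */
        struct poller *inner;              /* non-NULL if this item is itself a poller */
};

/* Poll one watched item and mask the result with its interest set.
 * A nested poller counts as readable if any of its own items is ready, and
 * the recursion carries depth + 1 so nesting can be bounded, as in ep_item_poll(). */
static uint32_t item_poll(const struct witem *wi, int depth)
{
        uint32_t revents = 0;

        if (!wi->inner) {
                revents = wi->backend_state;               /* like vfs_poll() */
        } else {
                for (int i = 0; i < wi->inner->nitems; i++)
                        if (item_poll(&wi->inner->items[i], depth + 1))
                                revents |= EV_IN;
        }
        return revents & wi->interest;
}

int main(void)
{
        struct witem leaf = { .interest = EV_IN, .backend_state = EV_IN };
        struct poller inner = { .items = &leaf, .nitems = 1 };
        struct witem nested = { .interest = EV_IN, .inner = &inner };

        printf("0x%x\n", item_poll(&nested, 1));   /* prints 0x1 */
        return 0;
}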
907 struct epitem *epi, *tmp;
914 list_for_each_entry_safe(epi, tmp, head, rdllink) {
915 if (ep_item_poll(epi, &pt, depth)) {
923 __pm_relax(ep_wakeup_source(epi));
924 list_del_init(&epi->rdllink);
955 struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
956 struct inode *inode = file_inode(epi->ffd.file);
960 epi->ffd.fd, epi->event.events,
961 (long long)epi->event.data,
962 (long long)epi->ffd.file->f_pos,
989 struct epitem *epi, *next;
1005 list_for_each_entry_safe(epi, next, &file->f_ep_links, fllink) {
1006 ep = epi->ep;
1008 ep_remove(ep, epi);
1053 struct epitem *epi, *epir = NULL;
1058 epi = rb_entry(rbp, struct epitem, rbn);
1059 kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
1065 epir = epi;
1077 struct epitem *epi;
1080 epi = rb_entry(rbp, struct epitem, rbn);
1081 if (epi->ffd.fd == tfd) {
1083 return epi;
1098 struct epitem *epi;
1106 epi = ep_find_tfd(ep, tfd, toff);
1107 if (epi)
1108 file_raw = epi->ffd.file;
1171 * Chains a new epi entry to the head of the ep->ovflist in a lockless way,
1174 * Returns %false if the epi element has already been chained, %true otherwise.
1176 static inline bool chain_epi_lockless(struct epitem *epi)
1178 struct eventpoll *ep = epi->ep;
1181 if (epi->next != EP_UNACTIVE_PTR)
1184 /* Check that the same epi has not been just chained from another CPU */
1185 if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
1189 epi->next = xchg(&ep->ovflist, epi);
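Lines 1176-1189 are the interesting part of chain_epi_lockless(): a cmpxchg on epi->next lets exactly one CPU claim the item, and an xchg on ep->ovflist then pushes it onto the overflow list. A userspace C11 approximation of the same pattern, using <stdatomic.h> compare-exchange/exchange in place of the kernel's cmpxchg()/xchg():

#include <stdatomic.h>
#include <stdbool.h>

#define UNACTIVE ((struct node *)-1)       /* stands in for EP_UNACTIVE_PTR */

struct node {
        _Atomic(struct node *) next;       /* like epi->next */
};

struct ovlist {
        _Atomic(struct node *) head;       /* like ep->ovflist */
};

/* Push @n onto @l at most once, even under concurrent callers.
 * Returns false if some other thread already chained it. */
static bool chain_lockless(struct ovlist *l, struct node *n)
{
        struct node *expected = UNACTIVE;

        /* Claim the node: only the caller that flips next away from
         * UNACTIVE is allowed to link it. */
        if (!atomic_compare_exchange_strong(&n->next, &expected, NULL))
                return false;

        /* Swap ourselves in as the new head, then point next at the
         * previous head (same ordering as the kernel's xchg use). */
        atomic_store(&n->next, atomic_exchange(&l->head, n));
        return true;
}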
1206 * concurrently for the same @epi from different CPUs if the poll table was initialized
1215 struct epitem *epi = ep_item_from_wait(wait);
1216 struct eventpoll *ep = epi->ep;
1223 ep_set_busy_poll_napi_id(epi);
1231 if (!(epi->event.events & ~EP_PRIVATE_BITS))
1240 if (pollflags && !(pollflags & epi->event.events))
1250 if (chain_epi_lockless(epi))
1251 ep_pm_stay_awake_rcu(epi);
1252 } else if (!ep_is_linked(epi)) {
1254 if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
1255 ep_pm_stay_awake_rcu(epi);
1263 if ((epi->event.events & EPOLLEXCLUSIVE) &&
1267 if (epi->event.events & EPOLLIN)
1271 if (epi->event.events & EPOLLOUT)
1289 ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE);
1291 if (!(epi->event.events & EPOLLEXCLUSIVE))
1305 * ep/epi or even wait.
1320 struct epitem *epi = ep_item_from_epqueue(pt);
1323 if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
1326 pwq->base = epi;
1327 if (epi->event.events & EPOLLEXCLUSIVE)
1331 list_add_tail(&pwq->llink, &epi->pwqlist);
1332 epi->nwait++;
1335 epi->nwait = -1;
1339 static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
1349 kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
1356 rb_link_node(&epi->rbn, parent, p);
1357 rb_insert_color_cached(&epi->rbn, &ep->rbr, leftmost);
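Lines 1339-1357 descend the rbtree comparing (file, fd) keys with ep_cmp_ffd() and then link the new node with rb_link_node()/rb_insert_color_cached(). The sketch below shows the same descent-and-link step on a plain (unbalanced) binary search tree with a hypothetical key type; the kernel additionally keeps a leftmost-cached red-black tree and rebalances after linking:

#include <stdint.h>
#include <stddef.h>

/* Ordered the way ep_cmp_ffd() orders epoll_filefd: file pointer first, then fd. */
struct key { uintptr_t file; int fd; };

static int cmp_key(const struct key *a, const struct key *b)
{
        if (a->file != b->file)
                return a->file > b->file ? 1 : -1;
        return a->fd - b->fd;
}

struct node {
        struct key   k;
        struct node *left, *right;
};

/* Descend comparing keys, then link the new node into the empty slot the
 * descent fell out of (duplicates are ruled out by the earlier ep_find()
 * lookup in the epoll_ctl() path). */
static void tree_insert(struct node **root, struct node *n)
{
        struct node **p = root;

        while (*p)
                p = (cmp_key(&n->k, &(*p)->k) > 0) ? &(*p)->right : &(*p)->left;

        n->left = n->right = NULL;
        *p = n;
}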
1401 struct epitem *epi;
1405 list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
1406 child_file = epi->ep->file;
1457 static int ep_create_wakeup_source(struct epitem *epi)
1462 if (!epi->ep->ws) {
1463 epi->ep->ws = wakeup_source_register(NULL, "eventpoll");
1464 if (!epi->ep->ws)
1468 take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
1474 rcu_assign_pointer(epi->ws, ws);
1480 static noinline void ep_destroy_wakeup_source(struct epitem *epi)
1482 struct wakeup_source *ws = ep_wakeup_source(epi);
1484 RCU_INIT_POINTER(epi->ws, NULL);
1504 struct epitem *epi;
1512 if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
1516 INIT_LIST_HEAD(&epi->rdllink);
1517 INIT_LIST_HEAD(&epi->fllink);
1518 INIT_LIST_HEAD(&epi->pwqlist);
1519 epi->ep = ep;
1520 ep_set_ffd(&epi->ffd, tfile, fd);
1521 epi->event = *event;
1522 epi->nwait = 0;
1523 epi->next = EP_UNACTIVE_PTR;
1524 if (epi->event.events & EPOLLWAKEUP) {
1525 error = ep_create_wakeup_source(epi);
1529 RCU_INIT_POINTER(epi->ws, NULL);
1534 list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
1541 ep_rbtree_insert(ep, epi);
1549 epq.epi = epi;
1559 revents = ep_item_poll(epi, &epq.pt, 1);
1567 if (epi->nwait < 0)
1574 ep_set_busy_poll_napi_id(epi);
1577 if (revents && !ep_is_linked(epi)) {
1578 list_add_tail(&epi->rdllink, &ep->rdllist);
1579 ep_pm_stay_awake(epi);
1599 ep_unregister_pollwait(ep, epi);
1602 list_del_rcu(&epi->fllink);
1605 rb_erase_cached(&epi->rbn, &ep->rbr);
1614 if (ep_is_linked(epi))
1615 list_del_init(&epi->rdllink);
1618 wakeup_source_unregister(ep_wakeup_source(epi));
1621 kmem_cache_free(epi_cache, epi);
1630 static int ep_modify(struct eventpoll *ep, struct epitem *epi,
1645 epi->event.events = event->events; /* need barrier below */
1646 epi->event.data = event->data; /* protected by mtx */
1647 if (epi->event.events & EPOLLWAKEUP) {
1648 if (!ep_has_wakeup_source(epi))
1649 ep_create_wakeup_source(epi);
1650 } else if (ep_has_wakeup_source(epi)) {
1651 ep_destroy_wakeup_source(epi);
1657 * 1) Flush epi changes above to other CPUs. This ensures
1661 * changing epi above (but ep_poll_callback does take
1680 if (ep_item_poll(epi, &pt, 1)) {
1682 if (!ep_is_linked(epi)) {
1683 list_add_tail(&epi->rdllink, &ep->rdllist);
1684 ep_pm_stay_awake(epi);
1707 struct epitem *epi, *tmp;
1722 list_for_each_entry_safe(epi, tmp, head, rdllink) {
1727 * Activate ep->ws before deactivating epi->ws to prevent
1728 * triggering auto-suspend here (in case we reactivate epi->ws
1731 * This could be rearranged to delay the deactivation of epi->ws
1732 * instead, but then epi->ws would temporarily be out of sync
1735 ws = ep_wakeup_source(epi);
1742 list_del_init(&epi->rdllink);
1750 revents = ep_item_poll(epi, &pt, 1);
1755 __put_user(epi->event.data, &uevent->data)) {
1756 list_add(&epi->rdllink, head);
1757 ep_pm_stay_awake(epi);
1764 if (epi->event.events & EPOLLONESHOT)
1765 epi->event.events &= EP_PRIVATE_BITS;
1766 else if (!(epi->event.events & EPOLLET)) {
1778 list_add_tail(&epi->rdllink, &ep->rdllist);
1779 ep_pm_stay_awake(epi);
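Lines 1707-1791 are the delivery loop: each ready item is re-polled, the result is copied to the user buffer (the item is put back on the ready list if the copy fails), EPOLLONESHOT items are then disarmed, and items without EPOLLET are re-queued so the next epoll_wait() re-checks them (level-triggered behaviour). A compact sketch of that decision sequence for a single item, with stand-in flag values and a hypothetical copy callback:

#include <stdbool.h>
#include <stdint.h>

#define EV_ONESHOT 0x40000000u     /* stands in for EPOLLONESHOT */
#define EV_ET      0x80000000u     /* stands in for EPOLLET */

struct witem {
        uint32_t interest;         /* like epi->event.events */
        uint32_t ready;            /* what re-polling reported, like ep_item_poll() */
        bool     on_ready;         /* like ep_is_linked(): queued on the ready list */
};

/* Deliver one ready item; returns true if an event was handed to the user. */
static bool deliver_one(struct witem *wi, bool (*copy_to_user)(uint32_t ev))
{
        uint32_t revents = wi->ready & wi->interest;

        if (!revents)
                return false;              /* nothing left to report: drop it */

        if (!copy_to_user(revents)) {
                wi->on_ready = true;       /* put it back and retry later */
                return false;
        }

        if (wi->interest & EV_ONESHOT)
                wi->interest = 0;          /* disarm; kernel keeps only EP_PRIVATE_BITS */
        else if (!(wi->interest & EV_ET))
                wi->on_ready = true;       /* level-triggered: queue it again */

        return true;
}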
1865 * lock because we could race and not see an epi being added
2011 struct epitem *epi;
2016 epi = rb_entry(rbp, struct epitem, rbn);
2017 if (unlikely(is_file_epoll(epi->ffd.file))) {
2018 ep_tovisit = epi->ffd.file->private_data;
2022 ep_loop_check_proc, epi->ffd.file,
2035 if (list_empty(&epi->ffd.file->f_tfile_llink)) {
2036 if (get_file_rcu(epi->ffd.file))
2037 list_add(&epi->ffd.file->f_tfile_llink,
2156 struct epitem *epi;
2262 epi = ep_find(ep, tf.file, fd);
2267 if (!epi) {
2274 if (epi)
2275 error = ep_remove(ep, epi);
2280 if (epi) {
2281 if (!(epi->event.events & EPOLLEXCLUSIVE)) {
2283 error = ep_modify(ep, epi, epds);