Lines matching refs:epi (references to the epi identifier in fs/eventpoll.c)
241 struct epitem *epi;
298 struct epitem *epi = container_of(p, struct epitem, fllink);
299 spin_lock(&epi->ffd.file->f_lock);
303 spin_unlock(&epi->ffd.file->f_lock);
360 static inline int ep_is_linked(struct epitem *epi)
362 return !list_empty(&epi->rdllink);
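ep_is_linked() can answer "is this epitem on the ready list?" from the node alone only because every removal in this file uses list_del_init(), which leaves a detached node pointing at itself. A minimal userspace sketch of that idiom (the list_head stand-ins below are modeled on include/linux/list.h, not taken from it):

#include <assert.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h; h->prev = h; }

/* Mirrors ep_is_linked(): a self-linked node is "not on any list". */
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* list_del_init, not plain list_del: the node must self-link afterwards,
 * otherwise the emptiness test above would misread a stale node. */
static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

int main(void)
{
	struct list_head rdllist, rdllink;

	INIT_LIST_HEAD(&rdllist);
	INIT_LIST_HEAD(&rdllink);
	assert(list_empty(&rdllink));      /* detached */
	list_add_tail(&rdllink, &rdllist);
	assert(!list_empty(&rdllink));     /* linked */
	list_del_init(&rdllink);
	assert(list_empty(&rdllink));      /* detached again */
	return 0;
}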
427 static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
437 sock = sock_from_file(epi->ffd.file);
446 ep = epi->ep;
466 static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
499 static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
512 * If we are not being called from ep_poll_callback(), epi is NULL and
521 if (epi) {
522 if ((is_file_epoll(epi->ffd.file))) {
523 ep_src = epi->ffd.file->private_data;
538 static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
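ep_poll_safewake() exists because an epoll file can itself be watched by another epoll instance (the is_file_epoll() test at line 522), so a wakeup may have to travel up a chain of epoll instances without deadlocking on nested wait-queue locks. The nesting is easy to provoke from userspace; a small runnable example (userspace, not kernel code):

#include <sys/epoll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int inner = epoll_create1(0), outer = epoll_create1(0);
	int pipefd[2];
	struct epoll_event ev = { 0 }, out;

	if (inner < 0 || outer < 0 || pipe(pipefd) < 0)
		return 1;

	ev.events = EPOLLIN;
	ev.data.fd = pipefd[0];
	epoll_ctl(inner, EPOLL_CTL_ADD, pipefd[0], &ev);

	/* The inner epoll fd is itself watched by the outer one; wakeups
	 * on it go through the ep_poll_safewake() nesting logic. */
	ev.data.fd = inner;
	epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev);

	(void)write(pipefd[1], "x", 1);
	printf("outer sees %d event(s)\n", epoll_wait(outer, &out, 1, 0));
	return 0;
}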
567 static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
569 struct eppoll_entry **p = &epi->pwqlist;
580 static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)
582 return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
586 static inline void ep_pm_stay_awake(struct epitem *epi)
588 struct wakeup_source *ws = ep_wakeup_source(epi);
594 static inline bool ep_has_wakeup_source(struct epitem *epi)
596 return rcu_access_pointer(epi->ws) ? true : false;
600 static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
605 ws = rcu_dereference(epi->ws);
636 struct epitem *epi, *nepi;
644 for (nepi = READ_ONCE(ep->ovflist); (epi = nepi) != NULL;
645 nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
652 if (!ep_is_linked(epi)) {
657 list_add(&epi->rdllink, &ep->rdllist);
658 ep_pm_stay_awake(epi);
684 struct epitem *epi = container_of(head, struct epitem, rcu);
685 kmem_cache_free(epi_cache, epi);
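epi_rcu_free() receives a pointer to the rcu member embedded in struct epitem and uses container_of() to recover the epitem itself; the same pattern appears at lines 298 and 1170. A standalone sketch of the macro (struct epitem_demo and demo_rcu_free are illustrative stand-ins, not kernel code):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_demo { void *next; };     /* stand-in for struct rcu_head */

struct epitem_demo {
	int fd;
	struct rcu_head_demo rcu;          /* embedded, as in struct epitem */
};

static void demo_rcu_free(struct rcu_head_demo *head)
{
	/* Recover the containing object from the embedded member. */
	struct epitem_demo *epi = container_of(head, struct epitem_demo, rcu);

	printf("freeing epitem for fd %d\n", epi->fd);
}

int main(void)
{
	struct epitem_demo e = { .fd = 42 };

	demo_rcu_free(&e.rcu);
	return 0;
}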
721 static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
723 struct file *file = epi->ffd.file;
732 ep_unregister_pollwait(ep, epi);
736 if (epi->dying && !force) {
743 if (head->first == &epi->fllink && !epi->fllink.next) {
752 hlist_del_rcu(&epi->fllink);
756 rb_erase_cached(&epi->rbn, &ep->rbr);
759 if (ep_is_linked(epi))
760 list_del_init(&epi->rdllink);
763 wakeup_source_unregister(ep_wakeup_source(epi));
766 * field epi->rcu, since we are trying to minimize the size of
771 call_rcu(&epi->rcu, epi_rcu_free);
780 static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi)
782 WARN_ON_ONCE(__ep_remove(ep, epi, false));
788 struct epitem *epi;
801 epi = rb_entry(rbp, struct epitem, rbn);
803 ep_unregister_pollwait(ep, epi);
817 epi = rb_entry(rbp, struct epitem, rbn);
818 ep_remove_safe(ep, epi);
839 static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt, int depth);
845 struct epitem *epi, *tmp;
860 list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {
861 if (ep_item_poll(epi, &pt, depth + 1)) {
870 __pm_relax(ep_wakeup_source(epi));
871 list_del_init(&epi->rdllink);
884 static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
887 struct file *file = epi->ffd.file;
890 pt->_key = epi->event.events;
895 return res & epi->event.events;
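ep_item_poll() narrows the poll key to the registered mask before polling (line 890) and masks the result again on the way out (line 895), so a caller only ever sees the bits it asked for. The same masking is observable from userspace with plain poll(2); a small analogue:

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pipefd[2];
	struct pollfd pfd;
	short requested = POLLIN;   /* plays the role of epi->event.events */

	if (pipe(pipefd) < 0)
		return 1;
	(void)write(pipefd[1], "x", 1);

	pfd.fd = pipefd[0];
	pfd.events = requested;     /* like pt->_key at line 890 */
	pfd.revents = 0;
	poll(&pfd, 1, 0);

	/* Like the return at line 895: report only the requested bits. */
	printf("delivered mask: %#x\n", pfd.revents & requested);
	return 0;
}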
911 struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
912 struct inode *inode = file_inode(epi->ffd.file);
916 epi->ffd.fd, epi->event.events,
917 (long long)epi->event.data,
918 (long long)epi->ffd.file->f_pos,
945 struct epitem *epi;
956 epi = hlist_entry(file->f_ep->first, struct epitem, fllink);
957 epi->dying = true;
964 ep = epi->ep;
966 dispose = __ep_remove(ep, epi, true);
1008 struct epitem *epi, *epir = NULL;
1013 epi = rb_entry(rbp, struct epitem, rbn);
1014 kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
1020 epir = epi;
1032 struct epitem *epi;
1035 epi = rb_entry(rbp, struct epitem, rbn);
1036 if (epi->ffd.fd == tfd) {
1038 return epi;
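Both lookups above walk ep->rbr comparing {file, fd} pairs via ep_cmp_ffd(), which does not appear in this match list because its body never mentions an epi. Its shape (order by file pointer first, then by descriptor) can be sketched in isolation; cmp_ffd and struct filefd_demo below are illustrative names, not the kernel's:

#include <stdio.h>

/* Mirrors struct epoll_filefd: an epoll key is the {file, fd} pair. */
struct filefd_demo {
	const void *file;
	int fd;
};

/* Same shape as the comparator used at lines 1014 and 1309: pointer
 * order first, then fd, giving a total order for the rbtree. (The
 * kernel compares unrelated pointers this way too.) */
static int cmp_ffd(const struct filefd_demo *p1, const struct filefd_demo *p2)
{
	return p1->file > p2->file ? +1 :
	       (p1->file < p2->file ? -1 : p1->fd - p2->fd);
}

int main(void)
{
	int dummy_a, dummy_b;
	struct filefd_demo x = { &dummy_a, 3 }, y = { &dummy_b, 3 };
	struct filefd_demo z = { &dummy_a, 7 };

	printf("x vs y: %d\n", cmp_ffd(&x, &y)); /* differ by file */
	printf("x vs z: %d\n", cmp_ffd(&x, &z)); /* same file, differ by fd */
	return 0;
}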
1053 struct epitem *epi;
1061 epi = ep_find_tfd(ep, tfd, toff);
1062 if (epi)
1063 file_raw = epi->ffd.file;
1126 * Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
1129 * Return: %false if the epi element has already been chained, %true otherwise.
1131 static inline bool chain_epi_lockless(struct epitem *epi)
1133 struct eventpoll *ep = epi->ep;
1136 if (epi->next != EP_UNACTIVE_PTR)
1139 /* Check that the same epi has not been just chained from another CPU */
1140 if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
1144 epi->next = xchg(&ep->ovflist, epi);
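chain_epi_lockless() pushes onto ep->ovflist with two atomics: a cmpxchg on epi->next that moves it away from the EP_UNACTIVE_PTR sentinel (so a given epi can be claimed only once), then an xchg that swaps the item in as the new list head. The loop at lines 644-645 later drains the stack, parking each next pointer back at the sentinel. A single-threaded C11 sketch of the same protocol; the names are invented, and the drain is simplified (the kernel drains under ep->lock with ovflist parked at EP_UNACTIVE_PTR rather than exchanged to NULL):

#include <stdatomic.h>
#include <stdio.h>

struct item {
	int id;
	_Atomic(struct item *) next;
};

#define EP_UNACTIVE_PTR ((struct item *)-1L)  /* "not chained" sentinel */

static _Atomic(struct item *) ovflist;        /* NULL-initialized head */

static int chain_lockless(struct item *it)
{
	struct item *expected = EP_UNACTIVE_PTR;

	/* Claim the node: only one caller wins the transition away from
	 * the sentinel, so the same item cannot be chained twice. */
	if (!atomic_compare_exchange_strong(&it->next, &expected,
					    (struct item *)NULL))
		return 0;

	/* Swap ourselves in as the head; the old head becomes our next. */
	it->next = atomic_exchange(&ovflist, it);
	return 1;
}

static void drain(void)
{
	struct item *it, *nit;

	/* Walk the stack, parking each next pointer at the sentinel,
	 * like the loop at lines 644-645. */
	for (nit = atomic_exchange(&ovflist, (struct item *)NULL);
	     (it = nit) != NULL;
	     nit = it->next, it->next = EP_UNACTIVE_PTR)
		printf("drained item %d\n", it->id);
}

int main(void)
{
	struct item a = { 1, EP_UNACTIVE_PTR };
	struct item b = { 2, EP_UNACTIVE_PTR };

	chain_lockless(&a);
	chain_lockless(&b);
	printf("re-chain a: %d\n", chain_lockless(&a)); /* 0: already chained */
	drain();                                        /* 2 then 1 (LIFO) */
	return 0;
}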
1161 * concurrently for the same @epi from different CPUs if poll table was inited
1170 struct epitem *epi = ep_item_from_wait(wait);
1171 struct eventpoll *ep = epi->ep;
1178 ep_set_busy_poll_napi_id(epi);
1186 if (!(epi->event.events & ~EP_PRIVATE_BITS))
1195 if (pollflags && !(pollflags & epi->event.events))
1205 if (chain_epi_lockless(epi))
1206 ep_pm_stay_awake_rcu(epi);
1207 } else if (!ep_is_linked(epi)) {
1209 if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
1210 ep_pm_stay_awake_rcu(epi);
1218 if ((epi->event.events & EPOLLEXCLUSIVE) &&
1222 if (epi->event.events & EPOLLIN)
1226 if (epi->event.events & EPOLLOUT)
1244 ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE);
1246 if (!(epi->event.events & EPOLLEXCLUSIVE))
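For an EPOLLEXCLUSIVE item the callback only counts the wakeup (lines 1218-1226) when the wake key actually overlaps EPOLLIN/EPOLLOUT, which is what lets one event wake a single waiter instead of every epoll instance attached to the fd. From userspace the flag is just an EPOLL_CTL_ADD bit (Linux 4.5+); a runnable example:

#include <sys/epoll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int epfd = epoll_create1(0);
	int pipefd[2];
	struct epoll_event ev = { 0 }, out;

	if (epfd < 0 || pipe(pipefd) < 0)
		return 1;

	/* EPOLLEXCLUSIVE is only accepted at ADD time; an item added as
	 * exclusive also cannot later be modified (see line 2237). */
	ev.events = EPOLLIN | EPOLLEXCLUSIVE;
	ev.data.fd = pipefd[0];
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev) < 0)
		perror("epoll_ctl");

	(void)write(pipefd[1], "x", 1);
	printf("events: %d\n", epoll_wait(epfd, &out, 1, 0));
	return 0;
}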
1260 * nothing protects ep/epi or even wait.
1276 struct epitem *epi = epq->epi;
1279 if (unlikely(!epi)) // an earlier allocation has failed
1284 epq->epi = NULL;
1290 pwq->base = epi;
1291 if (epi->event.events & EPOLLEXCLUSIVE)
1295 pwq->next = epi->pwqlist;
1296 epi->pwqlist = pwq;
1299 static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
1309 kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
1316 rb_link_node(&epi->rbn, parent, p);
1317 rb_insert_color_cached(&epi->rbn, &ep->rbr, leftmost);
1359 struct epitem *epi;
1365 hlist_for_each_entry_rcu(epi, refs, fllink) {
1366 struct hlist_head *refs = &epi->ep->refs;
1403 static int ep_create_wakeup_source(struct epitem *epi)
1408 if (!epi->ep->ws) {
1409 epi->ep->ws = wakeup_source_register(NULL, "eventpoll");
1410 if (!epi->ep->ws)
1414 take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
1420 rcu_assign_pointer(epi->ws, ws);
1426 static noinline void ep_destroy_wakeup_source(struct epitem *epi)
1428 struct wakeup_source *ws = ep_wakeup_source(epi);
1430 RCU_INIT_POINTER(epi->ws, NULL);
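The create/destroy pair above backs EPOLLWAKEUP: while an EPOLLWAKEUP event is queued or being processed, the kernel holds a wakeup source so autosleep cannot suspend the system. Userspace opts in per item; note the flag is silently dropped if the caller lacks CAP_BLOCK_SUSPEND:

#include <sys/epoll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int epfd = epoll_create1(0);
	int pipefd[2];
	struct epoll_event ev = { 0 };

	if (epfd < 0 || pipe(pipefd) < 0)
		return 1;

	/* With CAP_BLOCK_SUSPEND this makes the kernel register a wakeup
	 * source (ep_create_wakeup_source, line 1403) for the item;
	 * without the capability the flag is silently ignored. */
	ev.events = EPOLLIN | EPOLLWAKEUP;
	ev.data.fd = pipefd[0];
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev) < 0)
		perror("epoll_ctl");
	return 0;
}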
1441 static int attach_epitem(struct file *file, struct epitem *epi)
1468 hlist_add_head_rcu(&epi->fllink, file->f_ep);
1482 struct epitem *epi;
1496 if (!(epi = kmem_cache_zalloc(epi_cache, GFP_KERNEL))) {
1502 INIT_LIST_HEAD(&epi->rdllink);
1503 epi->ep = ep;
1504 ep_set_ffd(&epi->ffd, tfile, fd);
1505 epi->event = *event;
1506 epi->next = EP_UNACTIVE_PTR;
1511 if (unlikely(attach_epitem(tfile, epi) < 0)) {
1514 kmem_cache_free(epi_cache, epi);
1526 ep_rbtree_insert(ep, epi);
1538 ep_remove_safe(ep, epi);
1542 if (epi->event.events & EPOLLWAKEUP) {
1543 error = ep_create_wakeup_source(epi);
1545 ep_remove_safe(ep, epi);
1551 epq.epi = epi;
1561 revents = ep_item_poll(epi, &epq.pt, 1);
1568 if (unlikely(!epq.epi)) {
1569 ep_remove_safe(ep, epi);
1577 ep_set_busy_poll_napi_id(epi);
1580 if (revents && !ep_is_linked(epi)) {
1581 list_add_tail(&epi->rdllink, &ep->rdllist);
1582 ep_pm_stay_awake(epi);
1604 static int ep_modify(struct eventpoll *ep, struct epitem *epi,
1619 epi->event.events = event->events; /* need barrier below */
1620 epi->event.data = event->data; /* protected by mtx */
1621 if (epi->event.events & EPOLLWAKEUP) {
1622 if (!ep_has_wakeup_source(epi))
1623 ep_create_wakeup_source(epi);
1624 } else if (ep_has_wakeup_source(epi)) {
1625 ep_destroy_wakeup_source(epi);
1631 * 1) Flush epi changes above to other CPUs. This ensures
1635 * changing epi above (but ep_poll_callback does take
1654 if (ep_item_poll(epi, &pt, 1)) {
1656 if (!ep_is_linked(epi)) {
1657 list_add_tail(&epi->rdllink, &ep->rdllist);
1658 ep_pm_stay_awake(epi);
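Note how ep_modify() re-polls the file (line 1654) right after swapping in the new mask, so readiness that already exists is reported without fresh activity on the fd; together with the mask test in the callback (line 1195) this is observable from userspace:

#include <sys/epoll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int epfd = epoll_create1(0);
	int pipefd[2];
	struct epoll_event ev = { 0 }, out;

	if (epfd < 0 || pipe(pipefd) < 0)
		return 1;

	/* Watch the pipe's read end for EPOLLOUT: never a match, so the
	 * callback's mask test (line 1195) discards every wakeup. */
	ev.events = EPOLLOUT;
	ev.data.fd = pipefd[0];
	epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev);

	(void)write(pipefd[1], "x", 1);
	printf("EPOLLOUT mask: %d event(s)\n", epoll_wait(epfd, &out, 1, 0));

	/* ep_modify() re-polls the file after updating the mask (line
	 * 1654), so the byte already buffered is reported immediately. */
	ev.events = EPOLLIN;
	epoll_ctl(epfd, EPOLL_CTL_MOD, pipefd[0], &ev);
	printf("EPOLLIN mask:  %d event(s)\n", epoll_wait(epfd, &out, 1, 0));
	return 0;
}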
1679 struct epitem *epi, *tmp;
1701 list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {
1709 * Activate ep->ws before deactivating epi->ws to prevent
1710 * triggering auto-suspend here (in case we reactivate epi->ws
1713 * This could be rearranged to delay the deactivation of epi->ws
1714 * instead, but then epi->ws would temporarily be out of sync
1717 ws = ep_wakeup_source(epi);
1724 list_del_init(&epi->rdllink);
1731 revents = ep_item_poll(epi, &pt, 1);
1735 events = epoll_put_uevent(revents, epi->event.data, events);
1737 list_add(&epi->rdllink, &txlist);
1738 ep_pm_stay_awake(epi);
1744 if (epi->event.events & EPOLLONESHOT)
1745 epi->event.events &= EP_PRIVATE_BITS;
1746 else if (!(epi->event.events & EPOLLET)) {
1758 list_add_tail(&epi->rdllink, &ep->rdllist);
1759 ep_pm_stay_awake(epi);
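Lines 1744-1746 implement the delivery-time semantics: EPOLLONESHOT disarms the item (only the EP_PRIVATE_BITS survive) and level-triggered items go straight back on rdllist. The disarm-and-rearm cycle, exercised from userspace:

#include <sys/epoll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int epfd = epoll_create1(0);
	int pipefd[2];
	struct epoll_event ev = { 0 }, out;

	if (epfd < 0 || pipe(pipefd) < 0)
		return 1;

	ev.events = EPOLLIN | EPOLLONESHOT;
	ev.data.fd = pipefd[0];
	epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev);

	(void)write(pipefd[1], "x", 1);
	printf("first wait:  %d event(s)\n", epoll_wait(epfd, &out, 1, 0));

	/* The item is now disarmed (events &= EP_PRIVATE_BITS, line 1745),
	 * so the still-readable pipe reports nothing... */
	printf("second wait: %d event(s)\n", epoll_wait(epfd, &out, 1, 0));

	/* ...until EPOLL_CTL_MOD rearms it via ep_modify(). */
	epoll_ctl(epfd, EPOLL_CTL_MOD, pipefd[0], &ev);
	printf("after rearm: %d event(s)\n", epoll_wait(epfd, &out, 1, 0));
	return 0;
}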
1965 struct epitem *epi;
1970 epi = rb_entry(rbp, struct epitem, rbn);
1971 if (unlikely(is_file_epoll(epi->ffd.file))) {
1973 ep_tovisit = epi->ffd.file->private_data;
1991 list_file(epi->ffd.file);
2118 struct epitem *epi;
2212 epi = ep_find(ep, tf.file, fd);
2217 if (!epi) {
2224 if (epi) {
2229 ep_remove_safe(ep, epi);
2236 if (epi) {
2237 if (!(epi->event.events & EPOLLEXCLUSIVE)) {
2239 error = ep_modify(ep, epi, epds);
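The tail of the listing is the epoll_ctl() dispatch: ADD requires that ep_find() came back empty, while MOD and DEL require that it did not, which is where userspace's EEXIST and ENOENT come from. A runnable demonstration:

#include <sys/epoll.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int epfd = epoll_create1(0);
	int pipefd[2];
	struct epoll_event ev = { 0 };

	if (epfd < 0 || pipe(pipefd) < 0)
		return 1;

	ev.events = EPOLLIN;
	ev.data.fd = pipefd[0];

	/* MOD before ADD: ep_find() returns no epi, so the kernel
	 * reports ENOENT. */
	if (epoll_ctl(epfd, EPOLL_CTL_MOD, pipefd[0], &ev) < 0)
		printf("MOD: %s\n", strerror(errno));

	epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev);

	/* Second ADD: ep_find() finds the epi, so the kernel reports
	 * EEXIST. */
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev) < 0)
		printf("ADD again: %s\n", strerror(errno));

	/* DEL takes the ep_remove_safe() path at line 2229. */
	epoll_ctl(epfd, EPOLL_CTL_DEL, pipefd[0], NULL);
	return 0;
}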