Lines matching references to uwq (identifier search over Linux fs/userfaultfd.c; each entry is the file line number followed by the matching source line)
161 struct userfaultfd_wait_queue *uwq;
164 uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
169 if (len && (start > uwq->msg.arg.pagefault.address ||
170 start + len <= uwq->msg.arg.pagefault.address))
172 WRITE_ONCE(uwq->waken, true);
175 * ensure uwq->waken is visible before the task is woken.
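Lines 161-175 fit the shape of a range-filtered wake callback: it recovers the uwq from its wait-queue entry, skips waiters whose fault address falls outside the requested range, and sets uwq->waken before waking the task. A minimal sketch of that shape follows; the surrounding function is not in the listing, so the key type (userfaultfd_wake_range) and the wake call are reconstructed, not quoted:

static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
				     int wake_flags, void *key)
{
	struct userfaultfd_wake_range *range = key;	/* assumed key type */
	struct userfaultfd_wait_queue *uwq;
	unsigned long start = range->start, len = range->len;
	int ret = 0;

	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
	/* len == 0 means "wake all"; otherwise filter to [start, start+len) */
	if (len && (start > uwq->msg.arg.pagefault.address ||
		    start + len <= uwq->msg.arg.pagefault.address))
		goto out;
	WRITE_ONCE(uwq->waken, true);
	/*
	 * Program-order guarantees from the scheduler ensure uwq->waken
	 * is visible before the task is woken (the comment at line 175).
	 */
	ret = wake_up_state(wq->private, mode);
out:
	return ret;
}

The WRITE_ONCE() pairs with a lockless read of uwq.waken on the faulting side; the line-175 comment is what justifies not needing a heavier barrier before the wakeup.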
417 struct userfaultfd_wait_queue uwq;
519 init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
520 uwq.wq.private = current;
521 uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags,
523 uwq.ctx = ctx;
524 uwq.waken = false;
539 * After the __add_wait_queue the uwq is visible to userland
542 __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
576 * and it's fine not to block on the spinlock. The uwq on this
579 if (!list_empty_careful(&uwq.wq.entry)) {
582 * No need of list_del_init(), the uwq on the stack
585 list_del(&uwq.wq.entry);
613 * After the __add_wait_queue the uwq is visible to userland
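Lines 417-585 are the faulting side: handle_userfault() builds the uwq on its own stack, publishes it on ctx->fault_pending_wqh under the queue lock (the line-539 comment marks the publish point), sleeps, and on the way out detaches the entry carefully, since a waker may already have removed it. The identical comment at line 613 suggests the same publish pattern in the event-delivery path. A hedged sketch of the lifecycle; the sleep loop is compressed, and the trailing userfault_msg() arguments come from upstream rather than the listing (line 521 is truncated there):

vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
	struct userfaultfd_ctx *ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
	struct userfaultfd_wait_queue uwq;	/* lives on this stack frame */
	vm_fault_t ret = VM_FAULT_RETRY;	/* typical result, assumed */

	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
	uwq.wq.private = current;		/* task for the wake callback */
	uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags,
				reason, ctx->features); /* tail args assumed */
	uwq.ctx = ctx;
	uwq.waken = false;

	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	/* After the __add_wait_queue the uwq is visible to userland. */
	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	/* ... drop locks, schedule() until READ_ONCE(uwq.waken) ... */

	/*
	 * list_empty_careful() is safe lockless here: if the waker already
	 * detached us there is nothing to do, and it's fine not to block
	 * on the spinlock in that common case (the line-576 comment).
	 */
	if (!list_empty_careful(&uwq.wq.entry)) {
		spin_lock_irq(&ctx->fault_pending_wqh.lock);
		/*
		 * No need of list_del_init(): the uwq on the stack is
		 * about to go out of scope anyway (the line-582 comment).
		 */
		list_del(&uwq.wq.entry);
		spin_unlock_irq(&ctx->fault_pending_wqh.lock);
	}
	return ret;
}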
966 struct userfaultfd_wait_queue *uwq;
970 uwq = NULL;
975 uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
977 return uwq;
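Lines 966-977 outline a peek helper that returns the first pending uwq, or NULL when nothing is queued. The traversal between the matched lines is not shown; the sketch below fills it with a tail-entry walk, assuming FIFO delivery (oldest waiter first), which is an assumption rather than something visible in the listing:

static inline struct userfaultfd_wait_queue *find_userfault(
		struct userfaultfd_ctx *ctx)
{
	wait_queue_entry_t *wq;
	struct userfaultfd_wait_queue *uwq;

	uwq = NULL;
	if (!waitqueue_active(&ctx->fault_pending_wqh))
		goto out;
	/* Assumed: take the tail so reads drain faults oldest-first. */
	wq = list_last_entry(&ctx->fault_pending_wqh.head,
			     typeof(*wq), entry);
	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
	return uwq;
}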
1051 struct userfaultfd_wait_queue *uwq;
1068 uwq = find_userfault(ctx);
1069 if (uwq) {
1080 * The fault_pending_wqh.lock prevents the uwq
1093 * handle_userfault(). The uwq->wq.head list
1100 list_del(&uwq->wq.entry);
1101 add_wait_queue(&ctx->fault_wqh, &uwq->wq);
1106 *msg = uwq->msg;
1114 uwq = find_userfault_evt(ctx);
1115 if (uwq) {
1116 *msg = uwq->msg;
1118 if (uwq->msg.event == UFFD_EVENT_FORK) {
1121 uwq->msg.arg.reserved.reserved1;
1122 list_move(&uwq->wq.entry, &fork_event);
1134 userfaultfd_event_complete(ctx, uwq);
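Lines 1051-1134 are the read side. Under fault_pending_wqh.lock (which, per the line-1080 comment, keeps the stack-allocated uwq alive against a racing handle_userfault() exit), the first pending fault is moved to fault_wqh so a later wake ioctl can still find it, and its msg is copied out. The event branch delivers events the same way, except UFFD_EVENT_FORK entries are parked on a local fork_event list, with the new context pointer carried in msg.arg.reserved.reserved1. A condensed sketch of both branches, error handling and the sleep loop omitted; the locals msg, ret and fork_nctx are assumed from context:

/* Fault branch: hand one pending fault message to the reader. */
spin_lock(&ctx->fault_pending_wqh.lock);
uwq = find_userfault(ctx);
if (uwq) {
	/* The lock pins the faulting task's stack-allocated uwq. */
	list_del(&uwq->wq.entry);
	/* Re-queue on fault_wqh so a later wake ioctl still finds it. */
	add_wait_queue(&ctx->fault_wqh, &uwq->wq);
	*msg = uwq->msg;	/* copy out before dropping the lock */
	ret = 0;
}
spin_unlock(&ctx->fault_pending_wqh.lock);

/* Event branch: fork events are parked until the new fd is resolved. */
spin_lock(&ctx->event_wqh.lock);
uwq = find_userfault_evt(ctx);
if (uwq) {
	*msg = uwq->msg;
	if (uwq->msg.event == UFFD_EVENT_FORK) {
		/* New userfaultfd_ctx smuggled through reserved1. */
		fork_nctx = (struct userfaultfd_ctx *)(unsigned long)
			    uwq->msg.arg.reserved.reserved1;
		list_move(&uwq->wq.entry, &fork_event);
	} else {
		userfaultfd_event_complete(ctx, uwq);
	}
	ret = 0;
}
spin_unlock(&ctx->event_wqh.lock);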
1167 uwq = list_first_entry(&fork_event,
1168 typeof(*uwq),
1180 list_del(&uwq->wq.entry);
1181 __add_wait_queue(&ctx->event_wqh, &uwq->wq);
1189 userfaultfd_event_complete(ctx, uwq);
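Lines 1167-1189 resolve the parked fork entry once the read has finished: the entry is taken off the local fork_event list, put back on event_wqh (the local list head is about to go out of scope, so the entry cannot stay on it), and completed only if the message was actually delivered. A sketch, again assuming ret is the read's status:

if (!list_empty(&fork_event)) {
	spin_lock_irq(&ctx->event_wqh.lock);
	uwq = list_first_entry(&fork_event,
			       typeof(*uwq),
			       wq.entry);
	/*
	 * Re-queue on event_wqh: fork_event is a stack-local list head,
	 * so the entry must not remain on it past this function.
	 */
	list_del(&uwq->wq.entry);
	__add_wait_queue(&ctx->event_wqh, &uwq->wq);
	/* Release the waiter only if the message reached userland. */
	if (!ret)
		userfaultfd_event_complete(ctx, uwq);
	spin_unlock_irq(&ctx->event_wqh.lock);
}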