Lines matching references to `uwq` (struct userfaultfd_wait_queue) in the Linux kernel's fs/userfaultfd.c; the number on each line is its location in that file.

113	struct userfaultfd_wait_queue *uwq;
116	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
121	if (len && (start > uwq->msg.arg.pagefault.address ||
122		    start + len <= uwq->msg.arg.pagefault.address))
124	WRITE_ONCE(uwq->waken, true);
127	 * ensure uwq->waken is visible before the task is woken.
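
These hits fall in userfaultfd_wake_function(): the wake callback recovers the enclosing userfaultfd_wait_queue from its embedded wait_queue_entry with container_of(), skips waiters whose faulting address lies outside the requested wake range (len == 0 means wake all), and publishes uwq->waken with WRITE_ONCE(). As a minimal userspace sketch of the same container_of() pattern (every name below is a hypothetical stand-in, not kernel code):

#include <stddef.h>
#include <stdio.h>

/* Userspace rendition of the kernel's container_of(): given a pointer to
 * MEMBER inside TYPE, step back to the start of the enclosing TYPE. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-ins for wait_queue_entry / userfaultfd_wait_queue. */
struct wait_entry {
	int flags;
};

struct fault_waiter {
	unsigned long address;	/* like uwq->msg.arg.pagefault.address */
	struct wait_entry wq;	/* embedded entry, like uwq->wq */
};

/* A wake callback sees only the embedded entry but needs the outer struct. */
static void wake_one(struct wait_entry *wq, unsigned long start,
		     unsigned long len)
{
	struct fault_waiter *w = container_of(wq, struct fault_waiter, wq);

	/* len == 0 means "wake regardless of range", mirroring the check
	 * on lines 121-122 above. */
	if (len && (start > w->address || start + len <= w->address))
		return;
	printf("waking waiter at address %#lx\n", w->address);
}

int main(void)
{
	struct fault_waiter w = { .address = 0x1000 };

	wake_one(&w.wq, 0x1000, 0x1000);	/* in range: wakes */
	wake_one(&w.wq, 0x4000, 0x1000);	/* out of range: skipped */
	return 0;
}
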
374	struct userfaultfd_wait_queue uwq;
476	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
477	uwq.wq.private = current;
478	uwq.msg = userfault_msg(vmf->address, vmf->flags, reason,
480	uwq.ctx = ctx;
481	uwq.waken = false;
487	 * After the __add_wait_queue the uwq is visible to userland
490	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
525	 * and it's fine not to block on the spinlock. The uwq on this
528	if (!list_empty_careful(&uwq.wq.entry)) {
531	 * No need of list_del_init(), the uwq on the stack
534	list_del(&uwq.wq.entry);
562	 * After the __add_wait_queue the uwq is visible to userland
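
These hits are from handle_userfault(): the faulting task builds its userfaultfd_wait_queue on its own stack, fills in the message, queues itself on ctx->fault_pending_wqh, sleeps, and after waking unlinks the entry only if it is still on a list (the list_empty_careful() check lets it skip the lock entirely when the waker already detached it, and plain list_del() suffices because the stack uwq is about to go away). A minimal pthread sketch of that on-stack waiter lifecycle, using hypothetical list helpers modeled on the kernel's:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Tiny intrusive list standing in for the kernel's list_head. */
struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *n) { n->prev = n->next = n; }

static void list_add(struct list_node *head, struct list_node *n)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

/* Rough analogue of list_empty_careful(): is this entry detached? */
static bool list_empty_careful(const struct list_node *n)
{
	return n->next == n && n->prev == n;
}

/* Hypothetical on-stack waiter, mirroring the on-stack uwq. */
struct waiter {
	struct list_node entry;
	bool waken;			/* like uwq.waken */
	pthread_cond_t cond;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct list_node pending;	/* like ctx->fault_pending_wqh */

static void *waker(void *arg)
{
	struct waiter *w = arg;

	pthread_mutex_lock(&lock);
	w->waken = true;		/* like WRITE_ONCE(uwq->waken, true) */
	list_del_init(&w->entry);	/* the wake path detaches the entry */
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	struct waiter w = { .waken = false };
	pthread_t t;

	list_init(&pending);
	list_init(&w.entry);
	pthread_cond_init(&w.cond, NULL);

	pthread_mutex_lock(&lock);
	list_add(&pending, &w.entry);	/* visible to the waker from here on */
	pthread_create(&t, NULL, waker, &w);
	while (!w.waken)		/* sleep until woken, like schedule() */
		pthread_cond_wait(&w.cond, &lock);
	pthread_mutex_unlock(&lock);

	/* Like the tail of handle_userfault(): only take the lock and unlink
	 * if we are still queued; here the waker already did list_del_init(),
	 * so this is skipped. */
	if (!list_empty_careful(&w.entry)) {
		pthread_mutex_lock(&lock);
		list_del_init(&w.entry);
		pthread_mutex_unlock(&lock);
	}

	pthread_join(t, NULL);
	printf("woken; on-stack waiter safely unlinked\n");
	return 0;
}
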
907	struct userfaultfd_wait_queue *uwq;
911	uwq = NULL;
916	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
918	return uwq;
992	struct userfaultfd_wait_queue *uwq;
1009	uwq = find_userfault(ctx);
1010	if (uwq) {
1021	 * The fault_pending_wqh.lock prevents the uwq
1034	 * handle_userfault(). The uwq->wq.head list
1041	list_del(&uwq->wq.entry);
1042	add_wait_queue(&ctx->fault_wqh, &uwq->wq);
1047	*msg = uwq->msg;
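
These hits are from find_userfault() and the fault half of userfaultfd_ctx_read(): the reader peeks the first pending waiter, moves it from fault_pending_wqh to fault_wqh under the lock, and copies uwq->msg out to the reader. What userland sees at the far end of the copy on line 1047 is one struct uffd_msg per read(2). A hedged sketch of that consumer, assuming `uffd` was created via the userfaultfd(2) syscall and a memory range was already registered with UFFDIO_REGISTER:

#include <linux/userfaultfd.h>
#include <stdio.h>
#include <unistd.h>

/* Block in read(2) until the kernel hands over one struct uffd_msg (the
 * uwq->msg copied out above), then dispatch on msg.event. */
static int handle_one_message(int uffd)
{
	struct uffd_msg msg;
	ssize_t n;

	n = read(uffd, &msg, sizeof(msg));	/* blocks until a fault/event */
	if (n != sizeof(msg))
		return -1;

	switch (msg.event) {
	case UFFD_EVENT_PAGEFAULT:
		printf("page fault at %#llx (flags %#llx)\n",
		       (unsigned long long)msg.arg.pagefault.address,
		       (unsigned long long)msg.arg.pagefault.flags);
		/* Typically resolved with ioctl(uffd, UFFDIO_COPY, ...) or
		 * UFFDIO_ZEROPAGE, which wakes the faulting thread. */
		break;
	default:
		printf("non-fault event %#x\n", msg.event);
		break;
	}
	return 0;
}
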
1055	uwq = find_userfault_evt(ctx);
1056	if (uwq) {
1057	*msg = uwq->msg;
1059	if (uwq->msg.event == UFFD_EVENT_FORK) {
1062	uwq->msg.arg.reserved.reserved1;
1063	list_move(&uwq->wq.entry, &fork_event);
1075	userfaultfd_event_complete(ctx, uwq);
1108	uwq = list_first_entry(&fork_event,
1109		typeof(*uwq),
1121	list_del(&uwq->wq.entry);
1122	__add_wait_queue(&ctx->event_wqh, &uwq->wq);
1130	userfaultfd_event_complete(ctx, uwq);
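
The remaining hits are the event half of userfaultfd_ctx_read(): a non-fault event's message is copied out, and for UFFD_EVENT_FORK the new context (temporarily stashed in msg.arg.reserved.reserved1) is parked on the local fork_event list until a file descriptor can be installed; if that fails, the waiter is requeued on ctx->event_wqh instead of being completed. From userland, the fork event surfaces as a uffd_msg carrying a fresh userfaultfd fd for the child in arg.fork.ufd. A short sketch of consuming it, assuming UFFD_FEATURE_EVENT_FORK was enabled via UFFDIO_API:

#include <linux/userfaultfd.h>
#include <stdio.h>
#include <unistd.h>

/* Consume a UFFD_EVENT_FORK message. The kernel code above parks the
 * forked context on `fork_event` until it can install a descriptor; what
 * the reader ultimately receives is a new userfaultfd fd in arg.fork.ufd
 * that monitors the child's address space. */
static void handle_fork_event(const struct uffd_msg *msg)
{
	if (msg->event != UFFD_EVENT_FORK)
		return;

	int child_uffd = (int)msg->arg.fork.ufd;

	printf("child tracked by new userfaultfd fd %d\n", child_uffd);
	/* A real monitor would hand child_uffd to its fault-handling loop;
	 * this sketch just closes it. */
	close(child_uffd);
}
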