Lines matching refs: ctx (fs/userfaultfd.c)

82 struct userfaultfd_ctx *ctx;
91 struct userfaultfd_ctx *ctx;
103 static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
105 return ctx->features & UFFD_FEATURE_INITIALIZED;
151 * @ctx: [in] Pointer to the userfaultfd context.
153 static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
155 refcount_inc(&ctx->refcount);
161 * @ctx: [in] Pointer to userfaultfd context.
166 static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
168 if (refcount_dec_and_test(&ctx->refcount)) {
169 VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
170 VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
171 VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
172 VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
173 VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
174 VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
175 VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
176 VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
177 mmdrop(ctx->mm);
178 kmem_cache_free(userfaultfd_ctx_cachep, ctx);
229 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
235 struct mm_struct *mm = ctx->mm;
261 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
278 static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
283 struct mm_struct *mm = ctx->mm;
373 struct userfaultfd_ctx *ctx;
399 ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
400 if (!ctx)
403 BUG_ON(ctx->mm != mm);
408 if (ctx->features & UFFD_FEATURE_SIGBUS)
416 if (unlikely(READ_ONCE(ctx->released))) {
474 userfaultfd_ctx_get(ctx);
479 ctx->features);
480 uwq.ctx = ctx;
485 spin_lock_irq(&ctx->fault_pending_wqh.lock);
490 __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
497 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
500 must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
503 must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
508 if (likely(must_wait && !READ_ONCE(ctx->released))) {
509 wake_up_poll(&ctx->fd_wqh, EPOLLIN);
529 spin_lock_irq(&ctx->fault_pending_wqh.lock);
535 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
539 * ctx may go away after this if the userfault pseudo fd is
542 userfaultfd_ctx_put(ctx);
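
handle_userfault() above is the faulting-thread half of the protocol: it queues itself on ctx->fault_pending_wqh, wakes any reader on ctx->fd_wqh, and sleeps until the fault is resolved or the fd is released. For orientation, a minimal userspace sketch of the other half (poll, read one uffd_msg, resolve with UFFDIO_COPY); service_faults() is an illustrative name, error handling is trimmed, and the uffd is assumed to be already registered for missing-page faults:

#include <errno.h>
#include <linux/userfaultfd.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Illustrative monitor loop: resolve missing-page faults with UFFDIO_COPY. */
static void service_faults(int uffd, void *page, size_t page_size)
{
    for (;;) {
        struct pollfd pollfd = { .fd = uffd, .events = POLLIN };
        struct uffd_msg msg;

        /* Mirrors userfaultfd_poll(): POLLIN once a fault is pending. */
        if (poll(&pollfd, 1, -1) < 0)
            break;

        /* Each read() dequeues one message from fault_pending_wqh. */
        if (read(uffd, &msg, sizeof(msg)) != (ssize_t)sizeof(msg))
            continue;
        if (msg.event != UFFD_EVENT_PAGEFAULT)
            continue;

        /* Install a page at the faulting address; this also wakes the
         * thread sleeping in handle_userfault(). */
        struct uffdio_copy copy = {
            .dst = msg.arg.pagefault.address & ~((__u64)page_size - 1),
            .src = (uintptr_t)page,
            .len = page_size,
            .mode = 0,
        };
        if (ioctl(uffd, UFFDIO_COPY, &copy) < 0)
            perror("UFFDIO_COPY");  /* e.g. EEXIST if already resolved */
    }
}
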
548 static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
556 ewq->ctx = ctx;
560 spin_lock_irq(&ctx->event_wqh.lock);
565 __add_wait_queue(&ctx->event_wqh, &ewq->wq);
570 if (READ_ONCE(ctx->released) ||
578 __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
590 spin_unlock_irq(&ctx->event_wqh.lock);
592 wake_up_poll(&ctx->fd_wqh, EPOLLIN);
595 spin_lock_irq(&ctx->event_wqh.lock);
598 spin_unlock_irq(&ctx->event_wqh.lock);
607 if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
617 * ctx may go away after this if the userfault pseudo fd is
621 WRITE_ONCE(ctx->mmap_changing, false);
622 userfaultfd_ctx_put(ctx);
625 static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
629 wake_up_locked(&ctx->event_wqh);
630 __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
635 struct userfaultfd_ctx *ctx = NULL, *octx;
638 octx = vma->vm_userfaultfd_ctx.ctx;
647 ctx = fctx->new;
651 if (!ctx) {
656 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
657 if (!ctx) {
662 refcount_set(&ctx->refcount, 1);
663 ctx->flags = octx->flags;
664 ctx->features = octx->features;
665 ctx->released = false;
666 ctx->mmap_changing = false;
667 ctx->mm = vma->vm_mm;
668 mmgrab(ctx->mm);
673 fctx->new = ctx;
677 vma->vm_userfaultfd_ctx.ctx = ctx;
683 struct userfaultfd_ctx *ctx = fctx->orig;
691 userfaultfd_event_wait_completion(ctx, &ewq);
708 struct userfaultfd_ctx *ctx;
710 ctx = vma->vm_userfaultfd_ctx.ctx;
712 if (!ctx)
715 if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
716 vm_ctx->ctx = ctx;
717 userfaultfd_ctx_get(ctx);
718 WRITE_ONCE(ctx->mmap_changing, true);
730 struct userfaultfd_ctx *ctx = vm_ctx->ctx;
733 if (!ctx)
737 userfaultfd_ctx_put(ctx);
748 userfaultfd_event_wait_completion(ctx, &ewq);
755 struct userfaultfd_ctx *ctx;
758 ctx = vma->vm_userfaultfd_ctx.ctx;
759 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
762 userfaultfd_ctx_get(ctx);
763 WRITE_ONCE(ctx->mmap_changing, true);
772 userfaultfd_event_wait_completion(ctx, &ewq);
777 static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
783 if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
796 struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
798 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
799 has_unmap_ctx(ctx, unmaps, start, end))
806 userfaultfd_ctx_get(ctx);
807 WRITE_ONCE(ctx->mmap_changing, true);
808 unmap_ctx->ctx = ctx;
819 struct userfaultfd_unmap_ctx *ctx, *n;
822 list_for_each_entry_safe(ctx, n, uf, list) {
826 ewq.msg.arg.remove.start = ctx->start;
827 ewq.msg.arg.remove.end = ctx->end;
829 userfaultfd_event_wait_completion(ctx->ctx, &ewq);
831 list_del(&ctx->list);
832 kfree(ctx);
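
dup_userfaultfd(), mremap_userfaultfd_prep(), userfaultfd_remove() and userfaultfd_unmap_prep() above only queue events when the matching UFFD_FEATURE_EVENT_* bit was negotiated, and each of them raises ctx->mmap_changing so concurrent UFFDIO_COPY callers back off until the event is drained. A hedged sketch of the userspace side (enable_events() and handle_event() are illustrative names):

#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Opt in to the non-pagefault events during the UFFDIO_API handshake. */
static int enable_events(int uffd)
{
    struct uffdio_api api = {
        .api = UFFD_API,
        .features = UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_EVENT_REMAP |
                    UFFD_FEATURE_EVENT_REMOVE | UFFD_FEATURE_EVENT_UNMAP,
    };

    return ioctl(uffd, UFFDIO_API, &api);
}

/* Dispatch one already-read uffd_msg that is not a page fault. */
static void handle_event(const struct uffd_msg *msg)
{
    switch (msg->event) {
    case UFFD_EVENT_FORK:
        /* The child's memory is tracked by a fresh context created in
         * dup_userfaultfd(); it arrives here as a new file descriptor. */
        printf("child uffd: %d\n", (int)msg->arg.fork.ufd);
        break;
    case UFFD_EVENT_REMAP:
        printf("remap %llx -> %llx (%llu bytes)\n",
               (unsigned long long)msg->arg.remap.from,
               (unsigned long long)msg->arg.remap.to,
               (unsigned long long)msg->arg.remap.len);
        break;
    case UFFD_EVENT_REMOVE:     /* e.g. madvise(MADV_DONTNEED) */
    case UFFD_EVENT_UNMAP:      /* munmap() of a registered range */
        printf("range gone: %llx-%llx\n",
               (unsigned long long)msg->arg.remove.start,
               (unsigned long long)msg->arg.remove.end);
        break;
    }
}
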
838 struct userfaultfd_ctx *ctx = file->private_data;
839 struct mm_struct *mm = ctx->mm;
845 WRITE_ONCE(ctx->released, true);
862 BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
864 if (vma->vm_userfaultfd_ctx.ctx != ctx) {
889 spin_lock_irq(&ctx->fault_pending_wqh.lock);
890 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
891 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
892 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
895 wake_up_all(&ctx->event_wqh);
897 wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
898 userfaultfd_ctx_put(ctx);
922 struct userfaultfd_ctx *ctx)
924 return find_userfault_in(&ctx->fault_pending_wqh);
928 struct userfaultfd_ctx *ctx)
930 return find_userfault_in(&ctx->event_wqh);
935 struct userfaultfd_ctx *ctx = file->private_data;
938 poll_wait(file, &ctx->fd_wqh, wait);
940 if (!userfaultfd_is_initialized(ctx))
961 if (waitqueue_active(&ctx->fault_pending_wqh))
963 else if (waitqueue_active(&ctx->event_wqh))
971 static int resolve_userfault_fork(struct userfaultfd_ctx *ctx,
987 static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
1004 spin_lock_irq(&ctx->fd_wqh.lock);
1005 __add_wait_queue(&ctx->fd_wqh, &wait);
1008 spin_lock(&ctx->fault_pending_wqh.lock);
1009 uwq = find_userfault(ctx);
1018 write_seqcount_begin(&ctx->refile_seq);
1042 add_wait_queue(&ctx->fault_wqh, &uwq->wq);
1044 write_seqcount_end(&ctx->refile_seq);
1048 spin_unlock(&ctx->fault_pending_wqh.lock);
1052 spin_unlock(&ctx->fault_pending_wqh.lock);
1054 spin_lock(&ctx->event_wqh.lock);
1055 uwq = find_userfault_evt(ctx);
1070 spin_unlock(&ctx->event_wqh.lock);
1075 userfaultfd_event_complete(ctx, uwq);
1076 spin_unlock(&ctx->event_wqh.lock);
1080 spin_unlock(&ctx->event_wqh.lock);
1090 spin_unlock_irq(&ctx->fd_wqh.lock);
1092 spin_lock_irq(&ctx->fd_wqh.lock);
1094 __remove_wait_queue(&ctx->fd_wqh, &wait);
1096 spin_unlock_irq(&ctx->fd_wqh.lock);
1099 ret = resolve_userfault_fork(ctx, fork_nctx, msg);
1100 spin_lock_irq(&ctx->event_wqh.lock);
1122 __add_wait_queue(&ctx->event_wqh, &uwq->wq);
1130 userfaultfd_event_complete(ctx, uwq);
1146 spin_unlock_irq(&ctx->event_wqh.lock);
1155 struct userfaultfd_ctx *ctx = file->private_data;
1160 if (!userfaultfd_is_initialized(ctx))
1166 _ret = userfaultfd_ctx_read(ctx, no_wait, &msg);
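
userfaultfd_read() derives no_wait from O_NONBLOCK, and userfaultfd_ctx_read() then returns -EAGAIN once both fault_pending_wqh and event_wqh are empty. A small sketch of draining everything currently queued, assuming the descriptor was created with O_NONBLOCK (drain_uffd() is an illustrative name):

#include <errno.h>
#include <linux/userfaultfd.h>
#include <unistd.h>

/* Read every queued message; returns the number read, or -1 on error. */
static int drain_uffd(int uffd, struct uffd_msg *out, int max)
{
    int n = 0;

    while (n < max) {
        ssize_t r = read(uffd, &out[n], sizeof(out[n]));

        if (r == (ssize_t)sizeof(out[n])) {
            n++;
            continue;
        }
        if (r < 0 && errno == EAGAIN)   /* both queues empty */
            break;
        return -1;                      /* short read or real error */
    }
    return n;
}
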
1182 static void __wake_userfault(struct userfaultfd_ctx *ctx,
1185 spin_lock_irq(&ctx->fault_pending_wqh.lock);
1187 if (waitqueue_active(&ctx->fault_pending_wqh))
1188 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
1190 if (waitqueue_active(&ctx->fault_wqh))
1191 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
1192 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
1195 static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
1217 seq = read_seqcount_begin(&ctx->refile_seq);
1218 need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
1219 waitqueue_active(&ctx->fault_wqh);
1221 } while (read_seqcount_retry(&ctx->refile_seq, seq));
1223 __wake_userfault(ctx, range);
1255 static int userfaultfd_register(struct userfaultfd_ctx *ctx,
1258 struct mm_struct *mm = ctx->mm;
1328 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
1371 if (cur->vm_userfaultfd_ctx.ctx &&
1372 cur->vm_userfaultfd_ctx.ctx != ctx)
1393 BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
1394 vma->vm_userfaultfd_ctx.ctx != ctx);
1401 if (vma->vm_userfaultfd_ctx.ctx == ctx &&
1414 ((struct vm_userfaultfd_ctx){ ctx }),
1437 vma->vm_userfaultfd_ctx.ctx = ctx;
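
userfaultfd_register() splits or merges VMAs across the requested range and points each vma->vm_userfaultfd_ctx.ctx at this context. The userspace side is one UFFDIO_REGISTER per range; a hedged sketch, assuming the UFFDIO_API handshake already happened (register_range() is an illustrative name):

#include <linux/userfaultfd.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Register [addr, addr + len) for missing-page faults; both addr and len
 * must be page aligned, matching validate_range() in the kernel. */
static int register_range(int uffd, void *addr, size_t len)
{
    struct uffdio_register reg = {
        .range = { .start = (uintptr_t)addr, .len = len },
        .mode  = UFFDIO_REGISTER_MODE_MISSING,
    };

    if (ioctl(uffd, UFFDIO_REGISTER, &reg) < 0)
        return -1;

    /* The kernel reports back which ioctls actually work on this range. */
    if (!(reg.ioctls & (1ULL << _UFFDIO_COPY)))
        fprintf(stderr, "UFFDIO_COPY not supported on this range\n");
    return 0;
}
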
1472 static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
1475 struct mm_struct *mm = ctx->mm;
1529 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
1559 if (!vma->vm_userfaultfd_ctx.ctx)
1578 wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
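
userfaultfd_unregister() takes a bare uffdio_range, and the wake_userfault() at line 1578 releases any faulter still parked on a range whose tracking is being torn down. A minimal sketch (illustrative helper name):

#include <linux/userfaultfd.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Stop tracking [addr, addr + len); blocked faulters are woken by the kernel. */
static int unregister_range(int uffd, void *addr, size_t len)
{
    struct uffdio_range range = {
        .start = (uintptr_t)addr,
        .len   = len,
    };

    return ioctl(uffd, UFFDIO_UNREGISTER, &range);
}
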
1625 static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
1637 ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
1650 wake_userfault(ctx, &range);
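
userfaultfd_wake() feeds the same wake_userfault() helper; it is mostly useful after resolving a batch of faults with UFFDIO_COPY_MODE_DONTWAKE, so the whole range is woken once instead of per copy. Sketch (illustrative helper name):

#include <linux/userfaultfd.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Wake every thread blocked on a fault inside [addr, addr + len). */
static int wake_range(int uffd, void *addr, size_t len)
{
    struct uffdio_range range = {
        .start = (uintptr_t)addr,
        .len   = len,
    };

    return ioctl(uffd, UFFDIO_WAKE, &range);
}
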
1657 static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
1668 if (READ_ONCE(ctx->mmap_changing))
1677 ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
1690 if (mmget_not_zero(ctx->mm)) {
1691 ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
1692 uffdio_copy.len, &ctx->mmap_changing,
1694 mmput(ctx->mm);
1707 wake_userfault(ctx, &range);
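
Note the early -EAGAIN when ctx->mmap_changing is set (line 1668): a fork/remap/remove/unmap event is in flight and the copy must not race with it, so userspace is expected to drain the event queue and retry; on a partial copy the kernel reports progress back through uffdio_copy.copy. A hedged sketch of the retry bookkeeping (try_copy() is an illustrative name):

#include <errno.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

/* Returns 0 when the range is fully copied, 1 when the caller should consume
 * pending event messages and call again, -1 on any other error. */
static int try_copy(int uffd, struct uffdio_copy *copy)
{
    if (ioctl(uffd, UFFDIO_COPY, copy) == 0)
        return 0;
    if (errno != EAGAIN)
        return -1;

    /* Partial progress is reported in copy->copy; advance past it so the
     * retry only covers what is still missing. */
    if (copy->copy > 0) {
        copy->dst += copy->copy;
        copy->src += copy->copy;
        copy->len -= copy->copy;
        copy->copy = 0;
        if (!copy->len)
            return 0;
    }
    return 1;
}
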
1714 static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
1725 if (READ_ONCE(ctx->mmap_changing))
1734 ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
1742 if (mmget_not_zero(ctx->mm)) {
1743 ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
1745 &ctx->mmap_changing);
1746 mmput(ctx->mm);
1759 wake_userfault(ctx, &range);
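
UFFDIO_ZEROPAGE follows the same shape as UFFDIO_COPY but installs zeroed pages instead of user-supplied data, which is cheaper when the missing range is known to be empty. Sketch (illustrative helper name):

#include <linux/userfaultfd.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Resolve missing pages in [addr, addr + len) with zeroes. */
static int zero_range(int uffd, void *addr, size_t len)
{
    struct uffdio_zeropage zp = {
        .range = { .start = (uintptr_t)addr, .len = len },
        .mode  = 0,  /* or UFFDIO_ZEROPAGE_MODE_DONTWAKE for batching */
    };

    /* On success zp.zeropage holds the number of bytes zero-mapped. */
    return ioctl(uffd, UFFDIO_ZEROPAGE, &zp);
}
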
1766 static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
1775 if (READ_ONCE(ctx->mmap_changing))
1784 ret = validate_range(ctx->mm, uffdio_wp.range.start,
1799 if (mmget_not_zero(ctx->mm)) {
1800 ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
1802 &ctx->mmap_changing);
1803 mmput(ctx->mm);
1814 wake_userfault(ctx, &range);
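
UFFDIO_WRITEPROTECT is the newest ioctl in this file; the range must have been registered with UFFDIO_REGISTER_MODE_WP, and write faults then arrive as pagefault messages with UFFD_PAGEFAULT_FLAG_WP set in msg.arg.pagefault.flags. A hedged sketch of arming and disarming protection (illustrative helper name):

#include <linux/userfaultfd.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Toggle write protection on [addr, addr + len). */
static int set_writeprotect(int uffd, void *addr, size_t len, int protect)
{
    struct uffdio_writeprotect wp = {
        .range = { .start = (uintptr_t)addr, .len = len },
        .mode  = protect ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
    };

    /* Clearing protection also wakes the blocked writer unless
     * UFFDIO_WRITEPROTECT_MODE_DONTWAKE is added to mode. */
    return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
}
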
1833 static int userfaultfd_api(struct userfaultfd_ctx *ctx,
1862 if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
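
userfaultfd_api() is the mandatory first ioctl on a new descriptor: the cmpxchg on ctx->features at line 1862 makes the handshake one-shot, and until it succeeds every other command is rejected by the userfaultfd_is_initialized() check at line 1881. A minimal sketch (illustrative helper name):

#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* One-time handshake: must precede every other userfaultfd ioctl. */
static int uffd_handshake(int uffd, unsigned long long wanted_features)
{
    struct uffdio_api api = {
        .api = UFFD_API,
        .features = wanted_features,  /* 0 is enough for plain faults */
    };

    if (ioctl(uffd, UFFDIO_API, &api) < 0)
        return -1;

    /* The kernel writes back everything it supports. */
    printf("features %llx, ioctls %llx\n",
           (unsigned long long)api.features,
           (unsigned long long)api.ioctls);
    return 0;
}
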
1879 struct userfaultfd_ctx *ctx = file->private_data;
1881 if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
1886 ret = userfaultfd_api(ctx, arg);
1889 ret = userfaultfd_register(ctx, arg);
1892 ret = userfaultfd_unregister(ctx, arg);
1895 ret = userfaultfd_wake(ctx, arg);
1898 ret = userfaultfd_copy(ctx, arg);
1901 ret = userfaultfd_zeropage(ctx, arg);
1904 ret = userfaultfd_writeprotect(ctx, arg);
1913 struct userfaultfd_ctx *ctx = f->private_data;
1917 spin_lock_irq(&ctx->fault_pending_wqh.lock);
1918 list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
1922 list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
1925 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
1933 pending, total, UFFD_API, ctx->features,
1952 struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
1954 init_waitqueue_head(&ctx->fault_pending_wqh);
1955 init_waitqueue_head(&ctx->fault_wqh);
1956 init_waitqueue_head(&ctx->event_wqh);
1957 init_waitqueue_head(&ctx->fd_wqh);
1958 seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
1963 struct userfaultfd_ctx *ctx;
1978 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
1979 if (!ctx)
1982 refcount_set(&ctx->refcount, 1);
1983 ctx->flags = flags;
1984 ctx->features = 0;
1985 ctx->released = false;
1986 ctx->mmap_changing = false;
1987 ctx->mm = current->mm;
1989 mmgrab(ctx->mm);
1991 fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, ctx,
1994 mmdrop(ctx->mm);
1995 kmem_cache_free(userfaultfd_ctx_cachep, ctx);
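
The allocation above backs the userfaultfd(2) syscall; there is typically no libc wrapper, so userspace goes through syscall(), and only O_CLOEXEC and O_NONBLOCK are accepted as flags. A minimal creation sketch; the final close() is what later drives userfaultfd_release() and the last userfaultfd_ctx_put():

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    /* Creates the kernel-side userfaultfd_ctx tracked throughout this file. */
    int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

    if (uffd < 0) {
        perror("userfaultfd");
        return 1;
    }

    /* ... UFFDIO_API handshake and UFFDIO_REGISTER would go here ... */

    /* close() ends up in userfaultfd_release(), which wakes pollers with
     * POLLHUP, releases blocked faulters, and drops the initial reference. */
    close(uffd);
    return 0;
}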