Lines matching refs: ctx
46 __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask)
61 spin_lock_irqsave(&ctx->wqh.lock, flags);
63 if (ULLONG_MAX - ctx->count < n)
64 n = ULLONG_MAX - ctx->count;
65 ctx->count += n;
66 if (waitqueue_active(&ctx->wqh))
67 wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
69 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
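
Read together, the matched lines above form the core of eventfd_signal_mask(): the counter is bumped under ctx->wqh.lock, the increment is clamped so the addition cannot wrap, and any pollers are woken with EPOLLIN plus the caller-supplied mask. A minimal reconstruction, with the unmatched declarations and return filled in as assumptions, looks like this:

__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask)
{
	unsigned long flags;	/* assumed local; not among the matched lines */

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;	/* clamp so the addition cannot overflow the counter */
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;	/* assumed: the number of units actually added */
}
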
76 * @ctx: [in] Pointer to the eventfd context.
88 __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
90 return eventfd_signal_mask(ctx, n, 0);
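
eventfd_signal() is the mask-less wrapper around eventfd_signal_mask(), so in-kernel producers simply call it with the number of events to add. A hypothetical caller (the function name my_notify_completion and the way it obtained ctx are assumptions, not part of the listing):

#include <linux/eventfd.h>

/* Hypothetical producer: tell a userspace listener that one unit of work completed. */
static void my_notify_completion(struct eventfd_ctx *ctx)
{
	eventfd_signal(ctx, 1);	/* adds 1 to ctx->count and wakes pollers with EPOLLIN */
}
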
94 static void eventfd_free_ctx(struct eventfd_ctx *ctx)
96 if (ctx->id >= 0)
97 ida_simple_remove(&eventfd_ida, ctx->id);
98 kfree(ctx);
103 struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);
105 eventfd_free_ctx(ctx);
110 * @ctx: [in] Pointer to eventfd context.
115 void eventfd_ctx_put(struct eventfd_ctx *ctx)
117 kref_put(&ctx->kref, eventfd_free);
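
The three fragments above are the teardown chain: eventfd_ctx_put() drops the kref, eventfd_free() is the release callback that recovers the context from the embedded kref, and eventfd_free_ctx() returns the IDA id and frees the memory. Assembled as a sketch, with only the function signatures and braces filled in around the matched lines:

static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	if (ctx->id >= 0)
		ida_simple_remove(&eventfd_ida, ctx->id);	/* a negative id means none was allocated */
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);	/* frees the context once the last reference is gone */
}
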
123 struct eventfd_ctx *ctx = file->private_data;
125 wake_up_poll(&ctx->wqh, EPOLLHUP);
126 eventfd_ctx_put(ctx);
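
eventfd_release() is the file's ->release hook: on the last close it wakes any remaining waiters with EPOLLHUP and drops the reference the file held on the context. A sketch under the same assumptions:

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, EPOLLHUP);	/* tell poll/epoll users the eventfd is going away */
	eventfd_ctx_put(ctx);			/* drop the file's reference on the context */
	return 0;
}
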
132 struct eventfd_ctx *ctx = file->private_data;
136 poll_wait(file, &ctx->wqh, wait);
139 * All writes to ctx->count occur within ctx->wqh.lock. This read
140 * can be done outside ctx->wqh.lock because we know that poll_wait
150 * lock ctx->wqh.lock (in poll_wait)
151 * count = ctx->count
153 * unlock ctx->wqh.lock
154 lock ctx->wqh.lock
155 * ctx->count += n
158 unlock ctx->wqh.lock
165 * count = ctx->count (INVALID!)
166 lock ctx->wqh.lock
167 * ctx->count += n
170 unlock ctx->wqh.lock
171 * lock ctx->wqh.lock (in poll_wait)
173 * unlock ctx->wqh.lock
176 count = READ_ONCE(ctx->count);
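
The comment reproduced above justifies reading ctx->count without holding ctx->wqh.lock here: poll_wait() takes the lock through add_wait_queue(), so the READ_ONCE() snapshot cannot be reordered before the queue insertion and a concurrent signal cannot be missed. The rest of eventfd_poll() only translates that snapshot into poll events; a sketch of the whole function, with the event computation filled in as an assumption based on the documented eventfd semantics:

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	__poll_t events = 0;
	__u64 count;

	poll_wait(file, &ctx->wqh, wait);

	count = READ_ONCE(ctx->count);	/* lockless snapshot; safe per the ordering argument above */

	if (count > 0)
		events |= EPOLLIN;	/* a read would not block */
	if (count == ULLONG_MAX)
		events |= EPOLLERR;	/* the counter overflowed */
	if (ULLONG_MAX - 1 > count)
		events |= EPOLLOUT;	/* a write of at least one would not block */

	return events;
}
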
188 void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
190 lockdep_assert_held(&ctx->wqh.lock);
192 *cnt = ((ctx->flags & EFD_SEMAPHORE) && ctx->count) ? 1 : ctx->count;
193 ctx->count -= *cnt;
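
eventfd_ctx_do_read() must run with ctx->wqh.lock held (hence the lockdep assertion) and implements both read flavours in one expression: with EFD_SEMAPHORE set and a non-zero counter it hands out exactly one unit, otherwise it hands out and clears the whole counter. An illustrative wrapper (the helper name and the example values are assumptions):

#include <linux/eventfd.h>

/* Illustration only: what a caller holding ctx->wqh.lock observes. */
static __u64 my_consume_locked(struct eventfd_ctx *ctx)
{
	__u64 cnt;

	eventfd_ctx_do_read(ctx, &cnt);
	/*
	 * With ctx->count == 3 on entry:
	 *   EFD_SEMAPHORE set:   cnt == 1, ctx->count is now 2
	 *   EFD_SEMAPHORE clear: cnt == 3, ctx->count is now 0
	 */
	return cnt;
}
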
199 * @ctx: [in] Pointer to eventfd context.
210 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
215 spin_lock_irqsave(&ctx->wqh.lock, flags);
216 eventfd_ctx_do_read(ctx, cnt);
217 __remove_wait_queue(&ctx->wqh, wait);
218 if (*cnt != 0 && waitqueue_active(&ctx->wqh))
219 wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
220 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
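
eventfd_ctx_remove_wait_queue() lets an in-kernel consumer that queued its own wait_queue_entry on ctx->wqh detach while atomically collecting any pending count, waking writers with EPOLLOUT if it consumed something. A hypothetical detach path (the consumer name, the pr_debug message and the return-value check are assumptions):

#include <linux/eventfd.h>
#include <linux/wait.h>
#include <linux/printk.h>

/* Hypothetical consumer shutdown, modelled on how poll-based in-kernel users detach. */
static void my_consumer_detach(struct eventfd_ctx *ctx, wait_queue_entry_t *wait)
{
	__u64 cnt;

	/* Assumed convention: 0 means a pending count was transferred into cnt. */
	if (eventfd_ctx_remove_wait_queue(ctx, wait, &cnt) == 0)
		pr_debug("consumed %llu pending events on detach\n",
			 (unsigned long long)cnt);
}
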
229 struct eventfd_ctx *ctx = file->private_data;
234 spin_lock_irq(&ctx->wqh.lock);
235 if (!ctx->count) {
238 spin_unlock_irq(&ctx->wqh.lock);
242 if (wait_event_interruptible_locked_irq(ctx->wqh, ctx->count)) {
243 spin_unlock_irq(&ctx->wqh.lock);
247 eventfd_ctx_do_read(ctx, &ucnt);
249 if (waitqueue_active(&ctx->wqh))
250 wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
252 spin_unlock_irq(&ctx->wqh.lock);
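
The read path mirrors the write path: under ctx->wqh.lock a non-blocking read of a zero counter fails with -EAGAIN, otherwise the caller sleeps interruptibly until the counter is non-zero, consumes it via eventfd_ctx_do_read(), and wakes writers with EPOLLOUT. A sketch assembled around the matched lines; the prototype, the O_NONBLOCK test, the error returns and the copy to userspace are assumptions (the exact form differs between kernel versions):

static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	__u64 ucnt = 0;

	if (count < sizeof(ucnt))
		return -EINVAL;

	spin_lock_irq(&ctx->wqh.lock);
	if (!ctx->count) {
		if (file->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&ctx->wqh.lock);
			return -EAGAIN;		/* empty counter and the caller must not block */
		}
		if (wait_event_interruptible_locked_irq(ctx->wqh, ctx->count)) {
			spin_unlock_irq(&ctx->wqh.lock);
			return -ERESTARTSYS;	/* interrupted by a signal */
		}
	}
	eventfd_ctx_do_read(ctx, &ucnt);	/* one unit (EFD_SEMAPHORE) or the whole counter */
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);	/* the counter shrank: writers may proceed */
	spin_unlock_irq(&ctx->wqh.lock);

	return put_user(ucnt, (__u64 __user *)buf) ? -EFAULT : sizeof(ucnt);
}
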
262 struct eventfd_ctx *ctx = file->private_data;
272 spin_lock_irq(&ctx->wqh.lock);
274 if (ULLONG_MAX - ctx->count > ucnt)
277 res = wait_event_interruptible_locked_irq(ctx->wqh,
278 ULLONG_MAX - ctx->count > ucnt);
283 ctx->count += ucnt;
285 if (waitqueue_active(&ctx->wqh))
286 wake_up_locked_poll(&ctx->wqh, EPOLLIN);
289 spin_unlock_irq(&ctx->wqh.lock);
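
Writes add to the counter but must never let it reach ULLONG_MAX (poll treats that value as an overflow indication), so the write path only proceeds while ULLONG_MAX - ctx->count > ucnt, blocking or returning -EAGAIN when there is no room, then wakes readers with EPOLLIN. A sketch with the validation and copy-in filled in as assumptions:

static ssize_t eventfd_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;		/* assumed: the maximum value is never accepted */

	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		res = wait_event_interruptible_locked_irq(ctx->wqh,
				ULLONG_MAX - ctx->count > ucnt);
		if (!res)
			res = sizeof(ucnt);
	}
	if (res > 0) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLIN);	/* readers now have data */
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}
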
297 struct eventfd_ctx *ctx = f->private_data;
299 spin_lock_irq(&ctx->wqh.lock);
301 (unsigned long long)ctx->count);
302 spin_unlock_irq(&ctx->wqh.lock);
303 seq_printf(m, "eventfd-id: %d\n", ctx->id);
305 !!(ctx->flags & EFD_SEMAPHORE));
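
eventfd_show_fdinfo() exposes the current counter, the IDA id, and whether EFD_SEMAPHORE is set through /proc/<pid>/fdinfo/<fd>. The output looks roughly like this; the values are illustrative, the eventfd-id field name comes from the matched line 303, and the names and padding of the other two fields are assumptions:

	eventfd-count:                42
	eventfd-id: 3
	eventfd-semaphore: 0
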
357 struct eventfd_ctx *ctx;
361 ctx = eventfd_ctx_fileget(f.file);
363 return ctx;
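
eventfd_ctx_fdget() resolves a userspace file descriptor to its eventfd context by briefly pinning the struct file and delegating to eventfd_ctx_fileget(); the fdget()/fdput() pair and the error return are not among the matched lines and are assumptions in this sketch:

struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);

	if (!f.file)
		return ERR_PTR(-EBADF);		/* not an open descriptor */
	ctx = eventfd_ctx_fileget(f.file);	/* may also fail if the file is not an eventfd */
	fdput(f);
	return ctx;
}
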
378 struct eventfd_ctx *ctx;
383 ctx = file->private_data;
384 kref_get(&ctx->kref);
385 return ctx;
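
eventfd_ctx_fileget() takes an extra kref on the context so it outlives the descriptor that handed it over; every successful eventfd_ctx_fdget()/eventfd_ctx_fileget() must therefore be balanced by eventfd_ctx_put(). A hypothetical user (the names and surrounding driver logic are assumptions):

#include <linux/eventfd.h>
#include <linux/err.h>

/* Hypothetical: a driver is handed an eventfd descriptor by userspace. */
static struct eventfd_ctx *my_bind_eventfd(int fd)
{
	struct eventfd_ctx *ctx;

	ctx = eventfd_ctx_fdget(fd);
	if (IS_ERR(ctx))
		return ctx;	/* e.g. -EBADF, or the fd is not an eventfd */

	/* ... stash ctx and signal it later with eventfd_signal(ctx, 1) ... */
	return ctx;
}

static void my_unbind_eventfd(struct eventfd_ctx *ctx)
{
	eventfd_ctx_put(ctx);	/* balance the reference taken at bind time */
}
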
391 struct eventfd_ctx *ctx;
402 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
403 if (!ctx)
406 kref_init(&ctx->kref);
407 init_waitqueue_head(&ctx->wqh);
408 ctx->count = count;
409 ctx->flags = flags;
410 ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);
418 file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
429 eventfd_free_ctx(ctx);
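
The creation path allocates the context, initializes its kref, wait queue, initial count and flags, grabs an IDA id for fdinfo, and wraps everything in an anonymous-inode file; when file creation fails, the matched call to eventfd_free_ctx() undoes the allocation. A condensed sketch; the function name do_eventfd, the flag validation and the fd installation are assumptions around the matched lines:

static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	struct file *file;
	int fd;

	if (flags & ~(EFD_CLOEXEC | EFD_NONBLOCK | EFD_SEMAPHORE))
		return -EINVAL;		/* assumed flag validation */

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;
	ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);	/* may be negative; teardown checks for that */

	fd = get_unused_fd_flags(flags & (EFD_CLOEXEC | EFD_NONBLOCK));
	if (fd < 0)
		goto err;

	file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		fd = PTR_ERR(file);
		goto err;
	}

	fd_install(fd, file);
	return fd;

err:
	eventfd_free_ctx(ctx);	/* undo the allocation; no file ever owned ctx */
	return fd;
}
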