Lines Matching refs:head (references to the identifier 'head' in fs/aio.c; illustrative sketches follow the listing)
59 unsigned head; /* Written to by userland or under ring_lock
182 struct wait_queue_head *head;
434 * to the ring's head, and prevents page migration from mucking in
500 /* Compensate for the ring buffer's head/tail overlap entry */
577 ring->head = ring->tail = 0;
988 static void refill_reqs_available(struct kioctx *ctx, unsigned head,
993 /* Clamp head since userland can write to it. */
994 head %= ctx->nr_events;
995 if (head <= tail)
996 events_in_ring = tail - head;
998 events_in_ring = ctx->nr_events - (head - tail);
1022 unsigned head;
1024 /* Access of ring->head may race with aio_read_events_ring()
1027 * part is that head cannot pass tail since we prevent
1029 * ctx->completion_lock. Even if head is invalid, the check
1034 head = ring->head;
1036 refill_reqs_available(ctx, head, ctx->tail);
1124 unsigned tail, pos, head;
1159 head = ring->head;
1165 refill_reqs_available(ctx, head, tail);
1206 unsigned head, tail, pos;
1221 head = ring->head;
1230 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
1232 if (head == tail)
1235 head %= ctx->nr_events;
1243 avail = (head <= tail ? tail : ctx->nr_events) - head;
1244 if (head == tail)
1247 pos = head + AIO_EVENTS_OFFSET;
1264 head += avail;
1265 head %= ctx->nr_events;
1269 ring->head = head;
1272 pr_debug("%li h%u t%u\n", ret, head, tail);
1638 * Returns true on success, meaning that req->head->lock was locked, req->wait
1639 * is on req->head, and an RCU read lock was taken. Returns false if the
1644 wait_queue_head_t *head;
1662 head = smp_load_acquire(&req->head);
1663 if (head) {
1664 spin_lock(&head->lock);
1667 spin_unlock(&head->lock);
1675 spin_unlock(&req->head->lock);
1814 * as req->head is NULL'ed out, the request can be
1818 smp_store_release(&req->head, NULL);
1832 aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1845 pt->iocb->poll.head = head;
1846 add_wait_queue(head, &pt->iocb->poll.wait);
1867 req->head = NULL;
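
The matches from refill_reqs_available() (lines 988-998) and aio_read_events_ring() (lines 1206-1272) all revolve around the same ring-buffer arithmetic: head is clamped because userland can write to it, occupancy is tail - head when head <= tail and wraps otherwise, and the reader advances head modulo nr_events. What follows is a minimal standalone C sketch of that arithmetic, not the kernel code itself; struct ring, events_in_ring() and consume_events() are names invented for the example.

/* Minimal sketch, not kernel code: the head/tail arithmetic from
 * refill_reqs_available() and aio_read_events_ring() above, for a ring
 * of nr_events slots where the reader consumes at head and the
 * completion path fills at tail. */
#include <assert.h>

struct ring {
	unsigned head;       /* next event to read; userland may scribble on it */
	unsigned tail;       /* next slot the completion path will fill */
	unsigned nr_events;  /* ring capacity */
};

/* Completed events currently in the ring (mirrors lines 993-998). */
static unsigned events_in_ring(const struct ring *r)
{
	unsigned head = r->head % r->nr_events;  /* clamp untrusted head */

	if (head <= r->tail)
		return r->tail - head;
	return r->nr_events - (head - r->tail);
}

/* Consume up to 'want' events in one pass, wrapping head
 * (mirrors lines 1235-1265); returns how many were taken. */
static unsigned consume_events(struct ring *r, unsigned want)
{
	unsigned head = r->head % r->nr_events;
	unsigned avail;

	if (head == r->tail)
		return 0;

	/* Only read up to the end of the ring in this pass, as at line 1243. */
	avail = (head <= r->tail ? r->tail : r->nr_events) - head;
	if (avail > want)
		avail = want;

	head += avail;
	head %= r->nr_events;
	r->head = head;
	return avail;
}

int main(void)
{
	struct ring r = { .head = 6, .tail = 2, .nr_events = 8 };

	assert(events_in_ring(&r) == 4);     /* wraps: 8 - (6 - 2) */
	assert(consume_events(&r, 3) == 2);  /* only 2 slots before the wrap point */
	assert(r.head == 0);
	assert(consume_events(&r, 3) == 2);  /* the remaining 2 after wrapping */
	assert(r.head == 2);
	return 0;
}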
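The poll-related matches (lines 1638-1867) show a second use of head: a pointer to the waitqueue the request sits on, which can be cleared concurrently when the request is torn down. The reader loads req->head with smp_load_acquire() and takes head->lock only if it is still non-NULL, while the teardown path publishes NULL with smp_store_release(); the kernel additionally relies on RCU to keep the waitqueue memory valid between the load and the lock, which is omitted here. Below is a rough userspace analogy of that acquire/release pairing using C11 atomics and a pthread mutex; struct waiter, try_lock_head() and detach_head() are invented names, not kernel API.

/* Rough userspace analogy, not kernel code: the acquire/release handling
 * of req->head seen at lines 1662-1667 and 1818.  The kernel also uses
 * RCU so the waitqueue memory stays valid between the load and the
 * lock; that lifetime problem is ignored here. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct wq_head {
	pthread_mutex_t lock;
};

struct waiter {
	_Atomic(struct wq_head *) head;  /* NULL once detached */
};

/* Load the head pointer with acquire semantics and lock it if still set
 * (analogous to lines 1662-1664). */
static bool try_lock_head(struct waiter *w, struct wq_head **out)
{
	struct wq_head *head = atomic_load_explicit(&w->head, memory_order_acquire);

	if (!head)
		return false;  /* already detached */

	pthread_mutex_lock(&head->lock);
	*out = head;
	return true;
}

/* Publish the detach with release semantics so work done under the lock
 * is visible before readers observe head == NULL (analogous to line 1818). */
static void detach_head(struct waiter *w)
{
	atomic_store_explicit(&w->head, NULL, memory_order_release);
}

int main(void)
{
	struct wq_head h = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct waiter w;
	struct wq_head *locked;

	atomic_init(&w.head, &h);

	if (try_lock_head(&w, &locked))          /* succeeds: head still set */
		pthread_mutex_unlock(&locked->lock);

	detach_head(&w);
	return try_lock_head(&w, &locked) ? 1 : 0;  /* now fails, returns 0 */
}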