Lines matching refs: head

60 	unsigned	head;	/* Written to by userland or under ring_lock
183 struct wait_queue_head *head;
408 * to the ring's head, and prevents page migration from mucking in
474 /* Compensate for the ring buffer's head/tail overlap entry */
551 ring->head = ring->tail = 0;
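
The entries above cover the ring itself: head is the consumer index (writable by userland, per the comment at 60), tail is the producer index, both start at zero at 551, and the comment at 474 notes that extra slots are allocated because head == tail has to mean "empty". A minimal sketch of the shared header those fields live in, simplified to just the fields that matter for the arithmetic below (the real aio_ring header carries more):

/* Simplified sketch of the shared ring header; in the real layout it
 * is followed by the array of io_event slots and mapped into userspace. */
struct ring_header_sketch {
	unsigned	nr;	/* ring capacity, in events */
	unsigned	head;	/* consumer index; userland may write it */
	unsigned	tail;	/* producer index; kernel-owned */
};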
972 static void refill_reqs_available(struct kioctx *ctx, unsigned head,
977 /* Clamp head since userland can write to it. */
978 head %= ctx->nr_events;
979 if (head <= tail)
980 events_in_ring = tail - head;
982 events_in_ring = ctx->nr_events - (head - tail);
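
The refill_reqs_available() excerpts at 972-982 contain the core head/tail arithmetic: clamp head (userland can scribble on it), then count the completed events sitting between head and tail, with a second branch for the wrapped case. A hedged restatement of just that computation as a standalone helper:

/* Restates the arithmetic at 977-982; nr_events is the ring capacity.
 * The wrapped branch counts the events from head to the end of the
 * ring plus those from slot 0 up to tail. */
static unsigned events_in_ring(unsigned head, unsigned tail, unsigned nr_events)
{
	head %= nr_events;		/* clamp: userland can write head */
	if (head <= tail)
		return tail - head;
	return nr_events - (head - tail);
}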
1006 unsigned head;
1008 /* Access of ring->head may race with aio_read_events_ring()
1011 * part is that head cannot pass tail since we prevent
1013 * ctx->completion_lock. Even if head is invalid, the check
1018 head = ring->head;
1021 refill_reqs_available(ctx, head, ctx->tail);
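
The block at 1006-1021 samples the shared head under ctx->completion_lock before recomputing available request slots; as the excerpted comment explains, the read may race with the consumer, but head cannot pass tail while the lock is held, so a stale or even corrupted value only makes the estimate conservative. A sketch of that guarded read, with the mapping of the ring header page elided and the irq-disabling lock flavour assumed:

	/* Guarded sampling corresponding to 1018-1021; 'ring' is the
	 * mapped ring header (mapping/unmapping not shown). */
	spin_lock_irq(&ctx->completion_lock);
	head = ring->head;		/* may be stale or userland-corrupted */
	refill_reqs_available(ctx, head, ctx->tail);
	spin_unlock_irq(&ctx->completion_lock);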
1109 unsigned tail, pos, head;
1145 head = ring->head;
1152 refill_reqs_available(ctx, head, tail);
1193 unsigned head, tail, pos;
1208 head = ring->head;
1218 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
1220 if (head == tail)
1223 head %= ctx->nr_events;
1231 avail = (head <= tail ? tail : ctx->nr_events) - head;
1232 if (head == tail)
1235 pos = head + AIO_EVENTS_OFFSET;
1253 head += avail;
1254 head %= ctx->nr_events;
1258 ring->head = head;
1262 pr_debug("%li h%u t%u\n", ret, head, tail);
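
The aio_read_events_ring() excerpts at 1193-1262 are the consumption side: clamp head, pull events out in contiguous runs (up to the end of the ring, then wrap), and finally publish the advanced head back into the shared ring. A condensed sketch of that loop, with the page mapping and copy-out elided:

/* Condensed from 1220-1258; 'ret' counts events delivered so far,
 * 'nr' is the caller's limit. */
while (ret < nr) {
	long avail;

	if (head == tail)			/* ring is empty */
		break;

	/* longest contiguous run of events starting at head */
	avail = (head <= tail ? tail : ctx->nr_events) - head;
	if (avail > nr - ret)
		avail = nr - ret;

	/* ... copy 'avail' events out, starting at slot head + AIO_EVENTS_OFFSET ... */

	ret  += avail;
	head += avail;
	head %= ctx->nr_events;			/* wrap around */
}
ring->head = head;				/* publish the new head to userspace */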
1644 * Returns true on success, meaning that req->head->lock was locked, req->wait
1645 * is on req->head, and an RCU read lock was taken. Returns false if the
1650 wait_queue_head_t *head;
1668 head = smp_load_acquire(&req->head);
1669 if (head) {
1670 spin_lock(&head->lock);
1673 spin_unlock(&head->lock);
1681 spin_unlock(&req->head->lock);
1820 * as req->head is NULL'ed out, the request can be
1824 smp_store_release(&req->head, NULL);
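
From 1644 onward, head is no longer a ring index but the wait_queue_head_t pointer of the polled file. The comments at 1644-1646 and 1820-1824 give the lifetime rule: a path that wants to touch the waitqueue may lock it only while req->head is still non-NULL, and the completion side marks the request done by NULL'ing req->head with release semantics once the wait entry has been removed. A sketch of that pairing, assuming the poll_iocb fields shown above (head, wait); the helper name is illustrative and error handling is omitted:

/* Lock side, matching the excerpts at 1650-1681: take the waitqueue
 * lock only if head is still published, and back out if the wait
 * entry has already been removed.  On success the caller later drops
 * req->head->lock and the RCU read lock. */
static bool lock_poll_waitqueue(struct poll_iocb *req)
{
	wait_queue_head_t *head;

	rcu_read_lock();
	head = smp_load_acquire(&req->head);	/* pairs with the release below */
	if (head) {
		spin_lock(&head->lock);
		if (!list_empty(&req->wait.entry))
			return true;
		spin_unlock(&head->lock);
	}
	rcu_read_unlock();
	return false;
}

/* Completion side, matching 1820-1824: after the wait entry has been
 * taken off the queue (removal not shown in the excerpts), head is
 * NULL'ed out with release semantics; from that point the request can
 * be completed and freed, because the lock helper above will no longer
 * find a waitqueue to lock. */
	smp_store_release(&req->head, NULL);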
1838 aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1851 pt->iocb->poll.head = head;
1852 add_wait_queue(head, &pt->iocb->poll.wait);
1873 req->head = NULL;
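
Finally, the queue_proc entries at 1838-1852 show where req->head is set in the first place: the poll-table callback (invoked from the file's ->poll through the poll_table) records which waitqueue the file handed out and chains the iocb's wait entry onto it, while 1873 starts head out as NULL before the file is polled. A sketch of that callback; the aio_poll_table wrapper and the single-waitqueue assumption are inferred from the pt->iocb->poll usage above:

/* Assumed wrapper: a poll_table plus a pointer back to the submitting
 * iocb, matching the pt->iocb->poll accesses at 1851-1852. */
struct aio_poll_table {
	struct poll_table_struct	pt;
	struct aio_kiocb		*iocb;
};

static void sketch_poll_queue_proc(struct file *file, struct wait_queue_head *head,
				   struct poll_table_struct *p)
{
	struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);

	/* remember which waitqueue we are on so the teardown paths above
	 * can find (and lock) it again through req->head */
	pt->iocb->poll.head = head;
	add_wait_queue(head, &pt->iocb->poll.wait);
}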