Lines Matching defs:head
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
15 * store head will do). Failure to do so could lead to reading invalid
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
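
The matched lines above come from the comment block at the top of the file describing the barrier pairing the application must honor on the rings. A minimal userspace sketch of the CQ-side protocol, using C11 atomics as a stand-in for smp_load_acquire()/smp_store_release() (pointer names like cq_ring_head are assumptions, derived in real code from io_cqring_offsets after setup):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <linux/io_uring.h>

    /* Illustrative pointers into the mmap'ed CQ ring; names are assumptions. */
    static _Atomic uint32_t *cq_ring_head, *cq_ring_tail;
    static uint32_t cq_ring_mask;
    static struct io_uring_cqe *cqes;

    /* Peek the next completion, or NULL if the CQ ring is empty. */
    static struct io_uring_cqe *peek_cqe(void)
    {
        uint32_t head = atomic_load_explicit(cq_ring_head, memory_order_relaxed);
        /* acquire-load of the tail pairs with the kernel's store-release */
        uint32_t tail = atomic_load_explicit(cq_ring_tail, memory_order_acquire);

        return head == tail ? NULL : &cqes[head & cq_ring_mask];
    }

    /* Once the entry has been read, publish the new head with a release
     * store: this is the "barrier before updating CQ head" obligation,
     * ordering the CQE loads before the head store. */
    static void consume_cqe(void)
    {
        uint32_t head = atomic_load_explicit(cq_ring_head, memory_order_relaxed);
        atomic_store_explicit(cq_ring_head, head + 1, memory_order_release);
    }
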
115 u32 head ____cacheline_aligned_in_smp;
131 * The kernel controls head of the sq ring and the tail of the cq ring,
132 * and the application controls tail of the sq ring and the head of the
137 * Bitmasks to apply to head and tail offsets (constant, equals
151 * After a new SQ head value was read by the application this
153 * the new SQ head (and possibly more).
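
The comments around file lines 131-153 spell out the split of ownership (the kernel consumes the SQ head and produces the CQ tail, the application does the opposite) and note that the masks equal ring_entries - 1. Because the ring sizes are powers of two, the free-running 32-bit head/tail counters become array slots via that mask, for example (illustrative names only):

    /* ring_entries is a power of two, so the mask lets the counters wrap. */
    unsigned int index   = tail & (ring_entries - 1); /* slot for the next entry       */
    unsigned int pending = tail - head;               /* entries currently in the ring */
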
303 struct io_kiocb *head;
487 struct wait_queue_head *head;
543 /* head of the link, used by linked timeouts only */
544 struct io_kiocb *head;
1137 #define io_for_each_link(pos, head) \
1138 for (pos = (head); pos; pos = pos->link)
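
io_for_each_link() at file lines 1137-1138 walks a request chain by following ->link until it hits NULL. A self-contained sketch of the same walk, with a pared-down stand-in for struct io_kiocb:

    /* Stand-in for struct io_kiocb, reduced to the link pointer. */
    struct fake_req {
        struct fake_req *link;   /* next request in the chain, or NULL */
    };

    #define io_for_each_link(pos, head) \
        for (pos = (head); pos; pos = pos->link)

    /* Count the requests in a chain, head included. */
    static int chain_len(struct fake_req *head)
    {
        struct fake_req *req;
        int n = 0;

        io_for_each_link(req, head)
            n++;
        return n;
    }
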
1204 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
1210 if (task && head->task != task)
1215 io_for_each_link(req, head) {
1222 static bool io_match_linked(struct io_kiocb *head)
1226 io_for_each_link(req, head) {
1237 static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
1242 if (task && head->task != task)
1247 if (head->flags & REQ_F_LINK_TIMEOUT) {
1248 struct io_ring_ctx *ctx = head->ctx;
1252 matched = io_match_linked(head);
1255 matched = io_match_linked(head);
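
io_match_task() and io_match_task_safe() (file lines 1204-1255) use that walk to decide whether a link chain belongs to a given task during cancellation, with the _safe variant guarding the walk when the head carries a linked timeout (REQ_F_LINK_TIMEOUT). A simplified sketch of the matching shape; the field names and the in-flight test here are stand-ins, not the kernel's flags:

    #include <stdbool.h>

    /* Stand-in request; fields are assumptions, not io_kiocb's. */
    struct match_req {
        void             *task;      /* owning task, compared by pointer      */
        bool              inflight;  /* stand-in for the REQ_F_INFLIGHT check */
        struct match_req *link;      /* next request in the chain, or NULL    */
    };

    /* The chain matches if the head belongs to 'task' and either we are
     * cancelling everything or some request in the chain is in flight. */
    static bool match_task(struct match_req *head, void *task, bool cancel_all)
    {
        struct match_req *req;

        if (task && head->task != task)
            return false;
        if (cancel_all)
            return true;

        for (req = head; req; req = req->link)
            if (req->inflight)
                return true;
        return false;
    }
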
1604 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1613 * writes to the cq entry need to come after reading head; the
2071 link->timeout.head = NULL;
3200 struct io_buffer *head;
3209 head = xa_load(&req->ctx->io_buffers, bgid);
3210 if (head) {
3211 if (!list_empty(&head->list)) {
3212 kbuf = list_last_entry(&head->list, struct io_buffer,
3216 kbuf = head;
4326 /* the head kbuf is the list itself */
4348 struct io_buffer *head;
4357 head = xa_load(&ctx->io_buffers, p->bgid);
4358 if (head)
4359 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
4404 static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4420 if (!*head) {
4422 *head = buf;
4424 list_add_tail(&buf->list, &(*head)->list);
4436 struct io_buffer *head, *list;
4444 list = head = xa_load(&ctx->io_buffers, p->bgid);
4446 ret = io_add_buffers(p, &head);
4448 ret = xa_insert(&ctx->io_buffers, p->bgid, head,
4451 __io_remove_buffers(ctx, head, p->bgid, -1U);
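
The provided-buffer lines (3200-3216 and 4326-4451) show the bookkeeping: ctx->io_buffers is an xarray keyed by buffer group ID, the first buffer registered for a group becomes the stored head, and the rest of the group hangs off the head's list, which is why "the head kbuf is the list itself". A rough model of that shape, with a fixed table and a singly linked pointer standing in for the xarray and list_head (not the kernel's code):

    #include <stddef.h>

    #define MAX_BGID 16   /* illustrative bound; the kernel uses an xarray */

    /* Stand-in buffer; the real struct io_buffer hangs off a list_head. */
    struct fake_buf {
        void            *addr;
        unsigned int     len;
        struct fake_buf *next;   /* rest of the group, NULL-terminated */
    };

    static struct fake_buf *groups[MAX_BGID];   /* stand-in for ctx->io_buffers */

    /* Roughly the shape of buffer selection: hand out a queued buffer if the
     * group has one, otherwise hand out the head itself and empty the slot. */
    static struct fake_buf *select_buf(unsigned int bgid)
    {
        struct fake_buf *head = groups[bgid % MAX_BGID];
        struct fake_buf *buf;

        if (!head)
            return NULL;
        if (head->next) {
            buf = head->next;
            head->next = buf->next;
            return buf;
        }
        groups[bgid % MAX_BGID] = NULL;   /* the head was the last buffer */
        return head;
    }
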
5423 poll->head = NULL;
5433 struct wait_queue_head *head = smp_load_acquire(&poll->head);
5435 if (head) {
5436 spin_lock_irq(&head->lock);
5438 poll->head = NULL;
5439 spin_unlock_irq(&head->lock);
5643 * as req->head is NULL'ed out, the request can be
5647 smp_store_release(&poll->head, NULL);
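
File lines 5423-5647 show how the poll machinery hands off ownership of poll->head: the wakeup path detaches the wait entry and then publishes the detach with smp_store_release(&poll->head, NULL), while the teardown path only touches the waitqueue if an smp_load_acquire() still sees a non-NULL head, and clears it under the waitqueue lock. A generic illustration of that acquire/release handoff in plain C11 atomics and a pthread mutex; it deliberately ignores the waitqueue-lifetime rules the kernel relies on:

    #include <stdatomic.h>
    #include <pthread.h>

    struct fake_wq {
        pthread_mutex_t lock;
        /* ... wait entries ... */
    };

    struct fake_poll {
        struct fake_wq *_Atomic head;   /* NULL once the waker detached us */
    };

    /* Teardown side: only touch the waitqueue if it is still attached. */
    static void poll_remove_entry(struct fake_poll *poll)
    {
        /* acquire pairs with the waker's release store of NULL below */
        struct fake_wq *wq = atomic_load_explicit(&poll->head, memory_order_acquire);

        if (wq) {
            pthread_mutex_lock(&wq->lock);
            /* delete the wait entry here, then clear the pointer */
            atomic_store_explicit(&poll->head, NULL, memory_order_relaxed);
            pthread_mutex_unlock(&wq->lock);
        }
    }

    /* Wakeup side: detach first, then publish the detach with a release
     * store so the teardown side cannot see a half-detached state. */
    static void poll_wake_detach(struct fake_poll *poll)
    {
        atomic_store_explicit(&poll->head, NULL, memory_order_release);
    }
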
5670 struct wait_queue_head *head,
5683 /* double add on the same waitqueue head, ignore */
5684 if (first->head == head)
5688 if ((*poll_ptr)->head == head)
5704 poll->head = head;
5708 add_wait_queue_exclusive(head, &poll->wait);
5710 add_wait_queue(head, &poll->wait);
5713 static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5718 __io_queue_proc(&pt->req->poll, pt, head,
5780 static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5786 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
6284 if (!link->head)
6288 req->timeout.head = link->last;
6619 * head request and the next request/link after the current link.
6627 /* not interested in head, start from the first linked */
7023 prev = req->timeout.head;
7024 req->timeout.head = NULL;
7053 if (req->timeout.head) {
7237 if (link->head) {
7240 * REQ_F_FAIL is set, but the head is an exception since
7242 * so let's leverage req->result to distinguish if a head
7247 if (!(link->head->flags & REQ_F_FAIL))
7248 req_fail_link_node(link->head, -ECANCELED);
7270 * If we already have a head request, queue this one for async
7271 * submittal once the head completes. If we don't have a head but
7272 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
7276 if (link->head) {
7277 struct io_kiocb *head = link->head;
7283 if (!(head->flags & REQ_F_FAIL))
7284 req_fail_link_node(head, -ECANCELED);
7287 trace_io_uring_link(ctx, req, head);
7293 link->head = NULL;
7294 io_queue_sqe(head);
7298 link->head = req;
7314 if (state->link.head)
7315 io_queue_sqe(state->link.head);
7330 /* set only head, no need to init link_last in advance */
7331 state->link.head = NULL;
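
File lines 7237-7331 track the in-progress link at submission time: the first SQE in a chain becomes link->head, later ones are appended, and a submission without IOSQE_IO_LINK closes the chain and queues the head. From the application side (a liburing-based illustration, NULL checks omitted), building such a chain just means setting IOSQE_IO_LINK on every SQE except the last:

    #include <liburing.h>

    /* Illustrative only: write then fsync, executed in order as one chain. */
    static int write_then_fsync(struct io_uring *ring, int fd,
                                const void *buf, unsigned int len)
    {
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_write(sqe, fd, buf, len, 0);
        sqe->flags |= IOSQE_IO_LINK;        /* keep the chain open */

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_fsync(sqe, fd, 0);    /* last SQE: no link flag, closes the chain */

        return io_uring_submit(ring);
    }
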
7340 * since once we write the new head, the application could
7343 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
7356 unsigned head, mask = ctx->sq_entries - 1;
7360 * The cached sq head (or cq tail) serves two purposes:
7363 * head updates.
7364 * 2) allows the kernel side to track the head on its own, even
7367 head = READ_ONCE(ctx->sq_array[sq_idx]);
7368 if (likely(head < ctx->sq_entries))
7369 return &ctx->sq_sqes[head];
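
The SQE fetch path (file lines 7356-7369) reads the slot index out of the application-writable sq_array with READ_ONCE() and bounds-checks it against sq_entries before touching the SQE array, since userspace can store anything there. The application-side mirror of that fetch, sketched with C11 atomics (pointer names assumed from the mmap'ed layout; the ring-full check against the SQ head is omitted):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <linux/io_uring.h>

    /* Illustrative pointers into the mmap'ed SQ ring (from io_sqring_offsets). */
    static _Atomic uint32_t *sq_ring_tail;
    static uint32_t *sq_array, sq_ring_mask;
    static struct io_uring_sqe *sqes;

    static void push_sqe(const struct io_uring_sqe *src)
    {
        uint32_t tail = atomic_load_explicit(sq_ring_tail, memory_order_relaxed);
        uint32_t idx  = tail & sq_ring_mask;

        sqes[idx] = *src;        /* fill the SQE slot                      */
        sq_array[idx] = idx;     /* tell the kernel which slot to consume  */

        /* release pairs with the kernel's acquire-load of the tail,
         * ordering the SQE/array stores before the new tail is visible */
        atomic_store_explicit(sq_ring_tail, tail + 1, memory_order_release);
    }
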
7420 /* Commit SQ ring head once we've consumed and submitted all SQEs */
7731 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
7749 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
8269 struct sk_buff_head list, *head = &sock->sk_receive_queue;
8279 skb = skb_dequeue(head);
8313 skb = skb_dequeue(head);
8317 spin_lock_irq(&head->lock);
8319 __skb_queue_tail(head, skb);
8320 spin_unlock_irq(&head->lock);
8980 * the last compound head, so generally we'll only do a full search if we don't
8983 * We check if the given compound head page has already been accounted, to
10441 p->sq_off.head = offsetof(struct io_rings, sq.head);
10450 p->cq_off.head = offsetof(struct io_rings, cq.head);
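
Finally, file lines 10441-10450 are where the setup path reports the ring layout back to userspace: the head/tail/mask/array offsets are filled into io_sqring_offsets and io_cqring_offsets so the application can compute pointers into the mmap'ed rings. A minimal sketch of mapping the SQ ring with those offsets (no liburing; ring_fd and p are assumed to come from a successful io_uring_setup() call):

    #include <stddef.h>
    #include <sys/mman.h>
    #include <linux/io_uring.h>

    static int map_sq_ring(int ring_fd, const struct io_uring_params *p,
                           unsigned int **sq_head, unsigned int **sq_tail)
    {
        size_t sz = p->sq_off.array + p->sq_entries * sizeof(unsigned int);
        char *ring = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                          MAP_SHARED, ring_fd, IORING_OFF_SQ_RING);

        if (ring == MAP_FAILED)
            return -1;
        *sq_head = (unsigned int *)(ring + p->sq_off.head);
        *sq_tail = (unsigned int *)(ring + p->sq_off.tail);
        return 0;
    }
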