Lines Matching defs:tail

9  * After the application reads the CQ ring tail, it must use an
11 * before writing the tail (using smp_load_acquire to read the tail will
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
21 * to store the tail will do). And it needs a barrier ordering the SQ
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
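
The comment fragments above come from the barrier documentation at the top of the kernel's io_uring implementation (fs/io_uring.c in the 5.x series, judging by the line numbers). A minimal sketch of the application-side CQ half of that contract follows; the helper name, the pointer plumbing, and the use of C11 atomics in place of smp_load_acquire()/smp_store_release() are illustrative assumptions, not a real library API.

#include <stdatomic.h>
#include <stdint.h>
#include <linux/io_uring.h>

/* Drain available CQEs.  cq_tail/cq_head/cqes are assumed to point into
 * the mmap'd CQ ring; cq_mask is cq_entries - 1. */
static unsigned reap_cqes(_Atomic uint32_t *cq_tail, _Atomic uint32_t *cq_head,
			  uint32_t cq_mask, const struct io_uring_cqe *cqes)
{
	/* Acquire pairs with the kernel's release store of cq.tail, making
	 * the CQE contents written before that store visible here. */
	uint32_t tail = atomic_load_explicit(cq_tail, memory_order_acquire);
	uint32_t head = atomic_load_explicit(cq_head, memory_order_relaxed);
	unsigned seen = 0;

	while (head != tail) {
		const struct io_uring_cqe *cqe = &cqes[head & cq_mask];
		(void)cqe;	/* consume cqe->user_data / cqe->res here */
		head++;
		seen++;
	}

	/* Release orders the CQE loads above before the head update, so the
	 * kernel only reuses slots the application has finished reading. */
	atomic_store_explicit(cq_head, head, memory_order_release);
	return seen;
}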
116 u32 tail ____cacheline_aligned_in_smp;
128 * Head and tail offsets into the ring; the offsets need to be
131 * The kernel controls head of the sq ring and the tail of the cq ring,
132 * and the application controls tail of the sq ring and the head of the
137 * Bitmasks to apply to head and tail offsets (constant, equals
163 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
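
The IORING_SQ_NEED_WAKEUP note above is the one rule that needs a full barrier rather than acquire/release. A hedged application-side sketch, assuming sq_tail and sq_flags point into the mmap'd SQ ring and using a C11 seq_cst fence where the comment asks for smp_mb():

#include <stdatomic.h>
#include <stdint.h>
#include <linux/io_uring.h>

/* Returns nonzero if the application must call io_uring_enter() with
 * IORING_ENTER_SQ_WAKEUP because the SQPOLL thread may have gone idle. */
static int publish_tail_and_check_wakeup(_Atomic uint32_t *sq_tail,
					 uint32_t new_tail,
					 _Atomic uint32_t *sq_flags)
{
	/* Release store publishes the SQEs written before the tail update. */
	atomic_store_explicit(sq_tail, new_tail, memory_order_release);

	/* Full barrier between the tail store and the flags load, matching
	 * the "full memory barrier before checking for IORING_SQ_NEED_WAKEUP
	 * after updating the sq tail" requirement. */
	atomic_thread_fence(memory_order_seq_cst);

	return atomic_load_explicit(sq_flags, memory_order_relaxed) &
	       IORING_SQ_NEED_WAKEUP;
}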
1585 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
1599 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
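
The fullness check above works because head and tail are free-running u32 counters that are only masked at indexing time; the unsigned subtraction yields the ring occupancy even after the counters wrap. A small standalone demonstration (names are illustrative):

#include <assert.h>
#include <stdint.h>

static uint32_t ring_used(uint32_t head, uint32_t tail)
{
	return tail - head;	/* wraparound-safe unsigned difference */
}

int main(void)
{
	/* tail has wrapped past UINT32_MAX, head has not yet */
	uint32_t head = 0xfffffff0u, tail = 0x00000010u;

	assert(ring_used(head, tail) == 0x20);		/* 32 entries in flight */
	assert(ring_used(head, head + 4096) == 4096);	/* "full" at sq_entries = 4096 */
	return 0;
}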
1610 unsigned tail, mask = ctx->cq_entries - 1;
1620 tail = ctx->cached_cq_tail++;
1621 return &rings->cqes[tail & mask];
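
The indexing above depends on the ring sizes being powers of two (io_uring rounds the requested entry count up), so tail & mask is equivalent to tail % entries while staying cheap and wrap-safe. A quick check of that equivalence:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t entries = 256, mask = entries - 1;
	uint32_t tail = 0xffffff05u;			/* free-running, near wrap */

	assert((tail & mask) == (tail % entries));	/* same slot either way */
	assert(((tail + 1) & mask) == 0x06);		/* still advances linearly */
	return 0;
}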
2453 /* make sure SQ entry isn't read before tail */
2454 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
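
The acquire load above is the kernel half of the SQ handshake; the application half is a release store of the tail after writing the SQE and its slot in the index array. A sketch of queueing one no-op request, with the pointer layout assumed to come from the mmap'd rings (classic layout with an SQE index array) and all error and ring-full handling omitted:

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>
#include <linux/io_uring.h>

struct sq_view {
	_Atomic uint32_t *tail;
	uint32_t mask;			/* sq_entries - 1 */
	uint32_t *array;		/* SQE index array in the SQ ring */
	struct io_uring_sqe *sqes;	/* separate IORING_OFF_SQES mapping */
};

static void queue_nop(struct sq_view *sq, uint64_t user_data)
{
	uint32_t tail = atomic_load_explicit(sq->tail, memory_order_relaxed);
	uint32_t idx = tail & sq->mask;
	struct io_uring_sqe *sqe = &sq->sqes[idx];

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_NOP;
	sqe->user_data = user_data;
	sq->array[idx] = idx;

	/* Release store: the SQE and array writes above must be visible
	 * before the kernel's smp_load_acquire() observes the new tail. */
	atomic_store_explicit(sq->tail, tail + 1, memory_order_release);
}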
2656 u32 tail = ctx->cached_cq_tail;
2663 if (tail != ctx->cached_cq_tail ||
6299 u32 tail, off = req->timeout.off;
6313 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
6314 req->timeout.target_seq = tail + off;
6320 ctx->cq_last_tm_flush = tail;
6332 /* nxt.seq is behind @tail, otherwise would've been completed */
6333 if (off >= nxt->timeout.target_seq - tail)
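
The timeout lines above arm a timeout against a CQ sequence number: the target is the current completion count (with timeout CQEs subtracted out) plus the requested offset, and later comparisons subtract first so they stay correct across u32 wraparound. A toy model of that sequence arithmetic, not the kernel's exact code:

#include <assert.h>
#include <stdint.h>

/* Fire once cur_seq has advanced by at least "off" completions past the
 * point where the timeout was armed; subtraction keeps it wrap-safe. */
static int timeout_should_fire(uint32_t cur_seq, uint32_t armed_seq, uint32_t off)
{
	return cur_seq - armed_seq >= off;
}

int main(void)
{
	uint32_t armed = 0xfffffffau;	/* armed just before the counter wraps */
	uint32_t off = 10;

	assert(!timeout_should_fire(armed + 9, armed, off));
	assert( timeout_should_fire(armed + 10, armed, off));	/* cur has wrapped to 0x4 */
	return 0;
}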
7360 * The cached sq head (or cq tail) serves two purposes:
7383 /* make sure SQ entry isn't read before tail */
7749 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
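
The expression above treats head == tail as "no completions pending"; the same convention lets the application poll the CQ ring cheaply before deciding to enter the kernel. A minimal sketch, with the pointers assumed to come from the mmap'd CQ ring:

#include <stdatomic.h>
#include <stdint.h>

static int cq_has_pending(_Atomic uint32_t *cq_head, _Atomic uint32_t *cq_tail)
{
	uint32_t head = atomic_load_explicit(cq_head, memory_order_relaxed);
	uint32_t tail = atomic_load_explicit(cq_tail, memory_order_acquire);

	return head != tail;	/* empty ring when the indices meet */
}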
10442 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
10451 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
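
These offsetof() values are what io_uring_setup() reports back in io_sqring_offsets/io_cqring_offsets; the application adds them to the mmap'd ring base to find the shared tail words that the ordering rules above protect. A raw-syscall sketch of that setup, assuming the classic two-mapping layout (the IORING_FEAT_SINGLE_MMAP single mapping is an optimization on top of it), with only minimal error handling:

#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

int main(void)
{
	struct io_uring_params p = { 0 };
	int fd = syscall(__NR_io_uring_setup, 64, &p);
	if (fd < 0)
		return 1;

	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(uint32_t);
	size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);

	uint8_t *sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
	uint8_t *cq_ring = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
	if (sq_ring == MAP_FAILED || cq_ring == MAP_FAILED)
		return 1;

	/* The shared tail words that the kernel/application barrier rules
	 * in the comment excerpts above apply to. */
	volatile uint32_t *sq_tail = (uint32_t *)(sq_ring + p.sq_off.tail);
	volatile uint32_t *cq_tail = (uint32_t *)(cq_ring + p.cq_off.tail);

	(void)sq_tail;
	(void)cq_tail;
	return 0;
}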