Lines matching defs:refs (occurrences of the refs member in the io_uring source; the number on each entry is its line in that file)
237 struct percpu_ref refs;
253 atomic_t refs;
279 refcount_t refs;
333 struct percpu_ref refs;
874 atomic_t refs;
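
The definitions above show three different counting primitives in one file. From the uses later in this listing: the ring context (1334) and rsrc nodes (7828) sit behind struct percpu_ref, requests (1150) and rsrc data (7888) use bare atomic_t, and the SQPOLL data (8153) and io-wq hash (8650) use refcount_t. As a quick orientation on what each primitive buys (a summary, not quoted from the source):

    #include <linux/percpu-refcount.h> /* percpu_ref: per-CPU get/put fast path,
                                        * explicit kill/reinit lifecycle */
    #include <linux/refcount.h>        /* refcount_t: saturates and warns on
                                        * overflow/underflow instead of wrapping */
    #include <linux/atomic.h>          /* atomic_t: raw counter; lifetime rules
                                        * are entirely on the caller */
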
1145 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
1150 return atomic_inc_not_zero(&req->refs);
1159 return atomic_dec_and_test(&req->refs);
1166 atomic_inc(&req->refs);
1173 atomic_set(&req->refs, nr);
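
1145-1173 are fragments of the per-request helpers built on atomic_t. A sketch of how they fit together (the wrapper names follow fs/io_uring.c conventions but are reconstructed here, not quoted):

    /* 1145: true when the count is 0 or has underflowed and wrapped to a
     * huge unsigned value, i.e. is within 127 of overflowing. */
    #define req_ref_zero_or_close_to_overflow(req) \
            ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)

    static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
    {
            return atomic_inc_not_zero(&req->refs);         /* 1150 */
    }

    static inline bool req_ref_put_and_test(struct io_kiocb *req)
    {
            WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
            return atomic_dec_and_test(&req->refs);         /* 1159 */
    }

    static inline void req_ref_get(struct io_kiocb *req)
    {
            WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
            atomic_inc(&req->refs);                         /* 1166 */
    }

    static inline void io_req_set_refcount(struct io_kiocb *req, int nr)
    {
            atomic_set(&req->refs, nr);                     /* 1173 */
    }

The overflow macro works because an underflowed counter reads back as a value near UINT_MAX, which wraps below 127u once 127u is added.
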
1187 req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
1273 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1291 percpu_ref_get(&ctx->refs);
1300 percpu_ref_put(&ctx->refs);
1334 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
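
1273-1334 are the lifetime of the ring context's percpu_ref: initialised with a release callback, pinned and unpinned on the hot paths (1291/1300). A minimal sketch of the same pattern, assuming a trimmed-down ctx with just the fields involved (ref_comp is real, see 10923; the ALLOW_REINIT flag is inferred from the reinit at 11061):

    struct example_ctx {
            struct percpu_ref       refs;
            struct completion       ref_comp;   /* signalled by the release */
    };

    /* Runs once the count hits zero, which can only happen after
     * percpu_ref_kill() has switched the ref to atomic mode. */
    static void example_ctx_ref_free(struct percpu_ref *ref)
    {
            struct example_ctx *ctx = container_of(ref, struct example_ctx, refs);

            complete(&ctx->ref_comp);
    }

    static int example_ctx_init(struct example_ctx *ctx)
    {
            if (percpu_ref_init(&ctx->refs, example_ctx_ref_free,
                                PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
                    return -ENOMEM;
            init_completion(&ctx->ref_comp);
            return 0;
    }
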
1419 /* linked timeouts should have two refs once prep'ed */
1771 unsigned int refs = tctx->cached_refs;
1773 if (refs) {
1775 percpu_counter_sub(&tctx->inflight, refs);
1776 put_task_struct_many(task, refs);
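
1771-1776 flush a batch of cached task references in one go instead of doing a put per request. Reconstructed as a function (the surrounding locking is omitted; the io_uring_task layout follows the field names in the listing):

    static void example_drop_tctx_refs(struct task_struct *task)
    {
            struct io_uring_task *tctx = task->io_uring;
            unsigned int refs = tctx->cached_refs;

            if (refs) {
                    tctx->cached_refs = 0;
                    percpu_counter_sub(&tctx->inflight, refs);
                    put_task_struct_many(task, refs);
            }
    }

put_task_struct_many() folds the whole batch into a single subtraction on the task's usage count, which is the point of caching the refs in the first place.
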
1867 if (!percpu_ref_tryget(&ctx->refs))
1875 percpu_ref_put(&ctx->refs);
2050 percpu_ref_put(&ctx->refs);
2178 percpu_ref_put(&ctx->refs);
2213 percpu_ref_get(&ctx->refs);
2357 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
2719 if (percpu_ref_is_dying(&ctx->refs))
5358 * We usually have 1-2 refs taken, 128 is more than enough and we want to
5379 * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
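
5358-5379 are the comments above the poll-path refcounting, where the low bits of an atomic ->poll_refs both count users and encode ownership: per 5379, a zero ref part means the request is free, and whoever bumps it from zero owns poll handling (the comment at 5358 notes only 1-2 refs are usually taken). A sketch of that take-ownership primitive (the mask layout and helper name are assumptions modelled on io_uring's poll code):

    #include <linux/atomic.h>
    #include <linux/bits.h>

    #define EXAMPLE_POLL_REF_MASK   GENMASK(30, 0) /* low bits: ref count */

    /*
     * Bump the count; if the ref part was 0 beforehand the request was
     * free, and the caller becomes the exclusive owner of poll handling.
     */
    static inline bool example_poll_get_ownership(atomic_t *poll_refs)
    {
            return !(atomic_fetch_inc(poll_refs) & EXAMPLE_POLL_REF_MASK);
    }
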
7385 if (!percpu_ref_tryget_many(&ctx->refs, nr))
7416 percpu_ref_put_many(&ctx->refs, unused);
7470 * Don't submit if refs are dying, good for io_uring_register(),
7473 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
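
7385-7416 batch the context references for a whole submission: take one ref per SQE up front, then hand back whatever was not consumed; 7470-7473 additionally skip submission once the ref is dying. A sketch of that shape, reusing example_ctx from above (the submission loop itself is elided):

    static int example_submit(struct example_ctx *ctx, unsigned int nr)
    {
            unsigned int submitted = 0;

            /* Don't submit into a ring being torn down (cf. 7473). */
            if (percpu_ref_is_dying(&ctx->refs))
                    return -ENXIO;

            /* One ref per request we may queue, taken in a single op. */
            if (!percpu_ref_tryget_many(&ctx->refs, nr))
                    return -EAGAIN;

            /* ... queue up to nr requests, counting them in submitted ... */

            if (unlikely(submitted != nr)) {
                    unsigned int unused = nr - submitted;

                    percpu_ref_put_many(&ctx->refs, unused);    /* cf. 7416 */
            }
            return submitted;
    }

The same put_many batching shows up on the completion side at 2357.
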
7786 percpu_ref_exit(&ref_node->refs);
7792 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
7828 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
7853 atomic_inc(&data_to_kill->refs);
7854 percpu_ref_kill(&rsrc_node->refs);
7888 if (atomic_dec_and_test(&data->refs))
7895 if (atomic_read(&data->refs) > 0) {
7906 atomic_inc(&data->refs);
7967 atomic_set(&data->refs, 1);
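
7786-7967 are the buffered-resource lifetime dance: each io_rsrc_node carries a percpu_ref that is killed when the node is retired (7854), while the backing io_rsrc_data keeps a plain atomic count with a base reference of 1 from creation (7967). Retiring nodes pin the data (7853) and drop that pin once their own ref drains (see 8353 below); the last put fires a completion the quiesce path waits on (7888-7906). Boiled down to the counting protocol (heavily simplified; the real loop around 7895 also juggles ->uring_lock and signals):

    struct example_rsrc_data {
            atomic_t                refs;
            struct completion       done;
    };

    static void example_data_init(struct example_rsrc_data *data)
    {
            atomic_set(&data->refs, 1);     /* base ref, cf. 7967 */
            init_completion(&data->done);
    }

    /* Dropped by each retired node that referenced the table. */
    static void example_data_put(struct example_rsrc_data *data)
    {
            if (atomic_dec_and_test(&data->refs))
                    complete(&data->done);
    }

    /* Quiesce: give up the base ref, then wait out in-flight nodes. */
    static void example_data_quiesce(struct example_rsrc_data *data)
    {
            if (atomic_dec_and_test(&data->refs))
                    return;                 /* nothing was in flight */
            wait_for_completion(&data->done);
    }
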
8078 if (refcount_dec_and_test(&sqd->refs)) {
8126 refcount_inc(&sqd->refs);
8153 refcount_set(&sqd->refs, 1);
8353 if (atomic_dec_and_test(&rsrc_data->refs))
8650 refcount_set(&hash->refs, 1);
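
8078-8153 and 8650 use refcount_t, the checked counter, for the SQPOLL thread data and the io-wq hash: refcount_inc()/refcount_dec_and_test() saturate and warn rather than silently wrapping. The usual shape (struct layout and free path here are placeholders):

    struct example_sqd {
            refcount_t      refs;
            /* ... */
    };

    static struct example_sqd *example_sqd_alloc(void)
    {
            struct example_sqd *sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);

            if (sqd)
                    refcount_set(&sqd->refs, 1);    /* creator's ref, cf. 8153 */
            return sqd;
    }

    static void example_sqd_put(struct example_sqd *sqd)
    {
            if (refcount_dec_and_test(&sqd->refs))  /* cf. 8078 */
                    kfree(sqd);
    }
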
9362 if (data && !atomic_dec_and_test(&data->refs))
9413 percpu_ref_exit(&ctx->refs);
9511 * we're waiting for refs to drop. We need to reap these manually,
9549 * Some may use context even when all refs and requests have been put,
9613 percpu_ref_kill(&ctx->refs);
9626 /* drop cached put refs after potentially doing completions */
10083 if (unlikely(!percpu_ref_tryget(&ctx->refs)))
10147 percpu_ref_put(&ctx->refs);
10257 if (percpu_ref_tryget(&ctx->refs)) {
10259 percpu_ref_put(&ctx->refs);
10816 refcount_inc(&sqd->refs);
10904 percpu_ref_kill(&ctx->refs);
10923 io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
10939 if (percpu_ref_is_dying(&ctx->refs))
11061 percpu_ref_reinit(&ctx->refs);
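
The tail of the listing (10904-11061) is the io_uring_register() quiesce cycle: kill ctx->refs so the ring drains, wait for in-flight users to finish, do the registration, then bring the ref back to life. The resurrect at 10923 covers the path where the wait is aborted. Happy-path sketch, again reusing example_ctx (valid only because the ref was initialised with PERCPU_REF_ALLOW_REINIT):

    static int example_register_quiesce(struct example_ctx *ctx)
    {
            /* Another register call may already be quiescing (cf. 10939). */
            if (percpu_ref_is_dying(&ctx->refs))
                    return -ENXIO;

            /* Switch to atomic mode; new tryget_live() users now fail. */
            percpu_ref_kill(&ctx->refs);

            /* The release callback completes ref_comp at count zero. */
            wait_for_completion(&ctx->ref_comp);

            /* ... registration work runs with the ring quiesced ... */

            percpu_ref_reinit(&ctx->refs);  /* count restarts at 1, cf. 11061 */
            return 0;
    }
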