Lines Matching refs:ctx

30 zink_reset_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
32 struct zink_screen *screen = zink_screen(ctx->base.screen);
53 struct util_idalloc *ids = i ? &ctx->di.bindless[is_buffer].img_slots : &ctx->di.bindless[is_buffer].tex_slots;
89 zink_program_reference(ctx, &pg, NULL);
129 zink_clear_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
132 zink_reset_batch_state(ctx, bs);
133 unref_resources(zink_screen(ctx->base.screen), bs);
137 pop_batch_state(struct zink_context *ctx)
139 const struct zink_batch_state *bs = ctx->batch_states;
140 ctx->batch_states = bs->next;
141 ctx->batch_states_count--;
142 if (ctx->last_fence == &bs->fence)
143 ctx->last_fence = NULL;
147 zink_batch_reset_all(struct zink_context *ctx)
149 while (ctx->batch_states) {
150 struct zink_batch_state *bs = ctx->batch_states;
152 pop_batch_state(ctx);
153 zink_reset_batch_state(ctx, bs);
154 util_dynarray_append(&ctx->free_batch_states, struct zink_batch_state *, bs);
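
These hits come from the zink driver's batch-state management. Lines 137-143 and 147-154 show the core recycling pattern: pop_batch_state() unlinks the oldest submitted state (clearing ctx->last_fence if it pointed at that state), and zink_batch_reset_all() drains the whole list, resetting each state and parking it on ctx->free_batch_states for reuse. Below is a minimal, self-contained sketch of that pattern; the struct layouts are illustrative stand-ins (the real zink_batch_state tracks far more, and the free states live in a util_dynarray rather than a linked list).

/* Minimal sketch of the pop/reset/recycle pattern; illustrative stand-in
 * types, not the real Mesa definitions. */
#include <stddef.h>

struct batch_state {
   struct batch_state *next;               /* oldest-first list of submitted states */
   unsigned resource_count;                /* stand-in for per-batch tracking data */
};

struct context {
   struct batch_state *batch_states;       /* submitted, not yet recycled */
   unsigned batch_states_count;
   struct batch_state *last_fence_owner;   /* stand-in for ctx->last_fence */
   struct batch_state *free_states;        /* stand-in for ctx->free_batch_states */
};

/* Mirrors pop_batch_state() (137-143): unlink the oldest state and drop the
 * cached last-fence pointer if it belonged to that state. */
static struct batch_state *
pop_batch_state(struct context *ctx)
{
   struct batch_state *bs = ctx->batch_states;
   ctx->batch_states = bs->next;
   ctx->batch_states_count--;
   if (ctx->last_fence_owner == bs)
      ctx->last_fence_owner = NULL;
   return bs;
}

/* Mirrors the zink_batch_reset_all() loop (149-154): reset every state and
 * park it on the free list for later reuse. */
static void
batch_reset_all(struct context *ctx)
{
   while (ctx->batch_states) {
      struct batch_state *bs = pop_batch_state(ctx);
      bs->resource_count = 0;              /* stand-in for zink_reset_batch_state() */
      bs->next = ctx->free_states;
      ctx->free_states = bs;
   }
}

zink_clear_batch_state() (129-133) is the heavier variant of the reset: the same zink_reset_batch_state() followed by unref_resources(), which drops the references the batch was still holding (programs at 89, bindless slots at 53, and so on).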
193 create_batch_state(struct zink_context *ctx)
195 struct zink_screen *screen = zink_screen(ctx->base.screen);
229 bs->ctx = ctx;
273 get_batch_state(struct zink_context *ctx, struct zink_batch *batch)
275 struct zink_screen *screen = zink_screen(ctx->base.screen);
278 if (util_dynarray_num_elements(&ctx->free_batch_states, struct zink_batch_state*))
279 bs = util_dynarray_pop(&ctx->free_batch_states, struct zink_batch_state*);
280 if (!bs && ctx->batch_states) {
282 if (zink_screen_check_last_finished(screen, ctx->batch_states->fence.batch_id) ||
283 find_unused_state(ctx->batch_states)) {
284 bs = ctx->batch_states;
285 pop_batch_state(ctx);
289 zink_reset_batch_state(ctx, bs);
294 struct zink_batch_state *state = create_batch_state(ctx);
295 util_dynarray_append(&ctx->free_batch_states, struct zink_batch_state *, state);
298 bs = create_batch_state(ctx);
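
Lines 273-298 show the acquisition order in get_batch_state(): first pop a previously recycled state from ctx->free_batch_states (278-279); failing that, reuse the oldest in-flight state if its fence has already signalled or it is otherwise unused (280-285); any reused state is reset before use (289); only as a last resort is a new state created (298), and the hits at 294-295 suggest an extra state may be created and parked on the free list at the same time, which the sketch omits. A sketch of that priority chain, with the util_dynarray reduced to a small array and the fence check (zink_screen_check_last_finished()/find_unused_state()) reduced to a "done" flag; all layouts here are assumptions:

/* Sketch of the reuse-before-allocate order in get_batch_state(); the free
 * list is a small array instead of a util_dynarray, and "done" stands in
 * for the fence check. All layouts are assumptions. */
#include <stdbool.h>
#include <stdlib.h>

struct batch_state {
   struct batch_state *next;
   bool done;                              /* stand-in for "fence has signalled" */
};

struct context {
   struct batch_state *batch_states;       /* oldest first */
   unsigned batch_states_count;
   struct batch_state *free_states[8];     /* stand-in for ctx->free_batch_states */
   unsigned num_free;
};

static struct batch_state *
acquire_batch_state(struct context *ctx)
{
   struct batch_state *bs = NULL;

   /* 1. Cheapest: a state already recycled onto the free list (278-279). */
   if (ctx->num_free)
      bs = ctx->free_states[--ctx->num_free];

   /* 2. Next best: the oldest in-flight state, if the GPU is done with it
    *    (280-285). */
   if (!bs && ctx->batch_states && ctx->batch_states->done) {
      bs = ctx->batch_states;              /* unlink the head, as pop_batch_state() does */
      ctx->batch_states = bs->next;
      ctx->batch_states_count--;
   }

   if (bs) {                               /* any reused state is reset first (289) */
      bs->next = NULL;
      bs->done = false;
      return bs;
   }

   /* 3. Last resort: allocate a brand-new state (create_batch_state(), 298). */
   return calloc(1, sizeof(*bs));
}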
304 zink_reset_batch(struct zink_context *ctx, struct zink_batch *batch)
306 batch->state = get_batch_state(ctx, batch);
313 zink_start_batch(struct zink_context *ctx, struct zink_batch *batch)
315 zink_reset_batch(ctx, batch);
332 if (ctx->last_fence) {
333 struct zink_batch_state *last_state = zink_batch_state(ctx->last_fence);
337 if (!ctx->queries_disabled)
338 zink_resume_queries(ctx, batch);
345 struct zink_screen *screen = zink_screen(bs->ctx->base.screen);
348 if (bs->ctx->reset.reset)
349 bs->ctx->reset.reset(bs->ctx->reset.data, PIPE_GUILTY_CONTEXT_RESET);
354 } else if (bs->ctx->batch_states_count > 5000) {
363 struct zink_context *ctx = bs->ctx;
364 struct zink_screen *screen = zink_screen(ctx->base.screen);
456 zink_end_batch(struct zink_context *ctx, struct zink_batch *batch)
458 if (!ctx->queries_disabled)
459 zink_suspend_queries(ctx, batch);
461 tc_driver_internal_flush_notify(ctx->tc);
463 struct zink_screen *screen = zink_screen(ctx->base.screen);
466 if (ctx->oom_flush || ctx->batch_states_count > 10) {
467 assert(!ctx->batch_states_count || ctx->batch_states);
468 while (ctx->batch_states) {
469 bs = ctx->batch_states;
472 if (!zink_check_batch_completion(ctx, fence->batch_id))
475 pop_batch_state(ctx);
476 zink_reset_batch_state(ctx, bs);
477 util_dynarray_append(&ctx->free_batch_states, struct zink_batch_state *, bs);
479 if (ctx->batch_states_count > 50)
480 ctx->oom_flush = true;
484 if (ctx->last_fence)
485 zink_batch_state(ctx->last_fence)->next = bs;
487 assert(!ctx->batch_states);
488 ctx->batch_states = bs;
490 ctx->last_fence = &bs->fence;
491 ctx->batch_states_count++;
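
Lines 456-491 show the bookkeeping in zink_end_batch() around submission: queries are suspended and the threaded context is notified (458-461); if an OOM flush is pending or more than 10 states are in flight, completed states at the head of ctx->batch_states are recycled and oom_flush is re-armed when more than 50 remain (466-480); finally the new state is appended at the tail through the cached last-fence pointer (484-491). A sketch of that bookkeeping, assuming the trim loop stops at the first fence that has not signalled (only the check at 472 is shown); "signalled" stands in for zink_check_batch_completion() and last_state for zink_batch_state(ctx->last_fence):

/* Sketch of the end-of-batch bookkeeping; stand-in types as above. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct batch_state {
   struct batch_state *next;
   bool signalled;                         /* stand-in for the batch_id fence check */
};

struct context {
   struct batch_state *batch_states;       /* oldest first */
   unsigned batch_states_count;
   struct batch_state *last_state;         /* owner of ctx->last_fence */
   struct batch_state *free_states;        /* stand-in for ctx->free_batch_states */
   bool oom_flush;
};

static void
end_batch_bookkeeping(struct context *ctx, struct batch_state *bs)
{
   /* Trim the in-flight list when it gets long or an OOM flush is pending
    * (466-477). */
   if (ctx->oom_flush || ctx->batch_states_count > 10) {
      while (ctx->batch_states) {
         struct batch_state *old = ctx->batch_states;
         if (!old->signalled)
            break;                         /* everything after this is younger */
         ctx->batch_states = old->next;
         ctx->batch_states_count--;
         if (ctx->last_state == old)
            ctx->last_state = NULL;
         old->next = ctx->free_states;     /* reset + recycle, as in the sketches above */
         ctx->free_states = old;
      }
      /* Still too many states in flight: keep flushing eagerly (479-480). */
      if (ctx->batch_states_count > 50)
         ctx->oom_flush = true;
   }

   /* Append the new state at the tail, keeping the list oldest-first (484-491). */
   if (ctx->last_state)
      ctx->last_state->next = bs;
   else {
      assert(!ctx->batch_states);
      ctx->batch_states = bs;
   }
   ctx->last_state = bs;
   ctx->batch_states_count++;
}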
518 VkSemaphore acquire = zink_kopper_acquire_submit(zink_screen(batch->state->ctx->base.screen), res);
524 batch->state->ctx->rp_loadop_changed = true;
562 check_oom_flush(struct zink_context *ctx, const struct zink_batch *batch)
565 if (resource_size >= zink_screen(ctx->base.screen)->clamp_video_mem) {
566 ctx->oom_flush = true;
567 ctx->oom_stall = true;
578 check_oom_flush(batch->state->ctx, batch);
588 check_oom_flush(batch->state->ctx, batch);
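
Lines 562-567 show check_oom_flush(): once the memory referenced by the current batch reaches the screen-wide clamp_video_mem threshold, the context schedules an early flush and a stall so the memory can actually be reclaimed; the call sites at 578 and 588 reach the context through batch->state->ctx. A sketch of that check, assuming resource_size is a per-batch running total (its origin is not in the matched lines) and using stand-in struct layouts:

/* Sketch of check_oom_flush() (562-567); stand-in types. */
#include <stdbool.h>
#include <stdint.h>

struct screen {
   uint64_t clamp_video_mem;               /* threshold derived from available memory */
};

struct batch {
   uint64_t resource_size;                 /* memory referenced by this batch's resources */
};

struct context {
   struct screen *screen;
   bool oom_flush;                         /* flush at the next opportunity */
   bool oom_stall;                         /* and wait for the flush to retire */
};

static void
check_oom_flush(struct context *ctx, const struct batch *batch)
{
   if (batch->resource_size >= ctx->screen->clamp_video_mem) {
      ctx->oom_flush = true;
      ctx->oom_stall = true;
   }
}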
658 zink_batch_usage_check_completion(struct zink_context *ctx, const struct zink_batch_usage *u)
664 return zink_check_batch_completion(ctx, u->usage);
668 zink_batch_usage_wait(struct zink_context *ctx, struct zink_batch_usage *u)
673 if (likely(u == &ctx->batch.state->usage))
674 ctx->base.flush(&ctx->base, NULL, PIPE_FLUSH_HINT_FINISH);
681 zink_wait_on_batch(ctx, u->usage);
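
Lines 658-681 show the batch-usage helpers: a usage records which batch last touched an object, zink_batch_usage_check_completion() forwards to zink_check_batch_completion(), and zink_batch_usage_wait() first flushes when the usage belongs to the batch still being recorded (673-674), since that batch cannot retire until it is submitted, then waits via zink_wait_on_batch(). The sketch below reduces the pointer comparison against ctx->batch.state->usage to monotonically increasing batch ids, treats 0 as "never used", and uses placeholder flush/wait helpers; all of that is assumed rather than taken from the matched lines:

/* Sketch of the batch-usage idea (658-681); ids and helpers are stand-ins. */
#include <stdbool.h>
#include <stdint.h>

struct context {
   uint32_t recording_id;                  /* id of the batch currently being recorded */
   uint32_t last_finished_id;              /* highest id known to have retired */
};

struct batch_usage {
   uint32_t usage;                         /* batch that last used the object, 0 = idle */
};

/* Placeholder for ctx->base.flush(): submitting makes the batch waitable. */
static void
flush_current_batch(struct context *ctx)
{
   ctx->last_finished_id = ctx->recording_id;   /* pretend it retires immediately */
   ctx->recording_id++;
}

/* Placeholder for zink_wait_on_batch(): block until the id retires. */
static void
wait_for_batch(struct context *ctx, uint32_t id)
{
   while (ctx->last_finished_id < id)
      ctx->last_finished_id++;             /* real code waits on the fence */
}

/* Analogue of zink_batch_usage_check_completion(): query without waiting. */
static bool
usage_check_completion(struct context *ctx, const struct batch_usage *u)
{
   if (!u->usage)
      return true;
   return u->usage <= ctx->last_finished_id;
}

/* Analogue of zink_batch_usage_wait(): flush if needed, then wait. */
static void
usage_wait(struct context *ctx, struct batch_usage *u)
{
   if (!u->usage)
      return;
   if (u->usage == ctx->recording_id)
      flush_current_batch(ctx);            /* cannot finish until submitted (673-674) */
   wait_for_batch(ctx, u->usage);
}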