Lines matching references to ctx
70 rebind_resource_in_ctx(struct fd_context *ctx,
75 if (ctx->rebind_resource)
76 ctx->rebind_resource(ctx, rsc);
80 struct fd_vertexbuf_stateobj *vb = &ctx->vtx.vertexbuf;
81 for (unsigned i = 0; i < vb->count && !(ctx->dirty & FD_DIRTY_VTXBUF);
84 fd_context_dirty(ctx, FD_DIRTY_VTXBUF);
100 !(ctx->dirty_shader[stage] & FD_DIRTY_CONST)) {
101 struct fd_constbuf_stateobj *cb = &ctx->constbuf[stage];
105 fd_context_dirty_shader(ctx, stage, FD_DIRTY_SHADER_CONST);
113 !(ctx->dirty_shader[stage] & FD_DIRTY_TEX)) {
114 struct fd_texture_stateobj *tex = &ctx->tex[stage];
117 fd_context_dirty_shader(ctx, stage, FD_DIRTY_SHADER_TEX);
125 !(ctx->dirty_shader[stage] & FD_DIRTY_IMAGE)) {
126 struct fd_shaderimg_stateobj *si = &ctx->shaderimg[stage];
130 fd_context_dirty_shader(ctx, stage, FD_DIRTY_SHADER_IMAGE);
138 !(ctx->dirty_shader[stage] & FD_DIRTY_SSBO)) {
139 struct fd_shaderbuf_stateobj *sb = &ctx->shaderbuf[stage];
143 fd_context_dirty_shader(ctx, stage, FD_DIRTY_SHADER_SSBO);
160 list_for_each_entry (struct fd_context, ctx, &screen->context_list, node)
161 rebind_resource_in_ctx(ctx, rsc);
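
The hits above (source lines 70-161) show the rebind pattern: when a resource's backing storage changes, rebind_resource walks every context on the screen, and each context flags the dirty bit for any state group (vertex buffers, constants, textures, images, SSBOs) that has the resource bound, short-circuiting the per-slot scan once the bit is already set. A minimal, self-contained sketch of that pattern, using hypothetical stand-in types (struct context, DIRTY_VTXBUF) rather than the real Mesa/freedreno API:

#include <stdint.h>
#include <stdio.h>

#define DIRTY_VTXBUF (1u << 0)

struct resource { int id; };

struct context {
   uint32_t dirty;               /* bitmask of state groups to re-emit */
   struct resource *vtxbuf[4];   /* currently bound vertex buffers */
   unsigned vtxbuf_count;
};

/* If rsc is bound anywhere in the state group, flag that group dirty.
 * Once the bit is set, the remaining slots need not be scanned, which
 * mirrors the `!(ctx->dirty & FD_DIRTY_VTXBUF)` early-out above. */
static void
rebind_resource_in_ctx(struct context *ctx, struct resource *rsc)
{
   for (unsigned i = 0;
        i < ctx->vtxbuf_count && !(ctx->dirty & DIRTY_VTXBUF); i++)
      if (ctx->vtxbuf[i] == rsc)
         ctx->dirty |= DIRTY_VTXBUF;
}

int main(void)
{
   struct resource r = { 1 };
   struct context ctx = { .vtxbuf = { &r }, .vtxbuf_count = 1 };
   rebind_resource_in_ctx(&ctx, &r);
   printf("dirty=0x%x\n", ctx.dirty);   /* dirty=0x1 */
   return 0;
}
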
177 __fd_resource_wait(struct fd_context *ctx, struct fd_resource *rsc, unsigned op,
181 return fd_bo_cpu_prep(rsc->bo, ctx->pipe, op);
185 perf_time_ctx (ctx, 10000, "%s: a busy \"%" PRSC_FMT "\" BO stalled", func,
187 ret = fd_bo_cpu_prep(rsc->bo, ctx->pipe, op);
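
__fd_resource_wait (source lines 177-187) first attempts a non-blocking fd_bo_cpu_prep and only falls into a timed, blocking wait when the BO is actually busy, which is when the perf_time_ctx stall warning fires. A hedged sketch of that two-phase wait, with bo_cpu_prep() as a hypothetical stand-in for fd_bo_cpu_prep():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum { PREP_NOSYNC = 1, PREP_READ = 2 };

static bool busy = true;

static int bo_cpu_prep(unsigned op)
{
   if ((op & PREP_NOSYNC) && busy)
      return -EBUSY;            /* would block, caller asked not to */
   busy = false;                /* blocking path: wait completed */
   return 0;
}

static int resource_wait(unsigned op)
{
   int ret = bo_cpu_prep(op | PREP_NOSYNC);
   if (!ret)
      return 0;                 /* fast path: BO already idle */

   fprintf(stderr, "a busy BO stalled\n");  /* perf_time_ctx analogue */
   return bo_cpu_prep(op);      /* slow path: block until idle */
}

int main(void)
{
   return resource_wait(PREP_READ);
}
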
233 do_blit(struct fd_context *ctx, const struct pipe_blit_info *blit,
236 struct pipe_context *pctx = &ctx->base;
238 assert(!ctx->in_blit);
239 ctx->in_blit = true;
250 ctx->in_blit = false;
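
do_blit (source lines 233-250) brackets the internal blit with an in_blit flag so that a blit issued from inside the driver cannot recursively trigger another internal blit; the assert catches accidental reentry in debug builds. A sketch with illustrative names:

#include <assert.h>
#include <stdbool.h>

struct context { bool in_blit; };

static void hw_blit(struct context *ctx) { (void)ctx; /* emit commands */ }

static void do_blit(struct context *ctx)
{
   assert(!ctx->in_blit);   /* catch accidental recursion */
   ctx->in_blit = true;
   hw_blit(ctx);
   ctx->in_blit = false;
}

int main(void)
{
   struct context ctx = { false };
   do_blit(&ctx);
   return 0;
}
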
262 struct fd_context *ctx = fd_context(pctx);
289 util_idalloc_mt_free(&ctx->screen->buffer_ids, delete_buffer_id);
291 fd_screen_lock(ctx->screen);
299 dst->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);
301 fd_screen_unlock(ctx->screen);
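
The hits around source lines 289-301 show a resource being renamed: its buffer id is released and a screen-global sequence number is bumped atomically under the screen lock, giving the resource a fresh identity so state cached against the old (resource, seqno) pair is invalidated. A minimal sketch, assuming C11 atomics and a pthread mutex in place of the real fd_screen lock and p_atomic_inc_return:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct screen {
   pthread_mutex_t lock;
   atomic_uint rsc_seqno;
};

struct resource { unsigned seqno; };

static void rename_resource(struct screen *scr, struct resource *rsc)
{
   pthread_mutex_lock(&scr->lock);
   /* p_atomic_inc_return analogue: new, unique identity for rsc */
   rsc->seqno = atomic_fetch_add(&scr->rsc_seqno, 1) + 1;
   pthread_mutex_unlock(&scr->lock);
}

int main(void)
{
   struct screen scr = { PTHREAD_MUTEX_INITIALIZER, 0 };
   struct resource rsc = { 0 };
   rename_resource(&scr, &rsc);
   printf("seqno=%u\n", rsc.seqno);   /* seqno=1 */
   return 0;
}
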
333 static void flush_resource(struct fd_context *ctx, struct fd_resource *rsc,
357 fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
361 struct pipe_context *pctx = &ctx->base;
374 fd_bc_flush_writer(ctx, rsc);
417 assert(!ctx->in_shadow);
418 ctx->in_shadow = true;
426 fd_screen_lock(ctx->screen);
450 rsc->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);
457 foreach_batch (batch, &ctx->screen->batch_cache, rsc->track->batch_mask) {
464 fd_screen_unlock(ctx->screen);
481 bool saved_active_queries = ctx->active_queries;
497 do_blit(ctx, &blit, fallback);
519 do_blit(ctx, &blit, fallback);
526 do_blit(ctx, &blit, fallback);
538 ctx->in_shadow = false;
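
fd_try_shadow_resource (source lines 357-538) swaps fresh storage into the resource so pending GPU work can keep using the old BO, bumps the seqno, rewrites the batch-cache entries, and then blits the still-valid contents back from the old "shadow" storage, skipping whatever the caller is about to overwrite (with queries paused and in_shadow set around the blits). A heavily simplified, CPU-only sketch of the idea, where memcpy stands in for the blits and the old storage is freed immediately rather than when the GPU is done with it:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct resource { unsigned char *storage; size_t size; };

static int shadow_resource(struct resource *rsc,
                           size_t write_off, size_t write_len)
{
   unsigned char *shadow = rsc->storage;   /* old storage, still "GPU-visible" */
   unsigned char *fresh = malloc(rsc->size);
   if (!fresh)
      return -1;

   /* "blit" back only the parts the upcoming write leaves untouched */
   memcpy(fresh, shadow, write_off);
   memcpy(fresh + write_off + write_len,
          shadow + write_off + write_len,
          rsc->size - write_off - write_len);

   rsc->storage = fresh;   /* swap in the new identity */
   free(shadow);           /* real driver: dropped once the GPU is done */
   return 0;
}

int main(void)
{
   struct resource rsc = { calloc(1, 64), 64 };
   rsc.storage[0] = 42;
   shadow_resource(&rsc, 8, 16);
   printf("%d\n", rsc.storage[0]);   /* 42: preserved region was copied */
   free(rsc.storage);
   return 0;
}
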
552 fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc, bool linear)
554 tc_assert_driver_thread(ctx->tc);
558 bool success = fd_try_shadow_resource(ctx, rsc, 0, NULL, modifier);
576 fd_alloc_staging(struct fd_context *ctx, struct fd_resource *rsc,
580 struct pipe_context *pctx = &ctx->base;
586 if ((ctx->screen->gen < 6) && !ctx->blit &&
617 fd_blit_from_staging(struct fd_context *ctx,
635 do_blit(ctx, &blit, false);
639 fd_blit_to_staging(struct fd_context *ctx, struct fd_transfer *trans) assert_dt
656 do_blit(ctx, &blit, false);
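
fd_blit_to_staging and fd_blit_from_staging (source lines 617-656) move data between the real resource, which may be tiled or compressed, and a linear CPU-mappable staging buffer: reads fill staging before the map, writes flush it back on unmap. A sketch with memcpy standing in for the GPU blit and an illustrative transfer type:

#include <assert.h>
#include <string.h>

struct transfer {
   unsigned char *staging;    /* linear, CPU-mappable copy of the box */
   unsigned char *resource;   /* real (possibly tiled/compressed) storage */
   size_t len;
};

static void blit_from_staging(struct transfer *t)   /* unmap after a write */
{
   memcpy(t->resource, t->staging, t->len);
}

static void blit_to_staging(struct transfer *t)     /* map for a read */
{
   memcpy(t->staging, t->resource, t->len);
}

int main(void)
{
   unsigned char s[4] = { 1, 2, 3, 4 }, r[4] = { 0 };
   struct transfer t = { s, r, sizeof(s) };
   blit_from_staging(&t);
   assert(r[2] == 3);
   return 0;
}
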
673 flush_resource(struct fd_context *ctx, struct fd_resource *rsc,
677 fd_bc_flush_readers(ctx, rsc);
679 fd_bc_flush_writer(ctx, rsc);
687 struct fd_context *ctx = fd_context(pctx);
690 flush_resource(ctx, rsc, PIPE_MAP_READ);
695 fd_resource_wait(ctx, rsc, FD_BO_PREP_FLUSH);
701 in_dt /* TODO for threaded-ctx we'll need to split out unsynchronized path */
703 struct fd_context *ctx = fd_context(pctx);
709 fd_blit_from_staging(ctx, trans);
727 slab_free(&ctx->transfer_pool, ptrans);
752 struct fd_context *ctx = fd_context(pctx);
758 staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
771 fd_blit_to_staging(ctx, trans);
773 fd_resource_wait(ctx, staging_rsc, FD_BO_PREP_READ);
776 ctx->stats.staging_uploads++;
824 struct fd_context *ctx = fd_context(pctx);
829 tc_assert_driver_thread(ctx->tc);
847 perf_debug_ctx(ctx, "wc readback: prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d",
869 if (ctx->screen->reorder && busy && !(usage & PIPE_MAP_READ) &&
875 if (needs_flush && fd_try_shadow_resource(ctx, rsc, level, box,
878 ctx->stats.shadow_uploads++;
883 flush_resource(ctx, rsc, usage);
893 staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
905 ctx->stats.staging_uploads++;
913 flush_resource(ctx, rsc, usage);
922 ret = fd_resource_wait(ctx, rsc, op);
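
The transfer-map hits at source lines 869-922 encode a strategy choice: when the screen supports batch reordering and a busy resource is only being written, shadowing avoids the stall entirely; otherwise a staging copy confines the stall to a small linear blit; failing both, the driver flushes and waits on the BO. Sketched below as a hypothetical decision function, not the driver's actual control flow:

#include <stdbool.h>
#include <stdio.h>

enum map_strategy { MAP_SHADOW, MAP_STAGING, MAP_FLUSH_AND_WAIT };

static enum map_strategy
pick_strategy(bool reorder, bool busy, bool read, bool can_stage)
{
   if (reorder && busy && !read)
      return MAP_SHADOW;         /* avoid the stall entirely */
   if (busy && can_stage)
      return MAP_STAGING;        /* stall on a small linear copy instead */
   return MAP_FLUSH_AND_WAIT;    /* last resort: flush rendering, wait on BO */
}

int main(void)
{
   printf("%d\n", pick_strategy(true, true, false, true));  /* 0: MAP_SHADOW */
   return 0;
}
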
932 improve_transfer_map_usage(struct fd_context *ctx, struct fd_resource *rsc,
948 if (ctx->in_shadow && !(usage & PIPE_MAP_READ)) {
969 struct fd_context *ctx = fd_context(pctx);
983 ptrans = slab_zalloc(&ctx->transfer_pool_unsync);
985 ptrans = slab_zalloc(&ctx->transfer_pool);
993 usage = improve_transfer_map_usage(ctx, rsc, usage, box);
1455 struct fd_context *ctx = fd_context(pctx);
1457 if (!ctx->cond_query)
1463 bool wait = ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
1464 ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;
1466 if (pctx->get_query_result(pctx, ctx->cond_query, wait, &res))
1467 return (bool)res.u64 != ctx->cond_cond;
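
The render-condition hits (source lines 1455-1467) check whether a conditional-rendering query allows drawing to proceed: with no query set, rendering always happens; otherwise the query result, waited on unless a NO_WAIT mode was requested, is compared against the stored condition. A sketch with illustrative types in place of pctx and pipe_query:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct context {
   bool has_cond_query;    /* a render condition is currently set */
   bool cond_cond;         /* skip rendering when the result matches this */
   uint64_t query_result;  /* stand-in for pctx->get_query_result() */
};

/* Returns true when rendering should proceed, mirroring the
 * `(bool)res.u64 != ctx->cond_cond` comparison in the listing. */
static bool render_condition_check(const struct context *ctx)
{
   if (!ctx->has_cond_query)
      return true;                        /* no condition: always render */
   return (bool)ctx->query_result != ctx->cond_cond;
}

int main(void)
{
   struct context ctx = { true, true, 1 };
   /* the result matched the skip condition, so rendering is skipped */
   printf("render=%d\n", render_condition_check(&ctx));   /* render=0 */
   return 0;
}
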
1476 struct fd_context *ctx = fd_context(pctx);
1493 fd_context_dirty(ctx, FD_DIRTY_ZSA);
1499 fd_context_dirty(ctx, FD_DIRTY_FRAMEBUFFER);