Lines matching refs:ctx in the ir3 instruction scheduler (ir3_sched.c); each entry below is prefixed with its source line number in that file.

175 static void sched_node_init(struct ir3_sched_ctx *ctx,
191 struct ir3_sched_ctx *ctx)
198 if (sched_check_src_cond(src, cond, ctx))
201 if (cond(src, ctx))
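The matches at 191-201 are sched_check_src_cond(): it walks an instruction's sources and applies a caller-supplied predicate, recursing for certain sources (presumably meta instructions, so the predicate reaches the real producers; the guard itself is outside this listing). A minimal sketch of that shape, with stand-in types since the real ir3 types and source-iteration macros differ:

#include <stdbool.h>
#include <stddef.h>

struct sctx;                         /* stand-in for struct ir3_sched_ctx */
struct sinstr {
   bool is_meta;                     /* recurse through these */
   unsigned num_srcs;
   struct sinstr *srcs[4];
};

typedef bool (*src_cond_t)(struct sinstr *src, struct sctx *ctx);

/* Returns true if `cond` holds for any (transitive) source producer. */
static bool
check_src_cond(struct sinstr *instr, src_cond_t cond, struct sctx *ctx)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      struct sinstr *src = instr->srcs[i];
      if (!src)
         continue;
      if (src->is_meta) {
         if (check_src_cond(src, cond, ctx))   /* cf. line 198 */
            return true;
      } else if (cond(src, ctx)) {             /* cf. line 201 */
         return true;
      }
   }
   return false;
}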
212 is_outstanding_sy(struct ir3_instruction *instr, struct ir3_sched_ctx *ctx)
220 if (instr->block != ctx->block)
224 return n->sy_index >= ctx->first_outstanding_sy_index;
228 is_outstanding_ss(struct ir3_instruction *instr, struct ir3_sched_ctx *ctx)
236 if (instr->block != ctx->block)
240 return n->ss_index >= ctx->first_outstanding_ss_index;
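is_outstanding_sy/is_outstanding_ss (212-240) implement a watermark scheme: each (sy)/(ss) producer is stamped with a monotonically increasing index when scheduled (lines 334 and 352), and whenever a sync is known to have happened the first_outstanding_*_index watermark jumps past everything issued so far (lines 338 and 356). A producer is still outstanding iff it is in the current block and its stamp is at or beyond the watermark. Compact sketch with stand-in types:

#include <stdbool.h>

struct wnode { unsigned ss_index; const void *block; };
struct wctx  { const void *block; unsigned first_outstanding_ss_index; };

/* cf. lines 228-240; producers from other blocks were already synced. */
static bool
outstanding_ss(const struct wnode *n, const struct wctx *ctx)
{
   if (n->block != ctx->block)
      return false;
   return n->ss_index >= ctx->first_outstanding_ss_index;
}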
262 schedule(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
264 assert(ctx->block == instr->block);
271 assert(ctx->addr0 == NULL);
272 ctx->addr0 = instr;
276 assert(ctx->addr1 == NULL);
277 ctx->addr1 = instr;
281 assert(ctx->pred == NULL);
282 ctx->pred = instr;
290 ctx->scheduled = instr;
293 assert(ctx->remaining_kills > 0);
294 ctx->remaining_kills--;
319 ctx->ip = MAX2(ctx->ip, n->earliest_ip) + delay_cycles;
325 child->earliest_ip = MAX2(child->earliest_ip, ctx->ip + delay);
328 dag_prune_head(ctx->dag, &n->dag);
333 ctx->ss_delay = soft_ss_delay(instr);
334 n->ss_index = ctx->ss_index++;
336 sched_check_src_cond(instr, is_outstanding_ss, ctx)) {
337 ctx->ss_delay = 0;
338 ctx->first_outstanding_ss_index = ctx->ss_index;
339 } else if (ctx->ss_delay > 0) {
340 ctx->ss_delay -= MIN2(cycles, ctx->ss_delay);
349 ctx->sy_delay = soft_sy_delay(instr, ctx->block->shader);
350 assert(ctx->remaining_tex > 0);
351 ctx->remaining_tex--;
352 n->sy_index = ctx->sy_index++;
354 sched_check_src_cond(instr, is_outstanding_sy, ctx)) {
355 ctx->sy_delay = 0;
356 ctx->first_outstanding_sy_index = ctx->sy_index;
357 } else if (ctx->sy_delay > 0) {
358 ctx->sy_delay -= MIN2(cycles, ctx->sy_delay);
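Lines 262-358 are schedule() itself: it claims the special registers the instruction writes (271-282), retires kill bookkeeping (293-294), advances the cycle counter past the node's earliest legal slot (319), propagates that to dependents (325), prunes the DAG head (328), and then updates the soft-latency model. The (ss) half of that model (333-340) works as below; the (sy) half (349-358) is symmetric apart from the extra remaining_tex counter. Classification inputs are passed in as booleans so the sketch stays self-contained:

#include <stdbool.h>

#define MIN2(a, b) ((a) < (b) ? (a) : (b))

struct dctx {
   unsigned ss_delay;                   /* cycles left in the soft window */
   unsigned ss_index;                   /* next producer stamp */
   unsigned first_outstanding_ss_index; /* watermark, see above */
};

static void
account_ss(struct dctx *ctx, bool is_producer, bool reads_outstanding,
           unsigned soft_delay, unsigned cycles, unsigned *stamp_out)
{
   if (is_producer) {
      ctx->ss_delay = soft_delay;       /* open the window, cf. line 333 */
      *stamp_out = ctx->ss_index++;     /* cf. line 334 */
   } else if (reads_outstanding) {
      /* Consuming an unsynced result forces an (ss) sync, which covers
       * every producer issued so far. cf. lines 336-338. */
      ctx->ss_delay = 0;
      ctx->first_outstanding_ss_index = ctx->ss_index;
   } else if (ctx->ss_delay > 0) {
      /* Independent work hides part of the latency. cf. lines 339-340. */
      ctx->ss_delay -= MIN2(cycles, ctx->ss_delay);
   }
}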
375 should_skip(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
377 if (ctx->remaining_kills && (is_tex(instr) || is_mem(instr))) {
394 could_sched(struct ir3_sched_ctx *ctx,
409 return !should_skip(ctx, instr);
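should_skip (375-377) holds texture/memory instructions back while any kill remains unscheduled, presumably so discards land before expensive fetches are issued; could_sched (394-409) folds that into the general readiness test at line 409. The kill gate reduces to:

#include <stdbool.h>

/* cf. line 377: defer tex/mem while kills are pending. */
static bool
skip_for_pending_kills(unsigned remaining_kills, bool is_tex_or_mem)
{
   return remaining_kills > 0 && is_tex_or_mem;
}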
416 check_instr(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
421 if (instr == ctx->split) {
428 if (should_skip(ctx, instr))
447 ready = could_sched(ctx, indirect, instr);
464 ready = could_sched(ctx, indirect, instr);
476 if (writes_addr0(instr) && ctx->addr0) {
477 assert(ctx->addr0 != instr);
482 if (writes_addr1(instr) && ctx->addr1) {
483 assert(ctx->addr1 != instr);
488 if (writes_pred(instr) && ctx->pred) {
489 assert(ctx->pred != instr);
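check_instr (416-489) is the legality filter: it rejects the just-split instruction (421), applies should_skip (428), verifies indirect sources are themselves schedulable (447, 464), and refuses a new a0.x/a1.x/p0.x writer while an earlier writer is still live (476-489). The conflict flags it records are what later drive the split path (1256-1261). Sketch of the special-register part; the flag names mirror what those later split calls imply:

#include <stdbool.h>

struct conflict_notes { bool addr0_conflict, addr1_conflict, pred_conflict; };

/* cf. lines 476-489: a second live writer of a special register is
 * illegal; record which one so sched_block can split it later. */
static bool
special_regs_legal(bool writes_a0, bool a0_live,
                   bool writes_a1, bool a1_live,
                   bool writes_p0, bool p0_live,
                   struct conflict_notes *notes)
{
   if (writes_a0 && a0_live) { notes->addr0_conflict = true; return false; }
   if (writes_a1 && a1_live) { notes->addr1_conflict = true; return false; }
   if (writes_p0 && p0_live) { notes->pred_conflict  = true; return false; }
   return true;
}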
615 should_defer(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
617 if (ctx->ss_delay) {
618 if (sched_check_src_cond(instr, is_outstanding_ss, ctx))
627 if (ctx->sy_delay && ctx->remaining_tex) {
628 if (sched_check_src_cond(instr, is_outstanding_sy, ctx))
638 if (ctx->sy_index - ctx->first_outstanding_sy_index >= 8 && is_sy_producer(instr))
641 if (ctx->ss_index - ctx->first_outstanding_ss_index >= 8 && is_ss_producer(instr))
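should_defer (615-641) keeps the soft-latency windows useful: an instruction that reads a still-outstanding (ss) or (sy) result is deferred while the corresponding window is open, so independent work can fill it, and producers themselves are deferred once eight are already in flight (the outstanding count being *_index minus first_outstanding_*_index, cf. lines 638 and 641). As a pure function of precomputed inputs:

#include <stdbool.h>

static bool
defer(bool ss_open, bool reads_outstanding_ss,
      bool sy_open_with_tex_left, bool reads_outstanding_sy,
      unsigned outstanding_ss, bool produces_ss,
      unsigned outstanding_sy, bool produces_sy)
{
   if (ss_open && reads_outstanding_ss)
      return true;                 /* would stall on (ss), cf. 617-618 */
   if (sy_open_with_tex_left && reads_outstanding_sy)
      return true;                 /* would stall on (sy), cf. 627-628 */
   if (outstanding_sy >= 8 && produces_sy)
      return true;                 /* cap in-flight (sy) producers, cf. 638 */
   if (outstanding_ss >= 8 && produces_ss)
      return true;                 /* cap in-flight (ss) producers, cf. 641 */
   return false;
}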
647 static struct ir3_sched_node *choose_instr_inc(struct ir3_sched_ctx *ctx,
676 node_delay(struct ir3_sched_ctx *ctx, struct ir3_sched_node *n)
678 return MAX2(n->earliest_ip, ctx->ip) - ctx->ip;
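node_delay (676-678) turns a node's earliest legal issue point into a stall count relative to the current ip: with ctx->ip = 10 and earliest_ip = 13 the node costs 3 stall cycles; anything with earliest_ip <= ip is free. Equivalently:

/* MAX2(n->earliest_ip, ctx->ip) - ctx->ip, cf. line 678 */
static unsigned
node_delay(unsigned earliest_ip, unsigned ip)
{
   return earliest_ip > ip ? earliest_ip - ip : 0;
}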
689 choose_instr_dec(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
696 foreach_sched_node (n, &ctx->dag->heads) {
697 if (defer && should_defer(ctx, n->instr))
700 unsigned d = node_delay(ctx, n);
706 if (!check_instr(ctx, notes, n->instr))
747 return choose_instr_inc(ctx, notes, defer, true);
773 choose_instr_inc(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
788 foreach_sched_node (n, &ctx->dag->heads) {
792 if (defer && should_defer(ctx, n->instr))
795 if (!check_instr(ctx, notes, n->instr))
798 unsigned d = node_delay(ctx, n);
828 choose_instr_prio(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes)
832 foreach_sched_node (n, &ctx->dag->heads) {
858 dump_state(struct ir3_sched_ctx *ctx)
863 foreach_sched_node (n, &ctx->dag->heads) {
865 live_effect(n->instr), node_delay(ctx, n));
877 choose_instr(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes)
881 dump_state(ctx);
883 chosen = choose_instr_prio(ctx, notes);
887 chosen = choose_instr_dec(ctx, notes, true);
891 chosen = choose_instr_dec(ctx, notes, false);
895 chosen = choose_instr_inc(ctx, notes, false, false);
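choose_instr (877-895) is a cascade of progressively weaker heuristics, with dump_state (858-865) printing each candidate's live_effect and node_delay for debugging: priority instructions first (883), then register-pressure-decreasing candidates while still deferring long-latency waits (887), then pressure-decreasing without deferring (891), and finally pressure-increasing candidates (895); choose_instr_dec itself falls back to choose_instr_inc at 747. The control flow, with one-line stubs standing in for the real choosers (the last parameter name is a guess):

#include <stdbool.h>
#include <stddef.h>

struct cctx;   /* stand-ins for ir3_sched_ctx / ir3_sched_notes / node */
struct cnotes;
struct cnode;

static struct cnode *prio(struct cctx *c, struct cnotes *n)            { (void)c; (void)n; return NULL; }
static struct cnode *dec(struct cctx *c, struct cnotes *n, bool defer) { (void)c; (void)n; (void)defer; return NULL; }
static struct cnode *inc(struct cctx *c, struct cnotes *n, bool defer,
                         bool avoid_out)                               { (void)c; (void)n; (void)defer; (void)avoid_out; return NULL; }

static struct cnode *
choose(struct cctx *c, struct cnotes *n)
{
   struct cnode *p;
   if ((p = prio(c, n)))       return p;  /* must-run-early instrs, cf. 883 */
   if ((p = dec(c, n, true)))  return p;  /* shrink pressure, defer waits, cf. 887 */
   if ((p = dec(c, n, false))) return p;  /* shrink pressure, eat stalls, cf. 891 */
   return inc(c, n, false, false);        /* last resort: grow pressure, cf. 895 */
}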
903 split_instr(struct ir3_sched_ctx *ctx, struct ir3_instruction *orig_instr)
907 sched_node_init(ctx, new_instr);
916 split_addr(struct ir3_sched_ctx *ctx, struct ir3_instruction **addr,
939 new_addr = split_instr(ctx, *addr);
963 split_pred(struct ir3_sched_ctx *ctx)
969 assert(ctx->pred);
971 ir = ctx->pred->block->shader;
989 if (ssa(predicated->srcs[0]) == ctx->pred) {
991 new_pred = split_instr(ctx, ctx->pred);
1004 if (ctx->block->condition == ctx->pred) {
1006 new_pred = split_instr(ctx, ctx->pred);
1010 ctx->block->condition = new_pred;
1015 ctx->pred = NULL;
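Lines 903-1015 are the deadlock breaker those conflict flags feed. split_instr clones the live special-register writer (registering the clone with the DAG via sched_node_init, 907); split_addr then repoints still-unscheduled users of the old a0/a1 value at the clone, and split_pred does the same for predicated instructions and the block condition (989-1010), after which the register is released (1015). The repointing idea in miniature, with stand-in types and malloc standing in for the real cloning:

#include <stdlib.h>

struct uinstr { struct uinstr *address; };  /* which a0 writer a use reads */

/* Stand-in for split_instr(): clone the live writer. */
static struct uinstr *
clone_writer(const struct uinstr *orig)
{
   struct uinstr *c = malloc(sizeof(*c));
   if (c) *c = *orig;
   return c;
}

/* cf. lines 916-951 (sketch): lazily clone, repoint pending users,
 * then release the register so a new writer becomes legal. */
static struct uinstr *
split_address_writer(struct uinstr **live_writer,
                     struct uinstr **users, unsigned num_users)
{
   struct uinstr *clone = NULL;
   for (unsigned i = 0; i < num_users; i++) {
      if (!users[i] || users[i]->address != *live_writer)
         continue;                 /* scheduled already, or other writer */
      if (!clone)
         clone = clone_writer(*live_writer);
      users[i]->address = clone;
   }
   *live_writer = NULL;
   return clone;
}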
1021 sched_node_init(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
1023 struct ir3_sched_node *n = rzalloc(ctx->dag, struct ir3_sched_node);
1025 dag_init_node(ctx->dag, &n->dag);
1157 sched_dag_init(struct ir3_sched_ctx *ctx)
1159 ctx->dag = dag_create(ctx);
1161 foreach_instr (instr, &ctx->unscheduled_list)
1162 sched_node_init(ctx, instr);
1164 foreach_instr (instr, &ctx->unscheduled_list)
1167 dag_traverse_bottom_up(ctx->dag, sched_dag_max_delay_cb, NULL);
1171 sched_dag_destroy(struct ir3_sched_ctx *ctx)
1173 ralloc_free(ctx->dag);
1174 ctx->dag = NULL;
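sched_dag_init (1157-1167) builds one node per unscheduled instruction and runs a bottom-up traversal (sched_dag_max_delay_cb, whose body is not in this listing) to compute each node's maximum accumulated delay to the end of the block, used as a scheduling priority. Note the ralloc ownership chain: the dag is owned by ctx (1159) and nodes by the dag (1023), so sched_dag_destroy frees everything with the single ralloc_free at 1173. A guessed-at formulation of the max-delay pass over an explicit bottom-up order:

struct dnode {
   unsigned delay;            /* this node's own result latency */
   unsigned max_delay;        /* computed: worst path to end of block */
   unsigned num_children;
   struct dnode **children;   /* consumers of this node's result */
};

/* Assumes `order` lists consumers before producers, mirroring
 * dag_traverse_bottom_up(); cf. line 1167. */
static void
compute_max_delay(struct dnode **order, unsigned count)
{
   for (unsigned i = 0; i < count; i++) {
      struct dnode *n = order[i];
      unsigned worst = 0;
      for (unsigned c = 0; c < n->num_children; c++)
         if (n->children[c]->max_delay > worst)
            worst = n->children[c]->max_delay;
      n->max_delay = worst + n->delay;
   }
}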
1178 sched_block(struct ir3_sched_ctx *ctx, struct ir3_block *block)
1180 ctx->block = block;
1183 ctx->addr0 = NULL;
1184 ctx->addr1 = NULL;
1185 ctx->pred = NULL;
1186 ctx->sy_delay = 0;
1187 ctx->ss_delay = 0;
1188 ctx->sy_index = ctx->first_outstanding_sy_index = 0;
1189 ctx->ss_index = ctx->first_outstanding_ss_index = 0;
1195 list_replace(&block->instr_list, &ctx->unscheduled_list);
1198 sched_dag_init(ctx);
1200 ctx->remaining_kills = 0;
1201 ctx->remaining_tex = 0;
1202 foreach_instr_safe (instr, &ctx->unscheduled_list) {
1204 ctx->remaining_kills++;
1206 ctx->remaining_tex++;
1220 foreach_instr_safe (instr, &ctx->unscheduled_list)
1222 schedule(ctx, instr);
1224 foreach_instr_safe (instr, &ctx->unscheduled_list)
1226 schedule(ctx, instr);
1228 while (!list_is_empty(&ctx->unscheduled_list)) {
1232 instr = choose_instr(ctx, &notes);
1234 unsigned delay = node_delay(ctx, instr->data);
1239 schedule(ctx, instr);
1244 ctx->split = NULL;
1256 split_addr(ctx, &ctx->addr0, ir->a0_users, ir->a0_users_count);
1259 split_addr(ctx, &ctx->addr1, ir->a1_users, ir->a1_users_count);
1261 new_instr = split_pred(ctx);
1264 foreach_instr (instr, &ctx->unscheduled_list)
1267 ctx->error = true;
1273 list_addtail(&new_instr->node, &ctx->unscheduled_list);
1279 ctx->split = new_instr;
1283 sched_dag_destroy(ctx);
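sched_block (1178-1283) is the per-block driver: reset the special-register and latency state (1183-1189), move the block's instructions onto unscheduled_list (1195), build the DAG, count kills and tex ops up front (1200-1206), front-load two eager passes over the list (1220-1226; the filters are outside this listing), then loop: ask choose_instr for a candidate and schedule it, or, when everything ready is blocked on a0/a1/p0, split the offending writer, requeue the clone (1273), remember it as ctx->split (1244, 1279) so check_instr won't immediately pick it (421), and retry; only when no split applies is ctx->error set (1267). Condensed shape of that loop, with one-line stubs for the helpers named in the listing:

#include <stdbool.h>
#include <stddef.h>

struct bctx   { bool error; };
struct bnotes { bool addr0_conflict, addr1_conflict, pred_conflict; };

static bool  work_left(struct bctx *c)                      { (void)c; return false; }
static void *pick(struct bctx *c, struct bnotes *n)         { (void)c; (void)n; return NULL; }  /* choose_instr */
static void  issue(struct bctx *c, void *i)                 { (void)c; (void)i; }               /* schedule */
static void *split_writer(struct bctx *c, struct bnotes *n) { (void)c; (void)n; return NULL; }  /* split_addr/split_pred,
                                                               dispatched on the notes flags, cf. 1256-1261 */
static void  requeue(struct bctx *c, void *i)               { (void)c; (void)i; }               /* list_addtail, cf. 1273 */

static void
sched_loop(struct bctx *ctx)
{
   while (work_left(ctx)) {
      struct bnotes notes = {0};
      void *instr = pick(ctx, &notes);
      if (instr) {
         issue(ctx, instr);             /* cf. line 1239 */
         continue;
      }
      void *clone = split_writer(ctx, &notes);
      if (!clone) {
         ctx->error = true;             /* cf. line 1267 */
         break;
      }
      requeue(ctx, clone);              /* retry with the clone queued */
   }
}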
1289 struct ir3_sched_ctx *ctx = rzalloc(NULL, struct ir3_sched_ctx);
1299 ir3_find_ssa_uses(ir, ctx, false);
1302 sched_block(ctx, block);
1305 int ret = ctx->error ? -1 : 0;
1307 ralloc_free(ctx);
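The entry point (1289-1307) allocates the context, computes SSA use information (1299), runs sched_block over each block (1302), and turns ctx->error into the pass's return code. Skeleton, with calloc/free standing in for ralloc:

#include <stdlib.h>

struct pctx { int error; };

static void sched_one_block(struct pctx *c) { (void)c; }   /* sched_block */

static int
run_pass(unsigned nblocks)
{
   struct pctx *ctx = calloc(1, sizeof(*ctx));   /* rzalloc in the original */
   if (!ctx)
      return -1;
   for (unsigned b = 0; b < nblocks; b++)
      sched_one_block(ctx);                      /* cf. line 1302 */
   int ret = ctx->error ? -1 : 0;                /* cf. line 1305 */
   free(ctx);                                    /* ralloc_free, cf. 1307 */
   return ret;
}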