Searched refs:ldunif (Results 1 - 13 of 13) sorted by relevance
/third_party/mesa3d/src/broadcom/compiler/
qpu_validate.c
  123  (inst->sig.ldunif || inst->sig.ldunifa)) {  in qpu_validate_inst()
  135  bool last_reads_ldunif = (state->last && (state->last->sig.ldunif ||  in qpu_validate_inst()
  139  bool reads_ldunif = inst->sig.ldunif || inst->sig.ldunifrf;  in qpu_validate_inst()
vir_to_qpu.c
  274  if (qinst->qpu.sig.ldunif || qinst->qpu.sig.ldunifa) {  in v3d_generate_code_block()
  282  if (qinst->qpu.sig.ldunif) {  in v3d_generate_code_block()
  283  qinst->qpu.sig.ldunif = false;  in v3d_generate_code_block()
  343  if (qpu.sig.ldunif ||  in reads_uniform()
vir_opt_small_immediates.c
  63  if (!src_def || !src_def->qpu.sig.ldunif)  in vir_opt_small_immediates()
vir_opt_constant_alu.c
  32   * additions that increment the unifa address by 4 for each leading ldunif
  35   * nop t1; ldunif (0x00000004 / 0.000000)
  36   * nop t2; ldunif (0x00000004 / 0.000000)
  41   * nop t1; ldunif (0x00000004 / 0.000000)
  42   * nop t2; ldunif (0x00000004 / 0.000000)
  43   * nop t4; ldunif (0x00000008 / 0.000000)
  136  if ((def->qpu.sig.ldunif || def->qpu.sig.ldunifrf) &&  in try_opt_constant_alu()
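
The vir_opt_constant_alu.c comment quoted above describes folding a chain of unifa-address increments, where each add is fed by its own ldunif of a small constant, into a single accumulated constant per step. A minimal sketch of that arithmetic in plain C, assuming no VIR types (the real pass rewrites VIR instructions, not integers):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    /* Constants loaded by the successive leading ldunifs (0x4 each). */
    const uint32_t ldunif_consts[] = { 0x4, 0x4, 0x4 };
    uint32_t folded = 0;

    for (unsigned i = 0; i < 3; i++) {
        folded += ldunif_consts[i];
        /* Instead of a chain of dependent "unifa += 0x4" adds, each step
         * can use one accumulated constant: 0x4, 0x8, 0xc. */
        printf("step %u: unifa = base + 0x%x\n", i, folded);
    }
    return 0;
}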
qpu_schedule.c
  437   /* inst->sig.ldunif or sideband uniform read */  in calculate_deps()
  1017  merge.sig.ldunif |= b->sig.ldunif;  in qpu_merge_inst()
  1050  return inst->sig.ldunif || inst->sig.ldunifrf;  in try_skip_for_ldvary_pipelining()
  1121  /* ldunif and ldvary both write r5, but ldunif does so a tick  in choose_instruction_to_schedule()
  1122   * sooner. If the ldvary's r5 wasn't used, then ldunif might  in choose_instruction_to_schedule()
  1123   * otherwise get scheduled so ldunif and ldvary try to update  in choose_instruction_to_schedule()
  1126  if ((inst->sig.ldunif || inst->sig.ldunifa) &&  in choose_instruction_to_schedule()
  1177  * have a ldunif o  in choose_instruction_to_schedule()
  [all...]
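
The choose_instruction_to_schedule() snippet above points at an r5 hazard: ldunif and ldvary both write r5, and ldunif's write lands a tick sooner, so a still-pending ldvary result could be clobbered. A hedged sketch of that check using plain bools (the parameter names are hypothetical, not the scheduler's real state):

#include <stdbool.h>

/* True if scheduling a uniform load now could stomp on an ldvary result
 * that has not been consumed from r5 yet. */
static bool
sketch_ldunif_clobbers_ldvary_r5(bool cand_ldunif, bool cand_ldunifa,
                                 bool prev_was_ldvary, bool ldvary_r5_pending)
{
    return (cand_ldunif || cand_ldunifa) &&
           prev_was_ldvary && ldvary_r5_pending;
}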
vir_dump.c
  240  if (sig->ldunif)  in vir_dump_sig()
  241  fprintf(stderr, "; ldunif");  in vir_dump_sig()
vir_register_allocate.c
  132   return def && def->qpu.sig.ldunif;  in vir_is_mov_uniform()
  412   * ldunif.spill_offset
  577   /* We must disable the ldunif optimization if we are spilling uniforms */  in v3d_spill_reg()
  1034  /* Only a ldunif gets to write to R5, which only has a  in update_graph_and_reg_classes_for_inst()
  1038  * shader-db it is best to keep r5 exclusive to ldunif, probably  in update_graph_and_reg_classes_for_inst()
  1039  * because ldunif has usually a shorter lifespan, allowing for  in update_graph_and_reg_classes_for_inst()
  1042  if (!inst->qpu.sig.ldunif) {  in update_graph_and_reg_classes_for_inst()
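
The update_graph_and_reg_classes_for_inst() comment above explains why r5 is kept exclusive to ldunif results: they usually have short live ranges, so reserving the accumulator for them works out better in shader-db. A sketch of that class decision with made-up names, not the Mesa register-allocator interface:

#include <stdbool.h>

/* Hypothetical classes; the real allocator assigns interference-graph
 * node classes rather than returning an enum. */
enum sketch_reg_class {
    SKETCH_CLASS_PHYS,         /* register file only */
    SKETCH_CLASS_PHYS_OR_R5,   /* register file or the r5 accumulator */
};

static enum sketch_reg_class
sketch_class_for_def(bool sig_ldunif)
{
    /* Keep r5 for ldunif results; everything else stays in the file. */
    return sig_ldunif ? SKETCH_CLASS_PHYS_OR_R5 : SKETCH_CLASS_PHYS;
}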
vir.c
  88    /* ldunifa works like ldunif: it reads an element and advances the  in vir_has_side_effects()
  89     * pointer, so each read has a side effect (we don't care for ldunif  in vir_has_side_effects()
  2008  /* Looks back into the current block to find the ldunif that wrote the uniform
  2010   * destination register of the ldunif instruction to 'unif'.
  2041  if ((inst->qpu.sig.ldunif || inst->qpu.sig.ldunifrf) &&  in try_opt_ldunif()
  2076  * result of the last ldunif that loaded it.  in vir_uniform()
  2086  inst->qpu.sig.ldunif = true;  in vir_uniform()
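
The vir.c hits above (try_opt_ldunif() and vir_uniform()) describe reusing the result of the last ldunif that loaded a given uniform instead of emitting a new load. A hedged sketch of that backward scan with hypothetical stand-in types (VIR blocks are list-based, and the real pass has additional conditions omitted here):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct sketch_inst {
    struct sketch_inst *prev;   /* previous instruction in the block */
    bool sig_ldunif;            /* ldunif or ldunifrf signal was set */
    uint32_t uniform_index;     /* which uniform slot it loaded */
    int dst_temp;               /* temp that received the value */
};

/* Walk back through the block for an earlier ldunif of the same uniform;
 * return its destination temp for reuse, or -1 if none is found. */
static int
sketch_find_prior_ldunif(struct sketch_inst *last, uint32_t uniform_index)
{
    for (struct sketch_inst *inst = last; inst != NULL; inst = inst->prev) {
        if (inst->sig_ldunif && inst->uniform_index == uniform_index)
            return inst->dst_temp;
    }
    return -1;
}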
nir_to_vir.c
  733   return (sig->ldunif ||  in is_ld_signal()
  747   return sig->ldunif || sig->ldunifrf;  in is_ldunif_signal()
  2695  /* Even though ldunif is strictly 32-bit we can still use it  in try_emit_uniform()
  2697   * is 32-bit aligned. In this case, ldunif would still load  in try_emit_uniform()
  2729  /* Try to emit ldunif if possible, otherwise fallback to general TMU */  in ntq_emit_load_uniform()
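
The try_emit_uniform() comment above notes that ldunif always loads a full 32-bit word, yet a 16-bit uniform can still be served from it when the offset is 32-bit aligned. A small host-side sketch of extracting the value, assuming it sits in the low half of the word (that placement is an assumption here, not taken from the source):

#include <stdint.h>

static uint16_t
sketch_extract_16bit_uniform(uint32_t ldunif_word)
{
    /* Assumed layout: a 32-bit-aligned 16-bit value in the low half of
     * the loaded word. */
    return (uint16_t)(ldunif_word & 0xffff);
}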
/third_party/mesa3d/src/broadcom/qpu/
qpu_disasm.c
  212  !sig->ldunif &&  in v3d_qpu_disasm_sig()
  242  if (sig->ldunif)  in v3d_qpu_disasm_sig()
  243  append(disasm, "; ldunif");  in v3d_qpu_disasm_sig()
qpu_instr.h
  44  bool ldunif:1;  member
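
qpu_instr.h above declares ldunif as a one-bit signal member. A minimal sketch of such a signal bitfield plus a uniform-load test; the ldunifrf and ldunifa fields are inferred from the other hits in this search, and the real struct carries many more signals:

#include <stdbool.h>

struct sketch_qpu_sig {
    bool ldunif:1;    /* load the next uniform (counts as an r5 write,
                       * per v3d_qpu_writes_r5() below) */
    bool ldunifrf:1;  /* load a uniform into a register-file location */
    bool ldunifa:1;   /* load a uniform through the unifa sideband address */
};

static bool
sketch_sig_loads_uniform(const struct sketch_qpu_sig *sig)
{
    return sig->ldunif || sig->ldunifrf || sig->ldunifa;
}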
qpu_instr.c
  868  return inst->sig.ldvary || inst->sig.ldunif || inst->sig.ldunifa;  in v3d_qpu_writes_r5()
qpu_pack.c
  108  #define LDUNIF .ldunif = true
Completed in 20 milliseconds