Lines Matching refs:abld
172 const fs_builder abld = v->bld.annotate("gl_InvocationID", NULL);
174 fs_reg iid = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
175 abld.SHR(iid, g1, brw_imm_ud(27u));
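The shift by 27 at line 175 pulls the invocation ID out of the top five bits of the g1 payload DWord. A minimal standalone sketch of that arithmetic; the payload layout is an assumption, not something visible in this listing:

    // Sketch of lines 172-175: gl_InvocationID is assumed to occupy the top
    // 5 bits of a 32-bit payload DWord, so SHR by 27 isolates it.
    #include <cstdint>
    #include <cstdio>

    static uint32_t invocation_id_from_payload(uint32_t g1_dword) {
        return g1_dword >> 27;   // mirrors abld.SHR(iid, g1, brw_imm_ud(27u))
    }

    int main() {
        uint32_t payload = 5u << 27;   // hypothetical payload carrying ID = 5
        printf("%u\n", (unsigned) invocation_id_from_payload(payload));   // 5
    }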
214 const fs_builder abld =
228 fs_reg shifted = abld.vgrf(BRW_REGISTER_TYPE_UW, 1);
231 const fs_builder hbld = abld.group(MIN2(16, v->dispatch_width), i);
249 inverted = abld.vgrf(BRW_REGISTER_TYPE_UW);
250 abld.NOT(inverted, shifted);
256 fs_reg anded = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
257 abld.AND(anded, inverted, brw_imm_uw(1));
259 fs_reg dst = abld.vgrf(BRW_REGISTER_TYPE_D, 1);
260 abld.MOV(dst, negate(retype(anded, BRW_REGISTER_TYPE_D)));
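Lines 228-260 form a NOT / AND 1 / negate sequence that converts one bit of the shifted word into 0 or -1 (all ones). A plain-C++ model of just that bit manipulation:

    // Sketch of lines 228-260: invert a shifted 16-bit value, keep the low
    // bit, and negate it as a signed dword, yielding 0 or -1 (~0).
    #include <cstdint>
    #include <cstdio>

    static int32_t bit_to_bool(uint16_t shifted) {
        uint16_t inverted = static_cast<uint16_t>(~shifted);  // abld.NOT
        uint32_t anded    = inverted & 1u;                    // abld.AND(..., brw_imm_uw(1))
        return -static_cast<int32_t>(anded);                  // negate(retype(..., D))
    }

    int main() {
        printf("%d %d\n", bit_to_bool(0x0001), bit_to_bool(0x0000));   // 0 -1
    }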
291 const fs_builder abld = bld.annotate("gl_SubgroupInvocation", NULL);
293 reg = abld.vgrf(BRW_REGISTER_TYPE_UW);
295 const fs_builder allbld8 = abld.group(8, 0).exec_all();
300 const fs_builder allbld16 = abld.group(16, 0).exec_all();
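Lines 291-300 build gl_SubgroupInvocation by filling a UW register with per-lane indices using 8- and 16-wide exec_all groups. A standalone sketch of the value being produced; the chunking below is only illustrative, not the exact MOV/ADD sequence:

    // Sketch of lines 291-300: each lane of a SIMD32 (assumed) dispatch
    // receives its own index 0..31, written here in 16-wide chunks.
    #include <cstdint>
    #include <cstdio>

    int main() {
        const unsigned dispatch_width = 32;   // hypothetical SIMD32 shader
        uint16_t lane_id[32];
        for (unsigned base = 0; base < dispatch_width; base += 16)       // one group per chunk
            for (unsigned i = 0; i < 16 && base + i < dispatch_width; i++)
                lane_id[base + i] = static_cast<uint16_t>(base + i);
        printf("%u %u\n", (unsigned) lane_id[0], (unsigned) lane_id[31]);   // 0 31
    }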
422 const fs_builder abld = bld.annotate(NULL, instr);
426 nir_emit_alu(abld, nir_instr_as_alu(instr), true);
436 nir_emit_vs_intrinsic(abld, nir_instr_as_intrinsic(instr));
439 nir_emit_tcs_intrinsic(abld, nir_instr_as_intrinsic(instr));
442 nir_emit_tes_intrinsic(abld, nir_instr_as_intrinsic(instr));
445 nir_emit_gs_intrinsic(abld, nir_instr_as_intrinsic(instr));
448 nir_emit_fs_intrinsic(abld, nir_instr_as_intrinsic(instr));
452 nir_emit_cs_intrinsic(abld, nir_instr_as_intrinsic(instr));
460 nir_emit_bs_intrinsic(abld, nir_instr_as_intrinsic(instr));
463 nir_emit_task_intrinsic(abld, nir_instr_as_intrinsic(instr));
466 nir_emit_mesh_intrinsic(abld, nir_instr_as_intrinsic(instr));
474 nir_emit_texture(abld, nir_instr_as_tex(instr));
478 nir_emit_load_const(abld, nir_instr_as_load_const(instr));
489 nir_emit_jump(abld, nir_instr_as_jump(instr));
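Lines 422-489 show the per-instruction dispatch pattern: the builder is annotated with the instruction once at line 422, and the same abld is then handed to a type- and stage-specific emitter. A schematic of that shape, using placeholder enum and handler names rather than Mesa types:

    // Sketch of the dispatch at lines 422-489: annotate once, then fan out
    // on the instruction type; names below are placeholders, not Mesa API.
    #include <cstdio>

    enum class instr_type { alu, intrinsic, tex, load_const, jump };

    static void emit_instr(instr_type t) {
        switch (t) {
        case instr_type::alu:        printf("nir_emit_alu\n");         break;
        case instr_type::intrinsic:  printf("nir_emit_*_intrinsic\n"); break;  // per-stage handler
        case instr_type::tex:        printf("nir_emit_texture\n");     break;
        case instr_type::load_const: printf("nir_emit_load_const\n");  break;
        case instr_type::jump:       printf("nir_emit_jump\n");        break;
        }
    }

    int main() { emit_instr(instr_type::tex); }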
2247 const fs_builder abld = bld.annotate("end primitive");
2251 abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
2252 fs_reg mask = intexp2(abld, prev_count);
2258 abld.OR(this->control_data_bits, this->control_data_bits, mask);
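The "end primitive" block at lines 2247-2258 sets the control data bit for the vertex just emitted: the ADD with 0xffffffff is an unsigned subtract-by-1, and intexp2() reads as 1 << n. A standalone model of the bit update:

    // Sketch of lines 2247-2258: mark EndPrimitive for vertex (vertex_count - 1).
    #include <cstdint>
    #include <cstdio>

    static uint32_t end_primitive(uint32_t control_data_bits, uint32_t vertex_count) {
        uint32_t prev_count = vertex_count + 0xffffffffu;   // vertex_count - 1 (mod 2^32)
        uint32_t mask = 1u << (prev_count & 31u);           // intexp2(); & 31 only avoids C++ UB
        return control_data_bits | mask;                    // abld.OR(control_data_bits, ..., mask)
    }

    int main() {
        printf("0x%08x\n", (unsigned) end_primitive(0, 3));   // bit 2 set: 0x00000004
    }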
2269 const fs_builder abld = bld.annotate("emit control data bits");
2315 abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
2318 abld.SHR(dword_index, prev_count, brw_imm_ud(6u - log2_bits_per_vertex));
2324 abld.SHR(per_slot_offset, dword_index, brw_imm_ud(2u));
2355 abld.LOAD_PAYLOAD(srcs[URB_LOGICAL_SRC_DATA], sources, length, 0);
2357 fs_inst *inst = abld.emit(SHADER_OPCODE_URB_WRITE_LOGICAL, reg_undef,
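Lines 2315-2324 locate where a vertex's control data bits land before the URB write at 2355-2357. The shift by (6 - log2_bits_per_vertex) is taken here to mean (vertex index * bits per vertex) / 32, i.e. which DWord holds the bits, and dword_index >> 2 is read as a 4-DWord URB slot offset; both readings are assumptions about the surrounding code, not visible in this listing:

    // Sketch of the index math at lines 2315-2324, assuming 2 control data
    // bits per vertex (e.g. stream IDs) and 4 DWords per URB slot.
    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t bits_per_vertex = 2;
        uint32_t vertex_count    = 40;
        uint32_t prev_count      = vertex_count - 1;                    // the ADD with 0xffffffff
        uint32_t dword_index     = prev_count * bits_per_vertex / 32;   // SHR by (6 - log2_bits_per_vertex)
        uint32_t per_slot_offset = dword_index >> 2;                    // SHR by 2
        printf("dword %u, slot %u\n", (unsigned) dword_index, (unsigned) per_slot_offset);   // dword 2, slot 0
    }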
2391 const fs_builder abld = bld.annotate("set stream control data bits", NULL);
2395 abld.MOV(sid, brw_imm_ud(stream_id));
2399 abld.SHL(shift_count, vertex_count, brw_imm_ud(1u));
2407 abld.SHL(mask, sid, shift_count);
2408 abld.OR(this->control_data_bits, this->control_data_bits, mask);
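The "set stream control data bits" block at lines 2391-2408 ORs the stream ID into control_data_bits as a 2-bit field selected by vertex_count. The modulo-32 wrap below models the hardware SHL honoring only the low bits of its shift count; that behavior is an assumption here, not shown in the listing:

    // Sketch of lines 2391-2408: store stream_id in the 2-bit slot for this vertex.
    #include <cstdint>
    #include <cstdio>

    static uint32_t set_stream_bits(uint32_t control_data_bits,
                                    uint32_t stream_id, uint32_t vertex_count) {
        uint32_t shift_count = vertex_count << 1;          // abld.SHL(shift_count, vertex_count, 1u)
        uint32_t mask = stream_id << (shift_count % 32);   // abld.SHL(mask, sid, shift_count)
        return control_data_bits | mask;                   // abld.OR(...)
    }

    int main() {
        printf("0x%08x\n", (unsigned) set_stream_bits(0, 2, 3));   // stream 2 at bits [7:6]: 0x00000080
    }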
2443 const fs_builder abld =
2468 abld.AND(bld.null_reg_d(), vertex_count,
2472 abld.IF(BRW_PREDICATE_NORMAL);
2476 abld.CMP(bld.null_reg_d(), vertex_count, brw_imm_ud(0u),
2478 abld.IF(BRW_PREDICATE_NORMAL);
2480 abld.emit(BRW_OPCODE_ENDIF);
2489 inst = abld.MOV(this->control_data_bits, brw_imm_ud(0u));
2491 abld.emit(BRW_OPCODE_ENDIF);
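Lines 2443-2491 are the flush path: when the accumulated control data fills a DWord, write it out (the inner IF skips the write while vertex_count is still 0) and clear the accumulator. The structured control flow, assuming one control data bit per vertex so the AND mask in the truncated line 2468 works out to 31:

    // Sketch of lines 2443-2491: flush control_data_bits every 32 vertices.
    // flush_control_data_bits() stands in for the URB write at 2355-2357.
    #include <cstdint>
    #include <cstdio>

    static uint32_t control_data_bits = 0;

    static void flush_control_data_bits(uint32_t vertex_count) {
        printf("flush at vertex %u: 0x%08x\n", (unsigned) vertex_count, (unsigned) control_data_bits);
    }

    static void maybe_flush(uint32_t vertex_count) {
        if ((vertex_count & 31u) == 0) {        // AND with 31 + IF
            if (vertex_count != 0)              // CMP != 0 + IF ... ENDIF
                flush_control_data_bits(vertex_count);
            control_data_bits = 0;              // MOV(control_data_bits, 0u)
        }                                       // ENDIF
    }

    int main() {
        control_data_bits = 0xffffffffu;
        maybe_flush(32);   // writes out and clears
        maybe_flush(33);   // no-op
    }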