Lines Matching refs:dispatch_width
591 dispatch_width, stage_abbrev, msg);
624 if (dispatch_width > n) {
1118 int reg_width = dispatch_width / 8;
1363 for (unsigned i = 0; i < DIV_ROUND_UP(dispatch_width, 16); i++) {
1364 const fs_builder hbld = abld.group(MIN2(16, dispatch_width), i);
3421 if (dispatch_width >= 16)
3938 fs_reg imm(VGRF, alloc.allocate(dispatch_width / 8), inst->dst.type);
4372 * thread payload, \p bld is required to have a dispatch_width() not greater
4383 assert(bld.dispatch_width() <= 16);
4386 assert(v->devinfo->ver >= 6 && bld.dispatch_width() <= 16);
4432 bld.dispatch_width() == inst->exec_size);
4475 unsigned width = bld.dispatch_width();
5127 return !(is_periodic(inst->src[i], lbld.dispatch_width()) ||
5129 lbld.dispatch_width() <= inst->exec_size)) ||
5152 const fs_builder cbld = lbld.group(MIN2(lbld.dispatch_width(),
5161 } else if (is_periodic(inst->src[i], lbld.dispatch_width())) {
5162 /* The source is invariant for all dispatch_width-wide groups of the
5195 if (lbld.dispatch_width() > inst->exec_size)
5232 assert(lbld_before.dispatch_width() == lbld_after.dispatch_width());
5250 lbld_before.group(MIN2(lbld_before.dispatch_width(),
5259 lbld_after.group(MIN2(lbld_after.dispatch_width(),
5863 if (inst->exec_size != dispatch_width)
5874 const unsigned payload_width = MIN2(16, dispatch_width);
5875 assert(dispatch_width % payload_width == 0);
5881 for (unsigned j = 0; j < dispatch_width / payload_width; j++) {
5886 for (unsigned j = 0; j < dispatch_width / payload_width; j++) {
6054 stage_abbrev, dispatch_width, nir->info.name, iteration, pass_num); \
6068 stage_abbrev, dispatch_width, nir->info.name);
6259 inst->dst = fs_reg(VGRF, alloc.allocate(dispatch_width / 8),
6375 const brw_predicate pred = dispatch_width > 16 ? BRW_PREDICATE_ALIGN1_ANY32H :
6376 dispatch_width > 8 ? BRW_PREDICATE_ALIGN1_ANY16H :
6442 .exec_all().group(dispatch_width, 0);
6450 flag_mask(flag, dispatch_width / 8);
6889 assert(dispatch_width == 16);
6905 const unsigned lower_width = MIN2(dispatch_width, 16);
6906 for (unsigned i = 0; i < dispatch_width / lower_width; i++) {
7035 payload.num_regs = dispatch_width == 32 ? 4 : 3;
7082 payload.num_regs = dispatch_width == 32 ? 4 : 3;
7760 brw_nir_lower_simd(nir_shader *nir, unsigned dispatch_width)
7763 (void *)(uintptr_t)dispatch_width);
7800 const unsigned dispatch_width = 8u << simd;
7804 dispatch_width, true /* is_scalar */);
7806 NIR_PASS(_, shader, brw_nir_lower_simd, dispatch_width);
7816 &prog_data->base, shader, dispatch_width,
7839 dispatch_width, v[simd]->fail_msg);
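
Most of the matches above exercise a handful of recurring dispatch_width idioms: 8u << simd to turn a SIMD index into a dispatch width (line 7800), dispatch_width / 8 to size a 32-bit VGRF in GRF registers on hardware with 32-byte GRFs (lines 1118, 3938, 6259), and MIN2(16, dispatch_width) with dispatch_width / payload_width to split SIMD32 work into at-most-SIMD16 groups (lines 5874-5886, 6905-6906). Below is a minimal standalone sketch of that arithmetic, assuming 32-byte GRFs and 32-bit channels; the helper names are hypothetical and not part of the Mesa API, only the formulas mirror the listed lines.

/* Standalone sketch (not Mesa code) of the dispatch_width arithmetic
 * seen in the matches above. */
#include <assert.h>
#include <stdio.h>

#define MIN2(a, b) ((a) < (b) ? (a) : (b))

/* Pattern from line 7800: SIMD index 0/1/2 -> dispatch width 8/16/32. */
static unsigned
simd_to_dispatch_width(unsigned simd)
{
   return 8u << simd;
}

/* Pattern from lines 1118/3938: with 32-byte GRFs, a GRF holds eight
 * 32-bit channels, so a full-width 32-bit value spans
 * dispatch_width / 8 registers. */
static unsigned
reg_width_for(unsigned dispatch_width)
{
   return dispatch_width / 8;
}

int
main(void)
{
   for (unsigned simd = 0; simd < 3; simd++) {
      const unsigned dispatch_width = simd_to_dispatch_width(simd);

      /* Pattern from lines 5874-5886: messages are at most SIMD16, so
       * SIMD32 work is emitted as dispatch_width / payload_width
       * payload-sized groups. */
      const unsigned payload_width = MIN2(16, dispatch_width);
      assert(dispatch_width % payload_width == 0);

      printf("SIMD%u: %u GRF(s) per 32-bit vgrf, %u payload group(s)\n",
             dispatch_width, reg_width_for(dispatch_width),
             dispatch_width / payload_width);
   }
   return 0;
}

For SIMD8/16/32 this prints 1/2/4 GRFs per 32-bit vgrf and 1/1/2 payload groups, which is the same split the DIV_ROUND_UP(dispatch_width, 16) loops at lines 1363-1364 perform when lowering to SIMD16-wide builder groups.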