1/*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based on si_state.c
6 * Copyright © 2015 Advanced Micro Devices, Inc.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28/* command buffer handling for AMD GCN */
29
30#include "radv_cs.h"
31#include "radv_private.h"
32#include "radv_shader.h"
33#include "sid.h"
34
35static void
36si_write_harvested_raster_configs(struct radv_physical_device *physical_device,
37                                  struct radeon_cmdbuf *cs, unsigned raster_config,
38                                  unsigned raster_config_1)
39{
40   unsigned num_se = MAX2(physical_device->rad_info.max_se, 1);
41   unsigned raster_config_se[4];
42   unsigned se;
43
44   ac_get_harvested_configs(&physical_device->rad_info, raster_config, &raster_config_1,
45                            raster_config_se);
46
47   for (se = 0; se < num_se; se++) {
48      /* GRBM_GFX_INDEX has a different offset on GFX6 and GFX7+ */
49      if (physical_device->rad_info.gfx_level < GFX7)
50         radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,
51                               S_00802C_SE_INDEX(se) | S_00802C_SH_BROADCAST_WRITES(1) |
52                                  S_00802C_INSTANCE_BROADCAST_WRITES(1));
53      else
54         radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
55                                S_030800_SE_INDEX(se) | S_030800_SH_BROADCAST_WRITES(1) |
56                                   S_030800_INSTANCE_BROADCAST_WRITES(1));
57      radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG, raster_config_se[se]);
58   }
59
60   /* GRBM_GFX_INDEX has a different offset on GFX6 and GFX7+ */
61   if (physical_device->rad_info.gfx_level < GFX7)
62      radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,
63                            S_00802C_SE_BROADCAST_WRITES(1) | S_00802C_SH_BROADCAST_WRITES(1) |
64                               S_00802C_INSTANCE_BROADCAST_WRITES(1));
65   else
66      radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
67                             S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
68                                S_030800_INSTANCE_BROADCAST_WRITES(1));
69
70   if (physical_device->rad_info.gfx_level >= GFX7)
71      radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1);
72}
73
74void
75si_emit_compute(struct radv_device *device, struct radeon_cmdbuf *cs)
76{
77   const struct radeon_info *info = &device->physical_device->rad_info;
78
79   radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
80   radeon_emit(cs, 0);
81   radeon_emit(cs, 0);
82   radeon_emit(cs, 0);
83
84   radeon_set_sh_reg(cs, R_00B834_COMPUTE_PGM_HI,
85                     S_00B834_DATA(device->physical_device->rad_info.address32_hi >> 8));
86
87   radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
88   /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1,
89    * renamed COMPUTE_DESTINATION_EN_SEn on gfx10. */
90   radeon_emit(cs, S_00B858_SH0_CU_EN(info->spi_cu_en) | S_00B858_SH1_CU_EN(info->spi_cu_en));
91   radeon_emit(cs, S_00B858_SH0_CU_EN(info->spi_cu_en) | S_00B858_SH1_CU_EN(info->spi_cu_en));
92
93   if (device->physical_device->rad_info.gfx_level >= GFX7) {
94      /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
95      radeon_set_sh_reg_seq(cs, R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
96      radeon_emit(cs, S_00B858_SH0_CU_EN(info->spi_cu_en) | S_00B858_SH1_CU_EN(info->spi_cu_en));
97      radeon_emit(cs, S_00B858_SH0_CU_EN(info->spi_cu_en) | S_00B858_SH1_CU_EN(info->spi_cu_en));
98
99      if (device->border_color_data.bo) {
100         uint64_t bc_va = radv_buffer_get_va(device->border_color_data.bo);
101
102         radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
103         radeon_emit(cs, bc_va >> 8);
104         radeon_emit(cs, S_030E04_ADDRESS(bc_va >> 40));
105      }
106   }
107
108   if (device->physical_device->rad_info.gfx_level >= GFX9 &&
109       device->physical_device->rad_info.gfx_level < GFX11) {
110      radeon_set_uconfig_reg(cs, R_0301EC_CP_COHER_START_DELAY,
111                             device->physical_device->rad_info.gfx_level >= GFX10 ? 0x20 : 0);
112   }
113
114   if (device->physical_device->rad_info.gfx_level >= GFX10) {
115      radeon_set_sh_reg_seq(cs, R_00B890_COMPUTE_USER_ACCUM_0, 5);
116      radeon_emit(cs, 0); /* R_00B890_COMPUTE_USER_ACCUM_0 */
117      radeon_emit(cs, 0); /* R_00B894_COMPUTE_USER_ACCUM_1 */
118      radeon_emit(cs, 0); /* R_00B898_COMPUTE_USER_ACCUM_2 */
119      radeon_emit(cs, 0); /* R_00B89C_COMPUTE_USER_ACCUM_3 */
120      radeon_emit(cs, 0); /* R_00B8A0_COMPUTE_PGM_RSRC3 */
121   }
122
123   /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
124    * and is now per pipe, so it should be handled in the
125    * kernel if we want to use something other than the default value,
126    * which is now 0x22f.
127    */
128   if (device->physical_device->rad_info.gfx_level <= GFX6) {
129      /* XXX: This should be:
130       * (number of compute units) * 4 * (waves per simd) - 1 */
131
132      radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID, 0x190 /* Default value */);
133
134      if (device->border_color_data.bo) {
135         uint64_t bc_va = radv_buffer_get_va(device->border_color_data.bo);
136         radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR, bc_va >> 8);
137      }
138   }
139
140   if (device->tma_bo) {
141      uint64_t tba_va, tma_va;
142
143      assert(device->physical_device->rad_info.gfx_level == GFX8);
144
145      tba_va = radv_trap_handler_shader_get_va(device->trap_handler_shader);
146      tma_va = radv_buffer_get_va(device->tma_bo);
147
148      radeon_set_sh_reg_seq(cs, R_00B838_COMPUTE_TBA_LO, 4);
149      radeon_emit(cs, tba_va >> 8);
150      radeon_emit(cs, tba_va >> 40);
151      radeon_emit(cs, tma_va >> 8);
152      radeon_emit(cs, tma_va >> 40);
153   }
154
155   if (device->physical_device->rad_info.gfx_level >= GFX11) {
156      uint32_t spi_cu_en = device->physical_device->rad_info.spi_cu_en;
157
158      radeon_set_sh_reg_seq(cs, R_00B8AC_COMPUTE_STATIC_THREAD_MGMT_SE4, 4);
159      radeon_emit(cs, S_00B8AC_SA0_CU_EN(spi_cu_en) | S_00B8AC_SA1_CU_EN(spi_cu_en)); /* SE4 */
160      radeon_emit(cs, S_00B8AC_SA0_CU_EN(spi_cu_en) | S_00B8AC_SA1_CU_EN(spi_cu_en)); /* SE5 */
161      radeon_emit(cs, S_00B8AC_SA0_CU_EN(spi_cu_en) | S_00B8AC_SA1_CU_EN(spi_cu_en)); /* SE6 */
162      radeon_emit(cs, S_00B8AC_SA0_CU_EN(spi_cu_en) | S_00B8AC_SA1_CU_EN(spi_cu_en)); /* SE7 */
163
164      radeon_set_sh_reg(cs, R_00B8BC_COMPUTE_DISPATCH_INTERLEAVE, 64);
165   }
166}
167
168/* 12.4 fixed-point */
169static unsigned
170radv_pack_float_12p4(float x)
171{
172   return x <= 0 ? 0 : x >= 4096 ? 0xffff : x * 16;
173}
174
175static void
176si_set_raster_config(struct radv_physical_device *physical_device, struct radeon_cmdbuf *cs)
177{
178   unsigned num_rb = MIN2(physical_device->rad_info.max_render_backends, 16);
179   unsigned rb_mask = physical_device->rad_info.enabled_rb_mask;
180   unsigned raster_config, raster_config_1;
181
182   ac_get_raster_config(&physical_device->rad_info, &raster_config, &raster_config_1, NULL);
183
184   /* Always use the default config when all backends are enabled
185    * (or when we failed to determine the enabled backends).
186    */
187   if (!rb_mask || util_bitcount(rb_mask) >= num_rb) {
188      radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG, raster_config);
189      if (physical_device->rad_info.gfx_level >= GFX7)
190         radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1);
191   } else {
192      si_write_harvested_raster_configs(physical_device, cs, raster_config, raster_config_1);
193   }
194}
195
196void
197si_emit_graphics(struct radv_device *device, struct radeon_cmdbuf *cs)
198{
199   struct radv_physical_device *physical_device = device->physical_device;
200
201   bool has_clear_state = physical_device->rad_info.has_clear_state;
202   int i;
203
204   radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
205   radeon_emit(cs, CC0_UPDATE_LOAD_ENABLES(1));
206   radeon_emit(cs, CC1_UPDATE_SHADOW_ENABLES(1));
207
208   if (has_clear_state) {
209      radeon_emit(cs, PKT3(PKT3_CLEAR_STATE, 0, 0));
210      radeon_emit(cs, 0);
211   }
212
213   if (physical_device->rad_info.gfx_level <= GFX8)
214      si_set_raster_config(physical_device, cs);
215
216   radeon_set_context_reg(cs, R_028A18_VGT_HOS_MAX_TESS_LEVEL, fui(64));
217   if (!has_clear_state)
218      radeon_set_context_reg(cs, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0));
219
220   /* FIXME calculate these values somehow ??? */
221   if (physical_device->rad_info.gfx_level <= GFX8) {
222      radeon_set_context_reg(cs, R_028A54_VGT_GS_PER_ES, SI_GS_PER_ES);
223      radeon_set_context_reg(cs, R_028A58_VGT_ES_PER_GS, 0x40);
224   }
225
226   if (!has_clear_state) {
227      if (physical_device->rad_info.gfx_level < GFX11) {
228         radeon_set_context_reg(cs, R_028A5C_VGT_GS_PER_VS, 0x2);
229         radeon_set_context_reg(cs, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
230      }
231      radeon_set_context_reg(cs, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
232   }
233
234   if (physical_device->rad_info.gfx_level <= GFX9)
235      radeon_set_context_reg(cs, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 1);
236   if (!has_clear_state && physical_device->rad_info.gfx_level < GFX11)
237      radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, 0x0);
238   if (physical_device->rad_info.gfx_level < GFX7)
239      radeon_set_config_reg(cs, R_008A14_PA_CL_ENHANCE,
240                            S_008A14_NUM_CLIP_SEQ(3) | S_008A14_CLIP_VTX_REORDER_ENA(1));
241
242   if (!has_clear_state)
243      radeon_set_context_reg(cs, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0);
244
245   /* CLEAR_STATE doesn't clear these correctly on certain generations.
246    * I don't know why. Deduced by trial and error.
247    */
248   if (physical_device->rad_info.gfx_level <= GFX7 || !has_clear_state) {
249      radeon_set_context_reg(cs, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
250      radeon_set_context_reg(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL,
251                             S_028204_WINDOW_OFFSET_DISABLE(1));
252      radeon_set_context_reg(cs, R_028240_PA_SC_GENERIC_SCISSOR_TL,
253                             S_028240_WINDOW_OFFSET_DISABLE(1));
254      radeon_set_context_reg(
255         cs, R_028244_PA_SC_GENERIC_SCISSOR_BR,
256         S_028244_BR_X(MAX_FRAMEBUFFER_WIDTH) | S_028244_BR_Y(MAX_FRAMEBUFFER_HEIGHT));
257      radeon_set_context_reg(cs, R_028030_PA_SC_SCREEN_SCISSOR_TL, 0);
258      radeon_set_context_reg(
259         cs, R_028034_PA_SC_SCREEN_SCISSOR_BR,
260         S_028034_BR_X(MAX_FRAMEBUFFER_WIDTH) | S_028034_BR_Y(MAX_FRAMEBUFFER_HEIGHT));
261   }
262
263   if (!has_clear_state) {
264      for (i = 0; i < 16; i++) {
265         radeon_set_context_reg(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 + i * 8, 0);
266         radeon_set_context_reg(cs, R_0282D4_PA_SC_VPORT_ZMAX_0 + i * 8, fui(1.0));
267      }
268   }
269
270   if (!has_clear_state) {
271      radeon_set_context_reg(cs, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
272      radeon_set_context_reg(cs, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
273      /* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on GFX6 */
274      radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
275      radeon_set_context_reg(cs, R_028820_PA_CL_NANINF_CNTL, 0);
276      radeon_set_context_reg(cs, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0);
277      radeon_set_context_reg(cs, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0);
278      radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0x0);
279   }
280
281   radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE,
282                          S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
283                             S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE));
284
285   if (physical_device->rad_info.gfx_level >= GFX10) {
286      radeon_set_context_reg(cs, R_028A98_VGT_DRAW_PAYLOAD_CNTL, 0);
287      radeon_set_uconfig_reg(cs, R_030964_GE_MAX_VTX_INDX, ~0);
288      radeon_set_uconfig_reg(cs, R_030924_GE_MIN_VTX_INDX, 0);
289      radeon_set_uconfig_reg(cs, R_030928_GE_INDX_OFFSET, 0);
290      radeon_set_uconfig_reg(cs, R_03097C_GE_STEREO_CNTL, 0);
291      radeon_set_uconfig_reg(cs, R_030988_GE_USER_VGPR_EN, 0);
292
293      if (physical_device->rad_info.gfx_level < GFX11) {
294         radeon_set_context_reg(
295            cs, R_028038_DB_DFSM_CONTROL,
296            S_028038_PUNCHOUT_MODE(V_028038_FORCE_OFF) | S_028038_POPS_DRAIN_PS_ON_OVERLAP(1));
297      }
298   } else if (physical_device->rad_info.gfx_level == GFX9) {
299      radeon_set_uconfig_reg(cs, R_030920_VGT_MAX_VTX_INDX, ~0);
300      radeon_set_uconfig_reg(cs, R_030924_VGT_MIN_VTX_INDX, 0);
301      radeon_set_uconfig_reg(cs, R_030928_VGT_INDX_OFFSET, 0);
302
303      radeon_set_context_reg(cs, R_028060_DB_DFSM_CONTROL,
304                             S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF) |
305                             S_028060_POPS_DRAIN_PS_ON_OVERLAP(1));
306   } else {
307      /* These registers, when written, also overwrite the
308       * CLEAR_STATE context, so we can't rely on CLEAR_STATE setting
309       * them.  It would be an issue if there was another UMD
310       * changing them.
311       */
312      radeon_set_context_reg(cs, R_028400_VGT_MAX_VTX_INDX, ~0);
313      radeon_set_context_reg(cs, R_028404_VGT_MIN_VTX_INDX, 0);
314      radeon_set_context_reg(cs, R_028408_VGT_INDX_OFFSET, 0);
315   }
316
317   if (device->physical_device->rad_info.gfx_level >= GFX10) {
318      radeon_set_sh_reg(cs, R_00B524_SPI_SHADER_PGM_HI_LS,
319                        S_00B524_MEM_BASE(device->physical_device->rad_info.address32_hi >> 8));
320      radeon_set_sh_reg(cs, R_00B324_SPI_SHADER_PGM_HI_ES,
321                        S_00B324_MEM_BASE(device->physical_device->rad_info.address32_hi >> 8));
322   } else if (device->physical_device->rad_info.gfx_level == GFX9) {
323      radeon_set_sh_reg(cs, R_00B414_SPI_SHADER_PGM_HI_LS,
324                        S_00B414_MEM_BASE(device->physical_device->rad_info.address32_hi >> 8));
325      radeon_set_sh_reg(cs, R_00B214_SPI_SHADER_PGM_HI_ES,
326                        S_00B214_MEM_BASE(device->physical_device->rad_info.address32_hi >> 8));
327   } else {
328      radeon_set_sh_reg(cs, R_00B524_SPI_SHADER_PGM_HI_LS,
329                        S_00B524_MEM_BASE(device->physical_device->rad_info.address32_hi >> 8));
330      radeon_set_sh_reg(cs, R_00B324_SPI_SHADER_PGM_HI_ES,
331                        S_00B324_MEM_BASE(device->physical_device->rad_info.address32_hi >> 8));
332   }
333
334   if (device->physical_device->rad_info.gfx_level < GFX11)
335      radeon_set_sh_reg(cs, R_00B124_SPI_SHADER_PGM_HI_VS,
336                        S_00B124_MEM_BASE(device->physical_device->rad_info.address32_hi >> 8));
337
338   unsigned cu_mask_ps = 0xffffffff;
339
340   /* It's wasteful to enable all CUs for PS if shader arrays have a
341    * different number of CUs. The reason is that the hardware sends the
342    * same number of PS waves to each shader array, so the slowest shader
343    * array limits the performance.  Disable the extra CUs for PS in
344    * other shader arrays to save power and thus increase clocks for busy
345    * CUs. In the future, we might disable or enable this tweak only for
346    * certain apps.
347    */
348   if (physical_device->rad_info.gfx_level >= GFX10_3)
349      cu_mask_ps = u_bit_consecutive(0, physical_device->rad_info.min_good_cu_per_sa);
350
351   if (physical_device->rad_info.gfx_level >= GFX7) {
352      if (physical_device->rad_info.gfx_level >= GFX10 &&
353          physical_device->rad_info.gfx_level < GFX11) {
354         /* Logical CUs 16 - 31 */
355         ac_set_reg_cu_en(cs, R_00B404_SPI_SHADER_PGM_RSRC4_HS, S_00B404_CU_EN(0xffff),
356                          C_00B404_CU_EN, 16, &physical_device->rad_info,
357                          (void*)gfx10_set_sh_reg_idx3);
358         ac_set_reg_cu_en(cs, R_00B104_SPI_SHADER_PGM_RSRC4_VS, S_00B104_CU_EN(0xffff),
359                          C_00B104_CU_EN, 16, &physical_device->rad_info,
360                          (void*)gfx10_set_sh_reg_idx3);
361         ac_set_reg_cu_en(cs, R_00B004_SPI_SHADER_PGM_RSRC4_PS, S_00B004_CU_EN(cu_mask_ps >> 16),
362                          C_00B004_CU_EN, 16, &physical_device->rad_info,
363                          (void*)gfx10_set_sh_reg_idx3);
364      }
365
366      if (physical_device->rad_info.gfx_level >= GFX10) {
367         ac_set_reg_cu_en(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS,
368                          S_00B41C_CU_EN(0xffff) | S_00B41C_WAVE_LIMIT(0x3F),
369                          C_00B41C_CU_EN, 0, &physical_device->rad_info,
370                          (void*)gfx10_set_sh_reg_idx3);
371      } else if (physical_device->rad_info.gfx_level == GFX9) {
372         radeon_set_sh_reg_idx(physical_device, cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, 3,
373                               S_00B41C_CU_EN(0xffff) | S_00B41C_WAVE_LIMIT(0x3F));
374      } else {
375         radeon_set_sh_reg(cs, R_00B51C_SPI_SHADER_PGM_RSRC3_LS,
376                           S_00B51C_CU_EN(0xffff) | S_00B51C_WAVE_LIMIT(0x3F));
377         radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, S_00B41C_WAVE_LIMIT(0x3F));
378         radeon_set_sh_reg(cs, R_00B31C_SPI_SHADER_PGM_RSRC3_ES,
379                           S_00B31C_CU_EN(0xffff) | S_00B31C_WAVE_LIMIT(0x3F));
380         /* If this is 0, Bonaire can hang even if GS isn't being used.
381          * Other chips are unaffected. These are suboptimal values,
382          * but we don't use on-chip GS.
383          */
384         radeon_set_context_reg(cs, R_028A44_VGT_GS_ONCHIP_CNTL,
385                                S_028A44_ES_VERTS_PER_SUBGRP(64) | S_028A44_GS_PRIMS_PER_SUBGRP(4));
386      }
387
388      if (physical_device->rad_info.gfx_level >= GFX10) {
389         ac_set_reg_cu_en(cs, R_00B01C_SPI_SHADER_PGM_RSRC3_PS,
390                          S_00B01C_CU_EN(cu_mask_ps) | S_00B01C_WAVE_LIMIT(0x3F) |
391                          S_00B01C_LDS_GROUP_SIZE(physical_device->rad_info.gfx_level >= GFX11),
392                          C_00B01C_CU_EN, 0, &physical_device->rad_info,
393                          (void*)gfx10_set_sh_reg_idx3);
394      } else {
395         radeon_set_sh_reg_idx(physical_device, cs, R_00B01C_SPI_SHADER_PGM_RSRC3_PS, 3,
396                               S_00B01C_CU_EN(cu_mask_ps) | S_00B01C_WAVE_LIMIT(0x3F));
397      }
398   }
399
400   if (physical_device->rad_info.gfx_level >= GFX10) {
401      /* Break up a pixel wave if it contains deallocs for more than
402       * half the parameter cache.
403       *
404       * To avoid a deadlock where pixel waves aren't launched
405       * because they're waiting for more pixels while the frontend
406       * is stuck waiting for PC space, the maximum allowed value is
407       * the size of the PC minus the largest possible allocation for
408       * a single primitive shader subgroup.
409       */
410      uint32_t max_deallocs_in_wave = physical_device->rad_info.gfx_level >= GFX11 ? 16 : 512;
411      radeon_set_context_reg(cs, R_028C50_PA_SC_NGG_MODE_CNTL,
412                             S_028C50_MAX_DEALLOCS_IN_WAVE(max_deallocs_in_wave));
413
414      if (physical_device->rad_info.gfx_level < GFX11)
415         radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
416
417      /* Vulkan doesn't support user edge flags and it also doesn't
418       * need to prevent drawing lines on internal edges of
419       * decomposed primitives (such as quads) with polygon mode = lines.
420       */
421      unsigned vertex_reuse_depth = physical_device->rad_info.gfx_level >= GFX10_3 ? 30 : 0;
422      radeon_set_context_reg(cs, R_028838_PA_CL_NGG_CNTL,
423                             S_028838_INDEX_BUF_EDGE_FLAG_ENA(0) |
424                             S_028838_VERTEX_REUSE_DEPTH(vertex_reuse_depth));
425
426      /* Enable CMASK/FMASK/HTILE/DCC caching in L2 for small chips. */
427      unsigned meta_write_policy, meta_read_policy;
428      unsigned no_alloc = device->physical_device->rad_info.gfx_level >= GFX11
429                             ? V_02807C_CACHE_NOA_GFX11
430                             : V_02807C_CACHE_NOA_GFX10;
431
432      /* TODO: investigate whether LRU improves performance on other chips too */
433      if (physical_device->rad_info.max_render_backends <= 4) {
434         meta_write_policy = V_02807C_CACHE_LRU_WR; /* cache writes */
435         meta_read_policy = V_02807C_CACHE_LRU_RD;  /* cache reads */
436      } else {
437         meta_write_policy = V_02807C_CACHE_STREAM; /* write combine */
438         meta_read_policy = no_alloc;               /* don't cache reads */
439      }
440
441      radeon_set_context_reg(
442         cs, R_02807C_DB_RMI_L2_CACHE_CONTROL,
443         S_02807C_Z_WR_POLICY(V_02807C_CACHE_STREAM) | S_02807C_S_WR_POLICY(V_02807C_CACHE_STREAM) |
444            S_02807C_HTILE_WR_POLICY(meta_write_policy) |
445            S_02807C_ZPCPSD_WR_POLICY(V_02807C_CACHE_STREAM) | S_02807C_Z_RD_POLICY(no_alloc) |
446            S_02807C_S_RD_POLICY(no_alloc) | S_02807C_HTILE_RD_POLICY(meta_read_policy));
447
448      uint32_t gl2_cc;
449      if (device->physical_device->rad_info.gfx_level >= GFX11) {
450         gl2_cc = S_028410_DCC_WR_POLICY_GFX11(meta_write_policy) |
451                  S_028410_COLOR_WR_POLICY_GFX11(V_028410_CACHE_STREAM) |
452                  S_028410_COLOR_RD_POLICY(V_028410_CACHE_NOA_GFX11);
453      } else {
454         gl2_cc = S_028410_CMASK_WR_POLICY(meta_write_policy) |
455                  S_028410_FMASK_WR_POLICY(V_028410_CACHE_STREAM) |
456                  S_028410_DCC_WR_POLICY_GFX10(meta_write_policy) |
457                  S_028410_COLOR_WR_POLICY_GFX10(V_028410_CACHE_STREAM) |
458                  S_028410_CMASK_RD_POLICY(meta_read_policy) |
459                  S_028410_FMASK_RD_POLICY(V_028410_CACHE_NOA_GFX10) |
460                  S_028410_COLOR_RD_POLICY(V_028410_CACHE_NOA_GFX10);
461      }
462
463      radeon_set_context_reg(cs, R_028410_CB_RMI_GL2_CACHE_CONTROL,
464                             gl2_cc | S_028410_DCC_RD_POLICY(meta_read_policy));
465      radeon_set_context_reg(cs, R_028428_CB_COVERAGE_OUT_CONTROL, 0);
466
467      radeon_set_sh_reg_seq(cs, R_00B0C8_SPI_SHADER_USER_ACCUM_PS_0, 4);
468      radeon_emit(cs, 0); /* R_00B0C8_SPI_SHADER_USER_ACCUM_PS_0 */
469      radeon_emit(cs, 0); /* R_00B0CC_SPI_SHADER_USER_ACCUM_PS_1 */
470      radeon_emit(cs, 0); /* R_00B0D0_SPI_SHADER_USER_ACCUM_PS_2 */
471      radeon_emit(cs, 0); /* R_00B0D4_SPI_SHADER_USER_ACCUM_PS_3 */
472
473      if (physical_device->rad_info.gfx_level < GFX11) {
474         radeon_set_sh_reg_seq(cs, R_00B1C8_SPI_SHADER_USER_ACCUM_VS_0, 4);
475         radeon_emit(cs, 0); /* R_00B1C8_SPI_SHADER_USER_ACCUM_VS_0 */
476         radeon_emit(cs, 0); /* R_00B1CC_SPI_SHADER_USER_ACCUM_VS_1 */
477         radeon_emit(cs, 0); /* R_00B1D0_SPI_SHADER_USER_ACCUM_VS_2 */
478         radeon_emit(cs, 0); /* R_00B1D4_SPI_SHADER_USER_ACCUM_VS_3 */
479      }
480
481      radeon_set_sh_reg_seq(cs, R_00B2C8_SPI_SHADER_USER_ACCUM_ESGS_0, 4);
482      radeon_emit(cs, 0); /* R_00B2C8_SPI_SHADER_USER_ACCUM_ESGS_0 */
483      radeon_emit(cs, 0); /* R_00B2CC_SPI_SHADER_USER_ACCUM_ESGS_1 */
484      radeon_emit(cs, 0); /* R_00B2D0_SPI_SHADER_USER_ACCUM_ESGS_2 */
485      radeon_emit(cs, 0); /* R_00B2D4_SPI_SHADER_USER_ACCUM_ESGS_3 */
486      radeon_set_sh_reg_seq(cs, R_00B4C8_SPI_SHADER_USER_ACCUM_LSHS_0, 4);
487      radeon_emit(cs, 0); /* R_00B4C8_SPI_SHADER_USER_ACCUM_LSHS_0 */
488      radeon_emit(cs, 0); /* R_00B4CC_SPI_SHADER_USER_ACCUM_LSHS_1 */
489      radeon_emit(cs, 0); /* R_00B4D0_SPI_SHADER_USER_ACCUM_LSHS_2 */
490      radeon_emit(cs, 0); /* R_00B4D4_SPI_SHADER_USER_ACCUM_LSHS_3 */
491
492      radeon_set_sh_reg(cs, R_00B0C0_SPI_SHADER_REQ_CTRL_PS,
493                        S_00B0C0_SOFT_GROUPING_EN(1) | S_00B0C0_NUMBER_OF_REQUESTS_PER_CU(4 - 1));
494
495      if (physical_device->rad_info.gfx_level < GFX11)
496         radeon_set_sh_reg(cs, R_00B1C0_SPI_SHADER_REQ_CTRL_VS, 0);
497
498      if (physical_device->rad_info.gfx_level >= GFX10_3) {
499         radeon_set_context_reg(cs, R_028750_SX_PS_DOWNCONVERT_CONTROL, 0xff);
500         /* This allows sample shading. */
501         radeon_set_context_reg(
502            cs, R_028848_PA_CL_VRS_CNTL,
503            S_028848_SAMPLE_ITER_COMBINER_MODE(V_028848_VRS_COMB_MODE_OVERRIDE));
504      }
505   }
506
507   if (physical_device->rad_info.gfx_level >= GFX9) {
508      radeon_set_context_reg(cs, R_028B50_VGT_TESS_DISTRIBUTION,
509                             S_028B50_ACCUM_ISOLINE(40) | S_028B50_ACCUM_TRI(30) |
510                                S_028B50_ACCUM_QUAD(24) | S_028B50_DONUT_SPLIT_GFX9(24) |
511                                S_028B50_TRAP_SPLIT(6));
512   } else if (physical_device->rad_info.gfx_level >= GFX8) {
513      uint32_t vgt_tess_distribution;
514
515      vgt_tess_distribution = S_028B50_ACCUM_ISOLINE(32) | S_028B50_ACCUM_TRI(11) |
516                              S_028B50_ACCUM_QUAD(11) | S_028B50_DONUT_SPLIT_GFX81(16);
517
518      if (physical_device->rad_info.family == CHIP_FIJI ||
519          physical_device->rad_info.family >= CHIP_POLARIS10)
520         vgt_tess_distribution |= S_028B50_TRAP_SPLIT(3);
521
522      radeon_set_context_reg(cs, R_028B50_VGT_TESS_DISTRIBUTION, vgt_tess_distribution);
523   } else if (!has_clear_state) {
524      radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
525      radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 16);
526   }
527
528   if (device->border_color_data.bo) {
529      uint64_t border_color_va = radv_buffer_get_va(device->border_color_data.bo);
530
531      radeon_set_context_reg(cs, R_028080_TA_BC_BASE_ADDR, border_color_va >> 8);
532      if (physical_device->rad_info.gfx_level >= GFX7) {
533         radeon_set_context_reg(cs, R_028084_TA_BC_BASE_ADDR_HI,
534                                S_028084_ADDRESS(border_color_va >> 40));
535      }
536   }
537
538   if (physical_device->rad_info.gfx_level >= GFX9) {
539      radeon_set_context_reg(
540         cs, R_028C48_PA_SC_BINNER_CNTL_1,
541         S_028C48_MAX_ALLOC_COUNT(physical_device->rad_info.pbb_max_alloc_count - 1) |
542            S_028C48_MAX_PRIM_PER_BATCH(1023));
543      radeon_set_context_reg(cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
544                             S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1));
545      radeon_set_uconfig_reg(cs, R_030968_VGT_INSTANCE_BASE_ID, 0);
546   }
547
548   unsigned tmp = (unsigned)(1.0 * 8.0);
549   radeon_set_context_reg(cs, R_028A00_PA_SU_POINT_SIZE,
550                              S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
551   radeon_set_context_reg(cs, R_028A04_PA_SU_POINT_MINMAX,
552                              S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) |
553                              S_028A04_MAX_SIZE(radv_pack_float_12p4(8191.875 / 2)));
554
555   if (!has_clear_state) {
556      radeon_set_context_reg(cs, R_028004_DB_COUNT_CONTROL, S_028004_ZPASS_INCREMENT_DISABLE(1));
557   }
558
559   /* Enable the Polaris small primitive filter control.
560    * XXX: There is possibly an issue when MSAA is off (see RadeonSI
561    * has_msaa_sample_loc_bug). But this doesn't seem to regress anything,
562    * and AMDVLK doesn't have a workaround as well.
563    */
564   if (physical_device->rad_info.family >= CHIP_POLARIS10) {
565      unsigned small_prim_filter_cntl =
566         S_028830_SMALL_PRIM_FILTER_ENABLE(1) |
567         /* Workaround for a hw line bug. */
568         S_028830_LINE_FILTER_DISABLE(physical_device->rad_info.family <= CHIP_POLARIS12);
569
570      radeon_set_context_reg(cs, R_028830_PA_SU_SMALL_PRIM_FILTER_CNTL, small_prim_filter_cntl);
571   }
572
573   radeon_set_context_reg(
574      cs, R_0286D4_SPI_INTERP_CONTROL_0,
575      S_0286D4_FLAT_SHADE_ENA(1) | S_0286D4_PNT_SPRITE_ENA(1) |
576         S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
577         S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
578         S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
579         S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
580         S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */
581
582   radeon_set_context_reg(cs, R_028BE4_PA_SU_VTX_CNTL,
583                          S_028BE4_PIX_CENTER(1) | S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
584                             S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH));
585
586   radeon_set_context_reg(cs, R_028818_PA_CL_VTE_CNTL,
587                          S_028818_VTX_W0_FMT(1) | S_028818_VPORT_X_SCALE_ENA(1) |
588                             S_028818_VPORT_X_OFFSET_ENA(1) | S_028818_VPORT_Y_SCALE_ENA(1) |
589                             S_028818_VPORT_Y_OFFSET_ENA(1) | S_028818_VPORT_Z_SCALE_ENA(1) |
590                             S_028818_VPORT_Z_OFFSET_ENA(1));
591
592   if (device->tma_bo) {
593      uint64_t tba_va, tma_va;
594
595      assert(device->physical_device->rad_info.gfx_level == GFX8);
596
597      tba_va = radv_trap_handler_shader_get_va(device->trap_handler_shader);
598      tma_va = radv_buffer_get_va(device->tma_bo);
599
600      uint32_t regs[] = {R_00B000_SPI_SHADER_TBA_LO_PS, R_00B100_SPI_SHADER_TBA_LO_VS,
601                         R_00B200_SPI_SHADER_TBA_LO_GS, R_00B300_SPI_SHADER_TBA_LO_ES,
602                         R_00B400_SPI_SHADER_TBA_LO_HS, R_00B500_SPI_SHADER_TBA_LO_LS};
603
604      for (i = 0; i < ARRAY_SIZE(regs); ++i) {
605         radeon_set_sh_reg_seq(cs, regs[i], 4);
606         radeon_emit(cs, tba_va >> 8);
607         radeon_emit(cs, tba_va >> 40);
608         radeon_emit(cs, tma_va >> 8);
609         radeon_emit(cs, tma_va >> 40);
610      }
611   }
612
613   /* The DX10 diamond test is unnecessary with Vulkan and it decreases line rasterization
614    * performance.
615    */
616   radeon_set_context_reg(cs, R_028BDC_PA_SC_LINE_CNTL, 0);
617
618   if (physical_device->rad_info.gfx_level >= GFX11) {
619      radeon_set_context_reg(cs, R_028C54_PA_SC_BINNER_CNTL_2, 0);
620      radeon_set_context_reg(cs, R_028620_PA_RATE_CNTL,
621                                 S_028620_VERTEX_RATE(2) | S_028620_PRIM_RATE(1));
622
623      radeon_set_uconfig_reg(cs, R_031110_SPI_GS_THROTTLE_CNTL1, 0x12355123);
624      radeon_set_uconfig_reg(cs, R_031114_SPI_GS_THROTTLE_CNTL2, 0x1544D);
625   }
626
627   si_emit_compute(device, cs);
628}
629
630void
631cik_create_gfx_config(struct radv_device *device)
632{
633   struct radeon_cmdbuf *cs = device->ws->cs_create(device->ws, AMD_IP_GFX);
634   if (!cs)
635      return;
636
637   si_emit_graphics(device, cs);
638
639   while (cs->cdw & 7) {
640      if (device->physical_device->rad_info.gfx_ib_pad_with_type2)
641         radeon_emit(cs, PKT2_NOP_PAD);
642      else
643         radeon_emit(cs, PKT3_NOP_PAD);
644   }
645
646   VkResult result =
647      device->ws->buffer_create(device->ws, cs->cdw * 4, 4096, device->ws->cs_domain(device->ws),
648                                RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
649                                   RADEON_FLAG_READ_ONLY | RADEON_FLAG_GTT_WC,
650                                RADV_BO_PRIORITY_CS, 0, &device->gfx_init);
651   if (result != VK_SUCCESS)
652      goto fail;
653
654   void *map = device->ws->buffer_map(device->gfx_init);
655   if (!map) {
656      device->ws->buffer_destroy(device->ws, device->gfx_init);
657      device->gfx_init = NULL;
658      goto fail;
659   }
660   memcpy(map, cs->buf, cs->cdw * 4);
661
662   device->ws->buffer_unmap(device->gfx_init);
663   device->gfx_init_size_dw = cs->cdw;
664fail:
665   device->ws->cs_destroy(cs);
666}
667
668void
669radv_get_viewport_xform(const VkViewport *viewport, float scale[3], float translate[3])
670{
671   float x = viewport->x;
672   float y = viewport->y;
673   float half_width = 0.5f * viewport->width;
674   float half_height = 0.5f * viewport->height;
675   double n = viewport->minDepth;
676   double f = viewport->maxDepth;
677
678   scale[0] = half_width;
679   translate[0] = half_width + x;
680   scale[1] = half_height;
681   translate[1] = half_height + y;
682
683   scale[2] = (f - n);
684   translate[2] = n;
685}
686
687static VkRect2D
688si_scissor_from_viewport(const VkViewport *viewport)
689{
690   float scale[3], translate[3];
691   VkRect2D rect;
692
693   radv_get_viewport_xform(viewport, scale, translate);
694
695   rect.offset.x = translate[0] - fabsf(scale[0]);
696   rect.offset.y = translate[1] - fabsf(scale[1]);
697   rect.extent.width = ceilf(translate[0] + fabsf(scale[0])) - rect.offset.x;
698   rect.extent.height = ceilf(translate[1] + fabsf(scale[1])) - rect.offset.y;
699
700   return rect;
701}
702
703static VkRect2D
704si_intersect_scissor(const VkRect2D *a, const VkRect2D *b)
705{
706   VkRect2D ret;
707   ret.offset.x = MAX2(a->offset.x, b->offset.x);
708   ret.offset.y = MAX2(a->offset.y, b->offset.y);
709   ret.extent.width =
710      MIN2(a->offset.x + a->extent.width, b->offset.x + b->extent.width) - ret.offset.x;
711   ret.extent.height =
712      MIN2(a->offset.y + a->extent.height, b->offset.y + b->extent.height) - ret.offset.y;
713   return ret;
714}
715
716void
717si_write_scissors(struct radeon_cmdbuf *cs, int first, int count, const VkRect2D *scissors,
718                  const VkViewport *viewports, unsigned rast_prim, float line_width)
719{
720   int i;
721   float scale[3], translate[3], guardband_x = INFINITY, guardband_y = INFINITY;
722   float discard_x = 1.0f, discard_y = 1.0f;
723   const float max_range = 32767.0f;
724   if (!count)
725      return;
726
727   radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + first * 4 * 2, count * 2);
728   for (i = 0; i < count; i++) {
729      VkRect2D viewport_scissor = si_scissor_from_viewport(viewports + i);
730      VkRect2D scissor = si_intersect_scissor(&scissors[i], &viewport_scissor);
731
732      radv_get_viewport_xform(viewports + i, scale, translate);
733      scale[0] = fabsf(scale[0]);
734      scale[1] = fabsf(scale[1]);
735
736      if (scale[0] < 0.5)
737         scale[0] = 0.5;
738      if (scale[1] < 0.5)
739         scale[1] = 0.5;
740
741      guardband_x = MIN2(guardband_x, (max_range - fabsf(translate[0])) / scale[0]);
742      guardband_y = MIN2(guardband_y, (max_range - fabsf(translate[1])) / scale[1]);
743
744      radeon_emit(cs, S_028250_TL_X(scissor.offset.x) | S_028250_TL_Y(scissor.offset.y) |
745                         S_028250_WINDOW_OFFSET_DISABLE(1));
746      radeon_emit(cs, S_028254_BR_X(scissor.offset.x + scissor.extent.width) |
747                         S_028254_BR_Y(scissor.offset.y + scissor.extent.height));
748
749      if (radv_rast_prim_is_points_or_lines(rast_prim)) {
750         /* When rendering wide points or lines, we need to be more conservative about when to
751          * discard them entirely. */
752         float pixels;
753
754         if (rast_prim == V_028A6C_POINTLIST) {
755            pixels = 8191.875f;
756         } else {
757            pixels = line_width;
758         }
759
760         /* Add half the point size / line width. */
761         discard_x += pixels / (2.0 * scale[0]);
762         discard_y += pixels / (2.0 * scale[1]);
763
764         /* Discard primitives that would lie entirely outside the clip region. */
765         discard_x = MIN2(discard_x, guardband_x);
766         discard_y = MIN2(discard_y, guardband_y);
767      }
768   }
769
770   radeon_set_context_reg_seq(cs, R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, 4);
771   radeon_emit(cs, fui(guardband_y));
772   radeon_emit(cs, fui(discard_y));
773   radeon_emit(cs, fui(guardband_x));
774   radeon_emit(cs, fui(discard_x));
775}
776
777static inline unsigned
778radv_prims_for_vertices(struct radv_prim_vertex_count *info, unsigned num)
779{
780   if (num == 0)
781      return 0;
782
783   if (info->incr == 0)
784      return 0;
785
786   if (num < info->min)
787      return 0;
788
789   return 1 + ((num - info->min) / info->incr);
790}
791
792static const struct radv_prim_vertex_count prim_size_table[] = {
793   [V_008958_DI_PT_NONE] = {0, 0},          [V_008958_DI_PT_POINTLIST] = {1, 1},
794   [V_008958_DI_PT_LINELIST] = {2, 2},      [V_008958_DI_PT_LINESTRIP] = {2, 1},
795   [V_008958_DI_PT_TRILIST] = {3, 3},       [V_008958_DI_PT_TRIFAN] = {3, 1},
796   [V_008958_DI_PT_TRISTRIP] = {3, 1},      [V_008958_DI_PT_LINELIST_ADJ] = {4, 4},
797   [V_008958_DI_PT_LINESTRIP_ADJ] = {4, 1}, [V_008958_DI_PT_TRILIST_ADJ] = {6, 6},
798   [V_008958_DI_PT_TRISTRIP_ADJ] = {6, 2},  [V_008958_DI_PT_RECTLIST] = {3, 3},
799   [V_008958_DI_PT_LINELOOP] = {2, 1},      [V_008958_DI_PT_POLYGON] = {3, 1},
800   [V_008958_DI_PT_2D_TRI_STRIP] = {0, 0},
801};
802
803uint32_t
804si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer, bool instanced_draw,
805                          bool indirect_draw, bool count_from_stream_output,
806                          uint32_t draw_vertex_count, unsigned topology, bool prim_restart_enable)
807{
808   enum amd_gfx_level gfx_level = cmd_buffer->device->physical_device->rad_info.gfx_level;
809   enum radeon_family family = cmd_buffer->device->physical_device->rad_info.family;
810   struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
811   const unsigned max_primgroup_in_wave = 2;
812   /* SWITCH_ON_EOP(0) is always preferable. */
813   bool wd_switch_on_eop = false;
814   bool ia_switch_on_eop = false;
815   bool ia_switch_on_eoi = false;
816   bool partial_vs_wave = false;
817   bool partial_es_wave = cmd_buffer->state.graphics_pipeline->ia_multi_vgt_param.partial_es_wave;
818   bool multi_instances_smaller_than_primgroup;
819   struct radv_prim_vertex_count prim_vertex_count = prim_size_table[topology];
820
821   if (radv_pipeline_has_stage(cmd_buffer->state.graphics_pipeline, MESA_SHADER_TESS_CTRL)) {
822      if (topology == V_008958_DI_PT_PATCH) {
823         prim_vertex_count.min = cmd_buffer->state.graphics_pipeline->tess_patch_control_points;
824         prim_vertex_count.incr = 1;
825      }
826   }
827
828   multi_instances_smaller_than_primgroup = indirect_draw;
829   if (!multi_instances_smaller_than_primgroup && instanced_draw) {
830      uint32_t num_prims = radv_prims_for_vertices(&prim_vertex_count, draw_vertex_count);
831      if (num_prims < cmd_buffer->state.graphics_pipeline->ia_multi_vgt_param.primgroup_size)
832         multi_instances_smaller_than_primgroup = true;
833   }
834
835   ia_switch_on_eoi = cmd_buffer->state.graphics_pipeline->ia_multi_vgt_param.ia_switch_on_eoi;
836   partial_vs_wave = cmd_buffer->state.graphics_pipeline->ia_multi_vgt_param.partial_vs_wave;
837
838   if (gfx_level >= GFX7) {
839      /* WD_SWITCH_ON_EOP has no effect on GPUs with less than
840       * 4 shader engines. Set 1 to pass the assertion below.
841       * The other cases are hardware requirements. */
842      if (cmd_buffer->device->physical_device->rad_info.max_se < 4 ||
843          topology == V_008958_DI_PT_POLYGON || topology == V_008958_DI_PT_LINELOOP ||
844          topology == V_008958_DI_PT_TRIFAN || topology == V_008958_DI_PT_TRISTRIP_ADJ ||
845          (prim_restart_enable &&
846           (cmd_buffer->device->physical_device->rad_info.family < CHIP_POLARIS10 ||
847            (topology != V_008958_DI_PT_POINTLIST && topology != V_008958_DI_PT_LINESTRIP))))
848         wd_switch_on_eop = true;
849
850      /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
851       * We don't know that for indirect drawing, so treat it as
852       * always problematic. */
853      if (family == CHIP_HAWAII && (instanced_draw || indirect_draw))
854         wd_switch_on_eop = true;
855
856      /* Performance recommendation for 4 SE Gfx7-8 parts if
857       * instances are smaller than a primgroup.
858       * Assume indirect draws always use small instances.
859       * This is needed for good VS wave utilization.
860       */
861      if (gfx_level <= GFX8 && info->max_se == 4 && multi_instances_smaller_than_primgroup)
862         wd_switch_on_eop = true;
863
864      /* Hardware requirement when drawing primitives from a stream
865       * output buffer.
866       */
867      if (count_from_stream_output)
868         wd_switch_on_eop = true;
869
870      /* Required on GFX7 and later. */
871      if (info->max_se > 2 && !wd_switch_on_eop)
872         ia_switch_on_eoi = true;
873
874      /* Required by Hawaii and, for some special cases, by GFX8. */
875      if (ia_switch_on_eoi &&
876          (family == CHIP_HAWAII ||
877           (gfx_level == GFX8 &&
878            /* max primgroup in wave is always 2 - leave this for documentation */
879            (radv_pipeline_has_stage(cmd_buffer->state.graphics_pipeline, MESA_SHADER_GEOMETRY) || max_primgroup_in_wave != 2))))
880         partial_vs_wave = true;
881
882      /* Instancing bug on Bonaire. */
883      if (family == CHIP_BONAIRE && ia_switch_on_eoi && (instanced_draw || indirect_draw))
884         partial_vs_wave = true;
885
886      /* If the WD switch is false, the IA switch must be false too. */
887      assert(wd_switch_on_eop || !ia_switch_on_eop);
888   }
889   /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
890   if (gfx_level <= GFX8 && ia_switch_on_eoi)
891      partial_es_wave = true;
892
893   if (radv_pipeline_has_stage(cmd_buffer->state.graphics_pipeline, MESA_SHADER_GEOMETRY)) {
894      /* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
895       * The hw doc says all multi-SE chips are affected, but amdgpu-pro Vulkan
896       * only applies it to Hawaii. Do what amdgpu-pro Vulkan does.
897       */
898      if (family == CHIP_HAWAII && ia_switch_on_eoi) {
899         bool set_vgt_flush = indirect_draw;
900         if (!set_vgt_flush && instanced_draw) {
901            uint32_t num_prims = radv_prims_for_vertices(&prim_vertex_count, draw_vertex_count);
902            if (num_prims <= 1)
903               set_vgt_flush = true;
904         }
905         if (set_vgt_flush)
906            cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH;
907      }
908   }
909
910   /* Workaround for a VGT hang when strip primitive types are used with
911    * primitive restart.
912    */
913   if (prim_restart_enable &&
914       (topology == V_008958_DI_PT_LINESTRIP || topology == V_008958_DI_PT_TRISTRIP ||
915        topology == V_008958_DI_PT_LINESTRIP_ADJ || topology == V_008958_DI_PT_TRISTRIP_ADJ)) {
916      partial_vs_wave = true;
917   }
918
919   return cmd_buffer->state.graphics_pipeline->ia_multi_vgt_param.base |
920          S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) | S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
921          S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
922          S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
923          S_028AA8_WD_SWITCH_ON_EOP(gfx_level >= GFX7 ? wd_switch_on_eop : 0);
924}
925
926void
927si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs, enum amd_gfx_level gfx_level, bool is_mec,
928                           unsigned event, unsigned event_flags, unsigned dst_sel,
929                           unsigned data_sel, uint64_t va, uint32_t new_fence,
930                           uint64_t gfx9_eop_bug_va)
931{
932   unsigned op = EVENT_TYPE(event) |
933                 EVENT_INDEX(event == V_028A90_CS_DONE || event == V_028A90_PS_DONE ? 6 : 5) |
934                 event_flags;
935   unsigned is_gfx8_mec = is_mec && gfx_level < GFX9;
936   unsigned sel = EOP_DST_SEL(dst_sel) | EOP_DATA_SEL(data_sel);
937
938   /* Wait for write confirmation before writing data, but don't send
939    * an interrupt. */
940   if (data_sel != EOP_DATA_SEL_DISCARD)
941      sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);
942
943   if (gfx_level >= GFX9 || is_gfx8_mec) {
944      /* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
945       * counters) must immediately precede every timestamp event to
946       * prevent a GPU hang on GFX9.
947       */
948      if (gfx_level == GFX9 && !is_mec) {
949         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
950         radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
951         radeon_emit(cs, gfx9_eop_bug_va);
952         radeon_emit(cs, gfx9_eop_bug_va >> 32);
953      }
954
955      radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, is_gfx8_mec ? 5 : 6, false));
956      radeon_emit(cs, op);
957      radeon_emit(cs, sel);
958      radeon_emit(cs, va);        /* address lo */
959      radeon_emit(cs, va >> 32);  /* address hi */
960      radeon_emit(cs, new_fence); /* immediate data lo */
961      radeon_emit(cs, 0);         /* immediate data hi */
962      if (!is_gfx8_mec)
963         radeon_emit(cs, 0); /* unused */
964   } else {
965      /* On GFX6, EOS events are always emitted with EVENT_WRITE_EOS.
966       * On GFX7+, EOS events are emitted with EVENT_WRITE_EOS on
967       * the graphics queue, and with RELEASE_MEM on the compute
968       * queue.
969       */
970      if (event == V_028B9C_CS_DONE || event == V_028B9C_PS_DONE) {
971         assert(event_flags == 0 && dst_sel == EOP_DST_SEL_MEM &&
972                data_sel == EOP_DATA_SEL_VALUE_32BIT);
973
974         if (is_mec) {
975            radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 5, false));
976            radeon_emit(cs, op);
977            radeon_emit(cs, sel);
978            radeon_emit(cs, va);        /* address lo */
979            radeon_emit(cs, va >> 32);  /* address hi */
980            radeon_emit(cs, new_fence); /* immediate data lo */
981            radeon_emit(cs, 0);         /* immediate data hi */
982         } else {
983            radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, false));
984            radeon_emit(cs, op);
985            radeon_emit(cs, va);
986            radeon_emit(cs, ((va >> 32) & 0xffff) | EOS_DATA_SEL(EOS_DATA_SEL_VALUE_32BIT));
987            radeon_emit(cs, new_fence);
988         }
989      } else {
990         if (gfx_level == GFX7 || gfx_level == GFX8) {
991            /* Two EOP events are required to make all
992             * engines go idle (and optional cache flushes
993             * executed) before the timestamp is written.
994             */
995            radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false));
996            radeon_emit(cs, op);
997            radeon_emit(cs, va);
998            radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
999            radeon_emit(cs, 0); /* immediate data */
1000            radeon_emit(cs, 0); /* unused */
1001         }
1002
1003         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false));
1004         radeon_emit(cs, op);
1005         radeon_emit(cs, va);
1006         radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
1007         radeon_emit(cs, new_fence); /* immediate data */
1008         radeon_emit(cs, 0);         /* unused */
1009      }
1010   }
1011}
1012
1013void
1014radv_cp_wait_mem(struct radeon_cmdbuf *cs, uint32_t op, uint64_t va, uint32_t ref, uint32_t mask)
1015{
1016   assert(op == WAIT_REG_MEM_EQUAL || op == WAIT_REG_MEM_NOT_EQUAL ||
1017          op == WAIT_REG_MEM_GREATER_OR_EQUAL);
1018
1019   radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, false));
1020   radeon_emit(cs, op | WAIT_REG_MEM_MEM_SPACE(1));
1021   radeon_emit(cs, va);
1022   radeon_emit(cs, va >> 32);
1023   radeon_emit(cs, ref);  /* reference value */
1024   radeon_emit(cs, mask); /* mask */
1025   radeon_emit(cs, 4);    /* poll interval */
1026}
1027
1028static void
1029si_emit_acquire_mem(struct radeon_cmdbuf *cs, bool is_mec, bool is_gfx9, unsigned cp_coher_cntl)
1030{
1031   if (is_mec || is_gfx9) {
1032      uint32_t hi_val = is_gfx9 ? 0xffffff : 0xff;
1033      radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, false) | PKT3_SHADER_TYPE_S(is_mec));
1034      radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
1035      radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
1036      radeon_emit(cs, hi_val);        /* CP_COHER_SIZE_HI */
1037      radeon_emit(cs, 0);             /* CP_COHER_BASE */
1038      radeon_emit(cs, 0);             /* CP_COHER_BASE_HI */
1039      radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
1040   } else {
1041      /* ACQUIRE_MEM is only required on a compute ring. */
1042      radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, false));
1043      radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
1044      radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
1045      radeon_emit(cs, 0);             /* CP_COHER_BASE */
1046      radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
1047   }
1048}
1049
1050static void
1051gfx10_cs_emit_cache_flush(struct radeon_cmdbuf *cs, enum amd_gfx_level gfx_level,
1052                          uint32_t *flush_cnt, uint64_t flush_va, bool is_mec,
1053                          enum radv_cmd_flush_bits flush_bits, enum rgp_flush_bits *sqtt_flush_bits,
1054                          uint64_t gfx9_eop_bug_va)
1055{
1056   uint32_t gcr_cntl = 0;
1057   unsigned cb_db_event = 0;
1058
1059   /* We don't need these. */
1060   assert(!(flush_bits & (RADV_CMD_FLAG_VGT_STREAMOUT_SYNC)));
1061
1062   if (flush_bits & RADV_CMD_FLAG_INV_ICACHE) {
1063      gcr_cntl |= S_586_GLI_INV(V_586_GLI_ALL);
1064
1065      *sqtt_flush_bits |= RGP_FLUSH_INVAL_ICACHE;
1066   }
1067   if (flush_bits & RADV_CMD_FLAG_INV_SCACHE) {
1068      /* TODO: When writing to the SMEM L1 cache, we need to set SEQ
1069       * to FORWARD when both L1 and L2 are written out (WB or INV).
1070       */
1071      gcr_cntl |= S_586_GL1_INV(1) | S_586_GLK_INV(1);
1072
1073      *sqtt_flush_bits |= RGP_FLUSH_INVAL_SMEM_L0;
1074   }
1075   if (flush_bits & RADV_CMD_FLAG_INV_VCACHE) {
1076      gcr_cntl |= S_586_GL1_INV(1) | S_586_GLV_INV(1);
1077
1078      *sqtt_flush_bits |= RGP_FLUSH_INVAL_VMEM_L0 | RGP_FLUSH_INVAL_L1;
1079   }
1080   if (flush_bits & RADV_CMD_FLAG_INV_L2) {
1081      /* Writeback and invalidate everything in L2. */
1082      gcr_cntl |= S_586_GL2_INV(1) | S_586_GL2_WB(1) | S_586_GLM_INV(1) | S_586_GLM_WB(1);
1083
1084      *sqtt_flush_bits |= RGP_FLUSH_INVAL_L2;
1085   } else if (flush_bits & RADV_CMD_FLAG_WB_L2) {
1086      /* Writeback but do not invalidate.
1087       * GLM doesn't support WB alone. If WB is set, INV must be set too.
1088       */
1089      gcr_cntl |= S_586_GL2_WB(1) | S_586_GLM_WB(1) | S_586_GLM_INV(1);
1090
1091      *sqtt_flush_bits |= RGP_FLUSH_FLUSH_L2;
1092   } else if (flush_bits & RADV_CMD_FLAG_INV_L2_METADATA) {
1093      gcr_cntl |= S_586_GLM_INV(1) | S_586_GLM_WB(1);
1094   }
1095
1096   if (flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB)) {
1097      /* TODO: trigger on RADV_CMD_FLAG_FLUSH_AND_INV_CB_META */
1098      if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
1099         /* Flush CMASK/FMASK/DCC. Will wait for idle later. */
1100         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1101         radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
1102
1103         *sqtt_flush_bits |= RGP_FLUSH_FLUSH_CB | RGP_FLUSH_INVAL_CB;
1104      }
1105
1106      /* TODO: trigger on RADV_CMD_FLAG_FLUSH_AND_INV_DB_META ? */
1107      if (gfx_level < GFX11 && (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB)) {
1108         /* Flush HTILE. Will wait for idle later. */
1109         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1110         radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
1111
1112         *sqtt_flush_bits |= RGP_FLUSH_FLUSH_DB | RGP_FLUSH_INVAL_DB;
1113      }
1114
1115      /* First flush CB/DB, then L1/L2. */
1116      gcr_cntl |= S_586_SEQ(V_586_SEQ_FORWARD);
1117
1118      if ((flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB)) ==
1119          (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB)) {
1120         cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
1121      } else if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
1122         cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
1123      } else if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
1124         if (gfx_level == GFX11) {
1125            cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
1126         } else {
1127            cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
1128         }
1129      } else {
1130         assert(0);
1131      }
1132   } else {
1133      /* Wait for graphics shaders to go idle if requested. */
1134      if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
1135         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1136         radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1137
1138         *sqtt_flush_bits |= RGP_FLUSH_PS_PARTIAL_FLUSH;
1139      } else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) {
1140         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1141         radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1142
1143         *sqtt_flush_bits |= RGP_FLUSH_VS_PARTIAL_FLUSH;
1144      }
1145   }
1146
1147   if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
1148      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1149      radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH | EVENT_INDEX(4)));
1150
1151      *sqtt_flush_bits |= RGP_FLUSH_CS_PARTIAL_FLUSH;
1152   }
1153
1154   if (cb_db_event) {
1155      /* CB/DB flush and invalidate (or possibly just a wait for a
1156       * meta flush) via RELEASE_MEM.
1157       *
1158       * Combine this with other cache flushes when possible; this
1159       * requires affected shaders to be idle, so do it after the
1160       * CS_PARTIAL_FLUSH before (VS/PS partial flushes are always
1161       * implied).
1162       */
1163      /* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
1164      unsigned glm_wb = G_586_GLM_WB(gcr_cntl);
1165      unsigned glm_inv = G_586_GLM_INV(gcr_cntl);
1166      unsigned glv_inv = G_586_GLV_INV(gcr_cntl);
1167      unsigned gl1_inv = G_586_GL1_INV(gcr_cntl);
1168      assert(G_586_GL2_US(gcr_cntl) == 0);
1169      assert(G_586_GL2_RANGE(gcr_cntl) == 0);
1170      assert(G_586_GL2_DISCARD(gcr_cntl) == 0);
1171      unsigned gl2_inv = G_586_GL2_INV(gcr_cntl);
1172      unsigned gl2_wb = G_586_GL2_WB(gcr_cntl);
1173      unsigned gcr_seq = G_586_SEQ(gcr_cntl);
1174
1175      gcr_cntl &= C_586_GLM_WB & C_586_GLM_INV & C_586_GLV_INV & C_586_GL1_INV & C_586_GL2_INV &
1176                  C_586_GL2_WB; /* keep SEQ */
1177
1178      assert(flush_cnt);
1179      (*flush_cnt)++;
1180
1181      si_cs_emit_write_event_eop(
1182         cs, gfx_level, false, cb_db_event,
1183         S_490_GLM_WB(glm_wb) | S_490_GLM_INV(glm_inv) | S_490_GLV_INV(glv_inv) |
1184            S_490_GL1_INV(gl1_inv) | S_490_GL2_INV(gl2_inv) | S_490_GL2_WB(gl2_wb) |
1185            S_490_SEQ(gcr_seq),
1186         EOP_DST_SEL_MEM, EOP_DATA_SEL_VALUE_32BIT, flush_va, *flush_cnt, gfx9_eop_bug_va);
1187
1188      radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, flush_va, *flush_cnt, 0xffffffff);
1189   }
1190
1191   /* VGT state sync */
1192   if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
1193      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1194      radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1195   }
1196
1197   /* Ignore fields that only modify the behavior of other fields. */
1198   if (gcr_cntl & C_586_GL1_RANGE & C_586_GL2_RANGE & C_586_SEQ) {
1199      /* Flush caches and wait for the caches to assert idle.
1200       * The cache flush is executed in the ME, but the PFP waits
1201       * for completion.
1202       */
1203      radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
1204      radeon_emit(cs, 0);          /* CP_COHER_CNTL */
1205      radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
1206      radeon_emit(cs, 0xffffff);   /* CP_COHER_SIZE_HI */
1207      radeon_emit(cs, 0);          /* CP_COHER_BASE */
1208      radeon_emit(cs, 0);          /* CP_COHER_BASE_HI */
1209      radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
1210      radeon_emit(cs, gcr_cntl);   /* GCR_CNTL */
1211   } else if ((cb_db_event ||
1212               (flush_bits & (RADV_CMD_FLAG_VS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
1213                              RADV_CMD_FLAG_CS_PARTIAL_FLUSH))) &&
1214              !is_mec) {
1215      /* We need to ensure that PFP waits as well. */
1216      radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1217      radeon_emit(cs, 0);
1218
1219      *sqtt_flush_bits |= RGP_FLUSH_PFP_SYNC_ME;
1220   }
1221
1222   if (flush_bits & RADV_CMD_FLAG_START_PIPELINE_STATS) {
1223      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1224      radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) | EVENT_INDEX(0));
1225   } else if (flush_bits & RADV_CMD_FLAG_STOP_PIPELINE_STATS) {
1226      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1227      radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) | EVENT_INDEX(0));
1228   }
1229}
1230
1231void
1232si_cs_emit_cache_flush(struct radeon_cmdbuf *cs, enum amd_gfx_level gfx_level, uint32_t *flush_cnt,
1233                       uint64_t flush_va, bool is_mec, enum radv_cmd_flush_bits flush_bits,
1234                       enum rgp_flush_bits *sqtt_flush_bits, uint64_t gfx9_eop_bug_va)
1235{
1236   unsigned cp_coher_cntl = 0;
1237   uint32_t flush_cb_db =
1238      flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB);
1239
1240   if (gfx_level >= GFX10) {
      /* GFX10+ cache flush handling is quite different. */
1242      gfx10_cs_emit_cache_flush(cs, gfx_level, flush_cnt, flush_va, is_mec, flush_bits,
1243                                sqtt_flush_bits, gfx9_eop_bug_va);
1244      return;
1245   }
1246
1247   if (flush_bits & RADV_CMD_FLAG_INV_ICACHE) {
1248      cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
1249      *sqtt_flush_bits |= RGP_FLUSH_INVAL_ICACHE;
1250   }
1251   if (flush_bits & RADV_CMD_FLAG_INV_SCACHE) {
1252      cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
1253      *sqtt_flush_bits |= RGP_FLUSH_INVAL_SMEM_L0;
1254   }
1255
1256   if (gfx_level <= GFX8) {
1257      if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
1258         cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) | S_0085F0_CB0_DEST_BASE_ENA(1) |
1259                          S_0085F0_CB1_DEST_BASE_ENA(1) | S_0085F0_CB2_DEST_BASE_ENA(1) |
1260                          S_0085F0_CB3_DEST_BASE_ENA(1) | S_0085F0_CB4_DEST_BASE_ENA(1) |
1261                          S_0085F0_CB5_DEST_BASE_ENA(1) | S_0085F0_CB6_DEST_BASE_ENA(1) |
1262                          S_0085F0_CB7_DEST_BASE_ENA(1);
1263
1264         /* Necessary for DCC */
1265         if (gfx_level >= GFX8) {
1266            si_cs_emit_write_event_eop(cs, gfx_level, is_mec, V_028A90_FLUSH_AND_INV_CB_DATA_TS, 0,
1267                                       EOP_DST_SEL_MEM, EOP_DATA_SEL_DISCARD, 0, 0,
1268                                       gfx9_eop_bug_va);
1269         }
1270
1271         *sqtt_flush_bits |= RGP_FLUSH_FLUSH_CB | RGP_FLUSH_INVAL_CB;
1272      }
1273      if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
1274         cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) | S_0085F0_DB_DEST_BASE_ENA(1);
1275
1276         *sqtt_flush_bits |= RGP_FLUSH_FLUSH_DB | RGP_FLUSH_INVAL_DB;
1277      }
1278   }
1279
1280   if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) {
1281      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1282      radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
1283
1284      *sqtt_flush_bits |= RGP_FLUSH_FLUSH_CB | RGP_FLUSH_INVAL_CB;
1285   }
1286
1287   if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB_META) {
1288      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1289      radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
1290
1291      *sqtt_flush_bits |= RGP_FLUSH_FLUSH_DB | RGP_FLUSH_INVAL_DB;
1292   }
1293
1294   if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
1295      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1296      radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1297
1298      *sqtt_flush_bits |= RGP_FLUSH_PS_PARTIAL_FLUSH;
1299   } else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) {
1300      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1301      radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1302
1303      *sqtt_flush_bits |= RGP_FLUSH_VS_PARTIAL_FLUSH;
1304   }
1305
1306   if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
1307      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1308      radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1309
1310      *sqtt_flush_bits |= RGP_FLUSH_CS_PARTIAL_FLUSH;
1311   }
1312
1313   if (gfx_level == GFX9 && flush_cb_db) {
1314      unsigned cb_db_event, tc_flags;
1315
1316      /* Set the CB/DB flush event. */
1317      cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
1318
1319      /* These are the only allowed combinations. If you need to
1320       * do multiple operations at once, do them separately.
1321       * All operations that invalidate L2 also seem to invalidate
1322       * metadata. Volatile (VOL) and WC flushes are not listed here.
1323       *
1324       * TC    | TC_WB         = writeback & invalidate L2 & L1
1325       * TC    | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
1326       *         TC_WB | TC_NC = writeback L2 for MTYPE == NC
1327       * TC            | TC_NC = invalidate L2 for MTYPE == NC
1328       * TC    | TC_MD         = writeback & invalidate L2 metadata (DCC, etc.)
1329       * TCL1                  = invalidate L1
1330       */
1331      tc_flags = EVENT_TC_ACTION_ENA | EVENT_TC_MD_ACTION_ENA;
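      /* i.e. writeback & invalidate L2 metadata (DCC, etc.), per the table
       * above. */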
1332
1333      *sqtt_flush_bits |=
1334         RGP_FLUSH_FLUSH_CB | RGP_FLUSH_INVAL_CB | RGP_FLUSH_FLUSH_DB | RGP_FLUSH_INVAL_DB;
1335
1336      /* Ideally flush TC together with CB/DB. */
1337      if (flush_bits & RADV_CMD_FLAG_INV_L2) {
1338         /* Writeback and invalidate everything in L2 & L1. */
1339         tc_flags = EVENT_TC_ACTION_ENA | EVENT_TC_WB_ACTION_ENA;
1340
1341         /* Clear the flags. */
1342         flush_bits &= ~(RADV_CMD_FLAG_INV_L2 | RADV_CMD_FLAG_WB_L2 | RADV_CMD_FLAG_INV_VCACHE);
1343
1344         *sqtt_flush_bits |= RGP_FLUSH_INVAL_L2;
1345      }
1346
1347      assert(flush_cnt);
1348      (*flush_cnt)++;
1349
1350      si_cs_emit_write_event_eop(cs, gfx_level, false, cb_db_event, tc_flags, EOP_DST_SEL_MEM,
1351                                 EOP_DATA_SEL_VALUE_32BIT, flush_va, *flush_cnt, gfx9_eop_bug_va);
1352      radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, flush_va, *flush_cnt, 0xffffffff);
1353   }
1354
1355   /* VGT state sync */
1356   if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
1357      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1358      radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1359   }
1360
1361   /* VGT streamout state sync */
1362   if (flush_bits & RADV_CMD_FLAG_VGT_STREAMOUT_SYNC) {
1363      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1364      radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
1365   }
1366
1367   /* Make sure ME is idle (it executes most packets) before continuing.
1368    * This prevents read-after-write hazards between PFP and ME.
1369    */
1370   if ((cp_coher_cntl || (flush_bits & (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_VCACHE |
1371                                        RADV_CMD_FLAG_INV_L2 | RADV_CMD_FLAG_WB_L2))) &&
1372       !is_mec) {
1373      radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1374      radeon_emit(cs, 0);
1375
1376      *sqtt_flush_bits |= RGP_FLUSH_PFP_SYNC_ME;
1377   }
1378
1379   if ((flush_bits & RADV_CMD_FLAG_INV_L2) ||
1380       (gfx_level <= GFX7 && (flush_bits & RADV_CMD_FLAG_WB_L2))) {
1381      si_emit_acquire_mem(cs, is_mec, gfx_level == GFX9,
1382                          cp_coher_cntl | S_0085F0_TC_ACTION_ENA(1) | S_0085F0_TCL1_ACTION_ENA(1) |
1383                             S_0301F0_TC_WB_ACTION_ENA(gfx_level >= GFX8));
1384      cp_coher_cntl = 0;
1385
1386      *sqtt_flush_bits |= RGP_FLUSH_INVAL_L2 | RGP_FLUSH_INVAL_VMEM_L0;
1387   } else {
1388      if (flush_bits & RADV_CMD_FLAG_WB_L2) {
1389         /* WB = write-back
1390          * NC = apply to non-coherent MTYPEs
1391          *      (i.e. MTYPE <= 1, which is what we use everywhere)
1392          *
1393          * WB doesn't work without NC.
1394          */
1395         si_emit_acquire_mem(
1396            cs, is_mec, gfx_level == GFX9,
1397            cp_coher_cntl | S_0301F0_TC_WB_ACTION_ENA(1) | S_0301F0_TC_NC_ACTION_ENA(1));
1398         cp_coher_cntl = 0;
1399
1400         *sqtt_flush_bits |= RGP_FLUSH_FLUSH_L2 | RGP_FLUSH_INVAL_VMEM_L0;
1401      }
1402      if (flush_bits & RADV_CMD_FLAG_INV_VCACHE) {
1403         si_emit_acquire_mem(cs, is_mec, gfx_level == GFX9,
1404                             cp_coher_cntl | S_0085F0_TCL1_ACTION_ENA(1));
1405         cp_coher_cntl = 0;
1406
1407         *sqtt_flush_bits |= RGP_FLUSH_INVAL_VMEM_L0;
1408      }
1409   }
1410
1411   /* When one of the DEST_BASE flags is set, SURFACE_SYNC waits for idle.
1412    * Therefore, it should be last. Done in PFP.
1413    */
1414   if (cp_coher_cntl)
1415      si_emit_acquire_mem(cs, is_mec, gfx_level == GFX9, cp_coher_cntl);
1416
1417   if (flush_bits & RADV_CMD_FLAG_START_PIPELINE_STATS) {
1418      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1419      radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) | EVENT_INDEX(0));
1420   } else if (flush_bits & RADV_CMD_FLAG_STOP_PIPELINE_STATS) {
1421      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1422      radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) | EVENT_INDEX(0));
1423   }
1424}
1425
1426void
1427si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer)
1428{
1429   bool is_compute = cmd_buffer->qf == RADV_QUEUE_COMPUTE;
1430
1431   if (is_compute)
1432      cmd_buffer->state.flush_bits &=
1433         ~(RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
1434           RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
1435           RADV_CMD_FLAG_INV_L2_METADATA | RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
1436           RADV_CMD_FLAG_VS_PARTIAL_FLUSH | RADV_CMD_FLAG_VGT_FLUSH |
1437           RADV_CMD_FLAG_START_PIPELINE_STATS | RADV_CMD_FLAG_STOP_PIPELINE_STATS);
1438
1439   if (!cmd_buffer->state.flush_bits) {
1440      radv_describe_barrier_end_delayed(cmd_buffer);
1441      return;
1442   }
1443
1444   radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 128);
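   /* Assume the reserved 128 dwords cover the worst-case flush sequence. */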
1445
1446   si_cs_emit_cache_flush(cmd_buffer->cs, cmd_buffer->device->physical_device->rad_info.gfx_level,
1447                          &cmd_buffer->gfx9_fence_idx, cmd_buffer->gfx9_fence_va,
1448                          radv_cmd_buffer_uses_mec(cmd_buffer), cmd_buffer->state.flush_bits,
1449                          &cmd_buffer->state.sqtt_flush_bits, cmd_buffer->gfx9_eop_bug_va);
1450
1451   if (unlikely(cmd_buffer->device->trace_bo))
1452      radv_cmd_buffer_trace_emit(cmd_buffer);
1453
1454   if (cmd_buffer->state.flush_bits & RADV_CMD_FLAG_INV_L2)
1455      cmd_buffer->state.rb_noncoherent_dirty = false;
1456
1457   /* Clear the caches that have been flushed to avoid syncing too much
    * when there are pending active queries.
1459    */
1460   cmd_buffer->active_query_flush_bits &= ~cmd_buffer->state.flush_bits;
1461
1462   cmd_buffer->state.flush_bits = 0;
1463
1464   /* If the driver used a compute shader for resetting a query pool, it
1465    * should be finished at this point.
1466    */
1467   cmd_buffer->pending_reset_query = false;
1468
1469   radv_describe_barrier_end_delayed(cmd_buffer);
1470}
1471
1472/* sets the CP predication state using a boolean stored at va */
1473void
1474si_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer, bool draw_visible,
1475                              unsigned pred_op, uint64_t va)
1476{
1477   uint32_t op = 0;
1478
1479   if (va) {
1480      assert(pred_op == PREDICATION_OP_BOOL32 || pred_op == PREDICATION_OP_BOOL64);
1481
1482      op = PRED_OP(pred_op);
1483
1484      /* PREDICATION_DRAW_VISIBLE means that if the 32-bit value is
1485       * zero, all rendering commands are discarded. Otherwise, they
       * are discarded if the value is non-zero.
1487       */
1488      op |= draw_visible ? PREDICATION_DRAW_VISIBLE : PREDICATION_DRAW_NOT_VISIBLE;
1489   }
1490   if (cmd_buffer->device->physical_device->rad_info.gfx_level >= GFX9) {
1491      radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
1492      radeon_emit(cmd_buffer->cs, op);
1493      radeon_emit(cmd_buffer->cs, va);
1494      radeon_emit(cmd_buffer->cs, va >> 32);
1495   } else {
1496      radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
1497      radeon_emit(cmd_buffer->cs, va);
1498      radeon_emit(cmd_buffer->cs, op | ((va >> 32) & 0xFF));
1499   }
1500}
1501
1502/* Set this if you want the 3D engine to wait until CP DMA is done.
1503 * It should be set on the last CP DMA packet. */
1504#define CP_DMA_SYNC (1 << 0)
1505
1506/* Set this if the source data was used as a destination in a previous CP DMA
1507 * packet. It's for preventing a read-after-write (RAW) hazard between two
1508 * CP DMA packets. */
1509#define CP_DMA_RAW_WAIT (1 << 1)
1510#define CP_DMA_USE_L2   (1 << 2)
1511#define CP_DMA_CLEAR    (1 << 3)
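/* CP_DMA_USE_L2 routes the transfer through the L2 cache; the GFX9+ paths
 * below set it so copies and clears stay coherent with shader access via L2.
 * CP_DMA_CLEAR turns the packet into a fill, with the 32-bit clear value
 * passed in place of the source address. */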
1512
1513/* Alignment for optimal performance. */
1514#define SI_CPDMA_ALIGNMENT 32
1515
1516/* The max number of bytes that can be copied per packet. */
1517static inline unsigned
1518cp_dma_max_byte_count(enum amd_gfx_level gfx_level)
1519{
1520   unsigned max = gfx_level >= GFX11 ? 32767 :
1521                  gfx_level >= GFX9 ? S_415_BYTE_COUNT_GFX9(~0u) : S_415_BYTE_COUNT_GFX6(~0u);
1522
1523   /* make it aligned for optimal performance */
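   /* e.g. on GFX11: 32767 & ~31 = 32736 bytes */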
1524   return max & ~(SI_CPDMA_ALIGNMENT - 1);
1525}
1526
/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
 * a buffer. The size must not exceed cp_dma_max_byte_count() for the current
 * GFX level. If CP_DMA_CLEAR is set, src_va is a 32-bit clear value.
 */
1531static void
1532si_cs_emit_cp_dma(struct radv_device *device, struct radeon_cmdbuf *cs, bool predicating,
1533                  uint64_t dst_va, uint64_t src_va, unsigned size, unsigned flags)
1534{
1535   uint32_t header = 0, command = 0;
1536
1537   assert(size <= cp_dma_max_byte_count(device->physical_device->rad_info.gfx_level));
1538
1539   radeon_check_space(device->ws, cs, 9);
1540   if (device->physical_device->rad_info.gfx_level >= GFX9)
1541      command |= S_415_BYTE_COUNT_GFX9(size);
1542   else
1543      command |= S_415_BYTE_COUNT_GFX6(size);
1544
1545   /* Sync flags. */
1546   if (flags & CP_DMA_SYNC)
1547      header |= S_411_CP_SYNC(1);
1548   else {
1549      if (device->physical_device->rad_info.gfx_level >= GFX9)
1550         command |= S_415_DISABLE_WR_CONFIRM_GFX9(1);
1551      else
1552         command |= S_415_DISABLE_WR_CONFIRM_GFX6(1);
1553   }
1554
1555   if (flags & CP_DMA_RAW_WAIT)
1556      command |= S_415_RAW_WAIT(1);
1557
1558   /* Src and dst flags. */
1559   if (device->physical_device->rad_info.gfx_level >= GFX9 && !(flags & CP_DMA_CLEAR) &&
1560       src_va == dst_va)
1561      header |= S_411_DST_SEL(V_411_NOWHERE); /* prefetch only */
1562   else if (flags & CP_DMA_USE_L2)
1563      header |= S_411_DST_SEL(V_411_DST_ADDR_TC_L2);
1564
1565   if (flags & CP_DMA_CLEAR)
1566      header |= S_411_SRC_SEL(V_411_DATA);
1567   else if (flags & CP_DMA_USE_L2)
1568      header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2);
1569
1570   if (device->physical_device->rad_info.gfx_level >= GFX7) {
1571      radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, predicating));
1572      radeon_emit(cs, header);
1573      radeon_emit(cs, src_va);       /* SRC_ADDR_LO [31:0] */
1574      radeon_emit(cs, src_va >> 32); /* SRC_ADDR_HI [31:0] */
1575      radeon_emit(cs, dst_va);       /* DST_ADDR_LO [31:0] */
1576      radeon_emit(cs, dst_va >> 32); /* DST_ADDR_HI [31:0] */
1577      radeon_emit(cs, command);
1578   } else {
1579      assert(!(flags & CP_DMA_USE_L2));
1580      header |= S_411_SRC_ADDR_HI(src_va >> 32);
1581      radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, predicating));
1582      radeon_emit(cs, src_va);                  /* SRC_ADDR_LO [31:0] */
1583      radeon_emit(cs, header);                  /* SRC_ADDR_HI [15:0] + flags. */
1584      radeon_emit(cs, dst_va);                  /* DST_ADDR_LO [31:0] */
1585      radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
1586      radeon_emit(cs, command);
1587   }
1588}
1589
1590static void
1591si_emit_cp_dma(struct radv_cmd_buffer *cmd_buffer, uint64_t dst_va, uint64_t src_va, unsigned size,
1592               unsigned flags)
1593{
1594   struct radeon_cmdbuf *cs = cmd_buffer->cs;
1595   struct radv_device *device = cmd_buffer->device;
1596   bool predicating = cmd_buffer->state.predicating;
1597
1598   si_cs_emit_cp_dma(device, cs, predicating, dst_va, src_va, size, flags);
1599
1600   /* CP DMA is executed in ME, but index buffers are read by PFP.
1601    * This ensures that ME (CP DMA) is idle before PFP starts fetching
1602    * indices. If we wanted to execute CP DMA in PFP, this packet
1603    * should precede it.
1604    */
1605   if (flags & CP_DMA_SYNC) {
1606      if (cmd_buffer->qf == RADV_QUEUE_GENERAL) {
         radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, predicating));
1608         radeon_emit(cs, 0);
1609      }
1610
1611      /* CP will see the sync flag and wait for all DMAs to complete. */
1612      cmd_buffer->state.dma_is_busy = false;
1613   }
1614
1615   if (unlikely(cmd_buffer->device->trace_bo))
1616      radv_cmd_buffer_trace_emit(cmd_buffer);
1617}
1618
1619void
1620si_cs_cp_dma_prefetch(const struct radv_device *device, struct radeon_cmdbuf *cs, uint64_t va,
1621                      unsigned size, bool predicating)
1622{
1623   struct radeon_winsys *ws = device->ws;
1624   enum amd_gfx_level gfx_level = device->physical_device->rad_info.gfx_level;
1625   uint32_t header = 0, command = 0;
1626
1627   if (gfx_level >= GFX11)
1628      size = MIN2(size, 32768 - SI_CPDMA_ALIGNMENT);
1629
1630   assert(size <= cp_dma_max_byte_count(gfx_level));
1631
1632   radeon_check_space(ws, cs, 9);
1633
1634   uint64_t aligned_va = va & ~(SI_CPDMA_ALIGNMENT - 1);
1635   uint64_t aligned_size =
1636      ((va + size + SI_CPDMA_ALIGNMENT - 1) & ~(SI_CPDMA_ALIGNMENT - 1)) - aligned_va;
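   /* Round the start down and the end up to SI_CPDMA_ALIGNMENT so the
    * prefetched range fully covers [va, va + size). */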
1637
1638   if (gfx_level >= GFX9) {
1639      command |= S_415_BYTE_COUNT_GFX9(aligned_size) |
1640                 S_415_DISABLE_WR_CONFIRM_GFX9(1);
1641      header |= S_411_DST_SEL(V_411_NOWHERE);
1642   } else {
1643      command |= S_415_BYTE_COUNT_GFX6(aligned_size) |
1644                 S_415_DISABLE_WR_CONFIRM_GFX6(1);
1645      header |= S_411_DST_SEL(V_411_DST_ADDR_TC_L2);
1646   }
1647
1648   header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2);
1649
1650   radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, predicating));
1651   radeon_emit(cs, header);
1652   radeon_emit(cs, aligned_va);       /* SRC_ADDR_LO [31:0] */
1653   radeon_emit(cs, aligned_va >> 32); /* SRC_ADDR_HI [31:0] */
1654   radeon_emit(cs, aligned_va);       /* DST_ADDR_LO [31:0] */
1655   radeon_emit(cs, aligned_va >> 32); /* DST_ADDR_HI [31:0] */
1656   radeon_emit(cs, command);
1657}
1658
1659void
1660si_cp_dma_prefetch(struct radv_cmd_buffer *cmd_buffer, uint64_t va, unsigned size)
1661{
1662   si_cs_cp_dma_prefetch(cmd_buffer->device, cmd_buffer->cs, va, size,
1663                         cmd_buffer->state.predicating);
1664
1665   if (unlikely(cmd_buffer->device->trace_bo))
1666      radv_cmd_buffer_trace_emit(cmd_buffer);
1667}
1668
1669static void
1670si_cp_dma_prepare(struct radv_cmd_buffer *cmd_buffer, uint64_t byte_count, uint64_t remaining_size,
1671                  unsigned *flags)
1672{
1673
1674   /* Flush the caches for the first copy only.
1675    * Also wait for the previous CP DMA operations.
1676    */
1677   if (cmd_buffer->state.flush_bits) {
1678      si_emit_cache_flush(cmd_buffer);
1679      *flags |= CP_DMA_RAW_WAIT;
1680   }
1681
1682   /* Do the synchronization after the last dma, so that all data
1683    * is written to memory.
1684    */
1685   if (byte_count == remaining_size)
1686      *flags |= CP_DMA_SYNC;
1687}
1688
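/* Copy `size` (< SI_CPDMA_ALIGNMENT) bytes within a scratch upload allocation.
 * The data is irrelevant; the copy only exists to realign the CP DMA engine's
 * internal counter (see si_cp_dma_buffer_copy()). */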
1689static void
1690si_cp_dma_realign_engine(struct radv_cmd_buffer *cmd_buffer, unsigned size)
1691{
1692   uint64_t va;
1693   uint32_t offset;
1694   unsigned dma_flags = 0;
1695   unsigned buf_size = SI_CPDMA_ALIGNMENT * 2;
1696   void *ptr;
1697
1698   assert(size < SI_CPDMA_ALIGNMENT);
1699
1700   radv_cmd_buffer_upload_alloc(cmd_buffer, buf_size, &offset, &ptr);
1701
1702   va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1703   va += offset;
1704
1705   si_cp_dma_prepare(cmd_buffer, size, size, &dma_flags);
1706
1707   si_emit_cp_dma(cmd_buffer, va, va + SI_CPDMA_ALIGNMENT, size, dma_flags);
1708}
1709
1710void
1711si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer, uint64_t src_va, uint64_t dest_va,
1712                      uint64_t size)
1713{
1714   enum amd_gfx_level gfx_level = cmd_buffer->device->physical_device->rad_info.gfx_level;
1715   uint64_t main_src_va, main_dest_va;
1716   uint64_t skipped_size = 0, realign_size = 0;
1717
1718   /* Assume that we are not going to sync after the last DMA operation. */
1719   cmd_buffer->state.dma_is_busy = true;
1720
1721   if (cmd_buffer->device->physical_device->rad_info.family <= CHIP_CARRIZO ||
1722       cmd_buffer->device->physical_device->rad_info.family == CHIP_STONEY) {
1723      /* If the size is not aligned, we must add a dummy copy at the end
1724       * just to align the internal counter. Otherwise, the DMA engine
1725       * would slow down by an order of magnitude for following copies.
1726       */
1727      if (size % SI_CPDMA_ALIGNMENT)
1728         realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);
1729
1730      /* If the copy begins unaligned, we must start copying from the next
1731       * aligned block and the skipped part should be copied after everything
1732       * else has been copied. Only the src alignment matters, not dst.
1733       */
1734      if (src_va % SI_CPDMA_ALIGNMENT) {
1735         skipped_size = SI_CPDMA_ALIGNMENT - (src_va % SI_CPDMA_ALIGNMENT);
1736         /* The main part will be skipped if the size is too small. */
1737         skipped_size = MIN2(skipped_size, size);
1738         size -= skipped_size;
1739      }
1740   }
1741   main_src_va = src_va + skipped_size;
1742   main_dest_va = dest_va + skipped_size;
1743
1744   while (size) {
1745      unsigned dma_flags = 0;
1746      unsigned byte_count = MIN2(size, cp_dma_max_byte_count(gfx_level));
1747
1748      if (cmd_buffer->device->physical_device->rad_info.gfx_level >= GFX9) {
1749         /* DMA operations via L2 are coherent and faster.
1750          * TODO: GFX7-GFX8 should also support this but it
1751          * requires tests/benchmarks.
1752          *
1753          * Also enable on GFX9 so we can use L2 at rest on GFX9+. On Raven
1754          * this didn't seem to be worse.
1755          *
1756          * Note that we only use CP DMA for sizes < RADV_BUFFER_OPS_CS_THRESHOLD,
1757          * which is 4k at the moment, so this is really unlikely to cause
1758          * significant thrashing.
1759          */
1760         dma_flags |= CP_DMA_USE_L2;
1761      }
1762
1763      si_cp_dma_prepare(cmd_buffer, byte_count, size + skipped_size + realign_size, &dma_flags);
1764
1765      dma_flags &= ~CP_DMA_SYNC;
1766
1767      si_emit_cp_dma(cmd_buffer, main_dest_va, main_src_va, byte_count, dma_flags);
1768
1769      size -= byte_count;
1770      main_src_va += byte_count;
1771      main_dest_va += byte_count;
1772   }
1773
1774   if (skipped_size) {
1775      unsigned dma_flags = 0;
1776
1777      si_cp_dma_prepare(cmd_buffer, skipped_size, size + skipped_size + realign_size, &dma_flags);
1778
1779      si_emit_cp_dma(cmd_buffer, dest_va, src_va, skipped_size, dma_flags);
1780   }
1781   if (realign_size)
1782      si_cp_dma_realign_engine(cmd_buffer, realign_size);
1783}
1784
1785void
1786si_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer, uint64_t va, uint64_t size,
1787                       unsigned value)
1788{
1789   if (!size)
1790      return;
1791
1792   assert(va % 4 == 0 && size % 4 == 0);
1793
1794   enum amd_gfx_level gfx_level = cmd_buffer->device->physical_device->rad_info.gfx_level;
1795
1796   /* Assume that we are not going to sync after the last DMA operation. */
1797   cmd_buffer->state.dma_is_busy = true;
1798
1799   while (size) {
1800      unsigned byte_count = MIN2(size, cp_dma_max_byte_count(gfx_level));
1801      unsigned dma_flags = CP_DMA_CLEAR;
1802
1803      if (cmd_buffer->device->physical_device->rad_info.gfx_level >= GFX9) {
1804         /* DMA operations via L2 are coherent and faster.
1805          * TODO: GFX7-GFX8 should also support this but it
1806          * requires tests/benchmarks.
1807          *
1808          * Also enable on GFX9 so we can use L2 at rest on GFX9+.
1809          */
1810         dma_flags |= CP_DMA_USE_L2;
1811      }
1812
1813      si_cp_dma_prepare(cmd_buffer, byte_count, size, &dma_flags);
1814
1815      /* Emit the clear packet. */
1816      si_emit_cp_dma(cmd_buffer, va, value, byte_count, dma_flags);
1817
1818      size -= byte_count;
1819      va += byte_count;
1820   }
1821}
1822
1823void
1824si_cp_dma_wait_for_idle(struct radv_cmd_buffer *cmd_buffer)
1825{
1826   if (cmd_buffer->device->physical_device->rad_info.gfx_level < GFX7)
1827      return;
1828
1829   if (!cmd_buffer->state.dma_is_busy)
1830      return;
1831
1832   /* Issue a dummy DMA that copies zero bytes.
1833    *
1834    * The DMA engine will see that there's no work to do and skip this
    * DMA request; however, the CP will see the sync flag and still wait
1836    * for all DMAs to complete.
1837    */
1838   si_emit_cp_dma(cmd_buffer, 0, 0, 0, CP_DMA_SYNC);
1839
1840   cmd_buffer->state.dma_is_busy = false;
1841}
1842
1843/* For MSAA sample positions. */
1844#define FILL_SREG(s0x, s0y, s1x, s1y, s2x, s2y, s3x, s3y)                                          \
1845   ((((unsigned)(s0x)&0xf) << 0) | (((unsigned)(s0y)&0xf) << 4) | (((unsigned)(s1x)&0xf) << 8) |   \
1846    (((unsigned)(s1y)&0xf) << 12) | (((unsigned)(s2x)&0xf) << 16) |                                \
1847    (((unsigned)(s2y)&0xf) << 20) | (((unsigned)(s3x)&0xf) << 24) | (((unsigned)(s3y)&0xf) << 28))
1848
1849/* For obtaining location coordinates from registers */
1850#define SEXT4(x)               ((int)((x) | ((x)&0x8 ? 0xfffffff0 : 0)))
1851#define GET_SFIELD(reg, index) SEXT4(((reg) >> ((index)*4)) & 0xf)
1852#define GET_SX(reg, index)     GET_SFIELD((reg)[(index) / 4], ((index) % 4) * 2)
1853#define GET_SY(reg, index)     GET_SFIELD((reg)[(index) / 4], ((index) % 4) * 2 + 1)
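/* Each coordinate is a signed 4-bit offset from the pixel center in
 * 1/16th-pixel units; radv_get_sample_position() below maps it into [0, 1)
 * via (value + 8) / 16, e.g. the 2x locations (4, 4) and (-4, -4) become
 * (0.75, 0.75) and (0.25, 0.25). */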
1854
1855/* 1x MSAA */
1856static const uint32_t sample_locs_1x = FILL_SREG(0, 0, 0, 0, 0, 0, 0, 0);
1857static const unsigned max_dist_1x = 0;
1858static const uint64_t centroid_priority_1x = 0x0000000000000000ull;
1859
1860/* 2xMSAA */
1861static const uint32_t sample_locs_2x = FILL_SREG(4, 4, -4, -4, 0, 0, 0, 0);
1862static const unsigned max_dist_2x = 4;
1863static const uint64_t centroid_priority_2x = 0x1010101010101010ull;
1864
1865/* 4xMSAA */
1866static const uint32_t sample_locs_4x = FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6);
1867static const unsigned max_dist_4x = 6;
1868static const uint64_t centroid_priority_4x = 0x3210321032103210ull;
1869
1870/* 8xMSAA */
1871static const uint32_t sample_locs_8x[] = {
1872   FILL_SREG(1, -3, -1, 3, 5, 1, -3, -5),
1873   FILL_SREG(-5, 5, -7, -1, 3, 7, 7, -7),
   /* The following are unused by hardware, but emitting them lets
    * radv_emit_default_sample_locations() program the 8x sample locations
    * with a single SET_CONTEXT_REG_SEQ packet instead of multiple
    * SET_CONTEXT_REG packets. */
1876   0,
1877   0,
1878};
1879static const unsigned max_dist_8x = 7;
1880static const uint64_t centroid_priority_8x = 0x7654321076543210ull;
1881
1882unsigned
1883radv_get_default_max_sample_dist(int log_samples)
1884{
1885   unsigned max_dist[] = {
1886      max_dist_1x,
1887      max_dist_2x,
1888      max_dist_4x,
1889      max_dist_8x,
1890   };
1891   return max_dist[log_samples];
1892}
1893
1894void
1895radv_emit_default_sample_locations(struct radeon_cmdbuf *cs, int nr_samples)
1896{
1897   switch (nr_samples) {
1898   default:
1899   case 1:
1900      radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
1901      radeon_emit(cs, (uint32_t)centroid_priority_1x);
1902      radeon_emit(cs, centroid_priority_1x >> 32);
1903      radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_1x);
1904      radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_1x);
1905      radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_1x);
1906      radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_1x);
1907      break;
1908   case 2:
1909      radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
1910      radeon_emit(cs, (uint32_t)centroid_priority_2x);
1911      radeon_emit(cs, centroid_priority_2x >> 32);
1912      radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_2x);
1913      radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_2x);
1914      radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_2x);
1915      radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_2x);
1916      break;
1917   case 4:
1918      radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
1919      radeon_emit(cs, (uint32_t)centroid_priority_4x);
1920      radeon_emit(cs, centroid_priority_4x >> 32);
1921      radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_4x);
1922      radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_4x);
1923      radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_4x);
1924      radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_4x);
1925      break;
1926   case 8:
1927      radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
1928      radeon_emit(cs, (uint32_t)centroid_priority_8x);
1929      radeon_emit(cs, centroid_priority_8x >> 32);
1930      radeon_set_context_reg_seq(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 14);
1931      radeon_emit_array(cs, sample_locs_8x, 4);
1932      radeon_emit_array(cs, sample_locs_8x, 4);
1933      radeon_emit_array(cs, sample_locs_8x, 4);
1934      radeon_emit_array(cs, sample_locs_8x, 2);
1935      break;
1936   }
1937}
1938
1939static void
1940radv_get_sample_position(struct radv_device *device, unsigned sample_count, unsigned sample_index,
1941                         float *out_value)
1942{
1943   const uint32_t *sample_locs;
1944
1945   switch (sample_count) {
1946   case 1:
1947   default:
1948      sample_locs = &sample_locs_1x;
1949      break;
1950   case 2:
1951      sample_locs = &sample_locs_2x;
1952      break;
1953   case 4:
1954      sample_locs = &sample_locs_4x;
1955      break;
1956   case 8:
1957      sample_locs = sample_locs_8x;
1958      break;
1959   }
1960
1961   out_value[0] = (GET_SX(sample_locs, sample_index) + 8) / 16.0f;
1962   out_value[1] = (GET_SY(sample_locs, sample_index) + 8) / 16.0f;
1963}
1964
1965void
1966radv_device_init_msaa(struct radv_device *device)
1967{
1968   int i;
1969
1970   radv_get_sample_position(device, 1, 0, device->sample_locations_1x[0]);
1971
1972   for (i = 0; i < 2; i++)
1973      radv_get_sample_position(device, 2, i, device->sample_locations_2x[i]);
1974   for (i = 0; i < 4; i++)
1975      radv_get_sample_position(device, 4, i, device->sample_locations_4x[i]);
1976   for (i = 0; i < 8; i++)
1977      radv_get_sample_position(device, 8, i, device->sample_locations_8x[i]);
1978}
1979