#include "nir/nir_builder.h"
#include "radv_meta.h"

#include "radv_cs.h"
#include "sid.h"

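/*
 * Fill shader: each invocation stores the 32-bit fill value replicated into a
 * 16-byte vec4. Push constants are packed as four 32-bit words: the 64-bit
 * destination VA, then size - 16, then the fill value.
 */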
static nir_shader *
build_buffer_fill_shader(struct radv_device *dev)
{
   nir_builder b = radv_meta_init_shader(dev, MESA_SHADER_COMPUTE, "meta_buffer_fill");
   b.shader->info.workgroup_size[0] = 64;

   nir_ssa_def *pconst = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
   nir_ssa_def *buffer_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b0011));
   nir_ssa_def *size_minus16 = nir_channel(&b, pconst, 2);
   nir_ssa_def *data = nir_swizzle(&b, nir_channel(&b, pconst, 3), (unsigned[]){0, 0, 0, 0}, 4);

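   /* Flatten the 1D dispatch into a global invocation index. */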
   nir_ssa_def *global_id =
      nir_iadd(&b,
               nir_imul_imm(&b, nir_channel(&b, nir_load_workgroup_id(&b, 32), 0),
                            b.shader->info.workgroup_size[0]),
               nir_load_local_invocation_index(&b));

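   /* Clamp the store offset to size - 16, so the trailing invocations overlap
    * the previous store instead of writing past the end of the buffer;
    * callers guarantee size >= 16.
    */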
   nir_ssa_def *offset = nir_imin(&b, nir_imul_imm(&b, global_id, 16), size_minus16);
   nir_ssa_def *dst_addr = nir_iadd(&b, buffer_addr, nir_u2u64(&b, offset));
   nir_build_store_global(&b, data, dst_addr, .align_mul = 4);

   return b.shader;
}

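/*
 * Copy shader: each invocation moves 16 bytes. The source and destination VAs
 * occupy the first 16 push-constant bytes; size - 16 follows at offset 16.
 * The same offset clamp as in the fill shader handles the tail.
 */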
static nir_shader *
build_buffer_copy_shader(struct radv_device *dev)
{
   nir_builder b = radv_meta_init_shader(dev, MESA_SHADER_COMPUTE, "meta_buffer_copy");
   b.shader->info.workgroup_size[0] = 64;

   nir_ssa_def *pconst = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
   nir_ssa_def *size_minus16 =
      nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 16, .range = 4);
   nir_ssa_def *src_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b0011));
   nir_ssa_def *dst_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b1100));

   nir_ssa_def *global_id =
      nir_iadd(&b,
               nir_imul_imm(&b, nir_channel(&b, nir_load_workgroup_id(&b, 32), 0),
                            b.shader->info.workgroup_size[0]),
               nir_load_local_invocation_index(&b));

   nir_ssa_def *offset = nir_u2u64(&b, nir_imin(&b, nir_imul_imm(&b, global_id, 16), size_minus16));

   nir_ssa_def *data =
      nir_build_load_global(&b, 4, 32, nir_iadd(&b, src_addr, offset), .align_mul = 4);
   nir_build_store_global(&b, data, nir_iadd(&b, dst_addr, offset), .align_mul = 4);

   return b.shader;
}

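/* These must match the push-constant layout read by the shaders above. */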
struct fill_constants {
   uint64_t addr;
   uint32_t size_minus16;
   uint32_t data;
};

struct copy_constants {
   uint64_t src_addr;
   uint64_t dst_addr;
   uint32_t size_minus16;
};

VkResult
radv_device_init_meta_buffer_state(struct radv_device *device)
{
   VkResult result;
   nir_shader *fill_cs = build_buffer_fill_shader(device);
   nir_shader *copy_cs = build_buffer_copy_shader(device);

   VkPipelineLayoutCreateInfo fill_pl_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
      .setLayoutCount = 0,
      .pushConstantRangeCount = 1,
      .pPushConstantRanges =
         &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(struct fill_constants)},
   };

   result = radv_CreatePipelineLayout(radv_device_to_handle(device), &fill_pl_create_info,
                                      &device->meta_state.alloc,
                                      &device->meta_state.buffer.fill_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineLayoutCreateInfo copy_pl_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
      .setLayoutCount = 0,
      .pushConstantRangeCount = 1,
      .pPushConstantRanges =
         &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(struct copy_constants)},
   };

   result = radv_CreatePipelineLayout(radv_device_to_handle(device), &copy_pl_create_info,
                                      &device->meta_state.alloc,
                                      &device->meta_state.buffer.copy_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineShaderStageCreateInfo fill_pipeline_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(fill_cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo fill_vk_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = fill_pipeline_shader_stage,
      .flags = 0,
      .layout = device->meta_state.buffer.fill_p_layout,
   };

   result = radv_CreateComputePipelines(
      radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
      &fill_vk_pipeline_info, NULL, &device->meta_state.buffer.fill_pipeline);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineShaderStageCreateInfo copy_pipeline_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(copy_cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo copy_vk_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = copy_pipeline_shader_stage,
      .flags = 0,
      .layout = device->meta_state.buffer.copy_p_layout,
   };

   result = radv_CreateComputePipelines(
      radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
      &copy_vk_pipeline_info, NULL, &device->meta_state.buffer.copy_pipeline);
   if (result != VK_SUCCESS)
      goto fail;

   ralloc_free(fill_cs);
   ralloc_free(copy_cs);
   return VK_SUCCESS;
fail:
   ralloc_free(fill_cs);
   ralloc_free(copy_cs);
   return result;
}

void
radv_device_finish_meta_buffer_state(struct radv_device *device)
{
   struct radv_meta_state *state = &device->meta_state;

   radv_DestroyPipeline(radv_device_to_handle(device), state->buffer.copy_pipeline, &state->alloc);
   radv_DestroyPipeline(radv_device_to_handle(device), state->buffer.fill_pipeline, &state->alloc);
   radv_DestroyPipelineLayout(radv_device_to_handle(device), state->buffer.copy_p_layout,
                              &state->alloc);
   radv_DestroyPipelineLayout(radv_device_to_handle(device), state->buffer.fill_p_layout,
                              &state->alloc);
}

static void
fill_buffer_shader(struct radv_cmd_buffer *cmd_buffer, uint64_t va, uint64_t size, uint32_t data)
{
   struct radv_device *device = cmd_buffer->device;
   struct radv_meta_saved_state saved_state;

   radv_meta_save(
      &saved_state, cmd_buffer,
      RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_CONSTANTS | RADV_META_SAVE_DESCRIPTORS);

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
                        device->meta_state.buffer.fill_pipeline);

   assert(size >= 16 && size <= UINT32_MAX);

   struct fill_constants fill_consts = {
      .addr = va,
      .size_minus16 = size - 16,
      .data = data,
   };

   radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
                         device->meta_state.buffer.fill_p_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0,
                         sizeof(fill_consts), &fill_consts);

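   /* One invocation per 16 bytes; radv_unaligned_dispatch() takes invocation
    * counts rather than workgroup counts.
    */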
   radv_unaligned_dispatch(cmd_buffer, DIV_ROUND_UP(size, 16), 1, 1);

   radv_meta_restore(&saved_state, cmd_buffer);
}

static void
copy_buffer_shader(struct radv_cmd_buffer *cmd_buffer, uint64_t src_va, uint64_t dst_va,
                   uint64_t size)
{
   struct radv_device *device = cmd_buffer->device;
   struct radv_meta_saved_state saved_state;

   radv_meta_save(
      &saved_state, cmd_buffer,
      RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_CONSTANTS | RADV_META_SAVE_DESCRIPTORS);

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
                        device->meta_state.buffer.copy_pipeline);

   assert(size >= 16 && size <= UINT32_MAX);

   struct copy_constants copy_consts = {
      .src_addr = src_va,
      .dst_addr = dst_va,
      .size_minus16 = size - 16,
   };

   radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
                         device->meta_state.buffer.copy_p_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0,
                         sizeof(copy_consts), &copy_consts);

   radv_unaligned_dispatch(cmd_buffer, DIV_ROUND_UP(size, 16), 1, 1);

   radv_meta_restore(&saved_state, cmd_buffer);
}

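/*
 * Pick between the compute path and CP DMA: compute wins for transfers of at
 * least RADV_BUFFER_OPS_CS_THRESHOLD bytes, except that GFX10+ dGPUs prefer
 * CP DMA whenever either buffer lives outside VRAM, because shader access
 * over PCIe is slow.
 */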
static bool
radv_prefer_compute_dma(const struct radv_device *device, uint64_t size,
                        struct radeon_winsys_bo *src_bo, struct radeon_winsys_bo *dst_bo)
{
   bool use_compute = size >= RADV_BUFFER_OPS_CS_THRESHOLD;

   if (device->physical_device->rad_info.gfx_level >= GFX10 &&
       device->physical_device->rad_info.has_dedicated_vram) {
      if ((src_bo && !(src_bo->initial_domain & RADEON_DOMAIN_VRAM)) ||
          (dst_bo && !(dst_bo->initial_domain & RADEON_DOMAIN_VRAM))) {
         /* Prefer CP DMA for GTT on dGPUs due to slow PCIe. */
         use_compute = false;
      }
   }

   return use_compute;
}

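/*
 * Fill 'size' bytes at 'va' with 'value'. Returns the flush bits the caller
 * is expected to apply before the filled memory is read; 'image' (optional)
 * only feeds the access-flush computation.
 */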
uint32_t
radv_fill_buffer(struct radv_cmd_buffer *cmd_buffer, const struct radv_image *image,
                 struct radeon_winsys_bo *bo, uint64_t va, uint64_t size, uint32_t value)
{
   bool use_compute = radv_prefer_compute_dma(cmd_buffer->device, size, NULL, bo);
   uint32_t flush_bits = 0;

   assert(!(va & 3));
   assert(!(size & 3));

   if (bo)
      radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, bo);

   if (use_compute) {
      cmd_buffer->state.flush_bits |=
         radv_dst_access_flush(cmd_buffer, VK_ACCESS_2_SHADER_WRITE_BIT, image);

      fill_buffer_shader(cmd_buffer, va, size, value);

      flush_bits = RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_VCACHE |
                   radv_src_access_flush(cmd_buffer, VK_ACCESS_2_SHADER_WRITE_BIT, image);
   } else if (size)
      si_cp_dma_clear_buffer(cmd_buffer, va, size, value);

   return flush_bits;
}

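/*
 * The compute path requires dword-aligned offsets and size; anything else
 * falls back to CP DMA, which imposes no such alignment here.
 */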
void
radv_copy_buffer(struct radv_cmd_buffer *cmd_buffer, struct radeon_winsys_bo *src_bo,
                 struct radeon_winsys_bo *dst_bo, uint64_t src_offset, uint64_t dst_offset,
                 uint64_t size)
{
   bool use_compute = !(size & 3) && !(src_offset & 3) && !(dst_offset & 3) &&
                      radv_prefer_compute_dma(cmd_buffer->device, size, src_bo, dst_bo);

   uint64_t src_va = radv_buffer_get_va(src_bo) + src_offset;
   uint64_t dst_va = radv_buffer_get_va(dst_bo) + dst_offset;

   radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, src_bo);
   radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_bo);

   if (use_compute)
      copy_buffer_shader(cmd_buffer, src_va, dst_va, size);
   else if (size)
      si_cp_dma_buffer_copy(cmd_buffer, src_va, dst_va, size);
}

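/*
 * vkCmdFillBuffer: vk_buffer_range() resolves VK_WHOLE_SIZE against the
 * buffer bounds, and the result is masked down to a multiple of 4 as the
 * spec requires for whole-size fills.
 */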
VKAPI_ATTR void VKAPI_CALL
radv_CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                   VkDeviceSize fillSize, uint32_t data)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);

   fillSize = vk_buffer_range(&dst_buffer->vk, dstOffset, fillSize) & ~3ull;

   radv_fill_buffer(cmd_buffer, NULL, dst_buffer->bo,
                    radv_buffer_get_va(dst_buffer->bo) + dst_buffer->offset + dstOffset, fillSize,
                    data);
}

static void
copy_buffer(struct radv_cmd_buffer *cmd_buffer, struct radv_buffer *src_buffer,
            struct radv_buffer *dst_buffer, const VkBufferCopy2 *region)
{
   bool old_predicating;

   /* VK_EXT_conditional_rendering says that copy commands should not be
    * affected by conditional rendering.
    */
   old_predicating = cmd_buffer->state.predicating;
   cmd_buffer->state.predicating = false;

   radv_copy_buffer(cmd_buffer, src_buffer->bo, dst_buffer->bo,
                    src_buffer->offset + region->srcOffset, dst_buffer->offset + region->dstOffset,
                    region->size);

   /* Restore conditional rendering. */
   cmd_buffer->state.predicating = old_predicating;
}

VKAPI_ATTR void VKAPI_CALL
radv_CmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfo)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_buffer, src_buffer, pCopyBufferInfo->srcBuffer);
   RADV_FROM_HANDLE(radv_buffer, dst_buffer, pCopyBufferInfo->dstBuffer);

   for (unsigned r = 0; r < pCopyBufferInfo->regionCount; r++) {
      copy_buffer(cmd_buffer, src_buffer, dst_buffer, &pCopyBufferInfo->pRegions[r]);
   }
}

void
radv_update_buffer_cp(struct radv_cmd_buffer *cmd_buffer, uint64_t va, const void *data,
                      uint64_t size)
{
   uint64_t words = size / 4;
   bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);

   assert(size < RADV_BUFFER_UPDATE_THRESHOLD);

   si_emit_cache_flush(cmd_buffer);
   radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, words + 4);

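   /* WRITE_DATA packet: header, control dword, 64-bit destination VA, then
    * the inline payload, i.e. the words + 4 dwords reserved above.
    */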
   radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + words, 0));
   radeon_emit(cmd_buffer->cs, S_370_DST_SEL(mec ? V_370_MEM : V_370_MEM_GRBM) |
                                  S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_ME));
   radeon_emit(cmd_buffer->cs, va);
   radeon_emit(cmd_buffer->cs, va >> 32);
   radeon_emit_array(cmd_buffer->cs, data, words);

   if (unlikely(cmd_buffer->device->trace_bo))
      radv_cmd_buffer_trace_emit(cmd_buffer);
}

VKAPI_ATTR void VKAPI_CALL
radv_CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                     VkDeviceSize dataSize, const void *pData)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
   uint64_t va = radv_buffer_get_va(dst_buffer->bo);
   va += dstOffset + dst_buffer->offset;

   assert(!(dataSize & 3));
   assert(!(va & 3));

   if (!dataSize)
      return;

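   /* Small updates are written inline with a CP WRITE_DATA packet; larger
    * ones are staged through the upload BO and copied to the destination.
    */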
   if (dataSize < RADV_BUFFER_UPDATE_THRESHOLD) {
      radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);
      radv_update_buffer_cp(cmd_buffer, va, pData, dataSize);
   } else {
      uint32_t buf_offset;
      radv_cmd_buffer_upload_data(cmd_buffer, dataSize, pData, &buf_offset);
      radv_copy_buffer(cmd_buffer, cmd_buffer->upload.upload_bo, dst_buffer->bo, buf_offset,
                       dstOffset + dst_buffer->offset, dataSize);
   }
}