/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_batch.c
 *
 * Batchbuffer and command submission module.
 *
 * Every API draw call results in a number of GPU commands, which we
 * collect into a "batch buffer".  Typically, many draw calls are grouped
 * into a single batch to amortize command submission overhead.
 *
 * We submit batches to the kernel using the I915_GEM_EXECBUFFER2 ioctl.
 * One critical piece of data is the "validation list", which contains a
 * list of the buffer objects (BOs) which the commands in the batch need.
 * The kernel will make sure these are resident and pinned at the correct
 * virtual memory address before executing our batch.  If a BO is not in
 * the validation list, it effectively does not exist, so take care.
 */

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"
#include "iris_utrace.h"

#include "drm-uapi/i915_drm.h"

#include "common/intel_aux_map.h"
#include "intel/common/intel_gem.h"
#include "intel/ds/intel_tracepoints.h"
#include "util/hash_table.h"
#include "util/debug.h"
#include "util/set.h"
#include "util/u_upload_mgr.h"

#include <errno.h>
#include <xf86drm.h>

#if HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

#define FILE_DEBUG_FLAG DEBUG_BUFMGR

static void
iris_batch_reset(struct iris_batch *batch);

static unsigned
num_fences(struct iris_batch *batch)
{
   return util_dynarray_num_elements(&batch->exec_fences,
                                     struct drm_i915_gem_exec_fence);
}

/**
 * Debugging code to dump the fence list, used by INTEL_DEBUG=submit.
 */
static void
dump_fence_list(struct iris_batch *batch)
{
   fprintf(stderr, "Fence list (length %u):      ", num_fences(batch));

   util_dynarray_foreach(&batch->exec_fences,
                         struct drm_i915_gem_exec_fence, f) {
      fprintf(stderr, "%s%u%s ",
              (f->flags & I915_EXEC_FENCE_WAIT) ? "..." : "",
              f->handle,
              (f->flags & I915_EXEC_FENCE_SIGNAL) ? "!" : "");
   }

   fprintf(stderr, "\n");
}

/**
 * Debugging code to dump the validation list, used by INTEL_DEBUG=submit.
 */
static void
dump_bo_list(struct iris_batch *batch)
{
   fprintf(stderr, "BO list (length %d):\n", batch->exec_count);

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];
      struct iris_bo *backing = iris_get_backing_bo(bo);
      bool written = BITSET_TEST(batch->bos_written, i);
      bool exported = iris_bo_is_exported(bo);
      bool imported = iris_bo_is_imported(bo);

      fprintf(stderr, "[%2d]: %3d (%3d) %-14s @ 0x%016"PRIx64" (%-15s %8"PRIu64"B) %2d refs %s%s%s\n",
              i,
              bo->gem_handle,
              backing->gem_handle,
              bo->name,
              bo->address,
              iris_heap_to_string[backing->real.heap],
              bo->size,
              bo->refcount,
              written ? " write" : "",
              exported ? " exported" : "",
              imported ? " imported" : "");
   }
}

/**
 * Return BO information to the batch decoder (for debugging).
 */
static struct intel_batch_decode_bo
decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
{
   struct iris_batch *batch = v_batch;

   assert(ppgtt);

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];
      /* The decoder zeroes out the top 16 bits, so we need to as well */
      uint64_t bo_address = bo->address & (~0ull >> 16);

      if (address >= bo_address && address < bo_address + bo->size) {
         return (struct intel_batch_decode_bo) {
            .addr = bo_address,
            .size = bo->size,
            .map = iris_bo_map(batch->dbg, bo, MAP_READ | MAP_ASYNC),
         };
      }
   }

   return (struct intel_batch_decode_bo) { };
}

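/**
 * Return the size of the dynamic state recorded at the given address,
 * so the batch decoder knows how much data to dump (for debugging).
 */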
static unsigned
decode_get_state_size(void *v_batch,
                      uint64_t address,
                      UNUSED uint64_t base_address)
{
   struct iris_batch *batch = v_batch;
   unsigned size = (uintptr_t)
      _mesa_hash_table_u64_search(batch->state_sizes, address);

   return size;
}

/**
 * Decode the current batch.
 */
static void
decode_batch(struct iris_batch *batch)
{
   void *map = iris_bo_map(batch->dbg, batch->exec_bos[0], MAP_READ);
   intel_print_batch(&batch->decoder, map, batch->primary_batch_size,
                     batch->exec_bos[0]->address, false);
}

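/**
 * Initialize a batch: set up fine-grained fences, the validation list,
 * cross-batch tracking, the (optional) batch decoder, and tracing, then
 * reset it so it's ready to receive commands.
 */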
static void
iris_init_batch(struct iris_context *ice,
                enum iris_batch_name name)
{
   struct iris_batch *batch = &ice->batches[name];
   struct iris_screen *screen = (void *) ice->ctx.screen;

   /* Note: screen, ctx_id, exec_flags and has_engines_context fields are
    * initialized at an earlier phase when contexts are created.
    *
    * See iris_init_batches(), which calls either iris_init_engines_context()
    * or iris_init_non_engine_contexts().
    */

   batch->dbg = &ice->dbg;
   batch->reset = &ice->reset;
   batch->state_sizes = ice->state.sizes;
   batch->name = name;
   batch->ice = ice;
   batch->contains_fence_signal = false;

   batch->fine_fences.uploader =
      u_upload_create(&ice->ctx, 4096, PIPE_BIND_CUSTOM,
                      PIPE_USAGE_STAGING, 0);
   iris_fine_fence_init(batch);

   util_dynarray_init(&batch->exec_fences, ralloc_context(NULL));
   util_dynarray_init(&batch->syncobjs, ralloc_context(NULL));

   batch->exec_count = 0;
   batch->max_gem_handle = 0;
   batch->exec_array_size = 128;
   batch->exec_bos =
      malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
   batch->bos_written =
      rzalloc_array(NULL, BITSET_WORD, BITSET_WORDS(batch->exec_array_size));

   batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                 _mesa_key_pointer_equal);

   batch->num_other_batches = 0;
   memset(batch->other_batches, 0, sizeof(batch->other_batches));

   iris_foreach_batch(ice, other_batch) {
      if (batch != other_batch)
         batch->other_batches[batch->num_other_batches++] = other_batch;
   }

   if (INTEL_DEBUG(DEBUG_ANY)) {
      const unsigned decode_flags =
         INTEL_BATCH_DECODE_FULL |
         (INTEL_DEBUG(DEBUG_COLOR) ? INTEL_BATCH_DECODE_IN_COLOR : 0) |
         INTEL_BATCH_DECODE_OFFSETS |
         INTEL_BATCH_DECODE_FLOATS;

      intel_batch_decode_ctx_init(&batch->decoder, &screen->compiler->isa,
                                  &screen->devinfo,
                                  stderr, decode_flags, NULL,
                                  decode_get_bo, decode_get_state_size, batch);
      batch->decoder.dynamic_base = IRIS_MEMZONE_DYNAMIC_START;
      batch->decoder.instruction_base = IRIS_MEMZONE_SHADER_START;
      batch->decoder.surface_base = IRIS_MEMZONE_BINDER_START;
      batch->decoder.max_vbo_decoded_lines = 32;
      if (batch->name == IRIS_BATCH_BLITTER)
         batch->decoder.engine = I915_ENGINE_CLASS_COPY;
   }

   iris_init_batch_measure(ice, batch);

   u_trace_init(&batch->trace, &ice->ds.trace_context);

   iris_batch_reset(batch);
}

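/**
 * Fallback path for kernels without engines-context support: create one
 * legacy hardware context per batch and select the engine via exec_flags.
 */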
static void
iris_init_non_engine_contexts(struct iris_context *ice, int priority)
{
   struct iris_screen *screen = (void *) ice->ctx.screen;

   iris_foreach_batch(ice, batch) {
      batch->ctx_id = iris_create_hw_context(screen->bufmgr);
      batch->exec_flags = I915_EXEC_RENDER;
      batch->has_engines_context = false;
      assert(batch->ctx_id);
      iris_hw_context_set_priority(screen->bufmgr, batch->ctx_id, priority);
   }

   ice->batches[IRIS_BATCH_BLITTER].exec_flags = I915_EXEC_BLT;
}

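/**
 * Create a single kernel context with one engine per batch (render,
 * compute, and, on Gfx12+, blitter).  Returns the context ID, or -1 on
 * failure.
 */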
static int
iris_create_engines_context(struct iris_context *ice, int priority)
{
   struct iris_screen *screen = (void *) ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   int fd = iris_bufmgr_get_fd(screen->bufmgr);

   struct drm_i915_query_engine_info *engines_info =
      intel_i915_query_alloc(fd, DRM_I915_QUERY_ENGINE_INFO, NULL);

   if (!engines_info)
      return -1;

   if (intel_gem_count_engines(engines_info, I915_ENGINE_CLASS_RENDER) < 1) {
      free(engines_info);
      return -1;
   }

   STATIC_ASSERT(IRIS_BATCH_COUNT == 3);
   uint16_t engine_classes[IRIS_BATCH_COUNT] = {
      [IRIS_BATCH_RENDER] = I915_ENGINE_CLASS_RENDER,
      [IRIS_BATCH_COMPUTE] = I915_ENGINE_CLASS_RENDER,
      [IRIS_BATCH_BLITTER] = I915_ENGINE_CLASS_COPY,
   };

   /* Blitter is only supported on Gfx12+ */
   unsigned num_batches = IRIS_BATCH_COUNT - (devinfo->ver >= 12 ? 0 : 1);

   if (env_var_as_boolean("INTEL_COMPUTE_CLASS", false) &&
       intel_gem_count_engines(engines_info, I915_ENGINE_CLASS_COMPUTE) > 0)
      engine_classes[IRIS_BATCH_COMPUTE] = I915_ENGINE_CLASS_COMPUTE;

   int engines_ctx =
      intel_gem_create_context_engines(fd, engines_info, num_batches,
                                       engine_classes);

   if (engines_ctx < 0) {
      free(engines_info);
      return -1;
   }

   iris_hw_context_set_unrecoverable(screen->bufmgr, engines_ctx);
   iris_hw_context_set_vm_id(screen->bufmgr, engines_ctx);

   free(engines_info);
   return engines_ctx;
}

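/**
 * Set up all batches to share a single engines-based kernel context,
 * with each batch selecting its engine by index via exec_flags.
 * Returns false if engines contexts aren't available.
 */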
static bool
iris_init_engines_context(struct iris_context *ice, int priority)
{
   int engines_ctx = iris_create_engines_context(ice, priority);
   if (engines_ctx < 0)
      return false;

   struct iris_screen *screen = (void *) ice->ctx.screen;
   iris_hw_context_set_priority(screen->bufmgr, engines_ctx, priority);

   iris_foreach_batch(ice, batch) {
      unsigned i = batch - &ice->batches[0];
      batch->ctx_id = engines_ctx;
      batch->exec_flags = i;
      batch->has_engines_context = true;
   }

   return true;
}

void
iris_init_batches(struct iris_context *ice, int priority)
{
   /* We have to do this early for iris_foreach_batch() to work */
   for (int i = 0; i < IRIS_BATCH_COUNT; i++)
      ice->batches[i].screen = (void *) ice->ctx.screen;

   if (!iris_init_engines_context(ice, priority))
      iris_init_non_engine_contexts(ice, priority);
   iris_foreach_batch(ice, batch)
      iris_init_batch(ice, batch - &ice->batches[0]);
}

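/**
 * Return the index of a BO in the batch's validation list, or -1 if the
 * batch doesn't reference it.
 */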
static int
find_exec_index(struct iris_batch *batch, struct iris_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);

   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return index;

   /* May have been shared between multiple active batches */
   for (index = 0; index < batch->exec_count; index++) {
      if (batch->exec_bos[index] == bo)
         return index;
   }

   return -1;
}

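/**
 * Grow the validation list arrays (exec_bos and bos_written) as needed
 * so that at least count more BOs can be added.
 */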
static void
ensure_exec_obj_space(struct iris_batch *batch, uint32_t count)
{
   while (batch->exec_count + count > batch->exec_array_size) {
      unsigned old_size = batch->exec_array_size;

      batch->exec_array_size *= 2;
      batch->exec_bos =
         realloc(batch->exec_bos,
                 batch->exec_array_size * sizeof(batch->exec_bos[0]));
      batch->bos_written =
         rerzalloc(NULL, batch->bos_written, BITSET_WORD,
                   BITSET_WORDS(old_size),
                   BITSET_WORDS(batch->exec_array_size));
   }
}

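/**
 * Add a BO to the batch's validation list, taking a reference and
 * tracking whether the batch writes it.  The caller must have already
 * ensured there is space via ensure_exec_obj_space().
 */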
static void
add_bo_to_batch(struct iris_batch *batch, struct iris_bo *bo, bool writable)
{
   assert(batch->exec_array_size > batch->exec_count);

   iris_bo_reference(bo);

   batch->exec_bos[batch->exec_count] = bo;

   if (writable)
      BITSET_SET(batch->bos_written, batch->exec_count);

   bo->index = batch->exec_count;
   batch->exec_count++;
   batch->aperture_space += bo->size;

   batch->max_gem_handle =
      MAX2(batch->max_gem_handle, iris_get_backing_bo(bo)->gem_handle);
}

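/**
 * Flush any other batches that also reference this BO, if either batch
 * writes it, so that cross-batch read/write ordering is preserved.
 */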
static void
flush_for_cross_batch_dependencies(struct iris_batch *batch,
                                   struct iris_bo *bo,
                                   bool writable)
{
   if (batch->measure && bo == batch->measure->bo)
      return;

   /* When a batch uses a buffer for the first time, or newly writes a buffer
    * it had already referenced, we may need to flush other batches in order
    * to correctly synchronize them.
    */
   for (int b = 0; b < batch->num_other_batches; b++) {
      struct iris_batch *other_batch = batch->other_batches[b];
      int other_index = find_exec_index(other_batch, bo);

      /* If the buffer is referenced by another batch, and either batch
       * intends to write it, then flush the other batch and synchronize.
       *
       * Consider these cases:
       *
       * 1. They read, we read   =>  No synchronization required.
       * 2. They read, we write  =>  Synchronize (they need the old value)
       * 3. They write, we read  =>  Synchronize (we need their new value)
       * 4. They write, we write =>  Synchronize (order writes)
       *
       * The read/read case is very common, as multiple batches usually
       * share a streaming state buffer or shader assembly buffer, and
       * we want to avoid synchronizing in this case.
       */
      if (other_index != -1 &&
          (writable || BITSET_TEST(other_batch->bos_written, other_index)))
         iris_batch_flush(other_batch);
   }
}

/**
 * Add a buffer to the current batch's validation list.
 *
 * You must call this on any BO you wish to use in this batch, to ensure
 * that it's resident when the GPU commands execute.
 */
void
iris_use_pinned_bo(struct iris_batch *batch,
                   struct iris_bo *bo,
                   bool writable, enum iris_domain access)
{
   assert(iris_get_backing_bo(bo)->real.kflags & EXEC_OBJECT_PINNED);
   assert(bo != batch->bo);

   /* Never mark the workaround BO with EXEC_OBJECT_WRITE.  We don't care
    * about the order of any writes to that buffer, and marking it writable
    * would introduce data dependencies between multiple batches which share
    * the buffer. It is added directly to the batch using add_bo_to_batch()
    * during batch reset time.
    */
   if (bo == batch->screen->workaround_bo)
      return;

   if (access < NUM_IRIS_DOMAINS) {
      assert(batch->sync_region_depth);
      iris_bo_bump_seqno(bo, batch->next_seqno, access);
   }

   int existing_index = find_exec_index(batch, bo);

   if (existing_index == -1) {
      flush_for_cross_batch_dependencies(batch, bo, writable);

      ensure_exec_obj_space(batch, 1);
      add_bo_to_batch(batch, bo, writable);
   } else if (writable && !BITSET_TEST(batch->bos_written, existing_index)) {
      flush_for_cross_batch_dependencies(batch, bo, writable);

      /* The BO is already in the list; mark it writable */
      BITSET_SET(batch->bos_written, existing_index);
   }
}

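/**
 * Allocate a new command buffer BO, flag it for error-state capture,
 * map it, and add it to the batch's validation list.
 */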
static void
create_batch(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   /* TODO: We probably could suballocate batches... */
   batch->bo = iris_bo_alloc(bufmgr, "command buffer",
                             BATCH_SZ + BATCH_RESERVED, 8,
                             IRIS_MEMZONE_OTHER, BO_ALLOC_NO_SUBALLOC);
   iris_get_backing_bo(batch->bo)->real.kflags |= EXEC_OBJECT_CAPTURE;
   batch->map = iris_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
   batch->map_next = batch->map;

   ensure_exec_obj_space(batch, 1);
   add_bo_to_batch(batch, batch->bo, false);
}

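/**
 * If noop mode is enabled, emit MI_BATCH_BUFFER_END at the start of the
 * (empty) batch so that nothing recorded afterwards will execute.
 */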
static void
iris_batch_maybe_noop(struct iris_batch *batch)
{
   /* We only insert the NOOP at the beginning of the batch. */
   assert(iris_batch_bytes_used(batch) == 0);

   if (batch->noop_enabled) {
      /* Emit MI_BATCH_BUFFER_END to prevent any further commands from
       * being executed.
       */
      uint32_t *map = batch->map_next;

      map[0] = (0xA << 23);

      batch->map_next += 4;
   }
}

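/**
 * Reset a batch after submission: allocate a fresh command buffer,
 * clear the written-BO tracking, add a new signalling syncobj and the
 * workaround BO, and restart tracing.
 */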
static void
iris_batch_reset(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;
   const struct intel_device_info *devinfo = &screen->devinfo;

   u_trace_fini(&batch->trace);

   iris_bo_unreference(batch->bo);
   batch->primary_batch_size = 0;
   batch->total_chained_batch_size = 0;
   batch->contains_draw = false;
   batch->contains_fence_signal = false;
   if (devinfo->ver < 11)
      batch->decoder.surface_base = batch->last_binder_address;
   else
      batch->decoder.bt_pool_base = batch->last_binder_address;

   create_batch(batch);
   assert(batch->bo->index == 0);

   memset(batch->bos_written, 0,
          sizeof(BITSET_WORD) * BITSET_WORDS(batch->exec_array_size));

   struct iris_syncobj *syncobj = iris_create_syncobj(bufmgr);
   iris_batch_add_syncobj(batch, syncobj, I915_EXEC_FENCE_SIGNAL);
   iris_syncobj_reference(bufmgr, &syncobj, NULL);

   assert(!batch->sync_region_depth);
   iris_batch_sync_boundary(batch);
   iris_batch_mark_reset_sync(batch);

   /* Always add the workaround BO; it contains a driver identifier at the
    * beginning, which is quite helpful when debugging error states.
    */
   add_bo_to_batch(batch, screen->workaround_bo, false);

   iris_batch_maybe_noop(batch);

   u_trace_init(&batch->trace, &batch->ice->ds.trace_context);
   batch->begin_trace_recorded = false;
}

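/**
 * Tear down a batch: drop all validation list references, fences,
 * syncobjs, the command buffer, and (for legacy contexts) the kernel
 * context, along with measurement, tracing, and decoder state.
 */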
static void
iris_batch_free(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
   }
   free(batch->exec_bos);
   ralloc_free(batch->bos_written);

   ralloc_free(batch->exec_fences.mem_ctx);

   pipe_resource_reference(&batch->fine_fences.ref.res, NULL);

   util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)
      iris_syncobj_reference(bufmgr, s, NULL);
   ralloc_free(batch->syncobjs.mem_ctx);

   iris_fine_fence_reference(batch->screen, &batch->last_fence, NULL);
   u_upload_destroy(batch->fine_fences.uploader);

   iris_bo_unreference(batch->bo);
   batch->bo = NULL;
   batch->map = NULL;
   batch->map_next = NULL;

   /* iris_destroy_batches() will destroy engines contexts. */
   if (!batch->has_engines_context)
      iris_destroy_kernel_context(bufmgr, batch->ctx_id);

   iris_destroy_batch_measure(batch->measure);
   batch->measure = NULL;

   u_trace_fini(&batch->trace);

   _mesa_hash_table_destroy(batch->cache.render, NULL);

   if (INTEL_DEBUG(DEBUG_ANY))
      intel_batch_decode_ctx_finish(&batch->decoder);
}

void
iris_destroy_batches(struct iris_context *ice)
{
   /* If we are using an engines context, then a single kernel context is
    * created, with multiple hardware contexts. So, we only need to destroy
    * the context on the first batch.
    */
   if (ice->batches[0].has_engines_context) {
      iris_destroy_kernel_context(ice->batches[0].screen->bufmgr,
                                  ice->batches[0].ctx_id);
   }

   iris_foreach_batch(ice, batch)
      iris_batch_free(batch);
}

/**
 * If we've chained to a secondary batch, or are getting near to the end,
 * then flush.  This should only be called between draws.
 */
void
iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate)
{
   if (batch->bo != batch->exec_bos[0] ||
       iris_batch_bytes_used(batch) + estimate >= BATCH_SZ) {
      iris_batch_flush(batch);
   }
}

static void
record_batch_sizes(struct iris_batch *batch)
{
   unsigned batch_size = iris_batch_bytes_used(batch);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->map, batch_size));

   if (batch->bo == batch->exec_bos[0])
      batch->primary_batch_size = batch_size;

   batch->total_chained_batch_size += batch_size;
}

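/**
 * Start a new command buffer and chain to it from the current one by
 * emitting MI_BATCH_BUFFER_START at the end of the old buffer.
 */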
void
iris_chain_to_new_batch(struct iris_batch *batch)
{
   uint32_t *cmd = batch->map_next;
   uint64_t *addr = batch->map_next + 4;
   batch->map_next += 12;

   record_batch_sizes(batch);

   /* No longer held by batch->bo, still held by validation list */
   iris_bo_unreference(batch->bo);
   create_batch(batch);

   /* Emit MI_BATCH_BUFFER_START to chain to another batch. */
   *cmd = (0x31 << 23) | (1 << 8) | (3 - 2);
   *addr = batch->bo->address;
}

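/**
 * Add the aux-map buffer objects (if any) to the batch's validation
 * list, so the auxiliary map tables stay resident during execution.
 */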
static void
add_aux_map_bos_to_batch(struct iris_batch *batch)
{
   void *aux_map_ctx = iris_bufmgr_get_aux_map_context(batch->screen->bufmgr);
   if (!aux_map_ctx)
      return;

   uint32_t count = intel_aux_map_get_num_buffers(aux_map_ctx);
   ensure_exec_obj_space(batch, count);
   intel_aux_map_fill_bos(aux_map_ctx,
                          (void**)&batch->exec_bos[batch->exec_count], count);
   for (uint32_t i = 0; i < count; i++) {
      struct iris_bo *bo = batch->exec_bos[batch->exec_count];
      add_bo_to_batch(batch, bo, false);
   }
}

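/**
 * Record an end-of-batch fine-grained fence and remember it as the
 * batch's last fence.
 */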
static void
finish_seqno(struct iris_batch *batch)
{
   struct iris_fine_fence *sq = iris_fine_fence_new(batch, IRIS_FENCE_END);
   if (!sq)
      return;

   iris_fine_fence_reference(batch->screen, &batch->last_fence, sq);
   iris_fine_fence_reference(batch->screen, &sq, NULL);
}

/**
 * Terminate a batch with MI_BATCH_BUFFER_END.
 */
static void
iris_finish_batch(struct iris_batch *batch)
{
   const struct intel_device_info *devinfo = &batch->screen->devinfo;

   if (devinfo->ver == 12 && batch->name == IRIS_BATCH_RENDER) {
      /* We re-emit constants at the beginning of every batch as a hardware
       * bug workaround, so invalidate indirect state pointers in order to
       * save ourselves the overhead of restoring constants redundantly when
       * the next render batch is executed.
       */
      iris_emit_pipe_control_flush(batch, "ISP invalidate at batch end",
                                   PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE |
                                   PIPE_CONTROL_STALL_AT_SCOREBOARD |
                                   PIPE_CONTROL_CS_STALL);
   }

   add_aux_map_bos_to_batch(batch);

   finish_seqno(batch);

   trace_intel_end_batch(&batch->trace, batch->name);

   /* Emit MI_BATCH_BUFFER_END to finish our batch. */
   uint32_t *map = batch->map_next;

   map[0] = (0xA << 23);

   batch->map_next += 4;

   record_batch_sizes(batch);
}

/**
 * Replace our current GEM context with a new one (in case it got banned).
 */
static bool
replace_kernel_ctx(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   if (batch->has_engines_context) {
      struct iris_context *ice = batch->ice;
      int priority = iris_kernel_context_get_priority(bufmgr, batch->ctx_id);
      uint32_t old_ctx = batch->ctx_id;
      int new_ctx = iris_create_engines_context(ice, priority);
      if (new_ctx < 0)
         return false;
      iris_foreach_batch(ice, bat) {
         bat->ctx_id = new_ctx;
         /* Notify the context that state must be re-initialized. */
         iris_lost_context_state(bat);
      }
      iris_destroy_kernel_context(bufmgr, old_ctx);
   } else {
      uint32_t new_ctx = iris_clone_hw_context(bufmgr, batch->ctx_id);
      if (!new_ctx)
         return false;

      iris_destroy_kernel_context(bufmgr, batch->ctx_id);
      batch->ctx_id = new_ctx;

      /* Notify the context that state must be re-initialized. */
      iris_lost_context_state(batch);
   }

   return true;
}

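/**
 * Query the kernel's reset statistics for this context and report
 * whether a GPU reset occurred while our work was active or pending.
 * If so, the (likely banned) context is replaced with a fresh one.
 */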
enum pipe_reset_status
iris_batch_check_for_reset(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   enum pipe_reset_status status = PIPE_NO_RESET;
   struct drm_i915_reset_stats stats = { .ctx_id = batch->ctx_id };

   if (intel_ioctl(screen->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
      DBG("DRM_IOCTL_I915_GET_RESET_STATS failed: %s\n", strerror(errno));

   if (stats.batch_active != 0) {
      /* A reset was observed while a batch from this hardware context was
       * executing.  Assume that this context was at fault.
       */
      status = PIPE_GUILTY_CONTEXT_RESET;
   } else if (stats.batch_pending != 0) {
      /* A reset was observed while a batch from this context was in progress,
       * but the batch was not executing.  In this case, assume that the
       * context was not at fault.
       */
      status = PIPE_INNOCENT_CONTEXT_RESET;
   }

   if (status != PIPE_NO_RESET) {
      /* Our context is likely banned, or at least in an unknown state.
       * Throw it away and start with a fresh context.  Ideally this may
       * catch the problem before our next execbuf fails with -EIO.
       */
      replace_kernel_ctx(batch);
   }

   return status;
}

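/**
 * Transfer a syncobj reference into the batch: add it to the batch's
 * syncobj list with the given flags (unless it's already there) and
 * drop the caller's reference.
 */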
static void
move_syncobj_to_batch(struct iris_batch *batch,
                      struct iris_syncobj **p_syncobj,
                      unsigned flags)
{
   struct iris_bufmgr *bufmgr = batch->screen->bufmgr;

   if (!*p_syncobj)
      return;

   bool found = false;
   util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s) {
      if (*p_syncobj == *s) {
         found = true;
         break;
      }
   }

   if (!found)
      iris_batch_add_syncobj(batch, *p_syncobj, flags);

   iris_syncobj_reference(bufmgr, p_syncobj, NULL);
}

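/**
 * Make the batch wait on the syncobjs of prior conflicting accesses to
 * this BO (writes by any batch; reads too if we're writing), then record
 * this batch's signal syncobj as the BO's latest read or write.
 */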
static void
update_bo_syncobjs(struct iris_batch *batch, struct iris_bo *bo, bool write)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;
   struct iris_context *ice = batch->ice;

   /* Make sure bo->deps is big enough */
   if (screen->id >= bo->deps_size) {
      int new_size = screen->id + 1;
      bo->deps = realloc(bo->deps, new_size * sizeof(bo->deps[0]));
      memset(&bo->deps[bo->deps_size], 0,
             sizeof(bo->deps[0]) * (new_size - bo->deps_size));

      bo->deps_size = new_size;
   }

   /* When it comes to execbuf submission of non-shared buffers, we only need
    * to care about the reads and writes done by the other batches of our own
    * screen, and we also don't care about the reads and writes done by our
    * own batch, although we need to track them. Just note that other places of
    * our code may need to care about all the operations done by every batch
    * on every screen.
    */
   struct iris_bo_screen_deps *bo_deps = &bo->deps[screen->id];
   int batch_idx = batch->name;

   /* Make our batch depend on additional syncobjs depending on what other
    * batches have been doing to this bo.
    *
    * We also look at the dependencies set by our own batch since those could
    * have come from a different context, and apps don't like it when we don't
    * do inter-context tracking.
    */
   iris_foreach_batch(ice, batch_i) {
      unsigned i = batch_i->name;

      /* If the bo is being written to by others, wait for them. */
      if (bo_deps->write_syncobjs[i])
         move_syncobj_to_batch(batch, &bo_deps->write_syncobjs[i],
                               I915_EXEC_FENCE_WAIT);

      /* If we're writing to the bo, wait on the reads from other batches. */
      if (write)
         move_syncobj_to_batch(batch, &bo_deps->read_syncobjs[i],
                               I915_EXEC_FENCE_WAIT);
   }

   struct iris_syncobj *batch_syncobj =
      iris_batch_get_signal_syncobj(batch);

   /* Update bo_deps depending on what we're doing with the bo in this batch
    * by putting the batch's syncobj in the bo_deps lists accordingly. Only
    * keep track of the last time we wrote to or read the BO.
    */
   if (write) {
      iris_syncobj_reference(bufmgr, &bo_deps->write_syncobjs[batch_idx],
                             batch_syncobj);
   } else {
      iris_syncobj_reference(bufmgr, &bo_deps->read_syncobjs[batch_idx],
                             batch_syncobj);
   }
}

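/**
 * Walk the validation list and update the syncobj dependency tracking
 * for every BO in the batch (except the workaround BO).
 */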
static void
update_batch_syncobjs(struct iris_batch *batch)
{
   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];
      bool write = BITSET_TEST(batch->bos_written, i);

      if (bo == batch->screen->workaround_bo)
         continue;

      update_bo_syncobjs(batch, bo, write);
   }
}

/**
 * Submit the batch to the GPU via execbuffer2.
 */
static int
submit_batch(struct iris_batch *batch)
{
   struct iris_bufmgr *bufmgr = batch->screen->bufmgr;
   simple_mtx_t *bo_deps_lock = iris_bufmgr_get_bo_deps_lock(bufmgr);

   iris_bo_unmap(batch->bo);

   struct drm_i915_gem_exec_object2 *validation_list =
      malloc(batch->exec_count * sizeof(*validation_list));

   unsigned *index_for_handle =
      calloc(batch->max_gem_handle + 1, sizeof(unsigned));

   unsigned validation_count = 0;
   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = iris_get_backing_bo(batch->exec_bos[i]);
      assert(bo->gem_handle != 0);

      bool written = BITSET_TEST(batch->bos_written, i);
      unsigned prev_index = index_for_handle[bo->gem_handle];
      if (prev_index > 0) {
         if (written)
            validation_list[prev_index].flags |= EXEC_OBJECT_WRITE;
      } else {
         index_for_handle[bo->gem_handle] = validation_count;
         validation_list[validation_count] =
            (struct drm_i915_gem_exec_object2) {
               .handle = bo->gem_handle,
               .offset = bo->address,
               .flags  = bo->real.kflags | (written ? EXEC_OBJECT_WRITE : 0) |
                         (iris_bo_is_external(bo) ? 0 : EXEC_OBJECT_ASYNC),
            };
         ++validation_count;
      }
   }

   free(index_for_handle);

   /* The decode operation may map and wait on the batch buffer, which could
    * in theory try to grab bo_deps_lock. Let's keep it safe and decode
    * outside the lock.
    */
   if (INTEL_DEBUG(DEBUG_BATCH))
      decode_batch(batch);

   simple_mtx_lock(bo_deps_lock);

   update_batch_syncobjs(batch);

   if (INTEL_DEBUG(DEBUG_BATCH | DEBUG_SUBMIT)) {
      dump_fence_list(batch);
      dump_bo_list(batch);
   }

   /* The requirements for using I915_EXEC_NO_RELOC are:
    *
    *   The addresses written in the objects must match the corresponding
    *   reloc.address which in turn must match the corresponding
    *   execobject.offset.
    *
    *   Any render targets written to in the batch must be flagged with
    *   EXEC_OBJECT_WRITE.
    *
    *   To avoid stalling, execobject.offset should match the current
    *   address of that object within the active context.
    */
   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t) validation_list,
      .buffer_count = validation_count,
      .batch_start_offset = 0,
      /* This must be QWord aligned. */
      .batch_len = ALIGN(batch->primary_batch_size, 8),
      .flags = batch->exec_flags |
               I915_EXEC_NO_RELOC |
               I915_EXEC_BATCH_FIRST |
               I915_EXEC_HANDLE_LUT,
      .rsvd1 = batch->ctx_id, /* rsvd1 is actually the context ID */
   };

   if (num_fences(batch)) {
      execbuf.flags |= I915_EXEC_FENCE_ARRAY;
      execbuf.num_cliprects = num_fences(batch);
      execbuf.cliprects_ptr =
         (uintptr_t)util_dynarray_begin(&batch->exec_fences);
   }

   int ret = 0;
   if (!batch->screen->devinfo.no_hw &&
       intel_ioctl(batch->screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
      ret = -errno;

   simple_mtx_unlock(bo_deps_lock);

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];

      bo->idle = false;
      bo->index = -1;

      iris_get_backing_bo(bo)->idle = false;

      iris_bo_unreference(bo);
   }

   free(validation_list);

   return ret;
}

const char *
iris_batch_name_to_string(enum iris_batch_name name)
{
   const char *names[IRIS_BATCH_COUNT] = {
      [IRIS_BATCH_RENDER]  = "render",
      [IRIS_BATCH_COMPUTE] = "compute",
      [IRIS_BATCH_BLITTER] = "blitter",
   };
   return names[name];
}

/**
 * Flush the batch buffer, submitting it to the GPU and resetting it so
 * we're ready to emit the next batch.
 */
void
_iris_batch_flush(struct iris_batch *batch, const char *file, int line)
{
   struct iris_screen *screen = batch->screen;
   struct iris_context *ice = batch->ice;

   /* If the batch is empty, there's nothing to submit, unless it contains
    * a fence signal which still needs to be flushed.
    */
   if (iris_batch_bytes_used(batch) == 0 && !batch->contains_fence_signal)
      return;

   iris_measure_batch_end(ice, batch);

   iris_finish_batch(batch);

   if (INTEL_DEBUG(DEBUG_BATCH | DEBUG_SUBMIT | DEBUG_PIPE_CONTROL)) {
      const char *basefile = strstr(file, "iris/");
      if (basefile)
         file = basefile + 5;

      fprintf(stderr, "%19s:%-3d: %s batch [%u] flush with %5db (%0.1f%%) "
              "(cmds), %4d BOs (%0.1fMb aperture)\n",
              file, line, iris_batch_name_to_string(batch->name), batch->ctx_id,
              batch->total_chained_batch_size,
              100.0f * batch->total_chained_batch_size / BATCH_SZ,
              batch->exec_count,
              (float) batch->aperture_space / (1024 * 1024));

   }

   uint64_t start_ts = intel_ds_begin_submit(batch->ds);
   uint64_t submission_id = batch->ds->submission_id;
   int ret = submit_batch(batch);
   intel_ds_end_submit(batch->ds, start_ts);

   /* When batch submission fails, our end-of-batch syncobj remains
    * unsignalled, and in fact is not even considered submitted.
    *
    * In the hang recovery case (-EIO) or -ENOMEM, we recreate our context and
    * attempt to carry on.  In that case, we need to signal our syncobj,
    * dubiously claiming that this batch completed, because future batches may
    * depend on it.  If we don't, then execbuf would fail with -EINVAL for
    * those batches, because they depend on a syncobj that's considered to be
    * "never submitted".  This would lead to an abort().  So here, we signal
    * the failing batch's syncobj to try and allow further progress to be
    * made, knowing we may have broken our dependency tracking.
    */
   if (ret < 0)
      iris_syncobj_signal(screen->bufmgr, iris_batch_get_signal_syncobj(batch));

   batch->exec_count = 0;
   batch->max_gem_handle = 0;
   batch->aperture_space = 0;

   util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)
      iris_syncobj_reference(screen->bufmgr, s, NULL);
   util_dynarray_clear(&batch->syncobjs);

   util_dynarray_clear(&batch->exec_fences);

   if (INTEL_DEBUG(DEBUG_SYNC)) {
      dbg_printf("waiting for idle\n");
      iris_bo_wait_rendering(batch->bo); /* if execbuf failed; this is a nop */
   }

   if (u_trace_context_actively_tracing(&ice->ds.trace_context))
      iris_utrace_flush(batch, submission_id);

   /* Start a new batch buffer. */
   iris_batch_reset(batch);

   /* EIO means our context is banned.  In this case, try and replace it
    * with a new logical context, and inform iris_context that all state
    * has been lost and needs to be re-initialized.  If this succeeds,
    * dubiously claim success...
    * Also handle ENOMEM here.
    */
   if ((ret == -EIO || ret == -ENOMEM) && replace_kernel_ctx(batch)) {
      if (batch->reset->reset) {
         /* Tell gallium frontends the device is lost and it was our fault. */
         batch->reset->reset(batch->reset->data, PIPE_GUILTY_CONTEXT_RESET);
      }

      ret = 0;
   }

   if (ret < 0) {
#ifdef DEBUG
      const bool color = INTEL_DEBUG(DEBUG_COLOR);
      fprintf(stderr, "%siris: Failed to submit batchbuffer: %-80s%s\n",
              color ? "\e[1;41m" : "", strerror(-ret), color ? "\e[0m" : "");
#endif
      abort();
   }
}

/**
 * Does the current batch refer to the given BO?
 *
 * (In other words, is the BO in the current batch's validation list?)
 */
bool
iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
{
   return find_exec_index(batch, bo) != -1;
}

/**
 * Updates the state of the noop feature.  Returns true if there was a noop
 * transition that led to state invalidation.
 */
bool
iris_batch_prepare_noop(struct iris_batch *batch, bool noop_enable)
{
   if (batch->noop_enabled == noop_enable)
      return false;

   batch->noop_enabled = noop_enable;

   iris_batch_flush(batch);

   /* If the batch was empty, flush had no effect, so insert our noop. */
   if (iris_batch_bytes_used(batch) == 0)
      iris_batch_maybe_noop(batch);

   /* We only need to update the entire state if we transition from noop ->
    * not-noop.
    */
   return !batch->noop_enabled;
}