Lines Matching refs:batch

47 /* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END or 12
54 /* Our target batch size - flush approximately at this point. */
69 /** What batch is this? (e.g. IRIS_BATCH_RENDER/COMPUTE) */
77 /** Size of the primary batch being submitted to execbuf (in bytes). */
90 /** A list of all BOs referenced by this batch */
94 /** Bitset of whether this batch writes to BO `i'. */
98 /** Whether INTEL_BLACKHOLE_RENDER is enabled in the batch (aka first
108 * A list of iris_syncobjs associated with this batch.
111 * that this batch has completed. The others are likely to be sync-points
112 * to wait on before executing the batch.
134 /** A seqno (and syncobj) for the last batch that was submitted. */
155 * current end point of the batch. For every i and j,
174 * operations in the batch until the next sync boundary.
178 /** Have we emitted any draw calls to this batch? */
189 * matching iris_batch_sync_region_end() on this batch.
204 void iris_chain_to_new_batch(struct iris_batch *batch);
206 void iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate);
208 void _iris_batch_flush(struct iris_batch *batch, const char *file, int line);
209 #define iris_batch_flush(batch) _iris_batch_flush((batch), __FILE__, __LINE__)
211 bool iris_batch_references(struct iris_batch *batch, struct iris_bo *bo);
213 bool iris_batch_prepare_noop(struct iris_batch *batch, bool noop_enable);
217 void iris_use_pinned_bo(struct iris_batch *batch, struct iris_bo *bo,
220 enum pipe_reset_status iris_batch_check_for_reset(struct iris_batch *batch);
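Taken together, these entry points cover the usual submission flow. A minimal
sketch, assuming `batch` and `bo` are already in scope (the byte estimate is
illustrative):

   /* Flush early if roughly 1500 more bytes would overrun the target size. */
   iris_batch_maybe_flush(batch, 1500);

   /* ... emit commands referencing bo ... */

   /* Submit any batch still referencing bo; the iris_batch_flush()
    * macro records __FILE__/__LINE__ for debugging. */
   if (iris_batch_references(batch, bo))
      iris_batch_flush(batch);

   /* After a GPU hang, query what (if anything) was lost. */
   enum pipe_reset_status status = iris_batch_check_for_reset(batch);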
223 iris_batch_bytes_used(struct iris_batch *batch)
225 return batch->map_next - batch->map;
230 * remaining. If not, this creates a secondary batch buffer and emits
231 * a jump from the primary batch to the start of the secondary.
236 iris_require_command_space(struct iris_batch *batch, unsigned size)
238 const unsigned required_bytes = iris_batch_bytes_used(batch) + size;
241 iris_chain_to_new_batch(batch);
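The listing skips the threshold test between these two lines. A plausible
reconstruction, where the exact condition and the constant names are
assumptions echoing the size comments at the top of the listing:

   static inline void
   iris_require_command_space(struct iris_batch *batch, unsigned size)
   {
      const unsigned required_bytes = iris_batch_bytes_used(batch) + size;

      /* Assumed: keep enough slack to terminate the batch
       * (MI_BATCH_BUFFER_END) or chain to a new one
       * (MI_BATCH_BUFFER_START). */
      if (required_bytes >= BATCH_SZ - BATCH_RESERVED)
         iris_chain_to_new_batch(batch);
   }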
252 iris_get_command_space(struct iris_batch *batch, unsigned bytes)
254 if (!batch->begin_trace_recorded) {
255 batch->begin_trace_recorded = true;
256 trace_intel_begin_batch(&batch->trace);
258 iris_require_command_space(batch, bytes);
259 void *map = batch->map_next;
260 batch->map_next += bytes;
268 iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size)
270 void *map = iris_get_command_space(batch, size);
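Both helpers in use, as a sketch with an illustrative payload:

   /* Copy a pre-packed command into the batch. */
   const uint32_t mi_noop = 0;
   iris_batch_emit(batch, &mi_noop, sizeof(mi_noop));

   /* Or reserve space and pack in place; map_next was already advanced,
    * so the returned pointer is exclusively ours until the next emit. */
   uint32_t *dw = iris_get_command_space(batch, 4 * sizeof(uint32_t));
   dw[0] = 0; /* ... pack four dwords ... */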
275 * Get a pointer to the batch's signalling syncobj. Does not refcount.
278 iris_batch_get_signal_syncobj(struct iris_batch *batch)
282 ((struct iris_syncobj **) util_dynarray_begin(&batch->syncobjs))[0];
288 * Take a reference to the batch's signalling syncobj.
290 * Callers can use this to wait for the current batch under construction
294 iris_batch_reference_signal_syncobj(struct iris_batch *batch,
297 struct iris_syncobj *syncobj = iris_batch_get_signal_syncobj(batch);
298 iris_syncobj_reference(batch->screen->bufmgr, out_syncobj, syncobj);
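A caller would pair the reference helper with a wait once the batch is
submitted. Sketch only: the wait helper's name and signature are assumptions,
not part of this listing:

   struct iris_syncobj *fence = NULL;
   iris_batch_reference_signal_syncobj(batch, &fence);
   iris_batch_flush(batch);

   /* Hypothetical wait entry point from the fence code. */
   iris_wait_syncobj(bufmgr, fence, INT64_MAX);

   /* Drop our reference (a NULL source unreferences, per the usual
    * reference-helper convention; assumed here). */
   iris_syncobj_reference(bufmgr, &fence, NULL);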
316 * Mark the start of a region in the batch with a stable synchronization
317 * sequence number. Any buffer object accessed by the batch buffer only needs
322 iris_batch_sync_region_start(struct iris_batch *batch)
324 batch->sync_region_depth++;
328 * Mark the end of a region in the batch with a stable synchronization sequence
333 iris_batch_sync_region_end(struct iris_batch *batch)
335 assert(batch->sync_region_depth);
336 batch->sync_region_depth--;
340 * Start a new synchronization section at the current point of the batch,
344 iris_batch_sync_boundary(struct iris_batch *batch)
346 if (!batch->sync_region_depth) {
347 batch->contains_draw_with_next_seqno = false;
348 batch->next_seqno = p_atomic_inc_return(&batch->screen->last_seqno);
349 assert(batch->next_seqno > 0);
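How the region and boundary helpers nest, as a sketch; the trailing arguments
of iris_use_pinned_bo and the domain name are assumed, since the listing
truncates that prototype:

   iris_batch_sync_region_start(batch);

   /* Every BO pinned inside the region shares one synchronization
    * sequence number, so repeated accesses are tracked only once. */
   iris_use_pinned_bo(batch, bo, /* writable */ true,
                      IRIS_DOMAIN_RENDER_WRITE);

   /* ... emit the commands that access bo ... */

   iris_batch_sync_region_end(batch);

   /* After emitting a sync point (e.g. a PIPE_CONTROL), open a fresh
    * section so later accesses get a new sequence number. */
   iris_batch_sync_boundary(batch);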
354 * Update the cache coherency status of the batch to reflect a flush of the
358 iris_batch_mark_flush_sync(struct iris_batch *batch,
361 const struct intel_device_info *devinfo = &batch->screen->devinfo;
364 batch->l3_coherent_seqnos[access] = batch->next_seqno - 1;
366 batch->coherent_seqnos[access][access] = batch->next_seqno - 1;
370 * Update the cache coherency status of the batch to reflect an invalidation
375 iris_batch_mark_invalidate_sync(struct iris_batch *batch,
378 const struct intel_device_info *devinfo = &batch->screen->devinfo;
392 batch->coherent_seqnos[access][i] =
394 batch->l3_coherent_seqnos[i] : batch->coherent_seqnos[i][i];
401 batch->coherent_seqnos[access][i] = batch->l3_coherent_seqnos[i];
407 batch->coherent_seqnos[access][i] = batch->coherent_seqnos[i][i];
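The two markers are typically paired around a cache-management command:
flush the writing domain, then invalidate the reading one. The domain names
below are assumptions; the listing never shows the enum:

   /* Render-target writes are now flushed to the point of coherency... */
   iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_RENDER_WRITE);

   /* ...and the sampler's view is invalidated, so texturing observes
    * everything written up to next_seqno - 1. */
   iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_SAMPLER_READ);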
413 * Update the cache coherency status of the batch to reflect a reset. All
415 * thanks to the kernel's heavyweight flushing at batch buffer boundaries.
418 iris_batch_mark_reset_sync(struct iris_batch *batch)
421 batch->l3_coherent_seqnos[i] = batch->next_seqno - 1;
423 batch->coherent_seqnos[i][j] = batch->next_seqno - 1;
430 #define iris_foreach_batch(ice, batch) \
431 for (struct iris_batch *batch = &ice->batches[0]; \
432 batch <= &ice->batches[((struct iris_screen *)ice->ctx.screen)->devinfo.ver >= 12 ? IRIS_BATCH_BLITTER : IRIS_BATCH_COMPUTE]; \
433 ++batch)
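The iterator visits only the batches that exist on the current hardware (the
blitter batch requires Gfx12+). Typical use, submitting every batch that
still references a BO:

   iris_foreach_batch(ice, batch) {
      if (iris_batch_references(batch, bo))
         iris_batch_flush(batch);
   }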