
Searched refs:batches (Results 1 - 25 of 53) sorted by relevance

/third_party/mesa3d/src/gallium/drivers/freedreno/
freedreno_batch_cache.c
63 * So each resource holds a hashset of batches which have reference them
144 /* Flushes all batches in the batch cache. Used at glFlush() and similar times. */
151 * can cause batches to be unref'd and freed under our feet, so grab
152 * a reference to all the batches we need up-front.
154 struct fd_batch *batches[ARRAY_SIZE(cache->batches)] = {0}; variable
162 fd_batch_reference_locked(&batches[n++], batch);
168 * batch. So when the current batch gets flushed, all other batches
175 if (batches[i] && (batches[
188 fd_batch_flush(batches[i]); global() variable
221 struct fd_batch *batch, *batches[32] = {}; global() variable
235 fd_batch_flush(batches[i]); global() variable
[all...]
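
The freedreno_batch_cache.c comments above describe a flush-all pattern: flushing one batch can unref and free other batches "under our feet", so the cache first grabs its own reference to every batch under the lock and only then flushes. A minimal sketch of that pattern, using simplified stand-in types and helpers (batch_reference_locked, batch_flush, and so on are invented here, not the real freedreno API):

#include <pthread.h>

#define CACHE_SIZE 32

/* Simplified stand-ins for the real freedreno types and helpers. */
struct fd_batch {
   int refcount;
   int dirty;
};

struct fd_batch_cache {
   pthread_mutex_t lock;
   struct fd_batch *batches[CACHE_SIZE];
};

static void batch_reference_locked(struct fd_batch **dst, struct fd_batch *src)
{
   src->refcount++;          /* real code also drops the old reference in *dst */
   *dst = src;
}

static void batch_unreference(struct fd_batch *batch)
{
   batch->refcount--;        /* real code frees the batch when this hits zero */
}

static void batch_flush(struct fd_batch *batch)
{
   batch->dirty = 0;         /* stand-in for submitting the batch's work */
}

/* Flush every batch in the cache.  Flushing one batch may unref and free
 * others, so snapshot references under the lock before flushing anything. */
static void batch_cache_flush_all(struct fd_batch_cache *cache)
{
   struct fd_batch *snapshot[CACHE_SIZE] = {0};
   unsigned n = 0;

   pthread_mutex_lock(&cache->lock);
   for (unsigned i = 0; i < CACHE_SIZE; i++) {
      if (cache->batches[i])
         batch_reference_locked(&snapshot[n++], cache->batches[i]);
   }
   pthread_mutex_unlock(&cache->lock);

   /* The snapshot keeps each batch alive even if a flush drops the cache's
    * own reference to it. */
   for (unsigned i = 0; i < n; i++) {
      batch_flush(snapshot[i]);
      batch_unreference(snapshot[i]);
   }
}

int main(void)
{
   struct fd_batch b0 = { .refcount = 1, .dirty = 1 };
   struct fd_batch_cache cache = { .lock = PTHREAD_MUTEX_INITIALIZER };

   cache.batches[0] = &b0;
   batch_cache_flush_all(&cache);
   return 0;
}

The local snapshot array plays the same role as the batches[ARRAY_SIZE(cache->batches)] array in the hits above: it keeps each batch alive until it has been flushed, regardless of what the flush does to the cache's own references.
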
freedreno_batch_cache.h
45 /* set of active batches.. there is an upper limit on the number of
46 * in-flight batches, for two reasons:
47 * 1) to avoid big spikes in number of batches in edge cases, such as
51 * batches have reference to the resource
53 struct fd_batch *batches[32]; member
57 /* note: if batches get unref'd in the body of the loop, they are removed
64 _m && ((batch) = (cache)->batches[u_bit_scan(&_m)]); _m &= (mask))
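
freedreno_batch_cache.h caps the number of in-flight batches (32 slots) and walks the occupied slots with a bitmask scan; Mesa's util code provides u_bit_scan for this. A sketch of the same iteration pattern with a local bit_scan helper and invented type names (not the real foreach_batch macro); __builtin_ctz assumes GCC or Clang:

#include <stdint.h>
#include <stdio.h>

/* Index of the lowest set bit, clearing it from the local mask
 * (mirrors what Mesa's u_bit_scan does). */
static int bit_scan(uint32_t *mask)
{
   int i = __builtin_ctz(*mask);
   *mask &= *mask - 1;
   return i;
}

struct batch {
   int id;
};

struct batch_cache {
   uint32_t batch_mask;        /* bit i set => batches[i] is in flight */
   struct batch *batches[32];  /* hard upper limit on in-flight batches */
};

/* Visit occupied slots, lowest index first.  Re-reading the live mask in the
 * increment drops any batch whose bit was cleared inside the loop body. */
#define foreach_batch(b, cache, mask)                                      \
   for (uint32_t _m = (mask);                                              \
        _m && ((b) = (cache)->batches[bit_scan(&_m)]); _m &= (mask))

int main(void)
{
   struct batch a = { .id = 1 }, c = { .id = 5 };
   struct batch_cache cache = { .batch_mask = (1u << 1) | (1u << 5) };
   cache.batches[1] = &a;
   cache.batches[5] = &c;

   struct batch *b;
   foreach_batch(b, &cache, cache.batch_mask)
      printf("flushing batch %d\n", b->id);
   return 0;
}

Re-reading the live mask in the loop increment is what lets batches be unref'd (and their bit cleared) inside the body without the iteration touching them afterwards, as the header's note describes.
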
/third_party/mesa3d/src/gallium/drivers/d3d12/
d3d12_context.h
180 struct d3d12_batch batches[4]; member
286 assert(ctx->current_batch_idx < ARRAY_SIZE(ctx->batches)); in d3d12_current_batch()
287 return ctx->batches + ctx->current_batch_idx; in d3d12_current_batch()
291 unsigned oldest = (ctx->current_batch_idx + 1) % ARRAY_SIZE(ctx->batches); \
292 while (ctx->batches[oldest].fence == NULL && oldest != ctx->current_batch_idx) \
293 oldest = (oldest + 1) % ARRAY_SIZE(ctx->batches); \
294 struct d3d12_batch *batch = &ctx->batches[oldest]; \
296 oldest = (oldest + 1) % ARRAY_SIZE(ctx->batches), \
297 batch = &ctx->batches[oldest])
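
The d3d12_context.h hits show a ring of four batches plus a macro that visits the previously submitted ones: it starts one slot past the current batch, skips slots whose fence has already retired, and walks forward with modular arithmetic until it reaches the current batch again. A compilable sketch of that walk with hypothetical names (not the real d3d12 types):

#include <stdio.h>

#define NUM_BATCHES 4

struct batch {
   void *fence;   /* non-NULL while the GPU may still be working on it */
};

struct context {
   struct batch batches[NUM_BATCHES];
   unsigned current_batch_idx;
};

int main(void)
{
   /* Pretend slots 2 and 3 are still in flight and slot 0 is current. */
   struct context ctx = { .current_batch_idx = 0 };
   int pending = 1;
   ctx.batches[2].fence = &pending;
   ctx.batches[3].fence = &pending;

   /* Start one slot past the current batch, skip slots whose work already
    * retired (fence == NULL), then walk forward up to the current batch. */
   unsigned oldest = (ctx.current_batch_idx + 1) % NUM_BATCHES;
   while (ctx.batches[oldest].fence == NULL && oldest != ctx.current_batch_idx)
      oldest = (oldest + 1) % NUM_BATCHES;

   for (struct batch *batch = &ctx.batches[oldest];
        oldest != ctx.current_batch_idx;
        oldest = (oldest + 1) % NUM_BATCHES, batch = &ctx.batches[oldest])
      printf("submitted batch %u, fence %p\n", oldest, batch->fence);

   return 0;
}
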
/third_party/mesa3d/src/gallium/drivers/crocus/
crocus_pipe_control.c
299 struct crocus_batch *render_batch = &ice->batches[CROCUS_BATCH_RENDER]; in crocus_texture_barrier()
300 struct crocus_batch *compute_batch = &ice->batches[CROCUS_BATCH_COMPUTE]; in crocus_texture_barrier()
336 const struct intel_device_info *devinfo = &ice->batches[0].screen->devinfo; in crocus_memory_barrier()
363 if (ice->batches[i].contains_draw) { in crocus_memory_barrier()
364 crocus_batch_maybe_flush(&ice->batches[i], 24); in crocus_memory_barrier()
365 crocus_emit_pipe_control_flush(&ice->batches[i], "API: memory barrier", in crocus_memory_barrier()
crocus_fence.c
116 * to stale render batches that are no longer of interest, so we can free
220 crocus_batch_flush(&ice->batches[i]); in crocus_fence_flush()
236 struct crocus_batch *batch = &ice->batches[b]; in crocus_fence_flush()
276 struct crocus_batch *batch = &ice->batches[b]; in crocus_fence_await()
341 if (fine->syncobj == crocus_batch_get_signal_syncobj(&ice->batches[i])) in crocus_fence_finish()
342 crocus_batch_flush(&ice->batches[i]); in crocus_fence_finish()
451 * batches had already completed, their syncobj's had been signalled, in crocus_fence_get_fd()
550 ice->batches[b].contains_fence_signal = true; in crocus_fence_signal()
551 crocus_batch_add_syncobj(&ice->batches[b], fine->syncobj, in crocus_fence_signal()
554 if (ice->batches[ in crocus_fence_signal()
[all...]
crocus_context.c
117 crocus_batch_check_for_reset(&ice->batches[i]); in crocus_get_device_reset_status()
205 crocus_batch_free(&ice->batches[CROCUS_BATCH_RENDER]); in crocus_destroy_context()
206 if (ice->batches[CROCUS_BATCH_COMPUTE].ice) in crocus_destroy_context()
207 crocus_batch_free(&ice->batches[CROCUS_BATCH_COMPUTE]); in crocus_destroy_context()
318 screen->vtbl.init_render_context(&ice->batches[CROCUS_BATCH_RENDER]); in crocus_create_context()
320 screen->vtbl.init_compute_context(&ice->batches[CROCUS_BATCH_COMPUTE]); in crocus_create_context()
crocus_monitor.c
130 crocus_emit_pipe_control_flush(&ice->batches[CROCUS_BATCH_RENDER],
142 struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER]; in crocus_monitor_emit_mi_report_perf_count()
151 _crocus_batch_flush(&ice->batches[CROCUS_BATCH_RENDER], __FILE__, __LINE__); in crocus_monitor_batchbuffer_flush()
161 struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER];
170 struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER];
319 ice->batches[CROCUS_BATCH_RENDER].hw_ctx_id, in crocus_init_monitor_ctx()
434 struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER]; in crocus_get_monitor_result()
crocus_query.c
155 struct crocus_batch *batch = &ice->batches[q->batch_idx]; in mark_available()
192 struct crocus_batch *batch = &ice->batches[q->batch_idx]; in write_value()
210 crocus_pipelined_write(&ice->batches[CROCUS_BATCH_RENDER], q, in write_value()
218 crocus_pipelined_write(&ice->batches[CROCUS_BATCH_RENDER], q, in write_value()
276 struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER]; in write_overflow_values()
598 struct crocus_batch *batch = &ice->batches[q->batch_idx]; in crocus_end_query()
671 struct crocus_batch *batch = &ice->batches[q->batch_idx]; in crocus_get_query_result()
713 struct crocus_batch *batch = &ice->batches[q->batch_idx]; in crocus_get_query_result_resource()
822 struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER]; in set_predicate_for_result()
crocus_draw.c
273 struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER]; in crocus_indirect_draw_vbo()
322 struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER]; in crocus_simple_draw_vbo()
373 struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER]; in crocus_draw_vbo()
482 struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_COMPUTE]; in crocus_launch_grid()
497 crocus_predraw_resolve_inputs(ice, &ice->batches[CROCUS_BATCH_RENDER], NULL, in crocus_launch_grid()
/third_party/mesa3d/src/mesa/main/
glthread.c
77 unsigned batch_index = batch - ctx->GLThread.batches; in glthread_unmarshal_batch()
120 glthread->batches[i].ctx = ctx; in _mesa_glthread_init()
121 util_queue_fence_init(&glthread->batches[i].fence); in _mesa_glthread_init()
123 glthread->next_batch = &glthread->batches[glthread->next]; in _mesa_glthread_init()
174 util_queue_fence_destroy(&glthread->batches[i].fence); in _mesa_glthread_destroy()
245 glthread->next_batch = &glthread->batches[glthread->next]; in _mesa_glthread_flush_batch()
250 * Waits for all pending batches have been unmarshaled.
270 struct glthread_batch *last = &glthread->batches[glthread->last]; in _mesa_glthread_finish()
291 /* It's not a sync because we don't enqueue partial batches, but in _mesa_glthread_finish()
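
The glthread.c hits outline the application-side batch ring: every slot owns a fence, next_batch points at the slot currently being recorded, flushing hands the batch to a worker and advances next, and the worker recovers the slot index by pointer arithmetic (batch - ctx->GLThread.batches). A minimal single-threaded sketch of that bookkeeping with hypothetical names; the real code enqueues the batch on a util_queue thread and waits on the destination slot's fence before reusing it:

#include <stdio.h>

#define MAX_BATCHES 4

/* Hypothetical, simplified stand-ins for the glthread structures. */
struct glthread_batch {
   int used;     /* pretend fill level of the marshalled command buffer */
   int fence;    /* 1 = the worker has finished unmarshalling this batch */
};

struct glthread_state {
   struct glthread_batch batches[MAX_BATCHES];
   struct glthread_batch *next_batch;   /* slot currently being recorded */
   unsigned next;                       /* index of next_batch */
   unsigned last;                       /* index of the last submitted batch */
};

/* The worker only receives a batch pointer; the slot index is recovered by
 * pointer arithmetic, as in "batch - ctx->GLThread.batches" above. */
static void unmarshal_batch(struct glthread_state *gl, struct glthread_batch *batch)
{
   unsigned batch_index = (unsigned)(batch - gl->batches);

   printf("worker: replaying %d calls from slot %u\n", batch->used, batch_index);
   batch->used = 0;
   batch->fence = 1;                    /* signal so the slot can be reused */
}

static void flush_batch(struct glthread_state *gl)
{
   gl->last = gl->next;
   gl->next_batch->fence = 0;           /* in flight until the worker signals */
   unmarshal_batch(gl, gl->next_batch); /* real code enqueues this on a thread */

   gl->next = (gl->next + 1) % MAX_BATCHES;
   gl->next_batch = &gl->batches[gl->next];
   /* Real code waits on this slot's fence here if the worker is still behind. */
}

int main(void)
{
   struct glthread_state gl = {0};

   for (unsigned i = 0; i < MAX_BATCHES; i++)
      gl.batches[i].fence = 1;          /* idle slots count as "done" */
   gl.next_batch = &gl.batches[0];

   gl.next_batch->used = 3;             /* pretend three GL calls were marshalled */
   flush_batch(&gl);
   return 0;
}
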
/third_party/mesa3d/src/gallium/drivers/iris/
iris_perf.c
35 iris_emit_end_of_pipe_sync(&ice->batches[IRIS_BATCH_RENDER], in iris_perf_emit_stall_at_pixel_scoreboard()
47 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER]; in iris_perf_emit_mi_report_perf_count()
55 _iris_batch_flush(&ice->batches[IRIS_BATCH_RENDER], __FILE__, __LINE__); in iris_perf_batchbuffer_flush()
64 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER]; in iris_perf_store_register_mem()
iris_query.c
134 struct iris_batch *batch = &ice->batches[q->batch_idx]; in mark_available()
172 struct iris_batch *batch = &ice->batches[q->batch_idx]; in write_value()
197 iris_pipelined_write(&ice->batches[IRIS_BATCH_RENDER], q, in write_value()
205 iris_pipelined_write(&ice->batches[IRIS_BATCH_RENDER], q, in write_value()
248 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER]; in write_overflow_values()
557 struct iris_batch *batch = &ice->batches[q->batch_idx]; in iris_end_query()
629 struct iris_batch *batch = &ice->batches[q->batch_idx]; in iris_get_query_result()
662 struct iris_batch *batch = &ice->batches[q->batch_idx]; in iris_get_query_result_resource()
766 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER]; in set_predicate_for_result()
iris_performance_query.c
81 ice->batches[IRIS_BATCH_RENDER].ctx_id, in iris_init_perf_query_info()
210 intel_perf_wait_query(perf_ctx, obj, &ice->batches[IRIS_BATCH_RENDER]); in iris_wait_perf_query()
227 &ice->batches[IRIS_BATCH_RENDER]); in iris_is_perf_query_ready()
243 intel_perf_get_query_data(perf_ctx, obj, &ice->batches[IRIS_BATCH_RENDER], in iris_get_perf_query_data()
iris_batch.c
32 * We submit batches to the kernel using the I915_GEM_EXECBUFFER2 ioctl.
182 struct iris_batch *batch = &ice->batches[name]; in iris_init_batch()
265 ice->batches[IRIS_BATCH_BLITTER].exec_flags = I915_EXEC_BLT; in iris_init_non_engine_contexts()
327 unsigned i = batch - &ice->batches[0]; in iris_init_engines_context()
341 ice->batches[i].screen = (void *) ice->ctx.screen; in iris_init_batches()
346 iris_init_batch(ice, batch - &ice->batches[0]); in iris_init_batches()
357 /* May have been shared between multiple active batches */ in find_exec_index()
412 * it had already referenced, we may need to flush other batches in order in flush_for_cross_batch_dependencies()
429 * The read/read case is very common, as multiple batches usually in flush_for_cross_batch_dependencies()
455 * would introduce data dependencies between multiple batches whic in iris_use_pinned_bo()
[all...]
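
The truncated iris_batch.c comments describe the cross-batch dependency rule applied when a BO is pinned into a batch: if another in-flight batch also references that BO, a flush is needed unless both uses are reads, because read/read sharing is common and harmless while any write would otherwise leave the two batches unordered. A sketch of that rule over simplified, invented structures (not the actual iris implementation, which handles more cases, e.g. external BOs):

#include <stdbool.h>
#include <stdio.h>

#define MAX_REFS 64
#define MAX_OTHER 3

/* Hypothetical, simplified stand-ins for the iris structures. */
struct bo {
   int handle;
};

struct batch {
   struct bo *refs[MAX_REFS];   /* BOs referenced by this batch */
   bool writes[MAX_REFS];       /* whether each reference is a write */
   int num_refs;
   struct batch *other_batches[MAX_OTHER];
   int num_other;
};

static bool batch_references(const struct batch *batch, const struct bo *bo,
                             bool *writes)
{
   for (int i = 0; i < batch->num_refs; i++) {
      if (batch->refs[i] == bo) {
         *writes = batch->writes[i];
         return true;
      }
   }
   return false;
}

static void batch_flush(struct batch *batch)
{
   batch->num_refs = 0;         /* stand-in for submitting the batch */
}

/* Before using `bo` in `batch`, flush any other in-flight batch that also
 * references it, unless both uses are reads: read/read needs no ordering,
 * while any write creates a cross-batch dependency. */
static void flush_for_cross_batch_dependencies(struct batch *batch,
                                               struct bo *bo, bool writable)
{
   for (int i = 0; i < batch->num_other; i++) {
      struct batch *other = batch->other_batches[i];
      bool other_writes = false;

      if (batch_references(other, bo, &other_writes) &&
          (writable || other_writes))
         batch_flush(other);
   }
}

int main(void)
{
   struct bo shared = { .handle = 7 };
   struct batch render = {0}, compute = {0};

   render.other_batches[render.num_other++] = &compute;
   compute.refs[compute.num_refs] = &shared;   /* compute reads the shared BO */
   compute.writes[compute.num_refs++] = false;

   flush_for_cross_batch_dependencies(&render, &shared, false);
   printf("after read:  compute refs = %d\n", compute.num_refs);   /* still 1 */
   flush_for_cross_batch_dependencies(&render, &shared, true);
   printf("after write: compute refs = %d\n", compute.num_refs);   /* flushed: 0 */
   return 0;
}
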
iris_clear.c
43 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER]; in iris_is_color_fast_clear_compatible()
170 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER]; in fast_clear_color()
317 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER]; in clear_color()
428 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER]; in fast_clear_depth()
523 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER]; in clear_depth_stencil()
iris_draw.c
191 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER]; in iris_indirect_draw_vbo()
244 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER]; in iris_simple_draw_vbo()
274 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER]; in iris_draw_vbo()
385 struct iris_batch *batch = &ice->batches[IRIS_BATCH_COMPUTE]; in iris_launch_grid()
iris_batch.h
80 /** Total size of all chained batches (in bytes). */
137 /** List of other batches which we might need to flush to use a BO */
431 for (struct iris_batch *batch = &ice->batches[0]; \
432 batch <= &ice->batches[((struct iris_screen *)ice->ctx.screen)->devinfo.ver >= 12 ? IRIS_BATCH_BLITTER : IRIS_BATCH_COMPUTE]; \
iris_pipe_control.c
170 * that any data dependencies between batches are satisfied.
173 * for concurrent updates from other batches, we provide the guarantee that a
356 struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER]; in iris_texture_barrier()
357 struct iris_batch *compute_batch = &ice->batches[IRIS_BATCH_COMPUTE]; in iris_texture_barrier()
iris_context.c
372 /* Do this before initializing the batches */ in iris_create_context()
377 screen->vtbl.init_render_context(&ice->batches[IRIS_BATCH_RENDER]); in iris_create_context()
378 screen->vtbl.init_compute_context(&ice->batches[IRIS_BATCH_COMPUTE]); in iris_create_context()
iris_monitor.c
173 ice->batches[IRIS_BATCH_RENDER].ctx_id, in iris_init_monitor_ctx()
287 struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER]; in iris_get_monitor_result()
/third_party/mesa3d/src/gallium/drivers/panfrost/
pan_job.c
43 BITSET_FOREACH_SET(idx, ctx->batches.active, PAN_MAX_BATCHES)
48 return batch - batch->ctx->batches.slots; in panfrost_batch_idx()
73 batch->seqnum = ++ctx->batches.seqnum; in panfrost_batch_init()
147 BITSET_CLEAR(ctx->batches.active, batch_idx); in panfrost_batch_cleanup()
161 if (ctx->batches.slots[i].seqnum && in panfrost_get_batch()
162 util_framebuffer_state_equal(&ctx->batches.slots[i].key, key)) { in panfrost_get_batch()
166 ctx->batches.slots[i].seqnum = ++ctx->batches.seqnum; in panfrost_get_batch()
167 return &ctx->batches.slots[i]; in panfrost_get_batch()
170 if (!batch || batch->seqnum > ctx->batches in panfrost_get_batch()
[all...]
/third_party/skia/third_party/externals/dawn/src/dawn_native/
IndirectDrawValidationEncoder.cpp
213 std::vector<Batch> batches; in EncodeIndirectDrawValidationCommands() member
216 // First stage is grouping all batches into passes. We try to pack as many batches into a in EncodeIndirectDrawValidationCommands()
268 currentPass->batches.push_back(newBatch); in EncodeIndirectDrawValidationCommands()
279 newPass.batches.push_back(newBatch); in EncodeIndirectDrawValidationCommands()
304 for (Batch& batch : pass.batches) { in EncodeIndirectDrawValidationCommands()
363 for (const Batch& batch : pass.batches) { in EncodeIndirectDrawValidationCommands()
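
The Dawn snippet describes the first stage of EncodeIndirectDrawValidationCommands: grouping validation batches into passes, packing as many batches into each pass as a size budget allows. A greedy sketch of that grouping (in C, for consistency with the other sketches) with an invented per-pass byte budget; the real encoder also has to respect binding-size limits, which this ignores:

#include <stdio.h>

#define MAX_PASS_BYTES 1024   /* stand-in for the real per-pass size budget */

struct batch {
   unsigned data_size;
};

struct pass {
   unsigned num_batches;
   unsigned total_size;
};

/* Greedy first stage: walk the batches in order and pack as many as possible
 * into the current pass; start a new pass when the next batch would exceed
 * the budget.  Returns the number of passes used. */
static unsigned group_batches_into_passes(const struct batch *batches, unsigned count,
                                          struct pass *passes, unsigned max_passes)
{
   unsigned num_passes = 0;

   for (unsigned i = 0; i < count; i++) {
      struct pass *cur = num_passes ? &passes[num_passes - 1] : NULL;

      if (!cur || cur->total_size + batches[i].data_size > MAX_PASS_BYTES) {
         if (num_passes == max_passes)
            break;                      /* out of pass slots */
         cur = &passes[num_passes++];
         cur->num_batches = 0;
         cur->total_size = 0;
      }
      cur->num_batches++;
      cur->total_size += batches[i].data_size;
   }
   return num_passes;
}

int main(void)
{
   struct batch batches[] = { {400}, {500}, {300}, {900}, {100} };
   struct pass passes[8];
   unsigned n = group_batches_into_passes(batches, 5, passes, 8);

   for (unsigned i = 0; i < n; i++)
      printf("pass %u: %u batches, %u bytes\n",
             i, passes[i].num_batches, passes[i].total_size);
   return 0;
}
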
/third_party/mesa3d/src/virtio/vulkan/
vn_renderer_util.h
26 .batches = in vn_renderer_submit_simple()
vn_queue.c
53 const void *batches; member
66 void *batches; member
157 submit->temp.batches = submit->temp.storage; in vn_queue_submission_alloc_storage()
329 const VkSubmitInfo *batches, in vn_queue_submit()
339 batches, fence_handle); in vn_queue_submit()
342 vn_async_vkQueueSubmit(instance, queue_handle, batch_count, batches, in vn_queue_submit()
398 /* TODO intercept original submit batches to append the fence feedback cmd in vn_QueueSubmit()
859 .batches = in vn_create_sync_file()
326 vn_queue_submit(struct vn_instance *instance, VkQueue queue_handle, uint32_t batch_count, const VkSubmitInfo *batches, VkFence fence_handle, bool sync_submit) vn_queue_submit() argument
vn_renderer_util.c
20 .batches = in vn_renderer_submit_simple_sync()
