Lines Matching defs:batch
46 * the actual batch buffers as well as link them together and handle
280 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
282 if (batch->next + num_dwords * 4 > batch->end) {
283 VkResult result = batch->extend_cb(batch, batch->user_data);
285 anv_batch_set_error(batch, result);
290 void *p = batch->next;
292 batch->next += num_dwords * 4;
293 assert(batch->next <= batch->end);
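
The matched lines at 280-293 give almost the whole reservation path. Reassembled below as a compilable sketch: the early return when extend_cb fails, the final return of p, and the stand-in type definitions are inferred, not Mesa's actual code. Later sketches in this listing reuse these stand-ins.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-ins for the driver types (assumptions for
     * illustration only; the real definitions live in anv_private.h). */
    typedef int32_t VkResult;
    #define VK_SUCCESS 0

    struct anv_bo         { char *map; size_t size; };
    struct anv_reloc_list { int _opaque; };
    struct anv_address    { struct anv_bo *bo; uint64_t offset; };

    struct anv_batch {
       struct anv_address start_addr;
       char *start, *next, *end;
       struct anv_reloc_list *relocs;
       const void *alloc;                 /* VkAllocationCallbacks* in anv */
       VkResult (*extend_cb)(struct anv_batch *batch, void *user_data);
       void *user_data;
       VkResult status;
    };

    static void
    anv_batch_set_error(struct anv_batch *batch, VkResult result)
    {
       batch->status = result;            /* sticky error, checked by callers */
    }

    /* Reserve num_dwords * 4 bytes at the write cursor, asking extend_cb
     * to chain or grow the batch when the request would pass the padded
     * end.  The assert holds because every extend leaves enough room. */
    void *
    anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
    {
       if (batch->next + num_dwords * 4 > batch->end) {
          VkResult result = batch->extend_cb(batch, batch->user_data);
          if (result != VK_SUCCESS) {
             anv_batch_set_error(batch, result);
             return NULL;
          }
       }

       void *p = batch->next;
       batch->next += num_dwords * 4;
       assert(batch->next <= batch->end);
       return p;
    }
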
299 anv_batch_address(struct anv_batch *batch, void *batch_location)
301 assert(batch->start <= batch_location);
303 /* Allow a jump at the current location of the batch. */
304 assert(batch->next >= batch_location);
306 return anv_address_add(batch->start_addr, batch_location - batch->start);
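
Note the asymmetry in the two asserts at 301-304: the second uses >= rather than >, which deliberately permits batch_location == batch->next, i.e. taking the GPU address of the spot the next command will occupy (used for the return address at line 1169 below). A short reassembly; anv_address_add is assumed to add a byte offset to an address:

    static struct anv_address
    anv_address_add(struct anv_address addr, uint64_t delta)
    {
       addr.offset += delta;              /* assumed helper behavior */
       return addr;
    }

    /* Translate a CPU pointer into the mapped batch into the matching
     * GPU address, relative to where the batch storage starts. */
    struct anv_address
    anv_batch_address(struct anv_batch *batch, void *batch_location)
    {
       assert(batch->start <= (char *)batch_location);
       /* Allow a jump at the current location of the batch. */
       assert(batch->next >= (char *)batch_location);

       return anv_address_add(batch->start_addr,
                              (char *)batch_location - batch->start);
    }
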
310 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
317 if (batch->next + size > batch->end) {
318 VkResult result = batch->extend_cb(batch, batch->user_data);
320 anv_batch_set_error(batch, result);
325 assert(batch->next + size <= batch->end);
328 memcpy(batch->next, other->start, size);
330 offset = batch->next - batch->start;
331 VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
334 anv_batch_set_error(batch, result);
338 batch->next += size;
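
Lines 310-338 outline how a whole batch is spliced into another by value: extend if the copy will not fit, memcpy the contents, then replay the source's relocation list at the copy's offset so patched addresses stay correct. In the sketch below, the computation of size, the error handling, and the exact anv_reloc_list_append signature are inferred:

    #include <string.h>

    /* Assumed declaration; the real function appends `other`'s relocations
     * to `list`, rebased by `offset` bytes. */
    VkResult anv_reloc_list_append(struct anv_reloc_list *list,
                                   const void *alloc,
                                   struct anv_reloc_list *other,
                                   uint32_t offset);

    void
    anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
    {
       size_t size = other->next - other->start;

       if (batch->next + size > batch->end) {
          VkResult result = batch->extend_cb(batch, batch->user_data);
          if (result != VK_SUCCESS) {
             anv_batch_set_error(batch, result);
             return;
          }
       }
       assert(batch->next + size <= batch->end);

       memcpy(batch->next, other->start, size);

       uint32_t offset = batch->next - batch->start;
       VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
                                               other->relocs, offset);
       if (result != VK_SUCCESS) {
          anv_batch_set_error(batch, result);
          return;
       }

       batch->next += size;
    }
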
415 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
418 anv_batch_set_storage(batch, (struct anv_address) { .bo = bbo->bo, },
420 batch->relocs = &bbo->relocs;
425 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
428 batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
429 batch->start = bbo->bo->map;
430 batch->next = bbo->bo->map + bbo->length;
431 batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
432 batch->relocs = &bbo->relocs;
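
Lines 415-432 establish the key invariant of this file: whichever way a batch is attached to a bo, batch->end is left batch_padding bytes short of the bo's real end, so there is always room to append one chaining MI_BATCH_BUFFER_START later (reclaimed at lines 611 and 982). anv_batch_bo_continue appears nearly verbatim above; sketched here with the stand-in types:

    struct anv_batch_bo {
       struct anv_bo *bo;
       size_t length;                     /* bytes of valid commands */
       struct anv_reloc_list relocs;
    };

    /* Re-attach `batch` to a bo that already holds bbo->length bytes of
     * commands, keeping batch_padding bytes of headroom at the end. */
    void
    anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
                          size_t batch_padding)
    {
       batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
       batch->start  = bbo->bo->map;
       batch->next   = bbo->bo->map + bbo->length;   /* resume the cursor */
       batch->end    = bbo->bo->map + bbo->bo->size - batch_padding;
       batch->relocs = &bbo->relocs;
    }
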
436 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
438 assert(batch->start == bbo->bo->map);
439 bbo->length = batch->next - batch->start;
440 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
445 struct anv_batch *batch, size_t additional,
448 assert(batch->start == bbo->bo->map);
449 bbo->length = batch->next - batch->start;
469 anv_batch_bo_continue(bbo, batch, batch_padding);
590 anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_START, bbs) {
603 struct anv_batch *batch = &cmd_buffer->batch;
607 /* We set the end of the batch a little short so we can be sure we
611 batch->end += GFX8_MI_BATCH_BUFFER_START_length * 4;
612 assert(batch->end == current_bbo->bo->map + current_bbo->bo->size);
616 anv_batch_bo_finish(current_bbo, batch);
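
Lines 603-616 are the chain step itself: give back the padding so the jump is guaranteed to fit, emit the MI_BATCH_BUFFER_START (the emit at line 590 above), and close out the current bbo by recording its final length. A compilable approximation; emit_batch_buffer_start and the trimmed cmd-buffer stand-in are assumptions:

    #define GFX8_MI_BATCH_BUFFER_START_length 3   /* dwords on Gfx8+ */

    struct anv_cmd_buffer {                /* stand-in; heavily trimmed */
       struct anv_batch batch;
       void *batch_end;
    };

    void anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch);
    /* Assumed helper wrapping the emit at line 590. */
    void emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
                                 struct anv_bo *bo, uint32_t offset);

    static void
    cmd_buffer_chain_to(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_batch_bo *current_bbo,
                        struct anv_batch_bo *new_bbo)
    {
       struct anv_batch *batch = &cmd_buffer->batch;

       /* Reclaim the reserved padding: the chaining command is exactly
        * what the padding was set aside for. */
       batch->end += GFX8_MI_BATCH_BUFFER_START_length * 4;
       assert(batch->end == current_bbo->bo->map + current_bbo->bo->size);

       emit_batch_buffer_start(cmd_buffer, new_bbo->bo, 0);

       anv_batch_bo_finish(current_bbo, batch);
    }
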
659 uint32_t *batch = cmd_buffer->batch_end;
660 anv_pack_struct(batch, GFX8_MI_BATCH_BUFFER_END,
665 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
690 anv_batch_bo_start(new_bbo, batch, GFX8_MI_BATCH_BUFFER_START_length * 4);
696 anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
701 anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
717 * on the fly into the batch and have a batch buffer smaller than 64k. This
830 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
863 cmd_buffer->batch.alloc = &cmd_buffer->vk.pool->alloc;
864 cmd_buffer->batch.user_data = cmd_buffer;
867 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
869 cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
872 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
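
Lines 863-872 wire the growth strategy once, at initialization: primaries that may chain get anv_cmd_buffer_chain_batch (line 665: allocate a new bbo and jump to it), otherwise anv_cmd_buffer_grow_batch (line 696: realloc the single bbo in place). The padding passed to anv_batch_bo_start is what the chain step reclaims later. A sketch; the condition's shape and the pool plumbing are simplified assumptions:

    VkResult anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data);
    VkResult anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data);
    void anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
                            size_t batch_padding);

    static void
    cmd_buffer_init_batch(struct anv_cmd_buffer *cmd_buffer,
                          struct anv_batch_bo *batch_bo,
                          const void *pool_alloc,
                          bool can_chain_batches)
    {
       cmd_buffer->batch.alloc     = pool_alloc;
       cmd_buffer->batch.user_data = cmd_buffer;

       /* Chaining jumps to fresh bos; the i915 cmd-parser path instead
        * grows one bo in place (see lines 1929-1933 for why). */
       cmd_buffer->batch.extend_cb = can_chain_batches
          ? anv_cmd_buffer_chain_batch
          : anv_cmd_buffer_grow_batch;

       /* Leave room for the eventual chaining command from day one. */
       anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
                          GFX8_MI_BATCH_BUFFER_START_length * 4);
    }
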
921 /* Destroy all of the batch buffers */
932 /* Delete all but the first batch bo */
942 &cmd_buffer->batch,
976 /* When we start a batch buffer, we subtract a certain amount of
979 * that padding before we end the batch; otherwise, we may end up
982 cmd_buffer->batch.end += GFX8_MI_BATCH_BUFFER_START_length * 4;
983 assert(cmd_buffer->batch.start == batch_bo->bo->map);
984 assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
987 cmd_buffer->batch_end = cmd_buffer->batch.next;
996 anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_END, bbe);
998 /* Round batch up to an even number of dwords. */
999 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
1000 anv_batch_emit(&cmd_buffer->batch, GFX8_MI_NOOP, noop);
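
Lines 976-1000 end a primary. Two details are easy to misread: first, the padding reclaim must restore batch.end to the bo's true end, which is exactly what the asserts at 983-984 check; second, the parity test at line 999 uses & 4 because every emit is dword-aligned, so the byte count is always a multiple of 4 and bit 2 being set means an odd dword count. Sketch, with a trivial stand-in for the genxml emit macro:

    /* Stand-in for the driver's genxml emit macro: here it only advances
     * the cursor by the command's dword length. */
    #define GFX8_MI_BATCH_BUFFER_END_length 1
    #define GFX8_MI_NOOP_length 1
    #define anv_batch_emit(batch, cmd, name) \
       ((void)anv_batch_emit_dwords((batch), cmd##_length))

    static void
    cmd_buffer_end_primary(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_batch_bo *batch_bo)
    {
       /* Reclaim the padding reserved at anv_batch_bo_start() so the
        * terminator below is guaranteed to fit inside the bo. */
       cmd_buffer->batch.end += GFX8_MI_BATCH_BUFFER_START_length * 4;
       assert(cmd_buffer->batch.start == batch_bo->bo->map);
       assert(cmd_buffer->batch.end ==
              batch_bo->bo->map + batch_bo->bo->size);

       /* Remember where the terminator lands so submit-time code can
        * re-pack this spot (line 660), letting the same buffer either
        * end here or chain onward to the next command buffer. */
       cmd_buffer->batch_end = cmd_buffer->batch.next;

       anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_END, bbe);

       /* Dword-aligned sizes: `& 4` == "odd number of dwords". */
       if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
          anv_batch_emit(&cmd_buffer->batch, GFX8_MI_NOOP, noop);
    }
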
1010 const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
1025 cmd_buffer->batch.next - cmd_buffer->batch.start;
1028 anv_batch_emit(&cmd_buffer->batch, GFX8_MI_NOOP, noop);
1032 anv_batch_emitn(&cmd_buffer->batch,
1038 cmd_buffer->return_addr = anv_batch_address(&cmd_buffer->batch, jump_addr);
1040 /* The emit above may have caused us to chain batch buffers which
1046 /* If the secondary has exactly one batch buffer in its list *and*
1047 * that batch buffer is less than half of the maximum size, we're
1048 * probably better off simply copying it into our batch.
1056 * MI_BATCH_BUFFER_START which will jump back to the calling batch.
1061 * We set the end of the batch a little short so we can be sure we
1065 cmd_buffer->batch.end += GFX8_MI_BATCH_BUFFER_START_length * 4;
1066 assert(cmd_buffer->batch.start == batch_bo->bo->map);
1067 assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
1070 assert(cmd_buffer->batch.start == batch_bo->bo->map);
1076 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
1101 anv_batch_emit_batch(&primary->batch, &secondary->batch);
1105 unsigned length = secondary->batch.end - secondary->batch.start;
1106 anv_batch_bo_grow(primary, bbo, &primary->batch, length,
1108 anv_batch_emit_batch(&primary->batch, &secondary->batch);
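
Lines 1046-1048 state the heuristic and lines 1101-1108 show the two copy flavors it selects between: a chaining primary can simply anv_batch_emit_batch the secondary (the extend_cb absorbs any overflow), while a non-chaining primary must first grow its single bbo by the secondary's length. This sketch collapses the driver's per-secondary exec-mode dispatch into an if/else; anv_batch_bo_grow's signature is read off lines 445-448 and 1106-1107:

    VkResult anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer,
                               struct anv_batch_bo *bbo,
                               struct anv_batch *batch, size_t additional,
                               size_t batch_padding);

    static void
    cmd_buffer_copy_secondary(struct anv_cmd_buffer *primary,
                              struct anv_cmd_buffer *secondary,
                              struct anv_batch_bo *primary_bbo,
                              bool can_chain_batches)
    {
       if (can_chain_batches) {
          /* Small single-bbo secondary: cheaper to copy than to jump
           * into it and back. */
          anv_batch_emit_batch(&primary->batch, &secondary->batch);
       } else {
          /* Cannot chain: make the primary's one bbo big enough first. */
          unsigned length = secondary->batch.end - secondary->batch.start;
          anv_batch_bo_grow(primary, primary_bbo, &primary->batch, length,
                            GFX8_MI_BATCH_BUFFER_START_length * 4);
          anv_batch_emit_batch(&primary->batch, &secondary->batch);
       }
    }
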
1120 assert(primary->batch.start == this_bbo->bo->map);
1121 uint32_t offset = primary->batch.next - primary->batch.start;
1124 * MI_BATCH_BUFFER_START in the primary batch.
1150 anv_batch_bo_continue(last_bbo, &primary->batch,
1159 anv_batch_emitn(&primary->batch,
1168 anv_address_physical(anv_batch_address(&primary->batch,
1169 primary->batch.next));
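
Lines 1032-1038 and 1159-1169 together form a call/return pair built from plain MI_BATCH_BUFFER_START: when the secondary ends, it emits a jump whose target is left blank and records that location in return_addr; when a primary later calls it, the primary emits the jump into the secondary and only then knows the return point, primary->batch.next, whose physical address fills the blank. The pointer plumbing below is inferred and the patch helper is hypothetical:

    uint64_t anv_address_physical(struct anv_address addr);
    /* Hypothetical helper: store `target` into the address field of the
     * secondary's trailing, still-blank MI_BATCH_BUFFER_START. */
    void patch_return_jump(struct anv_cmd_buffer *secondary, uint64_t target);

    static void
    cmd_buffer_call_secondary(struct anv_cmd_buffer *primary,
                              struct anv_cmd_buffer *secondary,
                              struct anv_bo *secondary_first_bo)
    {
       /* The call: jump from the primary into the secondary's first bbo
        * (the emitn at line 1159). */
       emit_batch_buffer_start(primary, secondary_first_bo, 0);

       /* The return: whatever the primary emits next, at batch.next, is
        * where execution must resume, so that address becomes the
        * secondary's return target (lines 1168-1169). */
       patch_return_jump(secondary,
                         anv_address_physical(
                            anv_batch_address(&primary->batch,
                                              primary->batch.next)));
    }
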
1442 * should only be called on batch buffers, so we know it isn't in
1554 /* Since we own all of the batch buffers, we know what values are stored
1572 * ensure that the relocation list is valid. All relocations on the batch
1690 * will get added automatically by processing relocations on the batch
1738 /* Chain the N-1 first batch buffers */
1843 * needs to be valid. All relocations on the batch buffers are
1860 * on the batch buffers are already valid and kept up-to-date. Since
1876 * batch buffer to execute. We can simply swap the element
1924 struct anv_batch *batch = &cmd_buffers[0]->batch;
1929 /* On platforms that cannot chain batch buffers because of the i915
1930 * command parser, we have to provide the batch length. Everywhere else
1933 .batch_len = device->can_chain_batches ? 0 : batch->next - batch->start,
2008 .batch_len = flush->batch.next - flush->batch.start,
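
Lines 1929-1933 and 2008 fill the same kernel-facing field: on platforms whose i915 command parser forbids chained batches, execbuf needs the exact byte length, while everywhere else batch_len = 0 means "execute the whole bo". A one-liner sketch; the flag name is taken from line 1933:

    static uint32_t
    compute_batch_len(bool can_chain_batches, const struct anv_batch *batch)
    {
       /* 0 => kernel executes the whole bo; otherwise the exact number
        * of bytes recorded into the batch. */
       return can_chain_batches ? 0 : (uint32_t)(batch->next - batch->start);
    }
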
2416 struct anv_batch *batch)
2430 uint32_t batch_size = align_u32(batch->next - batch->start, 8);
2437 memcpy(batch_bo->map, batch->start, batch_size);
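
The final matches (2416-2437) are a standalone submit path that copies a finished batch into its own bo. Rounding the size up to 8 bytes keeps the dword count even, the same rule the MI_NOOP padding at line 999 enforces. Sketch, with align_u32 spelled out:

    /* Round v up to a multiple of the power-of-two a, as the driver's
     * align_u32 helper does. */
    static uint32_t
    align_u32(uint32_t v, uint32_t a)
    {
       return (v + a - 1) & ~(a - 1);
    }

    static void
    copy_batch_into_bo(struct anv_bo *batch_bo, const struct anv_batch *batch)
    {
       /* Even-dword total; may read up to 4 bytes past `next`, which is
        * fine because the source bo always has that much headroom. */
       uint32_t batch_size = align_u32(batch->next - batch->start, 8);
       assert(batch_size <= batch_bo->size);

       memcpy(batch_bo->map, batch->start, batch_size);
    }
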