Lines matching refs:device (leading numbers are line numbers in the source file)

357 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
371 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
390 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
407 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
459 VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
466 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
488 if (anv_use_relocations(cmd_buffer->device->physical)) {
501 write_reloc(cmd_buffer->device,
512 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
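
The matches at 357-512 all revolve around one invariant: every command-buffer batch BO is carved out of the device-wide batch_bo_pool, and every destroy or error path must return it to that same pool. A minimal sketch of the create side, assuming the anv driver's internal headers; the reloc-list step and the allocator path (cmd_buffer->vk.pool->alloc) stand in for the real bookkeeping and vary by Mesa version:

    static VkResult
    batch_bo_create_sketch(struct anv_cmd_buffer *cmd_buffer,
                           uint32_t size, struct anv_batch_bo *bbo)
    {
       /* Allocate the backing BO from the device's shared batch pool. */
       VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
                                           size, &bbo->bo);
       if (result != VK_SUCCESS)
          return result;

       /* Stand-in for the remaining setup; allocator path is an assumption. */
       result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->vk.pool->alloc);
       if (result != VK_SUCCESS) {
          /* Any failure past the alloc hands the BO back to the same pool. */
          anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
          return result;
       }

       return VK_SUCCESS;
    }
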
562 struct anv_state_pool *pool = anv_binding_table_pool(cmd_buffer->device);
591 bbs.DWordLength = cmd_buffer->device->info.ver < 8 ?
623 assert(!anv_use_relocations(cmd_buffer_from->device->physical));
653 assert(!anv_use_relocations(cmd_buffer->device->physical));
795 if (cmd_buffer->device->info.verx10 >= 125) {
812 struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
834 *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
866 if (cmd_buffer->device->can_chain_batches) {
914 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
947 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
965 assert(!cmd_buffer->device->can_chain_batches ||
1011 if (!cmd_buffer->device->can_chain_batches) {
1013 } else if (cmd_buffer->device->physical->use_call_secondary) {
1021 const struct intel_device_info *devinfo = &cmd_buffer->device->info;
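
When a secondary command buffer's batch ends (1011-1024), the driver picks an execution strategy from device capabilities. A sketch of that decision following the two conditions shown above; the ANV_CMD_BUFFER_EXEC_MODE_* names are the driver's internal enum and are assumed here:

    if (!cmd_buffer->device->can_chain_batches) {
       /* No chaining: grow the primary and emit the secondary into it. */
       cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
    } else if (cmd_buffer->device->physical->use_call_secondary) {
       /* Jump into the secondary via MI_BATCH_BUFFER_START and return. */
       cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN;
    } else {
       /* Otherwise chain the secondary's batch BOs, copying them first
        * when relocations are in use (cf. the asserts at 623/653). */
    }
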
1255 anv_execbuf_add_bo_bitset(struct anv_device *device,
1262 anv_execbuf_add_bo(struct anv_device *device,
1286 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1292 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1348 result = anv_execbuf_add_bo(device, exec, relocs->reloc_bos[i],
1355 return anv_execbuf_add_bo_bitset(device, exec, relocs->dep_words,
1364 anv_execbuf_add_bo_bitset(struct anv_device *device,
1375 struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
1378 anv_execbuf_add_bo(device, exec, bo, NULL, extra_flags);
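
anv_execbuf_add_bo_bitset (1364) expands a bitset of GEM handles into individual anv_execbuf_add_bo calls via the lookup at 1375. A sketch of that loop, assuming the BITSET/u_bit_scan helpers Mesa's util headers provide:

    static VkResult
    add_bo_bitset_sketch(struct anv_device *device, struct anv_execbuf *exec,
                         uint32_t dep_words, BITSET_WORD *deps,
                         uint32_t extra_flags)
    {
       for (uint32_t w = 0; w < dep_words; w++) {
          BITSET_WORD mask = deps[w];
          while (mask) {
             int i = u_bit_scan(&mask);
             /* The bit position encodes the GEM handle. */
             uint32_t gem_handle = w * BITSET_WORDBITS + i;
             struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
             VkResult result =
                anv_execbuf_add_bo(device, exec, bo, NULL, extra_flags);
             if (result != VK_SUCCESS)
                return result;
          }
       }
       return VK_SUCCESS;
    }
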
1446 write_reloc(pool->block_pool.device,
1455 anv_reloc_list_apply(struct anv_device *device,
1469 write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1549 anv_bo_unwrap(cmd_buffer->device->surface_state_pool.block_pool.bo);
1550 anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1560 anv_reloc_list_apply(cmd_buffer->device,
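
Relocations are applied CPU-side: for each entry, write_reloc (1446/1469) patches the target BO's address into the mapped batch. A sketch assuming the reloc-list layout implied by line 1469; the presumed_offset/offset field names beyond that line are assumptions:

    static void
    reloc_list_apply_sketch(struct anv_device *device,
                            struct anv_reloc_list *list,
                            struct anv_bo *bo, bool always_relocate)
    {
       for (size_t i = 0; i < list->num_relocs; i++) {
          struct anv_bo *target_bo = anv_bo_unwrap(list->reloc_bos[i]);

          /* Skip entries whose presumed address is already correct. */
          if (list->relocs[i].presumed_offset == target_bo->offset &&
              !always_relocate)
             continue;

          /* Patch the address directly into the mapped batch. */
          void *p = bo->map + list->relocs[i].offset;
          write_reloc(device, p,
                      target_bo->offset + list->relocs[i].delta, true);
          list->relocs[i].presumed_offset = target_bo->offset;
       }
    }
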
1584 anv_execbuf_add_syncobj(struct anv_device *device,
1597 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1609 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1626 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1642 anv_execbuf_add_sync(struct anv_device *device,
1661 return anv_execbuf_add_bo(device, execbuf, bo_sync->bo, NULL,
1669 return anv_execbuf_add_syncobj(device, execbuf, syncobj->syncobj,
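
anv_execbuf_add_sync (1642) dispatches on the vk_sync type: a BO-backed binary sync becomes one more execbuf object (write-flagged when signaling), while a DRM syncobj becomes a fence entry. A sketch of the dispatch at 1661/1669; vk_sync_is_anv_bo_sync, the vk_drm_syncobj helpers, and the value parameter are assumptions from Mesa's common sync code, while EXEC_OBJECT_WRITE and I915_EXEC_FENCE_* are kernel uapi:

    if (vk_sync_is_anv_bo_sync(sync)) {
       struct anv_bo_sync *bo_sync =
          container_of(sync, struct anv_bo_sync, sync);
       return anv_execbuf_add_bo(device, execbuf, bo_sync->bo, NULL,
                                 is_signal ? EXEC_OBJECT_WRITE : 0);
    } else if (vk_sync_type_is_drm_syncobj(sync->type)) {
       struct vk_drm_syncobj *syncobj = vk_sync_as_drm_syncobj(sync);
       return anv_execbuf_add_syncobj(device, execbuf, syncobj->syncobj,
                                      is_signal ? I915_EXEC_FENCE_SIGNAL
                                                : I915_EXEC_FENCE_WAIT,
                                      value);
    }
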
1683 &cmd_buffer->device->surface_state_pool;
1688 if (anv_use_relocations(cmd_buffer->device->physical)) {
1694 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1701 anv_execbuf_add_bo_bitset(cmd_buffer->device, execbuf,
1714 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1752 struct anv_device *device = queue->device;
1753 struct anv_state_pool *ss_pool = &device->surface_state_pool;
1769 if (!anv_use_relocations(device->physical)) {
1771 result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1777 pool = &device->dynamic_state_pool.block_pool;
1779 result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1784 pool = &device->general_state_pool.block_pool;
1786 result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1791 pool = &device->instruction_state_pool.block_pool;
1793 result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1798 pool = &device->binding_table_pool.block_pool;
1800 result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1809 &device->memory_objects, link) {
1810 result = anv_execbuf_add_bo(device, execbuf, mem->bo, NULL, 0);
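
Queue-level setup (1752-1810) pins every backing BO of each growable state pool into the execbuf; the same add-BO call repeats for the surface, dynamic, general, instruction, and binding-table pools, then for all exported memory objects. A sketch for one pool, assuming an anv_block_pool_foreach_bo iterator macro:

    struct anv_block_pool *pool = &device->dynamic_state_pool.block_pool;
    anv_block_pool_foreach_bo(bo, pool) {
       VkResult result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
       if (result != VK_SUCCESS)
          return result;
    }
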
1853 anv_reloc_list_apply(device, &cmd_buffers[i]->surface_relocs,
1854 device->surface_state_pool.block_pool.bo,
1897 if (!anv_use_relocations(device->physical))
1914 if (device->physical->memory.need_clflush) {
1933 .batch_len = device->can_chain_batches ? 0 : batch->next - batch->start,
1939 .rsvd1 = device->context_id,
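
Once every BO is collected, the ioctl argument is filled in. Line 1933 shows the chaining subtlety (a chained batch reports batch_len 0 so the kernel executes the whole object) and 1939 shows the context id landing in rsvd1. A sketch of the fill; field names are kernel uapi, but the flags value and any field not shown on those two lines are assumptions:

    execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
       .buffers_ptr = (uintptr_t) execbuf->objects,
       .buffer_count = execbuf->bo_count,
       .batch_start_offset = 0,
       /* Chained batches pass 0 so the kernel runs the full BO. */
       .batch_len = device->can_chain_batches ? 0 : batch->next - batch->start,
       .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,  /* assumed */
       .rsvd1 = device->context_id,  /* HW context for this submission */
    };
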
1949 struct anv_device *device = queue->device;
1950 VkResult result = anv_execbuf_add_bo(device, execbuf,
1951 device->trivial_batch_bo,
1962 .rsvd1 = device->context_id,
1973 struct anv_device *device = queue->device;
1974 VkResult result = anv_execbuf_add_bo(device, execbuf,
1980 result = anv_execbuf_add_sync(device, execbuf, flush->sync,
2001 if (device->physical->memory.need_clflush)
2011 .rsvd1 = device->context_id,
2026 struct anv_device *device = queue->device;
2029 execbuf.alloc = &device->vk.alloc;
2036 int ret = queue->device->info.no_hw ? 0 :
2037 anv_gem_execbuffer(queue->device, &execbuf.execbuf);
2074 * Since the only other things that ever take the device lock such as block
2089 struct anv_device *device = queue->device;
2093 execbuf.alloc = &queue->device->vk.alloc;
2107 result = anv_execbuf_add_sync(device, &execbuf,
2121 anv_execbuf_add_bo(device, &execbuf, device->workaround_bo, NULL, 0);
2126 result = anv_execbuf_add_sync(device, &execbuf,
2135 result = anv_execbuf_add_sync(device, &execbuf,
2144 result = anv_execbuf_add_sync(device, &execbuf,
2179 fprintf(stderr, "Batch on queue %d\n", (int)(queue - device->queues));
2186 intel_print_batch(&device->decoder_ctx,
2194 device->cmd_buffer_being_decoded = cmd_buffers[i];
2195 intel_print_batch(&device->decoder_ctx, (*bo)->bo->map,
2197 device->cmd_buffer_being_decoded = NULL;
2200 intel_print_batch(&device->decoder_ctx,
2201 device->trivial_batch_bo->map,
2202 device->trivial_batch_bo->size,
2203 device->trivial_batch_bo->offset, false);
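
The submit path can dump each batch through the shared decoder (2186-2203). A usage sketch built from the calls above; the INTEL_DEBUG(DEBUG_BATCH) guard is an assumption about how the dump is gated:

    if (INTEL_DEBUG(DEBUG_BATCH)) {
       /* Decode and print the batch that is about to be submitted. */
       intel_print_batch(&device->decoder_ctx,
                         device->trivial_batch_bo->map,
                         device->trivial_batch_bo->size,
                         device->trivial_batch_bo->offset, false);
    }
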
2231 int ret = intel_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
2234 result = vk_device_set_lost(&device->vk,
2253 .rsvd1 = device->context_id,
2256 int ret = queue->device->info.no_hw ? 0 :
2257 anv_gem_execbuffer(queue->device, &query_pass_execbuf);
2262 int ret = queue->device->info.no_hw ? 0 :
2263 anv_gem_execbuffer(queue->device, &execbuf.execbuf);
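
The actual submission is gated on info.no_hw (2256-2263): on null-hardware configurations the ioctl is skipped and success is assumed, and a real failure marks the device lost (cf. 2469). The pattern, straight from the lines above:

    int ret = queue->device->info.no_hw ? 0 :
              anv_gem_execbuffer(queue->device, &execbuf.execbuf);
    if (ret)
       result = vk_device_set_lost(&device->vk, "execbuf2 failed: %m");
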
2268 VkResult result = vk_sync_wait(&device->vk,
2366 * anv_GetFenceStatus does take the global device lock before checking
2378 pthread_cond_broadcast(&queue->device->queue_submit);
2388 struct anv_device *device = queue->device;
2391 if (queue->device->info.no_hw) {
2393 result = vk_sync_signal(&device->vk,
2404 pthread_mutex_lock(&device->mutex);
2407 pthread_mutex_unlock(&device->mutex);
2418 struct anv_device *device = queue->device;
2422 if (queue->device->info.no_hw)
2425 /* This is only used by device init so we can assume the queue is empty and
2433 result = anv_bo_pool_alloc(&device->batch_bo_pool, batch_size, &batch_bo);
2438 if (device->physical->memory.need_clflush)
2443 execbuf.alloc = &queue->device->vk.alloc;
2446 result = anv_execbuf_add_bo(device, &execbuf, batch_bo, NULL, 0);
2451 intel_print_batch(&device->decoder_ctx,
2463 .rsvd1 = device->context_id,
2467 err = anv_gem_execbuffer(device, &execbuf.execbuf);
2469 result = vk_device_set_lost(&device->vk, "anv_gem_execbuffer failed: %m");
2473 result = anv_device_wait(device, batch_bo, INT64_MAX);
2475 result = vk_device_set_lost(&device->vk,
2482 anv_bo_pool_free(&device->batch_bo_pool, batch_bo);
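
The device-init helper at 2418-2482 ties the whole file together: pool alloc, optional clflush, execbuf setup, submit, synchronous wait, pool free. A condensed sketch of that sequence; error handling between steps is elided, size/batch_size stand for the emitted batch length, and intel_flush_range is assumed to be the non-LLC flush helper:

    struct anv_bo *batch_bo;
    VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool,
                                        batch_size, &batch_bo);
    if (result != VK_SUCCESS)
       return result;

    memcpy(batch_bo->map, batch->start, size);
    if (device->physical->memory.need_clflush)
       intel_flush_range(batch_bo->map, size);  /* non-LLC coherency */

    result = anv_execbuf_add_bo(device, &execbuf, batch_bo, NULL, 0);

    int err = anv_gem_execbuffer(device, &execbuf.execbuf);
    if (err)
       result = vk_device_set_lost(&device->vk,
                                   "anv_gem_execbuffer failed: %m");

    /* Block until the batch retires, then recycle the BO. */
    if (result == VK_SUCCESS)
       result = anv_device_wait(device, batch_bo, INT64_MAX);

    anv_bo_pool_free(&device->batch_bo_pool, batch_bo);
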