/third_party/mesa3d/src/gallium/drivers/radeonsi/
si_cp_reg_shadowing.c
   34  uint64_t gpu_address = shadow_regs->gpu_address;  in si_build_load_reg() local
   43  gpu_address += SI_SHADOWED_UCONFIG_REG_OFFSET;  in si_build_load_reg()
   48  gpu_address += SI_SHADOWED_CONTEXT_REG_OFFSET;  in si_build_load_reg()
   53  gpu_address += SI_SHADOWED_SH_REG_OFFSET;  in si_build_load_reg()
   60  si_pm4_cmd_add(pm4, gpu_address);  in si_build_load_reg()
   61  si_pm4_cmd_add(pm4, gpu_address >> 32);  in si_build_load_reg()

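The si_build_load_reg() hits above show the recurring way a 64-bit GPU virtual address is placed into a command packet: a region offset is added to the shadow buffer's base, then the address is emitted as two 32-bit dwords, low half first and high half via a 32-bit shift. Below is a minimal standalone sketch of that split; emit_dword() and the constants are hypothetical stand-ins for illustration, not the driver's si_pm4_cmd_add() path.

   #include <stdint.h>
   #include <stdio.h>

   /* Hypothetical stand-in for a packet-emit helper: append one 32-bit
    * dword to the command stream (here we just print it). */
   static void emit_dword(uint32_t dw)
   {
      printf("0x%08x\n", (unsigned)dw);
   }

   int main(void)
   {
      /* Illustrative 64-bit GPU virtual address of a register-shadow buffer. */
      uint64_t gpu_address = 0x0000800100402000ull;

      /* Add the offset of the register block being loaded (made-up value,
       * not one of the real SI_SHADOWED_*_REG_OFFSET constants). */
      gpu_address += 0x1000;

      /* The packet carries the address as two dwords: low 32 bits first,
       * then the high 32 bits. */
      emit_dword((uint32_t)gpu_address);          /* ADDR_LO */
      emit_dword((uint32_t)(gpu_address >> 32));  /* ADDR_HI */
      return 0;
   }
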
si_cp_dma.c
  195  uint64_t va = (sdst ? sdst->gpu_address : 0) + offset;  in si_cp_dma_clear_buffer()
  275  va = sctx->scratch_buffer->gpu_address;  in si_cp_dma_realign_engine()
  307  dst_offset += si_resource(dst)->gpu_address;  in si_cp_dma_copy_buffer()
  310  src_offset += si_resource(src)->gpu_address;  in si_cp_dma_copy_buffer()
  459  uint64_t va = buf->gpu_address + offset;  in si_cp_write_data()
  482  uint64_t dst_va = (dst ? dst->gpu_address : 0ull) + dst_offset;  in si_cp_copy_data()
  483  uint64_t src_va = (src ? src->gpu_address : 0ull) + src_offset;  in si_cp_copy_data()

si_compute.c
  323  uint64_t base_address = program->shader.bo->gpu_address;  in si_bind_compute_state()
  366  va = si_resource(resources[i])->gpu_address;  in si_set_global_binding()
  397  radeon_set_config_reg(R_00950C_TA_CS_BC_BASE_ADDR, sctx->border_color_buffer->gpu_address >> 8);  in si_emit_initial_compute_regs()
  415  uint64_t bc_va = sctx->border_color_buffer->gpu_address;  in si_emit_initial_compute_regs()
  490  uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;  in si_setup_compute_scratch_buffer()
  555  shader_va = shader->bo->gpu_address + offset;  in si_switch_compute_shader()
  575  radeon_emit(sctx->compute_scratch_buffer->gpu_address >> 8);  in si_switch_compute_shader()
  576  radeon_emit(sctx->compute_scratch_buffer->gpu_address >> 40);  in si_switch_compute_shader()
  603  uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;  in setup_scratch_rsrc_user_sgprs()
  695  dispatch_va = dispatch_buf->gpu_address  in si_setup_user_sgprs_co_v2()
  [all...]

si_buffer.c
  185  res->gpu_address = sscreen->ws->buffer_get_virtual_address(res->buf);  in si_alloc_resource()
  188  uint64_t start = res->gpu_address;  in si_alloc_resource()
  205  res->gpu_address, res->gpu_address + res->buf->size, res->buf->size);  in si_alloc_resource()
  289  sdst->gpu_address = ssrc->gpu_address;  in si_replace_buffer_storage()
  650  buf->gpu_address = ws->buffer_get_virtual_address(buf->buf);  in si_buffer_from_user_memory()
  702  res->gpu_address = sscreen->ws->buffer_get_virtual_address(res->buf) + offset;  in si_buffer_from_winsys_buffer()

si_fence.c
  113  radeon_emit(scratch->gpu_address);  in si_cp_release_mem()
  114  radeon_emit(scratch->gpu_address >> 32);  in si_cp_release_mem()
  132  uint64_t va = scratch->gpu_address;  in si_cp_release_mem()
  271  uint64_t fence_va = fine->buf->gpu_address + fine->offset;  in si_fine_fence_set()

si_descriptors.c
  149  desc->gpu_address = si_desc_extract_buffer_address(descriptor);  in si_upload_descriptors()
  159  desc->gpu_address = 0;  in si_upload_descriptors()
  171  desc->gpu_address = desc->buffer->gpu_address + buffer_offset;  in si_upload_descriptors()
  174  assert((desc->buffer->gpu_address >> 32) == sctx->screen->info.address32_hi);  in si_upload_descriptors()
  175  assert((desc->gpu_address >> 32) == sctx->screen->info.address32_hi);  in si_upload_descriptors()
  270  uint64_t va = buf->gpu_address + offset;  in si_set_buf_desc_address()
  300  va = tex->buffer.gpu_address;  in si_set_mutable_tex_desc_fields()
  323  meta_va = tex->buffer.gpu_address + tex->surface.meta_offset;  in si_set_mutable_tex_desc_fields()
  335  meta_va = tex->buffer.gpu_address  in si_set_mutable_tex_desc_fields()
  [all...]

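The si_upload_descriptors() hits compute each descriptor's address as the upload buffer's gpu_address plus an offset, then assert that the upper 32 bits match the screen's address32_hi, which suggests descriptor uploads are kept inside a single 4 GiB window so only the low 32 bits ever differ. A hedged sketch of that invariant with made-up addresses (address32_hi and the offsets below are illustrative, not values from the driver):

   #include <assert.h>
   #include <stdint.h>
   #include <stdio.h>

   int main(void)
   {
      /* Illustrative: the 4 GiB window used for descriptor uploads is
       * identified by the upper 32 bits of its addresses. */
      uint32_t address32_hi = 0x00008001u;

      /* Made-up upload-buffer base inside that window, plus the offset of
       * one descriptor set within the buffer. */
      uint64_t buffer_gpu_address = ((uint64_t)address32_hi << 32) | 0x00400000u;
      uint64_t buffer_offset      = 0x100;
      uint64_t desc_gpu_address   = buffer_gpu_address + buffer_offset;

      /* Both the buffer and the descriptor set it holds must stay in the
       * same window, so only the low 32 bits vary. */
      assert((buffer_gpu_address >> 32) == address32_hi);
      assert((desc_gpu_address >> 32) == address32_hi);

      printf("descriptor VA low dword: 0x%08x\n", (unsigned)(uint32_t)desc_gpu_address);
      return 0;
   }
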
si_sdma_copy_image.c
  116  uint64_t dst_address = sdst->buffer.gpu_address + sdst->surface.u.gfx9.surf_offset;  in si_sdma_v4_v5_copy_texture()
  117  uint64_t src_address = ssrc->buffer.gpu_address + ssrc->surface.u.gfx9.surf_offset;  in si_sdma_v4_v5_copy_texture()
  229  uint64_t dst_address = sdst->buffer.gpu_address + sdst->surface.u.legacy.level[0].offset_256B * 256;  in cik_sdma_copy_texture()
  230  uint64_t src_address = ssrc->buffer.gpu_address + ssrc->surface.u.legacy.level[0].offset_256B * 256;  in cik_sdma_copy_texture()

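The two SDMA copy paths above build their copy addresses differently: the gfx9 layout adds surf_offset directly (a byte offset), while the legacy layout stores level offsets in 256-byte units and scales them back to bytes (offset_256B * 256). A small sketch of the arithmetic with illustrative values only:

   #include <stdint.h>
   #include <stdio.h>

   int main(void)
   {
      /* Illustrative buffer base address. */
      uint64_t buffer_gpu_address = 0x0000800100400000ull;

      /* gfx9-style: the surface offset is already in bytes. */
      uint64_t gfx9_surf_offset = 0x8000;
      uint64_t gfx9_address = buffer_gpu_address + gfx9_surf_offset;

      /* legacy-style: level offsets are kept in 256-byte units and must be
       * scaled to bytes before being added to the VA. */
      uint32_t level0_offset_256B = 0x80;
      uint64_t legacy_address = buffer_gpu_address + (uint64_t)level0_offset_256B * 256;

      printf("gfx9 copy VA:   0x%llx\n", (unsigned long long)gfx9_address);
      printf("legacy copy VA: 0x%llx\n", (unsigned long long)legacy_address);
      return 0;
   }
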
si_state_streamout.c
  273  va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;  in si_emit_streamout_begin()
  298  uint64_t va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;  in si_emit_streamout_begin()
  340  uint64_t va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;  in si_emit_streamout_end()

si_texture.c
  351  tex->cmask_base_address_reg = tex->buffer.gpu_address >> 8;  in si_texture_discard_cmask()
  474  tex->buffer.gpu_address = new_tex->buffer.gpu_address;  in si_reallocate_texture_inplace()
 1031  resource->gpu_address = plane0->buffer.gpu_address;  in si_texture_create_object()
 1045  resource->gpu_address = sscreen->ws->buffer_get_virtual_address(resource->buf);  in si_texture_create_object()
 1147  tex->cmask_base_address_reg = (tex->buffer.gpu_address + tex->surface.cmask_offset) >> 8;  in si_texture_create_object()
 1153  tex->buffer.gpu_address, tex->buffer.gpu_address + tex->buffer.buf->size,  in si_texture_create_object()
 1815  tex->cmask_base_address_reg = (tex->buffer.gpu_address  in si_texture_invalidate_storage()
  [all...]

gfx10_query.c
  176  uint64_t fence_va = query->last->buf->gpu_address;  in gfx10_sh_query_end()
  399  va = qbuf->buf->gpu_address;  in gfx10_sh_query_get_result_resource()

si_perfcounter.c
  129  si_cp_copy_data(sctx, &sctx->gfx_cs, COPY_DATA_DST_MEM, buffer, va - buffer->gpu_address,  in si_pc_emit_start()
  327  uint64_t va = query->buffer.buf->gpu_address + query->buffer.results_end;  in si_pc_query_resume()
  338  uint64_t va = query->buffer.buf->gpu_address + query->buffer.results_end;  in si_pc_query_suspend()

si_query.c
  922  va = query->buffer.buf->gpu_address + query->buffer.results_end;  in si_query_hw_emit_start()
 1024  va = query->buffer.buf->gpu_address + query->buffer.results_end;  in si_query_hw_emit_stop()
 1098  uint64_t va_base = qbuf->buf->gpu_address;  in si_emit_query_predication()
 1158  uint64_t va = query->workaround_buf->gpu_address + query->workaround_offset;  in si_emit_query_predication()
 1168  uint64_t va_base = qbuf->buf->gpu_address;  in si_emit_query_predication()
 1652  va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;  in si_query_hw_get_result_resource()

si_state_shaders.cpp
  694  va = shader->bo->gpu_address;  in si_shader_ls()
  715  va = shader->bo->gpu_address;  in si_shader_hs()
  803  va = shader->bo->gpu_address;  in si_shader_es()
 1068  va = shader->bo->gpu_address;  in si_shader_gs()
 1375  va = shader->bo->gpu_address;
 1675  va = shader->bo->gpu_address;
 2005  va = shader->bo->gpu_address;
 3940  uint64_t scratch_va = sctx->scratch_buffer->gpu_address;
 4108  si_resource(sctx->tess_rings)->gpu_address + sctx->screen->hs.tess_offchip_ring_size;
 4154  uint64_t va = si_resource(tf_ring)->gpu_address
  [all...]

si_state_draw.cpp
  313  shader->bo->gpu_address);  in si_update_shaders()
  412  uint64_t address = si_resource(buf)->gpu_address + offset;
  774  si_resource(sctx->tess_rings_tmz) : si_resource(sctx->tess_rings))->gpu_address;
 1531  index_va = si_resource(indexbuf)->gpu_address + index_offset;
 1554  uint64_t indirect_va = si_resource(indirect->buffer)->gpu_address;
 1597  count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
 1824  uint64_t va = buf->gpu_address + offset;
 2024  sctx->vb_descriptors_buffer->gpu_address +

si_state.c
 2720  surf->db_depth_base = tex->buffer.gpu_address >> 8;  in si_init_depth_surface()
 2721  surf->db_stencil_base = (tex->buffer.gpu_address + tex->surface.u.gfx9.zs.stencil_offset) >> 8;  in si_init_depth_surface()
 2750  surf->db_htile_data_base = (tex->buffer.gpu_address + tex->surface.meta_offset) >> 8;  in si_init_depth_surface()
 2764  (tex->buffer.gpu_address >> 8) + tex->surface.u.legacy.level[level].offset_256B;  in si_init_depth_surface()
 2766  (tex->buffer.gpu_address >> 8) + tex->surface.u.legacy.zs.stencil_level[level].offset_256B;  in si_init_depth_surface()
 2822  surf->db_htile_data_base = (tex->buffer.gpu_address + tex->surface.meta_offset) >> 8;  in si_init_depth_surface()
 3240  cb_color_base = tex->buffer.gpu_address >> 8;  in si_emit_framebuffer_state()
 3265  cb_color_fmask = (tex->buffer.gpu_address + tex->surface.fmask_offset) >> 8;  in si_emit_framebuffer_state()
 3281  cb_dcc_base = (tex->buffer.gpu_address + tex->surface.meta_offset) >> 8;  in si_emit_framebuffer_state()
 4122  va = tex->buffer.gpu_address  in gfx10_make_texture_descriptor()
  [all...]

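Many of the si_init_depth_surface() and si_emit_framebuffer_state() hits shift a surface address right by 8 before writing it into a base field: color, depth, HTILE, FMASK and DCC bases are programmed in units of 256 bytes, so the address must be 256-byte aligned and its low 8 bits are dropped. Below is a hedged sketch of that encoding; the addresses, offsets, and the assumption that any bits beyond the field width go into a separate high register are illustrative, not the driver's actual state layout.

   #include <assert.h>
   #include <stdint.h>
   #include <stdio.h>

   int main(void)
   {
      /* Made-up placement: texture buffer VA plus the offset of its HTILE
       * metadata, both 256-byte aligned. */
      uint64_t buffer_gpu_address = 0x0000800100400000ull;
      uint64_t meta_offset        = 0x20000;

      uint64_t htile_va = buffer_gpu_address + meta_offset;
      assert((htile_va & 0xffu) == 0);  /* base must be 256-byte aligned */

      /* Base-address fields hold the VA in 256-byte units, i.e. the
       * address shifted right by 8. */
      uint64_t db_htile_data_base = htile_va >> 8;
      printf("DB_HTILE_DATA_BASE = 0x%llx\n", (unsigned long long)db_htile_data_base);
      return 0;
   }
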
/third_party/mesa3d/src/gallium/drivers/r600/
r600_buffer_common.c
  186  res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);  in r600_alloc_resource()
  188  res->gpu_address = 0;  in r600_alloc_resource()
  197  res->gpu_address, res->gpu_address + res->buf->size,  in r600_alloc_resource()
  251  uint64_t old_gpu_address = rdst->gpu_address;  in r600_replace_buffer_storage()
  254  rdst->gpu_address = rsrc->gpu_address;  in r600_replace_buffer_storage()
  643  rbuffer->gpu_address =  in r600_buffer_from_user_memory()
  646  rbuffer->gpu_address = 0;  in r600_buffer_from_user_memory()

evergreen_hw_context.c
   49  dst_offset += rdst->gpu_address;  in evergreen_dma_copy_buffer()
   50  src_offset += rsrc->gpu_address;  in evergreen_dma_copy_buffer()
   99  offset += r600_resource(dst)->gpu_address;  in evergreen_cp_dma_clear_buffer()

r600_streamout.c
  196  uint64_t va = r600_resource(t[i]->b.buffer)->gpu_address;  in r600_emit_streamout_begin()
  221  uint64_t va = t[i]->buf_filled_size->gpu_address +  in r600_emit_streamout_begin()
  267  va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;  in r600_emit_streamout_end()

evergreen_state.c
  661  va = tmp->resource.gpu_address + params->offset;  in evergreen_fill_buffer_resource_words()
  706  if (tmp->resource.gpu_address)  in texture_buffer_sampler_view()
  831  va = tmp->resource.gpu_address;  in evergreen_fill_tex_resource_words()
 1110  color->offset = (res->gpu_address + first_element) >> 8;  in evergreen_set_color_surface_buffer()
 1136  color->offset += rtex->resource.gpu_address;  in evergreen_set_color_surface_common()
 1281  color->fmask = (rtex->resource.gpu_address + rtex->fmask.offset) >> 8;  in evergreen_set_color_surface_common()
 1368  offset = rtex->resource.gpu_address;  in evergreen_init_depth_surface()
 1420  stencil_offset += rtex->resource.gpu_address;  in evergreen_init_depth_surface()
 1431  uint64_t va = rtex->resource.gpu_address + rtex->htile_offset;  in evergreen_init_depth_surface()
 1776  radeon_compute_set_context_reg(cs, R_028B9C_CB_IMMED0_BASE + (idx * 4), resource->immed_buffer->gpu_address >>  in evergreen_emit_image_state()
  [all...]

r600_hw_context.c
  465  va = buf->gpu_address + offset;  in r600_emit_pfp_sync_me()
  515  dst_offset += r600_resource(dst)->gpu_address;  in r600_cp_dma_copy_buffer()
  516  src_offset += r600_resource(src)->gpu_address;  in r600_cp_dma_copy_buffer()

r600_texture.c
  342  rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;  in r600_texture_discard_cmask()
  409  rtex->resource.gpu_address = new_tex->resource.gpu_address;  in r600_reallocate_texture_inplace()
  734  rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;  in r600_texture_alloc_cmask_separate()
  974  resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->buf);  in r600_texture_create_object()
 1001  (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;  in r600_texture_create_object()
 1005  rtex->resource.gpu_address,  in r600_texture_create_object()
 1006  rtex->resource.gpu_address + rtex->resource.buf->size,  in r600_texture_create_object()
 1276  (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;  in r600_texture_invalidate_storage()

r600_uvd.c
  128  resources[i]->resource.gpu_address = ctx->b.ws->buffer_get_virtual_address(  in r600_video_buffer_create()

r600_query.c
  799  va = query->buffer.buf->gpu_address + query->buffer.results_end;  in r600_query_hw_emit_start()
  886  va = query->buffer.buf->gpu_address + query->buffer.results_end;  in r600_query_hw_emit_stop()
  954  uint64_t va_base = qbuf->buf->gpu_address;  in r600_emit_query_predication()
 1738  va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;  in r600_query_hw_get_result_resource()
 1903  radeon_emit(cs, buffer->gpu_address);  in r600_query_fix_enabled_rb_mask()
 1904  radeon_emit(cs, buffer->gpu_address >> 32);  in r600_query_fix_enabled_rb_mask()

r600_state_common.c
  429  if (view->tex_resource->gpu_address &&  in r600_sampler_view_destroy()
 1800  radeon_set_config_reg(cs, ring_base_reg, (rbuffer->gpu_address + size_per_se * se) >> 8);  in r600_setup_scratch_area_for_shader()
 2414  uint64_t va = r600_resource(indirect->buffer)->gpu_address;  in r600_draw_vbo()
 2447  uint64_t va = r600_resource(indexbuf)->gpu_address + index_offset;  in r600_draw_vbo()
 2485  uint64_t va = t->buf_filled_size->gpu_address + t->buf_filled_size_offset;  in r600_draw_vbo()
 3380  uint64_t va = rbuffer->gpu_address + offset;  in r600_invalidate_buffer()

/third_party/mesa3d/src/amd/vulkan/
radv_image.c
  717  uint64_t gpu_address = radv_buffer_get_va(buffer->bo);  in radv_make_buffer_descriptor() local
  718  uint64_t va = gpu_address + buffer->offset;  in radv_make_buffer_descriptor()
  775  uint64_t gpu_address = binding->bo ? radv_buffer_get_va(binding->bo) + binding->offset : 0;  in si_set_mutable_tex_desc_fields() local
  776  uint64_t va = gpu_address;  in si_set_mutable_tex_desc_fields()
  797  meta_va = gpu_address + plane->surface.meta_offset;  in si_set_mutable_tex_desc_fields()
  805  meta_va = gpu_address + plane->surface.meta_offset;  in si_set_mutable_tex_desc_fields()
 1060  uint64_t gpu_address = radv_buffer_get_va(image->bindings[0].bo);  in gfx10_make_texture_descriptor() local
 1066  va = gpu_address + image->bindings[0].offset + image->planes[0].surface.fmask_offset;  in gfx10_make_texture_descriptor()
 1099  va = gpu_address + image->bindings[0].offset + image->planes[0].surface.cmask_offset;  in gfx10_make_texture_descriptor()
 1239  uint64_t gpu_address  in si_make_texture_descriptor() local
  [all...]