Lines Matching refs:image

82    /* Do not enable TC-compatible HTILE if the image isn't readable by a
124 const struct radv_image *image)
129 if (image->info.samples <= 1 && image->info.width * image->info.height <= 512 * 512) {
132 * clear. RadeonSI does this, but the image threshold is
138 return !!(image->vk.usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
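Taken together, lines 124-138 cover most of the early fast-clear heuristic. A hedged reconstruction follows; the body of the small-image branch is elided in the listing, so the early return inside it is an assumption:

static bool
radv_image_use_fast_clear_for_image_early(const struct radv_device *device,
                                          const struct radv_image *image)
{
   if (image->info.samples <= 1 && image->info.width * image->info.height <= 512 * 512) {
      /* Small single-sample surfaces: the eliminate pass would likely cost more than
       * the fast clear saves (the RadeonSI comparison at line 132). Assumed to return
       * false here, since the listing does not show the branch body. */
      return false;
   }

   /* Line 138: otherwise, fast clears are only worthwhile for color attachments. */
   return !!(image->vk.usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
}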
143 const struct radv_image *image)
148 return radv_image_use_fast_clear_for_image_early(device, image) &&
149 (image->exclusive ||
154 radv_image_use_dcc_image_stores(device, image));
228 radv_use_dcc_for_image_early(struct radv_device *device, struct radv_image *image,
239 if (image->shareable && image->vk.tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT)
264 if (!radv_image_use_fast_clear_for_image_early(device, image) &&
265 image->vk.tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT)
288 radv_use_dcc_for_image_late(struct radv_device *device, struct radv_image *image)
290 if (!radv_image_has_dcc(image))
293 if (image->vk.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT)
296 if (!radv_image_use_fast_clear_for_image(device, image))
299 /* TODO: Fix storage images with DCC without DCC image stores.
301 if ((image->vk.usage & VK_IMAGE_USAGE_STORAGE_BIT) &&
302 !radv_image_use_dcc_image_stores(device, image))
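Lines 288-302 show the conditions of the late DCC check but none of its return statements. A hedged reconstruction, with the elided returns filled in as assumptions:

static bool
radv_use_dcc_for_image_late(struct radv_device *device, struct radv_image *image)
{
   if (!radv_image_has_dcc(image))
      return false; /* assumed: nothing to keep enabled */

   if (image->vk.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT)
      return true; /* assumed: the DRM format modifier already committed to DCC */

   if (!radv_image_use_fast_clear_for_image(device, image))
      return false; /* assumed */

   /* TODO at line 299: storage images without DCC image stores are not handled yet. */
   if ((image->vk.usage & VK_IMAGE_USAGE_STORAGE_BIT) &&
       !radv_image_use_dcc_image_stores(device, image))
      return false; /* assumed */

   return true; /* assumed */
}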
309 * Whether to enable image stores with DCC compression for this image. If
310 * this function returns false the image subresource should be decompressed
311 * before using it with image stores.
316 * This function assumes the image uses DCC compression.
319 radv_image_use_dcc_image_stores(const struct radv_device *device, const struct radv_image *image)
322 &image->planes[0].surface);
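Line 322 shows only the second argument of the call inside radv_image_use_dcc_image_stores(). In upstream RADV this predicate defers to an ac_surface helper; the helper name and first argument below are reproduced from memory of upstream and should be treated as an assumption, not a quote of this file:

bool
radv_image_use_dcc_image_stores(const struct radv_device *device, const struct radv_image *image)
{
   /* Assumption: the elided first argument is the GPU generation and the helper
    * is the shared ac_surface query; this tree may differ. */
   return ac_surface_supports_dcc_image_stores(device->physical_device->rad_info.gfx_level,
                                               &image->planes[0].surface);
}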
327 * state. This can be used to avoid decompressing an image multiple times.
330 radv_image_use_dcc_predication(const struct radv_device *device, const struct radv_image *image)
332 return radv_image_has_dcc(image) && !radv_image_use_dcc_image_stores(device, image);
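Lines 330 and 332 give the whole of radv_image_use_dcc_predication(); it is reconstructed here only to make the relationship with the comment at line 327 explicit, assuming no statements were elided between the two matches:

bool
radv_image_use_dcc_predication(const struct radv_device *device, const struct radv_image *image)
{
   /* Predication is only useful when DCC is present but cannot stay compressed
    * through image stores, i.e. when a decompression might need to be skipped. */
   return radv_image_has_dcc(image) && !radv_image_use_dcc_image_stores(device, image);
}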
336 radv_use_fmask_for_image(const struct radv_device *device, const struct radv_image *image)
338 return image->info.samples > 1 && ((image->vk.usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) ||
343 radv_use_htile_for_image(const struct radv_device *device, const struct radv_image *image)
350 image->info.array_size == 1 && device->physical_device->rad_info.gfx_level >= GFX10;
354 image->vk.format == VK_FORMAT_D32_SFLOAT_S8_UINT && image->info.levels > 1)
359 image->vk.format == VK_FORMAT_D32_SFLOAT_S8_UINT && image->info.levels > 1)
365 if (image->info.width * image->info.height < 8 * 8 &&
370 return (image->info.levels == 1 || use_htile_for_mips) && !image->shareable;
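A partial sketch of the HTILE decision visible in lines 343-370. The gfx-level, format and size conditions that the listing truncates (lines 354-365) are deliberately left out, so only the grounded parts appear; the _sketch suffix marks it as illustrative:

static bool
radv_use_htile_for_image_sketch(const struct radv_device *device, const struct radv_image *image)
{
   /* Line 350: mipmapped images only keep HTILE when single-layer on GFX10+. */
   bool use_htile_for_mips =
      image->info.array_size == 1 && device->physical_device->rad_info.gfx_level >= GFX10;

   /* Lines 354-365 add exceptions (D32_SFLOAT_S8_UINT with mips, very small images)
    * whose outcomes are elided in the listing and therefore omitted here. */

   /* Line 370: never enable HTILE for shareable images. */
   return (image->info.levels == 1 || use_htile_for_mips) && !image->shareable;
}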
374 radv_use_tc_compat_cmask_for_image(struct radv_device *device, struct radv_image *image)
384 if ((image->vk.usage & VK_IMAGE_USAGE_STORAGE_BIT) &&
388 /* Do not enable TC-compatible if the image isn't readable by a shader
391 if (!(image->vk.usage & (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
395 /* If the image doesn't have FMASK, it can't be fetchable. */
396 if (!radv_image_has_fmask(image))
451 radv_patch_image_dimensions(struct radv_device *device, struct radv_image *image,
455 unsigned width = image->info.width;
456 unsigned height = image->info.height;
478 if (image->info.width == width && image->info.height == height)
481 if (width < image->info.width || height < image->info.height) {
483 "The imported image has smaller dimensions than the internal\n"
487 image->info.width, image->info.height, width, height);
491 "Tried to import an image with inconsistent width on GFX10.\n"
495 image->info.width, image->info.height, width, height);
499 "Tried to import an image with inconsistent width on pre-GFX10.\n"
503 image->info.width, image->info.height, width, height);
512 radv_patch_image_from_extra_info(struct radv_device *device, struct radv_image *image,
516 VkResult result = radv_patch_image_dimensions(device, image, create_info, image_info);
520 for (unsigned plane = 0; plane < image->plane_count; ++plane) {
522 radv_patch_surface_from_metadata(device, &image->planes[plane].surface,
527 image->planes[plane].surface.flags |= RADEON_SURF_SCANOUT;
529 image->planes[plane].surface.flags |= RADEON_SURF_DISABLE_DCC;
531 image->info.surf_index = NULL;
536 image->planes[plane].surface.flags |= RADEON_SURF_DISABLE_DCC;
568 radv_image_get_plane_format(const struct radv_physical_device *pdev, const struct radv_image *image,
572 vk_format_description(image->vk.format)->layout == UTIL_FORMAT_LAYOUT_ETC) {
574 return image->vk.format;
575 return etc2_emulation_format(image->vk.format);
577 return vk_format_get_plane_format(image->vk.format, plane);
581 radv_get_surface_flags(struct radv_device *device, struct radv_image *image, unsigned plane_id,
586 VkFormat format = radv_image_get_plane_format(device->physical_device, image, plane_id);
612 unreachable("unhandled image type");
621 if (radv_use_htile_for_image(device, image) &&
638 if (!radv_use_dcc_for_image_early(device, image, pCreateInfo, image_format,
639 &image->dcc_sign_reinterpret))
642 if (!radv_use_fmask_for_image(device, image))
767 si_set_mutable_tex_desc_fields(struct radv_device *device, struct radv_image *image,
773 struct radv_image_plane *plane = &image->planes[plane_id];
774 struct radv_image_binding *binding = image->disjoint ? &image->bindings[plane_id] : &image->bindings[0];
796 if (!disable_compression && radv_dcc_enabled(image, first_level)) {
804 } else if (!disable_compression && radv_image_is_tc_compat_htile(image)) {
835 if (radv_dcc_enabled(image, first_level) && is_storage_image && enable_write_compression)
906 unreachable("illegal image type");
954 gfx10_make_texture_descriptor(struct radv_device *device, struct radv_image *image,
972 if (image->vk.format == VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK &&
975 } else if (image->vk.format == VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK &&
985 assert(image->vk.image_type == VK_IMAGE_TYPE_3D);
988 type = radv_tex_dim(image->vk.image_type, view_type, image->info.array_size, image->info.samples,
994 depth = image->info.array_size;
997 depth = image->info.array_size;
999 depth = image->info.array_size / 6;
1011 S_00A00C_BASE_LEVEL(image->info.samples > 1 ? 0 : first_level) |
1012 S_00A00C_LAST_LEVEL(image->info.samples > 1 ? util_logbase2(image->info.samples)
1038 image->info.samples > 1 ? util_logbase2(image->info.samples) : image->info.levels - 1;
1046 if (radv_dcc_enabled(image, first_level)) {
1049 image->planes[0].surface.u.gfx9.color.dcc.max_compressed_block_size) |
1053 if (radv_image_get_iterate256(device, image)) {
1059 if (radv_image_has_fmask(image)) {
1060 uint64_t gpu_address = radv_buffer_get_va(image->bindings[0].bo);
1064 assert(image->plane_count == 1);
1066 va = gpu_address + image->bindings[0].offset + image->planes[0].surface.fmask_offset;
1068 switch (image->info.samples) {
1082 fmask_state[0] = (va >> 8) | image->planes[0].surface.fmask_tile_swizzle;
1090 S_00A00C_SW_MODE(image->planes[0].surface.u.gfx9.color.fmask_swizzle_mode) |
1092 radv_tex_dim(image->vk.image_type, view_type, image->info.array_size, 0, false, false));
1098 if (radv_image_is_tc_compat_cmask(image)) {
1099 va = gpu_address + image->bindings[0].offset + image->planes[0].surface.cmask_offset;
1114 si_make_texture_descriptor(struct radv_device *device, struct radv_image *image,
1132 if (image->vk.format == VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK &&
1135 } else if (image->vk.format == VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK &&
1156 radv_image_is_tc_compat_htile(image)) {
1157 if (image->vk.format == VK_FORMAT_D32_SFLOAT_S8_UINT)
1159 else if (image->vk.format == VK_FORMAT_D16_UNORM_S8_UINT)
1165 assert(image->vk.image_type == VK_IMAGE_TYPE_3D);
1168 type = radv_tex_dim(image->vk.image_type, view_type, image->info.array_size, image->info.samples,
1174 depth = image->info.array_size;
1177 depth = image->info.array_size;
1179 depth = image->info.array_size / 6;
1190 S_008F1C_BASE_LEVEL(image->info.samples > 1 ? 0 : first_level) |
1191 S_008F1C_LAST_LEVEL(image->info.samples > 1 ? util_logbase2(image->info.samples)
1211 state[5] |= S_008F24_MAX_MIP(image->info.samples > 1 ? util_logbase2(image->info.samples)
1212 : image->info.levels - 1);
1214 state[3] |= S_008F1C_POW2_PAD(image->info.levels > 1);
1218 if (!(image->planes[0].surface.flags & RADEON_SURF_Z_OR_SBUFFER) &&
1219 image->planes[0].surface.meta_offset) {
1226 if (device->physical_device->rad_info.gfx_level <= GFX7 && image->info.samples <= 1) {
1237 if (radv_image_has_fmask(image)) {
1239 uint64_t gpu_address = radv_buffer_get_va(image->bindings[0].bo);
1242 assert(image->plane_count == 1);
1244 va = gpu_address + image->bindings[0].offset + image->planes[0].surface.fmask_offset;
1248 switch (image->info.samples) {
1262 switch (image->info.samples) {
1280 fmask_state[0] |= image->planes[0].surface.fmask_tile_swizzle;
1288 radv_tex_dim(image->vk.image_type, view_type, image->info.array_size, 0, false, false));
1295 fmask_state[3] |= S_008F1C_SW_MODE(image->planes[0].surface.u.gfx9.color.fmask_swizzle_mode);
1297 S_008F20_PITCH(image->planes[0].surface.u.gfx9.color.fmask_epitch);
1300 if (radv_image_is_tc_compat_cmask(image)) {
1301 va = gpu_address + image->bindings[0].offset + image->planes[0].surface.cmask_offset;
1309 S_008F1C_TILING_INDEX(image->planes[0].surface.u.legacy.color.fmask.tiling_index);
1312 S_008F20_PITCH(image->planes[0].surface.u.legacy.color.fmask.pitch_in_pixels - 1);
1315 if (radv_image_is_tc_compat_cmask(image)) {
1316 va = gpu_address + image->bindings[0].offset + image->planes[0].surface.cmask_offset;
1328 radv_make_texture_descriptor(struct radv_device *device, struct radv_image *image,
1336 gfx10_make_texture_descriptor(device, image, is_storage_image, view_type, vk_format, mapping,
1340 si_make_texture_descriptor(device, image, is_storage_image, view_type, vk_format, mapping,
1347 radv_query_opaque_metadata(struct radv_device *device, struct radv_image *image,
1353 assert(image->plane_count == 1);
1355 radv_make_texture_descriptor(device, image, false, (VkImageViewType)image->vk.image_type,
1356 image->vk.format, &fixedmapping, 0, image->info.levels - 1, 0,
1357 image->info.array_size - 1, image->info.width, image->info.height,
1358 image->info.depth, 0.0f, desc, NULL, 0);
1360 si_set_mutable_tex_desc_fields(device, image, &image->planes[0].surface.u.legacy.level[0], 0, 0,
1361 0, image->planes[0].surface.blk_w, false, false, false, false,
1364 ac_surface_get_umd_metadata(&device->physical_device->rad_info, &image->planes[0].surface,
1365 image->info.levels, desc, &md->size_metadata, md->metadata);
1369 radv_init_metadata(struct radv_device *device, struct radv_image *image,
1372 struct radeon_surf *surface = &image->planes[0].surface;
1378 image->bindings[0].offset +
1404 radv_query_opaque_metadata(device, image, metadata);
1408 radv_image_override_offset_stride(struct radv_device *device, struct radv_image *image,
1411 ac_surface_override_offset_stride(&device->physical_device->rad_info, &image->planes[0].surface,
1412 image->info.levels, offset, stride);
1417 const struct radv_image *image, struct radeon_surf *surf)
1419 if (!surf->cmask_size || surf->cmask_offset || surf->bpe > 8 || image->info.levels > 1 ||
1420 image->info.depth > 1 || radv_image_has_dcc(image) ||
1421 !radv_image_use_fast_clear_for_image(device, image) ||
1422 (image->vk.create_flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT))
1425 assert(image->info.storage_samples == 1);
1433 radv_image_alloc_values(const struct radv_device *device, struct radv_image *image)
1436 if (image->vk.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT)
1439 if (radv_image_has_cmask(image) || (radv_image_has_dcc(image) && !image->support_comp_to_single)) {
1440 image->fce_pred_offset = image->size;
1441 image->size += 8 * image->info.levels;
1444 if (radv_image_use_dcc_predication(device, image)) {
1445 image->dcc_pred_offset = image->size;
1446 image->size += 8 * image->info.levels;
1449 if ((radv_image_has_dcc(image) && !image->support_comp_to_single) ||
1450 radv_image_has_cmask(image) || radv_image_has_htile(image)) {
1451 image->clear_value_offset = image->size;
1452 image->size += 8 * image->info.levels;
1455 if (radv_image_is_tc_compat_htile(image) &&
1461 image->tc_compat_zrange_offset = image->size;
1462 image->size += image->info.levels * 4;
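Lines 1439-1462 outline how radv_image_alloc_values() appends per-level metadata words after the surface data. A condensed, hedged sketch of that layout; each block is only allocated when its guard at lines 1439/1444/1449/1455 holds, and the guards that the listing truncates are not repeated here:

/* Fast-clear-eliminate predicates: 8 bytes per mip (lines 1440-1441). */
image->fce_pred_offset = image->size;
image->size += 8 * image->info.levels;

/* DCC decompression predicates: 8 bytes per mip (lines 1445-1446). */
image->dcc_pred_offset = image->size;
image->size += 8 * image->info.levels;

/* Clear values for CMASK/DCC/HTILE: 8 bytes per mip (lines 1451-1452). */
image->clear_value_offset = image->size;
image->size += 8 * image->info.levels;

/* TC-compatible HTILE depth-range values: 4 bytes per mip (lines 1461-1462). */
image->tc_compat_zrange_offset = image->size;
image->size += image->info.levels * 4;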
1466 /* Determine if the image is affected by the pipe misaligned metadata issue
1470 radv_image_is_pipe_misaligned(const struct radv_device *device, const struct radv_image *image)
1473 int log2_samples = util_logbase2(image->info.samples);
1477 for (unsigned i = 0; i < image->plane_count; ++i) {
1478 VkFormat fmt = radv_image_get_plane_format(device->physical_device, image, i);
1485 if (vk_format_has_depth(image->vk.format) && image->info.array_size >= 8) {
1495 if (vk_format_has_depth(image->vk.format)) {
1496 if (radv_image_is_tc_compat_htile(image) && overlap) {
1504 /* TODO: It shouldn't be necessary if the image has DCC but
1507 if ((radv_image_has_dcc(image) || radv_image_is_tc_compat_cmask(image)) &&
1518 radv_image_is_l2_coherent(const struct radv_device *device, const struct radv_image *image)
1522 !radv_image_is_pipe_misaligned(device, image);
1524 if (image->info.samples == 1 &&
1525 (image->vk.usage &
1527 !vk_format_has_stencil(image->vk.format)) {
1540 * Determine if the given image can be fast cleared.
1543 radv_image_can_fast_clear(const struct radv_device *device, const struct radv_image *image)
1548 if (vk_format_is_color(image->vk.format)) {
1549 if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image))
1553 if (!radv_image_has_dcc(image) && device->physical_device->rad_info.family == CHIP_STONEY)
1556 if (!radv_image_has_htile(image))
1561 if (image->vk.image_type == VK_IMAGE_TYPE_3D)
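Lines 1543-1561 show the per-image fast-clear gate. A hedged reconstruction; the if/else structure around the HTILE check and the elided return statements are assumptions:

bool
radv_image_can_fast_clear(const struct radv_device *device, const struct radv_image *image)
{
   if (vk_format_is_color(image->vk.format)) {
      /* Line 1549: color needs either CMASK or DCC metadata. */
      if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image))
         return false; /* assumed */
      /* Line 1553: CMASK-only fast clears are avoided on Stoney. */
      if (!radv_image_has_dcc(image) && device->physical_device->rad_info.family == CHIP_STONEY)
         return false; /* assumed */
   } else {
      /* Line 1556: depth/stencil needs HTILE. */
      if (!radv_image_has_htile(image))
         return false; /* assumed */
   }

   /* Line 1561: 3D images are not fast cleared. */
   if (image->vk.image_type == VK_IMAGE_TYPE_3D)
      return false; /* assumed */

   return true; /* assumed */
}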
1568 * Determine if the given image can be fast cleared using comp-to-single.
1571 radv_image_use_comp_to_single(const struct radv_device *device, const struct radv_image *image)
1577 /* If the image can't be fast cleared, comp-to-single can't be used. */
1578 if (!radv_image_can_fast_clear(device, image))
1581 /* If the image doesn't have DCC, it can't be fast cleared using comp-to-single */
1582 if (!radv_image_has_dcc(image))
1586 unsigned bytes_per_pixel = vk_format_get_blocksize(image->vk.format);
1602 radv_image_reset_layout(const struct radv_physical_device *pdev, struct radv_image *image)
1604 image->size = 0;
1605 image->alignment = 1;
1607 image->tc_compatible_cmask = 0;
1608 image->fce_pred_offset = image->dcc_pred_offset = 0;
1609 image->clear_value_offset = image->tc_compat_zrange_offset = 0;
1611 unsigned plane_count = radv_get_internal_plane_count(pdev, image->vk.format);
1613 VkFormat format = radv_image_get_plane_format(pdev, image, i);
1617 uint64_t flags = image->planes[i].surface.flags;
1618 uint64_t modifier = image->planes[i].surface.modifier;
1619 memset(image->planes + i, 0, sizeof(image->planes[i]));
1621 image->planes[i].surface.flags = flags;
1622 image->planes[i].surface.modifier = modifier;
1623 image->planes[i].surface.blk_w = vk_format_get_blockwidth(format);
1624 image->planes[i].surface.blk_h = vk_format_get_blockheight(format);
1625 image->planes[i].surface.bpe = vk_format_get_blocksize(format);
1628 if (image->planes[i].surface.bpe == 3) {
1629 image->planes[i].surface.bpe = 4;
1637 struct radv_image *image)
1643 struct ac_surf_info image_info = image->info;
1644 VkResult result = radv_patch_image_from_extra_info(device, image, &create_info, &image_info);
1648 assert(!mod_info || mod_info->drmFormatModifierPlaneCount >= image->plane_count);
1650 radv_image_reset_layout(device->physical_device, image);
1652 unsigned plane_count = radv_get_internal_plane_count(device->physical_device, image->vk.format);
1658 info.width = vk_format_get_plane_width(image->vk.format, plane, info.width);
1659 info.height = vk_format_get_plane_height(image->vk.format, plane, info.height);
1662 image->planes[plane].surface.flags |=
1666 device->ws->surface_init(device->ws, &info, &image->planes[plane].surface);
1669 if (!radv_use_dcc_for_image_late(device, image))
1670 ac_surface_zero_dcc_fields(&image->planes[0].surface);
1675 &image->planes[plane].surface, image_info.storage_samples,
1682 radv_image_alloc_single_sample_cmask(device, image, &image->planes[plane].surface);
1685 if (mod_info->pPlaneLayouts[plane].rowPitch % image->planes[plane].surface.bpe ||
1690 stride = mod_info->pPlaneLayouts[plane].rowPitch / image->planes[plane].surface.bpe;
1692 offset = image->disjoint ? 0 :
1693 align64(image->size, 1 << image->planes[plane].surface.alignment_log2);
1698 &image->planes[plane].surface, image->info.levels,
1704 unsigned mem_planes = ac_surface_get_nplanes(&image->planes[plane].surface);
1710 &image->planes[plane].surface, i,
1716 image->size = MAX2(image->size, offset + image->planes[plane].surface.total_size);
1717 image->alignment = MAX2(image->alignment, 1 << image->planes[plane].surface.alignment_log2);
1719 image->planes[plane].format =
1720 radv_image_get_plane_format(device->physical_device, image, plane);
1723 image->tc_compatible_cmask =
1724 radv_image_has_cmask(image) && radv_use_tc_compat_cmask_for_image(device, image);
1726 image->l2_coherent = radv_image_is_l2_coherent(device, image);
1728 image->support_comp_to_single = radv_image_use_comp_to_single(device, image);
1730 radv_image_alloc_values(device, image);
1732 assert(image->planes[0].surface.surf_size);
1733 assert(image->planes[0].surface.modifier == DRM_FORMAT_MOD_INVALID ||
1734 ac_modifier_has_dcc(image->planes[0].surface.modifier) == radv_image_has_dcc(image));
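Lines 1692-1717 show how radv_image_create_layout() accumulates the total size and alignment across planes. Condensed into a hedged per-plane sketch, where surf is shorthand introduced here for image->planes[plane].surface:

struct radeon_surf *surf = &image->planes[plane].surface;

/* Lines 1692-1693: disjoint planes start at offset 0, otherwise the plane is packed
 * after everything allocated so far, aligned to the surface alignment. */
uint64_t offset = image->disjoint ? 0 : align64(image->size, 1 << surf->alignment_log2);

/* Lines 1716-1717: grow the image size and alignment to cover this plane. */
image->size = MAX2(image->size, offset + surf->total_size);
image->alignment = MAX2(image->alignment, 1 << surf->alignment_log2);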
1740 struct radv_image *image)
1742 if ((image->vk.create_flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) && image->bindings[0].bo)
1743 device->ws->buffer_destroy(device->ws, image->bindings[0].bo);
1745 if (image->owned_memory != VK_NULL_HANDLE) {
1746 RADV_FROM_HANDLE(radv_device_memory, mem, image->owned_memory);
1750 vk_image_finish(&image->vk);
1751 vk_free2(&device->vk.alloc, pAllocator, image);
1755 radv_image_print_info(struct radv_device *device, struct radv_image *image)
1762 image->size, image->alignment, image->info.width, image->info.height,
1763 image->info.array_size, image->info.levels);
1764 for (unsigned i = 0; i < image->plane_count; ++i) {
1765 const struct radv_image_plane *plane = &image->planes[i];
1823 struct radv_image *image = NULL;
1833 const size_t image_struct_size = sizeof(*image) + sizeof(struct radv_image_plane) * plane_count;
1842 image =
1844 if (!image)
1847 vk_image_init(&device->vk, &image->vk, pCreateInfo);
1849 image->info.width = pCreateInfo->extent.width;
1850 image->info.height = pCreateInfo->extent.height;
1851 image->info.depth = pCreateInfo->extent.depth;
1852 image->info.samples = pCreateInfo->samples;
1853 image->info.storage_samples = pCreateInfo->samples;
1854 image->info.array_size = pCreateInfo->arrayLayers;
1855 image->info.levels = pCreateInfo->mipLevels;
1856 image->info.num_channels = vk_format_get_nr_components(format);
1858 image->plane_count = vk_format_get_plane_count(format);
1859 image->disjoint = image->plane_count > 1 && pCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT;
1861 image->exclusive = pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE;
1866 image->queue_family_mask |= (1u << RADV_MAX_QUEUE_FAMILIES) - 1u;
1868 image->queue_family_mask |= 1u << vk_queue_to_radv(device->physical_device,
1875 image->shareable = external_info;
1876 if (!vk_format_is_depth_or_stencil(format) && !image->shareable &&
1877 !(image->vk.create_flags & VK_IMAGE_CREATE_SPARSE_ALIASED_BIT) &&
1879 image->info.surf_index = &device->image_mrt_offset_counter;
1888 image->planes[plane].surface.flags =
1889 radv_get_surface_flags(device, image, plane, pCreateInfo, format);
1890 image->planes[plane].surface.modifier = modifier;
1898 *pImage = radv_image_to_handle(image);
1899 assert(!(image->vk.create_flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT));
1903 VkResult result = radv_image_create_layout(device, *create_info, explicit_mod, image);
1905 radv_destroy_image(device, alloc, image);
1909 if (image->vk.create_flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) {
1910 image->alignment = MAX2(image->alignment, 4096);
1911 image->size = align64(image->size, image->alignment);
1912 image->bindings[0].offset = 0;
1915 device->ws->buffer_create(device->ws, image->size, image->alignment, 0,
1917 &image->bindings[0].bo);
1919 radv_destroy_image(device, alloc, image);
1925 radv_image_print_info(device, image);
1928 *pImage = radv_image_to_handle(image);
1941 struct radv_image *image = iview->image;
1942 struct radv_image_plane *plane = &image->planes[plane_id];
1962 device, image, is_storage_image, iview->vk.view_type, vk_format, components, hw_level,
1965 vk_format_get_plane_width(image->vk.format, plane_id, iview->extent.width),
1966 vk_format_get_plane_height(image->vk.format, plane_id, iview->extent.height),
1979 bool enable_write_compression = radv_image_use_dcc_image_stores(device, image);
1982 si_set_mutable_tex_desc_fields(device, image, base_level_info, plane_id, iview->vk.base_mip_level,
2006 radv_get_aspect_format(struct radv_image *image, VkImageAspectFlags mask)
2010 return image->planes[0].format;
2012 return image->planes[1].format;
2014 return image->planes[2].format;
2016 return vk_format_stencil_only(image->vk.format);
2018 return vk_format_depth_only(image->vk.format);
2020 return vk_format_depth_only(image->vk.format);
2022 return image->vk.format;
2027 * Determine if the given image view can be fast cleared.
2033 struct radv_image *image;
2037 image = iview->image;
2039 /* Only fast clear if the image itself can be fast cleared. */
2040 if (!radv_image_can_fast_clear(device, image))
2044 if (iview->vk.base_array_layer > 0 || iview->vk.layer_count != image->info.array_size)
2047 /* Only fast clear if the view covers the whole image. */
2048 if (!radv_image_extent_compare(image, &iview->extent))
2060 RADV_FROM_HANDLE(radv_image, image, pCreateInfo->image);
2074 switch (image->vk.image_type) {
2077 assert(range->baseArrayLayer + radv_get_layerCount(image, range) - 1 <=
2078 image->info.array_size);
2081 assert(range->baseArrayLayer + radv_get_layerCount(image, range) - 1 <=
2082 radv_minify(image->info.depth, range->baseMipLevel));
2087 iview->image = image;
2090 /* If the image has an Android external format, pCreateInfo->format will be
2093 iview->vk.format = image->vk.format;
2094 iview->vk.view_format = image->vk.format;
2108 if (vk_format_get_plane_count(image->vk.format) > 1 &&
2114 vk_format_description(image->vk.format)->layout == UTIL_FORMAT_LAYOUT_ETC) {
2127 .width = image->info.width,
2128 .height = image->info.height,
2129 .depth = image->info.depth,
2135 if (iview->vk.format != image->planes[iview->plane_id].format) {
2138 unsigned img_bw = vk_format_get_blockwidth(image->planes[iview->plane_id].format);
2139 unsigned img_bh = vk_format_get_blockheight(image->planes[iview->plane_id].format);
2145 * If we have the following image:
2171 vk_format_is_compressed(image->vk.format) && !vk_format_is_compressed(iview->vk.format)) {
2176 iview->extent.width = iview->image->planes[0].surface.u.gfx9.base_mip_width;
2177 iview->extent.height = iview->image->planes[0].surface.u.gfx9.base_mip_height;
2179 unsigned lvl_width = radv_minify(image->info.width, range->baseMipLevel);
2180 unsigned lvl_height = radv_minify(image->info.height, range->baseMipLevel);
2189 iview->image->planes[0].surface.u.gfx9.base_mip_width);
2191 iview->image->planes[0].surface.u.gfx9.base_mip_height);
2219 radv_layout_is_htile_compressed(const struct radv_device *device, const struct radv_image *image,
2227 return radv_image_has_htile(image);
2229 return radv_image_is_tc_compat_htile(image) ||
2230 (radv_image_has_htile(image) && queue_mask == (1u << RADV_QUEUE_GENERAL));
2235 * if the image doesn't have the storage bit set. This
2243 if (radv_image_is_tc_compat_htile(image) && queue_mask & (1u << RADV_QUEUE_GENERAL) &&
2255 if (radv_image_is_tc_compat_htile(image) ||
2256 (radv_image_has_htile(image) &&
2257 !(image->vk.usage & (VK_IMAGE_USAGE_SAMPLED_BIT |
2259 /* Keep HTILE compressed if the image is only going to
2268 return radv_image_is_tc_compat_htile(image);
2273 radv_layout_can_fast_clear(const struct radv_device *device, const struct radv_image *image,
2277 if (radv_dcc_enabled(image, level) &&
2278 !radv_layout_dcc_compressed(device, image, level, layout, in_render_loop, queue_mask))
2281 if (!(image->vk.usage & RADV_IMAGE_USAGE_WRITE_BITS))
2292 return queue_mask == (1u << RADV_QUEUE_GENERAL) || radv_image_use_comp_to_single(device, image);
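Lines 2277-2292 give most of radv_layout_can_fast_clear(). A hedged reconstruction; the tail of the parameter list, the checks the listing elides between lines 2281 and 2292, and the elided return values are assumptions:

bool
radv_layout_can_fast_clear(const struct radv_device *device, const struct radv_image *image,
                           unsigned level, VkImageLayout layout, bool in_render_loop,
                           unsigned queue_mask)
{
   /* Lines 2277-2278: DCC must stay compressed in this layout for a fast clear to help. */
   if (radv_dcc_enabled(image, level) &&
       !radv_layout_dcc_compressed(device, image, level, layout, in_render_loop, queue_mask))
      return false; /* assumed */

   /* Line 2281: images without writable usage are never fast cleared. */
   if (!(image->vk.usage & RADV_IMAGE_USAGE_WRITE_BITS))
      return false; /* assumed */

   /* Line 2292: exclusive GFX-queue access, or comp-to-single support, is required. */
   return queue_mask == (1u << RADV_QUEUE_GENERAL) ||
          radv_image_use_comp_to_single(device, image);
}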
2296 radv_layout_dcc_compressed(const struct radv_device *device, const struct radv_image *image,
2300 if (!radv_dcc_enabled(image, level))
2303 if (image->vk.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT &&
2307 /* If the image is read-only, we can always just keep it compressed */
2308 if (!(image->vk.usage & RADV_IMAGE_USAGE_WRITE_BITS))
2311 /* Don't compress compute transfer dst when image stores are not supported. */
2313 (queue_mask & (1u << RADV_QUEUE_COMPUTE)) && !radv_image_use_dcc_image_stores(device, image))
2327 radv_layout_fmask_compressed(const struct radv_device *device, const struct radv_image *image,
2330 if (!radv_image_has_fmask(image))
2333 /* Don't compress compute transfer dst because image stores ignore FMASK and it needs to be
2342 (queue_mask == (1u << RADV_QUEUE_GENERAL) || radv_image_is_tc_compat_cmask(image));
2346 radv_image_queue_family_mask(const struct radv_image *image,
2350 if (!image->exclusive)
2351 return image->queue_family_mask;
2389 RADV_FROM_HANDLE(radv_image, image, _image);
2391 if (!image)
2394 radv_destroy_image(device, pAllocator, image);
2401 RADV_FROM_HANDLE(radv_image, image, _image);
2407 if (vk_format_get_plane_count(image->vk.format) > 1)
2410 struct radv_image_plane *plane = &image->planes[plane_id];
2413 if (image->vk.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
2432 if (image->vk.format == VK_FORMAT_R32G32B32_UINT ||
2433 image->vk.format == VK_FORMAT_R32G32B32_SINT ||
2434 image->vk.format == VK_FORMAT_R32G32B32_SFLOAT) {
2451 if (image->vk.image_type == VK_IMAGE_TYPE_3D)
2452 pLayout->size *= u_minify(image->info.depth, level);
2460 if (image->vk.image_type == VK_IMAGE_TYPE_3D)
2461 pLayout->size *= u_minify(image->info.depth, level);
2469 RADV_FROM_HANDLE(radv_image, image, _image);
2471 pProperties->drmFormatModifier = image->planes[0].surface.modifier;
2479 RADV_FROM_HANDLE(radv_image, image, pCreateInfo->image);
2488 radv_image_view_init(view, device, pCreateInfo, image->vk.create_flags,