* IN THE SOFTWARE.
*/
+#include "radv_debug.h"
#include "radv_private.h"
#include "vk_format.h"
+#include "vk_util.h"
#include "radv_radeon_winsys.h"
#include "sid.h"
+#include "gfx9d.h"
#include "util/debug.h"
+#include "util/u_atomic.h"
static unsigned
-radv_choose_tiling(struct radv_device *Device,
+radv_choose_tiling(struct radv_device *device,
const struct radv_image_create_info *create_info)
{
const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
return RADEON_SURF_MODE_LINEAR_ALIGNED;
}
- /* Textures with a very small height are recommended to be linear. */
- if (pCreateInfo->imageType == VK_IMAGE_TYPE_1D ||
- /* Only very thin and long 2D textures should benefit from
- * linear_aligned. */
- (pCreateInfo->extent.width > 8 && pCreateInfo->extent.height <= 2))
- return RADEON_SURF_MODE_LINEAR_ALIGNED;
+	if (!vk_format_is_compressed(pCreateInfo->format) &&
+	    !vk_format_is_depth_or_stencil(pCreateInfo->format) &&
+	    device->physical_device->rad_info.chip_class <= VI) {
+		/* This heuristic causes hangs in some VK CTS tests on GFX9,
+		 * so restrict it to VI and older. */
+ /* Textures with a very small height are recommended to be linear. */
+ if (pCreateInfo->imageType == VK_IMAGE_TYPE_1D ||
+ /* Only very thin and long 2D textures should benefit from
+ * linear_aligned. */
+ (pCreateInfo->extent.width > 8 && pCreateInfo->extent.height <= 2))
+ return RADEON_SURF_MODE_LINEAR_ALIGNED;
+ }
/* MSAA resources must be 2D tiled. */
if (pCreateInfo->samples > 1)
return RADEON_SURF_MODE_2D;
}
+
+static bool
+radv_use_tc_compat_htile_for_image(struct radv_device *device,
+ const VkImageCreateInfo *pCreateInfo)
+{
+ /* TC-compat HTILE is only available for GFX8+. */
+ if (device->physical_device->rad_info.chip_class < VI)
+ return false;
+
+ if (pCreateInfo->usage & VK_IMAGE_USAGE_STORAGE_BIT)
+ return false;
+
+ if (pCreateInfo->flags & (VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT |
+ VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR))
+ return false;
+
+ if (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR)
+ return false;
+
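+	/* radv only allocates HTILE for single-level images, see
+	 * radv_image_can_enable_htile().
+	 */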
+ if (pCreateInfo->mipLevels > 1)
+ return false;
+
+	/* FIXME: TC-compat with 2/4/8 samples breaks some CTS tests for an
+	 * unknown reason; disable for now. */
+ if (pCreateInfo->samples >= 2 &&
+ pCreateInfo->format == VK_FORMAT_D32_SFLOAT_S8_UINT)
+ return false;
+
+	/* GFX9 supports both 32-bit and 16-bit depth surfaces, while GFX8 only
+	 * supports 32-bit. However, it's possible to enable TC-compat for
+	 * 16-bit depth surfaces if no Z planes are compressed.
+	 */
+ if (pCreateInfo->format != VK_FORMAT_D32_SFLOAT_S8_UINT &&
+ pCreateInfo->format != VK_FORMAT_D32_SFLOAT &&
+ pCreateInfo->format != VK_FORMAT_D16_UNORM)
+ return false;
+
+ return true;
+}
+
+static bool
+radv_use_dcc_for_image(struct radv_device *device,
+ const struct radv_image_create_info *create_info,
+ const VkImageCreateInfo *pCreateInfo)
+{
+ bool dcc_compatible_formats;
+ bool blendable;
+ bool shareable = vk_find_struct_const(pCreateInfo->pNext,
+ EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR) != NULL;
+
+ /* DCC (Delta Color Compression) is only available for GFX8+. */
+ if (device->physical_device->rad_info.chip_class < VI)
+ return false;
+
+ if (device->instance->debug_flags & RADV_DEBUG_NO_DCC)
+ return false;
+
+ /* FIXME: DCC is broken for shareable images starting with GFX9 */
+ if (device->physical_device->rad_info.chip_class >= GFX9 &&
+ shareable)
+ return false;
+
+ /* TODO: Enable DCC for storage images. */
+ if ((pCreateInfo->usage & VK_IMAGE_USAGE_STORAGE_BIT) ||
+ (pCreateInfo->flags & VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR))
+ return false;
+
+ if (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR)
+ return false;
+
+ /* TODO: Enable DCC for mipmaps and array layers. */
+ if (pCreateInfo->mipLevels > 1 || pCreateInfo->arrayLayers > 1)
+ return false;
+
+ if (create_info->scanout)
+ return false;
+
+ /* FIXME: DCC for MSAA with 4x and 8x samples doesn't work yet, while
+ * 2x can be enabled with an option.
+ */
+ if (pCreateInfo->samples > 2 ||
+ (pCreateInfo->samples == 2 &&
+ !device->physical_device->dcc_msaa_allowed))
+ return false;
+
+ /* Determine if the formats are DCC compatible. */
+ dcc_compatible_formats =
+ radv_is_colorbuffer_format_supported(pCreateInfo->format,
+ &blendable);
+
+ if (pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
+ const struct VkImageFormatListCreateInfoKHR *format_list =
+ (const struct VkImageFormatListCreateInfoKHR *)
+ vk_find_struct_const(pCreateInfo->pNext,
+ IMAGE_FORMAT_LIST_CREATE_INFO_KHR);
+
+		/* We have to ignore the existence of the list if viewFormatCount is 0. */
+ if (format_list && format_list->viewFormatCount) {
+			/* Compatibility is transitive, so we only need to
+			 * check one format against everything else. */
+ for (unsigned i = 0; i < format_list->viewFormatCount; ++i) {
+ if (!radv_dcc_formats_compatible(pCreateInfo->format,
+ format_list->pViewFormats[i]))
+ dcc_compatible_formats = false;
+ }
+ } else {
+ dcc_compatible_formats = false;
+ }
+ }
+
+ if (!dcc_compatible_formats)
+ return false;
+
+ return true;
+}
+
static int
radv_init_surface(struct radv_device *device,
struct radeon_surf *surface,
unsigned array_mode = radv_choose_tiling(device, create_info);
const struct vk_format_description *desc =
vk_format_description(pCreateInfo->format);
- bool is_depth, is_stencil, blendable;
+ bool is_depth, is_stencil;
is_depth = vk_format_has_depth(desc);
is_stencil = vk_format_has_stencil(desc);
surface->blk_w = vk_format_get_blockwidth(pCreateInfo->format);
surface->blk_h = vk_format_get_blockheight(pCreateInfo->format);
- surface->bpe = vk_format_get_blocksize(pCreateInfo->format);
+ surface->bpe = vk_format_get_blocksize(vk_format_depth_only(pCreateInfo->format));
/* align byte per element on dword */
if (surface->bpe == 3) {
surface->bpe = 4;
if (is_depth) {
surface->flags |= RADEON_SURF_ZBUFFER;
+ if (radv_use_tc_compat_htile_for_image(device, pCreateInfo))
+ surface->flags |= RADEON_SURF_TC_COMPATIBLE_HTILE;
}
if (is_stencil)
surface->flags |= RADEON_SURF_SBUFFER;
- surface->flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
+ surface->flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE;
- if ((pCreateInfo->usage & (VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
- VK_IMAGE_USAGE_STORAGE_BIT)) ||
- (pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) ||
- (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR) ||
- device->physical_device->rad_info.chip_class < VI ||
- create_info->scanout || (device->debug_flags & RADV_DEBUG_NO_DCC) ||
- !radv_is_colorbuffer_format_supported(pCreateInfo->format, &blendable))
+ if (!radv_use_dcc_for_image(device, create_info, pCreateInfo))
surface->flags |= RADEON_SURF_DISABLE_DCC;
+
if (create_info->scanout)
surface->flags |= RADEON_SURF_SCANOUT;
return 0;
}
-#define ATI_VENDOR_ID 0x1002
+
static uint32_t si_get_bo_metadata_word1(struct radv_device *device)
{
return (ATI_VENDOR_ID << 16) | device->physical_device->rad_info.pci_id;
si_tile_mode_index(const struct radv_image *image, unsigned level, bool stencil)
{
if (stencil)
- return image->surface.stencil_tiling_index[level];
+ return image->surface.u.legacy.stencil_tiling_index[level];
else
- return image->surface.tiling_index[level];
+ return image->surface.u.legacy.tiling_index[level];
}
static unsigned radv_map_swizzle(unsigned swizzle)
{
const struct vk_format_description *desc;
unsigned stride;
- uint64_t gpu_address = device->ws->buffer_get_va(buffer->bo);
+ uint64_t gpu_address = radv_buffer_get_va(buffer->bo);
uint64_t va = gpu_address + buffer->offset;
unsigned num_format, data_format;
int first_non_void;
state[0] = va;
state[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
S_008F04_STRIDE(stride);
+
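+	/* NUM_RECORDS is a byte count on VI, but an element count (in units of
+	 * stride) on other generations when a stride is set, so convert.
+	 */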
+ if (device->physical_device->rad_info.chip_class != VI && stride) {
+ range /= stride;
+ }
+
state[2] = range;
state[3] = S_008F0C_DST_SEL_X(radv_map_swizzle(desc->swizzle[0])) |
S_008F0C_DST_SEL_Y(radv_map_swizzle(desc->swizzle[1])) |
static void
si_set_mutable_tex_desc_fields(struct radv_device *device,
struct radv_image *image,
- const struct radeon_surf_level *base_level_info,
+ const struct legacy_surf_level *base_level_info,
unsigned base_level, unsigned first_level,
unsigned block_width, bool is_stencil,
- uint32_t *state)
+ bool is_storage_image, uint32_t *state)
{
- uint64_t gpu_address = device->ws->buffer_get_va(image->bo) + image->offset;
- uint64_t va = gpu_address + base_level_info->offset;
- unsigned pitch = base_level_info->nblk_x * block_width;
+ uint64_t gpu_address = image->bo ? radv_buffer_get_va(image->bo) + image->offset : 0;
+ uint64_t va = gpu_address;
+ enum chip_class chip_class = device->physical_device->rad_info.chip_class;
+ uint64_t meta_va = 0;
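+
+	/* GFX9 addresses every mip from a single base offset; older chips put
+	 * a per-level offset in the descriptor instead.
+	 */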
+ if (chip_class >= GFX9) {
+ if (is_stencil)
+ va += image->surface.u.gfx9.stencil_offset;
+ else
+ va += image->surface.u.gfx9.surf_offset;
+ } else
+ va += base_level_info->offset;
+ state[0] = va >> 8;
+ if (chip_class >= GFX9 ||
+ base_level_info->mode == RADEON_SURF_MODE_2D)
+ state[0] |= image->surface.tile_swizzle;
state[1] &= C_008F14_BASE_ADDRESS_HI;
- state[3] &= C_008F1C_TILING_INDEX;
- state[4] &= C_008F20_PITCH_GFX6;
- state[6] &= C_008F28_COMPRESSION_EN;
+ state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40);
- assert(!(va & 255));
+ if (chip_class >= VI) {
+ state[6] &= C_008F28_COMPRESSION_EN;
+ state[7] = 0;
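+
+		/* state[7] receives the DCC or TC-compat HTILE metadata
+		 * address when compression is enabled below.
+		 */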
+ if (!is_storage_image && radv_dcc_enabled(image, first_level)) {
+ meta_va = gpu_address + image->dcc_offset;
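+			/* Pre-GFX9 surfaces store a separate DCC offset per level. */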
+ if (chip_class <= VI)
+ meta_va += base_level_info->dcc_offset;
+ } else if (!is_storage_image &&
+ radv_image_is_tc_compat_htile(image)) {
+ meta_va = gpu_address + image->htile_offset;
+ }
- state[0] = va >> 8;
- state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40);
- state[3] |= S_008F1C_TILING_INDEX(si_tile_mode_index(image, base_level,
- is_stencil));
- state[4] |= S_008F20_PITCH_GFX6(pitch - 1);
+ if (meta_va) {
+ state[6] |= S_008F28_COMPRESSION_EN(1);
+ state[7] = meta_va >> 8;
+ state[7] |= image->surface.tile_swizzle;
+ }
+ }
- if (image->surface.dcc_size && first_level < image->surface.num_dcc_levels) {
- state[6] |= S_008F28_COMPRESSION_EN(1);
- state[7] = (gpu_address +
- image->dcc_offset +
- base_level_info->dcc_offset) >> 8;
+ if (chip_class >= GFX9) {
+ state[3] &= C_008F1C_SW_MODE;
+ state[4] &= C_008F20_PITCH_GFX9;
+
+ if (is_stencil) {
+ state[3] |= S_008F1C_SW_MODE(image->surface.u.gfx9.stencil.swizzle_mode);
+ state[4] |= S_008F20_PITCH_GFX9(image->surface.u.gfx9.stencil.epitch);
+ } else {
+ state[3] |= S_008F1C_SW_MODE(image->surface.u.gfx9.surf.swizzle_mode);
+ state[4] |= S_008F20_PITCH_GFX9(image->surface.u.gfx9.surf.epitch);
+ }
+
+ state[5] &= C_008F24_META_DATA_ADDRESS &
+ C_008F24_META_PIPE_ALIGNED &
+ C_008F24_META_RB_ALIGNED;
+ if (meta_va) {
+ struct gfx9_surf_meta_flags meta;
+
+ if (image->dcc_offset)
+ meta = image->surface.u.gfx9.dcc;
+ else
+ meta = image->surface.u.gfx9.htile;
+
+ state[5] |= S_008F24_META_DATA_ADDRESS(meta_va >> 40) |
+ S_008F24_META_PIPE_ALIGNED(meta.pipe_aligned) |
+ S_008F24_META_RB_ALIGNED(meta.rb_aligned);
+ }
+ } else {
+ /* SI-CI-VI */
+ unsigned pitch = base_level_info->nblk_x * block_width;
+ unsigned index = si_tile_mode_index(image, base_level, is_stencil);
+
+ state[3] &= C_008F1C_TILING_INDEX;
+ state[3] |= S_008F1C_TILING_INDEX(index);
+ state[4] &= C_008F20_PITCH_GFX6;
+ state[4] |= S_008F20_PITCH_GFX6(pitch - 1);
}
}
static unsigned radv_tex_dim(VkImageType image_type, VkImageViewType view_type,
- unsigned nr_layers, unsigned nr_samples, bool is_storage_image)
+ unsigned nr_layers, unsigned nr_samples, bool is_storage_image, bool gfx9)
{
if (view_type == VK_IMAGE_VIEW_TYPE_CUBE || view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
return is_storage_image ? V_008F1C_SQ_RSRC_IMG_2D_ARRAY : V_008F1C_SQ_RSRC_IMG_CUBE;
+
+ /* GFX9 allocates 1D textures as 2D. */
+ if (gfx9 && image_type == VK_IMAGE_TYPE_1D)
+ image_type = VK_IMAGE_TYPE_2D;
switch (image_type) {
case VK_IMAGE_TYPE_1D:
return nr_layers > 1 ? V_008F1C_SQ_RSRC_IMG_1D_ARRAY : V_008F1C_SQ_RSRC_IMG_1D;
else
return V_008F1C_SQ_RSRC_IMG_2D_ARRAY;
default:
- unreachable("illegale image type");
+ unreachable("illegal image type");
}
}
+
+static unsigned gfx9_border_color_swizzle(const enum vk_swizzle swizzle[4])
+{
+ unsigned bc_swizzle = V_008F20_BC_SWIZZLE_XYZW;
+
+ if (swizzle[3] == VK_SWIZZLE_X) {
+ /* For the pre-defined border color values (white, opaque
+ * black, transparent black), the only thing that matters is
+ * that the alpha channel winds up in the correct place
+ * (because the RGB channels are all the same) so either of
+ * these enumerations will work.
+ */
+ if (swizzle[2] == VK_SWIZZLE_Y)
+ bc_swizzle = V_008F20_BC_SWIZZLE_WZYX;
+ else
+ bc_swizzle = V_008F20_BC_SWIZZLE_WXYZ;
+ } else if (swizzle[0] == VK_SWIZZLE_X) {
+ if (swizzle[1] == VK_SWIZZLE_Y)
+ bc_swizzle = V_008F20_BC_SWIZZLE_XYZW;
+ else
+ bc_swizzle = V_008F20_BC_SWIZZLE_XWYZ;
+ } else if (swizzle[1] == VK_SWIZZLE_X) {
+ bc_swizzle = V_008F20_BC_SWIZZLE_YXWZ;
+ } else if (swizzle[2] == VK_SWIZZLE_X) {
+ bc_swizzle = V_008F20_BC_SWIZZLE_ZYXW;
+ }
+
+ return bc_swizzle;
+}
+
/**
* Build the sampler view descriptor for a texture.
*/
static void
si_make_texture_descriptor(struct radv_device *device,
struct radv_image *image,
- bool sampler,
+ bool is_storage_image,
VkImageViewType view_type,
VkFormat vk_format,
const VkComponentMapping *mapping,
data_format = 0;
}
+	/* S8 with either Z16 or Z32 HTILE needs a special format. */
+ if (device->physical_device->rad_info.chip_class >= GFX9 &&
+ vk_format == VK_FORMAT_S8_UINT &&
+ radv_image_is_tc_compat_htile(image)) {
+ if (image->vk_format == VK_FORMAT_D32_SFLOAT_S8_UINT)
+ data_format = V_008F14_IMG_DATA_FORMAT_S8_32;
+ else if (image->vk_format == VK_FORMAT_D16_UNORM_S8_UINT)
+ data_format = V_008F14_IMG_DATA_FORMAT_S8_16;
+ }
type = radv_tex_dim(image->type, view_type, image->info.array_size, image->info.samples,
- (image->usage & VK_IMAGE_USAGE_STORAGE_BIT));
+ is_storage_image, device->physical_device->rad_info.chip_class >= GFX9);
if (type == V_008F1C_SQ_RSRC_IMG_1D_ARRAY) {
height = 1;
depth = image->info.array_size;
S_008F1C_LAST_LEVEL(image->info.samples > 1 ?
util_logbase2(image->info.samples) :
last_level) |
- S_008F1C_POW2_PAD(image->info.levels > 1) |
S_008F1C_TYPE(type));
- state[4] = S_008F20_DEPTH(depth - 1);
- state[5] = (S_008F24_BASE_ARRAY(first_layer) |
- S_008F24_LAST_ARRAY(last_layer));
+ state[4] = 0;
+ state[5] = S_008F24_BASE_ARRAY(first_layer);
state[6] = 0;
state[7] = 0;
+ if (device->physical_device->rad_info.chip_class >= GFX9) {
+ unsigned bc_swizzle = gfx9_border_color_swizzle(swizzle);
+
+		/* Depth is the last accessible layer on GFX9.
+ * The hw doesn't need to know the total number of layers.
+ */
+ if (type == V_008F1C_SQ_RSRC_IMG_3D)
+ state[4] |= S_008F20_DEPTH(depth - 1);
+ else
+ state[4] |= S_008F20_DEPTH(last_layer);
+
+ state[4] |= S_008F20_BC_SWIZZLE(bc_swizzle);
+ state[5] |= S_008F24_MAX_MIP(image->info.samples > 1 ?
+ util_logbase2(image->info.samples) :
+ image->info.levels - 1);
+ } else {
+ state[3] |= S_008F1C_POW2_PAD(image->info.levels > 1);
+ state[4] |= S_008F20_DEPTH(depth - 1);
+ state[5] |= S_008F24_LAST_ARRAY(last_layer);
+ }
if (image->dcc_offset) {
unsigned swap = radv_translate_colorswap(vk_format, FALSE);
}
/* Initialize the sampler view for FMASK. */
- if (image->fmask.size) {
- uint32_t fmask_format;
- uint64_t gpu_address = device->ws->buffer_get_va(image->bo);
+ if (radv_image_has_fmask(image)) {
+ uint32_t fmask_format, num_format;
+ uint64_t gpu_address = radv_buffer_get_va(image->bo);
uint64_t va;
va = gpu_address + image->offset + image->fmask.offset;
- switch (image->info.samples) {
- case 2:
- fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK8_S2_F2;
- break;
- case 4:
- fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK8_S4_F4;
- break;
- case 8:
- fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK32_S8_F8;
- break;
- default:
- assert(0);
- fmask_format = V_008F14_IMG_DATA_FORMAT_INVALID;
+ if (device->physical_device->rad_info.chip_class >= GFX9) {
+ fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK;
+ switch (image->info.samples) {
+ case 2:
+ num_format = V_008F14_IMG_FMASK_8_2_2;
+ break;
+ case 4:
+ num_format = V_008F14_IMG_FMASK_8_4_4;
+ break;
+ case 8:
+ num_format = V_008F14_IMG_FMASK_32_8_8;
+ break;
+ default:
+ unreachable("invalid nr_samples");
+ }
+ } else {
+ switch (image->info.samples) {
+ case 2:
+ fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK8_S2_F2;
+ break;
+ case 4:
+ fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK8_S4_F4;
+ break;
+ case 8:
+ fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK32_S8_F8;
+ break;
+ default:
+ assert(0);
+ fmask_format = V_008F14_IMG_DATA_FORMAT_INVALID;
+ }
+ num_format = V_008F14_IMG_NUM_FORMAT_UINT;
}
fmask_state[0] = va >> 8;
+ fmask_state[0] |= image->fmask.tile_swizzle;
fmask_state[1] = S_008F14_BASE_ADDRESS_HI(va >> 40) |
S_008F14_DATA_FORMAT_GFX6(fmask_format) |
- S_008F14_NUM_FORMAT_GFX6(V_008F14_IMG_NUM_FORMAT_UINT);
+ S_008F14_NUM_FORMAT_GFX6(num_format);
fmask_state[2] = S_008F18_WIDTH(width - 1) |
S_008F18_HEIGHT(height - 1);
fmask_state[3] = S_008F1C_DST_SEL_X(V_008F1C_SQ_SEL_X) |
S_008F1C_DST_SEL_Y(V_008F1C_SQ_SEL_X) |
S_008F1C_DST_SEL_Z(V_008F1C_SQ_SEL_X) |
S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_X) |
- S_008F1C_TILING_INDEX(image->fmask.tile_mode_index) |
- S_008F1C_TYPE(radv_tex_dim(image->type, view_type, 1, 0, false));
- fmask_state[4] = S_008F20_DEPTH(depth - 1) |
- S_008F20_PITCH_GFX6(image->fmask.pitch_in_pixels - 1);
- fmask_state[5] = S_008F24_BASE_ARRAY(first_layer) |
- S_008F24_LAST_ARRAY(last_layer);
+ S_008F1C_TYPE(radv_tex_dim(image->type, view_type, image->info.array_size, 0, false, false));
+ fmask_state[4] = 0;
+ fmask_state[5] = S_008F24_BASE_ARRAY(first_layer);
fmask_state[6] = 0;
fmask_state[7] = 0;
- }
+
+ if (device->physical_device->rad_info.chip_class >= GFX9) {
+ fmask_state[3] |= S_008F1C_SW_MODE(image->surface.u.gfx9.fmask.swizzle_mode);
+ fmask_state[4] |= S_008F20_DEPTH(last_layer) |
+ S_008F20_PITCH_GFX9(image->surface.u.gfx9.fmask.epitch);
+ fmask_state[5] |= S_008F24_META_PIPE_ALIGNED(image->surface.u.gfx9.cmask.pipe_aligned) |
+ S_008F24_META_RB_ALIGNED(image->surface.u.gfx9.cmask.rb_aligned);
+ } else {
+ fmask_state[3] |= S_008F1C_TILING_INDEX(image->fmask.tile_mode_index);
+ fmask_state[4] |= S_008F20_DEPTH(depth - 1) |
+ S_008F20_PITCH_GFX6(image->fmask.pitch_in_pixels - 1);
+ fmask_state[5] |= S_008F24_LAST_ARRAY(last_layer);
+ }
+ } else if (fmask_state)
+ memset(fmask_state, 0, 8 * 4);
}
static void
md->metadata[1] = si_get_bo_metadata_word1(device);
- si_make_texture_descriptor(device, image, true,
+ si_make_texture_descriptor(device, image, false,
(VkImageViewType)image->type, image->vk_format,
&fixedmapping, 0, image->info.levels - 1, 0,
image->info.array_size,
image->info.depth,
desc, NULL);
- si_set_mutable_tex_desc_fields(device, image, &image->surface.level[0], 0, 0,
- image->surface.blk_w, false, desc);
+ si_set_mutable_tex_desc_fields(device, image, &image->surface.u.legacy.level[0], 0, 0,
+ image->surface.blk_w, false, false, desc);
/* Clear the base address and set the relative DCC offset. */
desc[0] = 0;
memcpy(&md->metadata[2], desc, sizeof(desc));
/* Dwords [10:..] contain the mipmap level offsets. */
- for (i = 0; i <= image->info.levels - 1; i++)
- md->metadata[10+i] = image->surface.level[i].offset >> 8;
-
- md->size_metadata = (11 + image->info.levels - 1) * 4;
+ if (device->physical_device->rad_info.chip_class <= VI) {
+ for (i = 0; i <= image->info.levels - 1; i++)
+ md->metadata[10+i] = image->surface.u.legacy.level[i].offset >> 8;
+ md->size_metadata = (11 + image->info.levels - 1) * 4;
+ }
}
void
struct radeon_surf *surface = &image->surface;
memset(metadata, 0, sizeof(*metadata));
- metadata->microtile = surface->level[0].mode >= RADEON_SURF_MODE_1D ?
- RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
- metadata->macrotile = surface->level[0].mode >= RADEON_SURF_MODE_2D ?
- RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
- metadata->pipe_config = surface->pipe_config;
- metadata->bankw = surface->bankw;
- metadata->bankh = surface->bankh;
- metadata->tile_split = surface->tile_split;
- metadata->mtilea = surface->mtilea;
- metadata->num_banks = surface->num_banks;
- metadata->stride = surface->level[0].nblk_x * surface->bpe;
- metadata->scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
+ if (device->physical_device->rad_info.chip_class >= GFX9) {
+ metadata->u.gfx9.swizzle_mode = surface->u.gfx9.surf.swizzle_mode;
+ } else {
+ metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
+ RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
+ metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
+ RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
+ metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config;
+ metadata->u.legacy.bankw = surface->u.legacy.bankw;
+ metadata->u.legacy.bankh = surface->u.legacy.bankh;
+ metadata->u.legacy.tile_split = surface->u.legacy.tile_split;
+ metadata->u.legacy.mtilea = surface->u.legacy.mtilea;
+ metadata->u.legacy.num_banks = surface->u.legacy.num_banks;
+ metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
+ metadata->u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
+ }
radv_query_opaque_metadata(device, image, metadata);
}
unsigned nr_samples,
struct radv_fmask_info *out)
{
- /* FMASK is allocated like an ordinary texture. */
- struct radeon_surf fmask = image->surface;
- struct radeon_surf_info info = image->info;
- memset(out, 0, sizeof(*out));
-
- fmask.surf_alignment = 0;
- fmask.surf_size = 0;
- fmask.flags |= RADEON_SURF_FMASK;
- info.samples = 1;
- /* Force 2D tiling if it wasn't set. This may occur when creating
- * FMASK for MSAA resolve on R6xx. On R6xx, the single-sample
- * destination buffer must have an FMASK too. */
- fmask.flags = RADEON_SURF_CLR(fmask.flags, MODE);
- fmask.flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
-
- fmask.flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
-
- switch (nr_samples) {
- case 2:
- case 4:
- fmask.bpe = 1;
- break;
- case 8:
- fmask.bpe = 4;
- break;
- default:
+ if (device->physical_device->rad_info.chip_class >= GFX9) {
+ out->alignment = image->surface.fmask_alignment;
+ out->size = image->surface.fmask_size;
+ out->tile_swizzle = image->surface.fmask_tile_swizzle;
return;
}
- device->ws->surface_init(device->ws, &info, &fmask);
- assert(fmask.level[0].mode == RADEON_SURF_MODE_2D);
+ out->slice_tile_max = image->surface.u.legacy.fmask.slice_tile_max;
+ out->tile_mode_index = image->surface.u.legacy.fmask.tiling_index;
+ out->pitch_in_pixels = image->surface.u.legacy.fmask.pitch_in_pixels;
+ out->bank_height = image->surface.u.legacy.fmask.bankh;
+ out->tile_swizzle = image->surface.fmask_tile_swizzle;
+ out->alignment = image->surface.fmask_alignment;
+ out->size = image->surface.fmask_size;
- out->slice_tile_max = (fmask.level[0].nblk_x * fmask.level[0].nblk_y) / 64;
- if (out->slice_tile_max)
- out->slice_tile_max -= 1;
-
- out->tile_mode_index = fmask.tiling_index[0];
- out->pitch_in_pixels = fmask.level[0].nblk_x;
- out->bank_height = fmask.bankh;
- out->alignment = MAX2(256, fmask.surf_alignment);
- out->size = fmask.surf_size;
+ assert(!out->tile_swizzle || !image->shareable);
}
static void
unsigned num_pipes = device->physical_device->rad_info.num_tile_pipes;
unsigned cl_width, cl_height;
+ if (device->physical_device->rad_info.chip_class >= GFX9) {
+ out->alignment = image->surface.cmask_alignment;
+ out->size = image->surface.cmask_size;
+ return;
+ }
+
switch (num_pipes) {
case 2:
cl_width = 32;
radv_image_alloc_cmask(struct radv_device *device,
struct radv_image *image)
{
+ uint32_t clear_value_size = 0;
radv_image_get_cmask_info(device, image, &image->cmask);
image->cmask.offset = align64(image->size, image->cmask.alignment);
/* + 8 for storing the clear values */
- image->clear_value_offset = image->cmask.offset + image->cmask.size;
- image->size = image->cmask.offset + image->cmask.size + 8;
+ if (!image->clear_value_offset) {
+ image->clear_value_offset = image->cmask.offset + image->cmask.size;
+ clear_value_size = 8;
+ }
+ image->size = image->cmask.offset + image->cmask.size + clear_value_size;
image->alignment = MAX2(image->alignment, image->cmask.alignment);
}
static void
-radv_image_alloc_dcc(struct radv_device *device,
- struct radv_image *image)
+radv_image_alloc_dcc(struct radv_image *image)
{
image->dcc_offset = align64(image->size, image->surface.dcc_alignment);
- /* + 8 for storing the clear values */
+ /* + 16 for storing the clear values + dcc pred */
image->clear_value_offset = image->dcc_offset + image->surface.dcc_size;
- image->size = image->dcc_offset + image->surface.dcc_size + 8;
+ image->dcc_pred_offset = image->clear_value_offset + 8;
+ image->size = image->dcc_offset + image->surface.dcc_size + 16;
image->alignment = MAX2(image->alignment, image->surface.dcc_alignment);
}
static void
-radv_image_alloc_htile(struct radv_device *device,
- struct radv_image *image)
+radv_image_alloc_htile(struct radv_image *image)
{
- if ((device->debug_flags & RADV_DEBUG_NO_HIZ) || image->info.levels > 1) {
- image->surface.htile_size = 0;
- return;
- }
-
image->htile_offset = align64(image->size, image->surface.htile_alignment);
/* + 8 for storing the clear values */
image->alignment = align64(image->alignment, image->surface.htile_alignment);
}
+static inline bool
+radv_image_can_enable_dcc_or_cmask(struct radv_image *image)
+{
+ if (image->info.samples <= 1 &&
+ image->info.width * image->info.height <= 512 * 512) {
+ /* Do not enable CMASK or DCC for small surfaces where the cost
+ * of the eliminate pass can be higher than the benefit of fast
+ * clear. RadeonSI does this, but the image threshold is
+ * different.
+ */
+ return false;
+ }
+
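+	/* Fast-clear metadata can only be tracked reliably when the image is
+	 * exclusive, or used by the general queue alone.
+	 */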
+ return image->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT &&
+ (image->exclusive || image->queue_family_mask == 1);
+}
+
+static inline bool
+radv_image_can_enable_dcc(struct radv_image *image)
+{
+ return radv_image_can_enable_dcc_or_cmask(image) &&
+ radv_image_has_dcc(image);
+}
+
+static inline bool
+radv_image_can_enable_cmask(struct radv_image *image)
+{
+ if (image->surface.bpe > 8 && image->info.samples == 1) {
+ /* Do not enable CMASK for non-MSAA images (fast color clear)
+		 * because 128-bit formats are not supported, but FMASK might
+ * still be used.
+ */
+ return false;
+ }
+
+ return radv_image_can_enable_dcc_or_cmask(image) &&
+ image->info.levels == 1 &&
+ image->info.depth == 1 &&
+ !image->surface.is_linear;
+}
+
+static inline bool
+radv_image_can_enable_fmask(struct radv_image *image)
+{
+ return image->info.samples > 1 && vk_format_is_color(image->vk_format);
+}
+
+static inline bool
+radv_image_can_enable_htile(struct radv_image *image)
+{
+ return image->info.levels == 1 && vk_format_is_depth(image->vk_format);
+}
+
VkResult
radv_image_create(VkDevice _device,
const struct radv_image_create_info *create_info,
RADV_FROM_HANDLE(radv_device, device, _device);
const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
struct radv_image *image = NULL;
- bool can_cmask_dcc = false;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);
radv_assert(pCreateInfo->mipLevels > 0);
radv_assert(pCreateInfo->extent.height > 0);
radv_assert(pCreateInfo->extent.depth > 0);
- image = vk_alloc2(&device->alloc, alloc, sizeof(*image), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ image = vk_zalloc2(&device->alloc, alloc, sizeof(*image), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!image)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- memset(image, 0, sizeof(*image));
image->type = pCreateInfo->imageType;
image->info.width = pCreateInfo->extent.width;
image->info.height = pCreateInfo->extent.height;
image->info.depth = pCreateInfo->extent.depth;
image->info.samples = pCreateInfo->samples;
+ image->info.color_samples = pCreateInfo->samples;
image->info.array_size = pCreateInfo->arrayLayers;
image->info.levels = pCreateInfo->mipLevels;
+ image->info.num_channels = vk_format_get_nr_components(pCreateInfo->format);
image->vk_format = pCreateInfo->format;
image->tiling = pCreateInfo->tiling;
image->exclusive = pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE;
if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) {
for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; ++i)
- if (pCreateInfo->pQueueFamilyIndices[i] == VK_QUEUE_FAMILY_EXTERNAL_KHX)
+ if (pCreateInfo->pQueueFamilyIndices[i] == VK_QUEUE_FAMILY_EXTERNAL_KHR)
image->queue_family_mask |= (1u << RADV_MAX_QUEUE_FAMILIES) - 1u;
else
image->queue_family_mask |= 1u << pCreateInfo->pQueueFamilyIndices[i];
}
+ image->shareable = vk_find_struct_const(pCreateInfo->pNext,
+ EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR) != NULL;
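+
+	/* A per-device counter gives each color surface a distinct surf_index,
+	 * which addrlib uses to vary the tile swizzle across surfaces.
+	 */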
+ if (!vk_format_is_depth(pCreateInfo->format) && !create_info->scanout && !image->shareable) {
+ image->info.surf_index = &device->image_mrt_offset_counter;
+ }
+
radv_init_surface(device, &image->surface, create_info);
device->ws->surface_init(device->ws, &image->info, &image->surface);
image->size = image->surface.surf_size;
image->alignment = image->surface.surf_alignment;
- if (image->exclusive || image->queue_family_mask == 1)
- can_cmask_dcc = true;
+ if (!create_info->no_metadata_planes) {
+ /* Try to enable DCC first. */
+ if (radv_image_can_enable_dcc(image)) {
+ radv_image_alloc_dcc(image);
+ if (image->info.samples > 1) {
+ /* CMASK should be enabled because DCC fast
+ * clear with MSAA needs it.
+ */
+ assert(radv_image_can_enable_cmask(image));
+ radv_image_alloc_cmask(device, image);
+ }
+ } else {
+ /* When DCC cannot be enabled, try CMASK. */
+ image->surface.dcc_size = 0;
+ if (radv_image_can_enable_cmask(image)) {
+ radv_image_alloc_cmask(device, image);
+ }
+ }
- if ((pCreateInfo->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) &&
- image->surface.dcc_size && can_cmask_dcc)
- radv_image_alloc_dcc(device, image);
- else
+ /* Try to enable FMASK for multisampled images. */
+ if (radv_image_can_enable_fmask(image)) {
+ radv_image_alloc_fmask(device, image);
+ } else {
+ /* Otherwise, try to enable HTILE for depth surfaces. */
+ if (radv_image_can_enable_htile(image) &&
+ !(device->instance->debug_flags & RADV_DEBUG_NO_HIZ)) {
+ radv_image_alloc_htile(image);
+ image->tc_compatible_htile = image->surface.flags & RADEON_SURF_TC_COMPATIBLE_HTILE;
+ } else {
+ image->surface.htile_size = 0;
+ }
+ }
+ } else {
image->surface.dcc_size = 0;
-
- if ((pCreateInfo->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) &&
- pCreateInfo->mipLevels == 1 &&
- !image->surface.dcc_size && image->info.depth == 1 && can_cmask_dcc)
- radv_image_alloc_cmask(device, image);
- if (image->info.samples > 1 && vk_format_is_color(pCreateInfo->format)) {
- radv_image_alloc_fmask(device, image);
- } else if (vk_format_is_depth(pCreateInfo->format)) {
-
- radv_image_alloc_htile(device, image);
+ image->surface.htile_size = 0;
}
if (pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) {
0, RADEON_FLAG_VIRTUAL);
if (!image->bo) {
vk_free2(&device->alloc, alloc, image);
- return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
}
return VK_SUCCESS;
}
+static void
+radv_image_view_make_descriptor(struct radv_image_view *iview,
+ struct radv_device *device,
+ const VkComponentMapping *components,
+ bool is_storage_image)
+{
+ struct radv_image *image = iview->image;
+ bool is_stencil = iview->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT;
+ uint32_t blk_w;
+ uint32_t *descriptor;
+ uint32_t hw_level = 0;
+
+ if (is_storage_image) {
+ descriptor = iview->storage_descriptor;
+ } else {
+ descriptor = iview->descriptor;
+ }
+
+ assert(image->surface.blk_w % vk_format_get_blockwidth(image->vk_format) == 0);
+ blk_w = image->surface.blk_w / vk_format_get_blockwidth(image->vk_format) * vk_format_get_blockwidth(iview->vk_format);
+
+ if (device->physical_device->rad_info.chip_class >= GFX9)
+ hw_level = iview->base_mip;
+ si_make_texture_descriptor(device, image, is_storage_image,
+ iview->type,
+ iview->vk_format,
+ components,
+ hw_level, hw_level + iview->level_count - 1,
+ iview->base_layer,
+ iview->base_layer + iview->layer_count - 1,
+ iview->extent.width,
+ iview->extent.height,
+ iview->extent.depth,
+ descriptor,
+ descriptor + 8);
+
+ const struct legacy_surf_level *base_level_info = NULL;
+ if (device->physical_device->rad_info.chip_class <= GFX9) {
+ if (is_stencil)
+ base_level_info = &image->surface.u.legacy.stencil_level[iview->base_mip];
+ else
+ base_level_info = &image->surface.u.legacy.level[iview->base_mip];
+ }
+ si_set_mutable_tex_desc_fields(device, image,
+ base_level_info,
+ iview->base_mip,
+ iview->base_mip,
+ blk_w, is_stencil, is_storage_image, descriptor);
+}
+
void
radv_image_view_init(struct radv_image_view *iview,
struct radv_device *device,
- const VkImageViewCreateInfo* pCreateInfo,
- struct radv_cmd_buffer *cmd_buffer,
- VkImageUsageFlags usage_mask)
+ const VkImageViewCreateInfo* pCreateInfo)
{
RADV_FROM_HANDLE(radv_image, image, pCreateInfo->image);
const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;
- uint32_t blk_w;
- bool is_stencil = false;
+
switch (image->type) {
case VK_IMAGE_TYPE_1D:
case VK_IMAGE_TYPE_2D:
iview->aspect_mask = pCreateInfo->subresourceRange.aspectMask;
if (iview->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
- is_stencil = true;
iview->vk_format = vk_format_stencil_only(iview->vk_format);
} else if (iview->aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
iview->vk_format = vk_format_depth_only(iview->vk_format);
}
- iview->extent = (VkExtent3D) {
- .width = radv_minify(image->info.width , range->baseMipLevel),
- .height = radv_minify(image->info.height, range->baseMipLevel),
- .depth = radv_minify(image->info.depth , range->baseMipLevel),
- };
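+	/* GFX9 descriptors are always programmed with the base level's extent;
+	 * the HW derives each mip's size itself (see the comment further down).
+	 */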
+ if (device->physical_device->rad_info.chip_class >= GFX9) {
+ iview->extent = (VkExtent3D) {
+ .width = image->info.width,
+ .height = image->info.height,
+ .depth = image->info.depth,
+ };
+ } else {
+ iview->extent = (VkExtent3D) {
+ .width = radv_minify(image->info.width , range->baseMipLevel),
+ .height = radv_minify(image->info.height, range->baseMipLevel),
+ .depth = radv_minify(image->info.depth , range->baseMipLevel),
+ };
+ }
- iview->extent.width = round_up_u32(iview->extent.width * vk_format_get_blockwidth(iview->vk_format),
- vk_format_get_blockwidth(image->vk_format));
- iview->extent.height = round_up_u32(iview->extent.height * vk_format_get_blockheight(iview->vk_format),
- vk_format_get_blockheight(image->vk_format));
+ if (iview->vk_format != image->vk_format) {
+ unsigned view_bw = vk_format_get_blockwidth(iview->vk_format);
+ unsigned view_bh = vk_format_get_blockheight(iview->vk_format);
+ unsigned img_bw = vk_format_get_blockwidth(image->vk_format);
+ unsigned img_bh = vk_format_get_blockheight(image->vk_format);
+
+ iview->extent.width = round_up_u32(iview->extent.width * view_bw, img_bw);
+ iview->extent.height = round_up_u32(iview->extent.height * view_bh, img_bh);
+
+ /* Comment ported from amdvlk -
+ * If we have the following image:
+ * Uncompressed pixels Compressed block sizes (4x4)
+ * mip0: 22 x 22 6 x 6
+ * mip1: 11 x 11 3 x 3
+ * mip2: 5 x 5 2 x 2
+ * mip3: 2 x 2 1 x 1
+ * mip4: 1 x 1 1 x 1
+ *
+		 * On GFX9 the descriptor is always programmed with the WIDTH and
+		 * HEIGHT of the base level, and the HW calculates the degradation
+		 * of the block sizes down the mip-chain as follows (straight-up
+		 * divide-by-two integer math):
+ * mip0: 6x6
+ * mip1: 3x3
+ * mip2: 1x1
+ * mip3: 1x1
+ *
+ * This means that mip2 will be missing texels.
+ *
+ * Fix this by calculating the base mip's width and height, then convert that, and round it
+ * back up to get the level 0 size.
+ * Clamp the converted size between the original values, and next power of two, which
+ * means we don't oversize the image.
+ */
+ if (device->physical_device->rad_info.chip_class >= GFX9 &&
+ vk_format_is_compressed(image->vk_format) &&
+ !vk_format_is_compressed(iview->vk_format)) {
+ unsigned rounded_img_w = util_next_power_of_two(iview->extent.width);
+ unsigned rounded_img_h = util_next_power_of_two(iview->extent.height);
+ unsigned lvl_width = radv_minify(image->info.width , range->baseMipLevel);
+ unsigned lvl_height = radv_minify(image->info.height, range->baseMipLevel);
+
+ lvl_width = round_up_u32(lvl_width * view_bw, img_bw);
+ lvl_height = round_up_u32(lvl_height * view_bh, img_bh);
+
+ lvl_width <<= range->baseMipLevel;
+ lvl_height <<= range->baseMipLevel;
+
+ iview->extent.width = CLAMP(lvl_width, iview->extent.width, rounded_img_w);
+ iview->extent.height = CLAMP(lvl_height, iview->extent.height, rounded_img_h);
+ }
+ }
- assert(image->surface.blk_w % vk_format_get_blockwidth(image->vk_format) == 0);
- blk_w = image->surface.blk_w / vk_format_get_blockwidth(image->vk_format) * vk_format_get_blockwidth(iview->vk_format);
iview->base_layer = range->baseArrayLayer;
iview->layer_count = radv_get_layerCount(image, range);
iview->base_mip = range->baseMipLevel;
+ iview->level_count = radv_get_levelCount(image, range);
- si_make_texture_descriptor(device, image, false,
- iview->type,
- iview->vk_format,
- &pCreateInfo->components,
- 0, radv_get_levelCount(image, range) - 1,
- range->baseArrayLayer,
- range->baseArrayLayer + radv_get_layerCount(image, range) - 1,
- iview->extent.width,
- iview->extent.height,
- iview->extent.depth,
- iview->descriptor,
- iview->fmask_descriptor);
- si_set_mutable_tex_desc_fields(device, image,
- is_stencil ? &image->surface.stencil_level[range->baseMipLevel] : &image->surface.level[range->baseMipLevel], range->baseMipLevel,
- range->baseMipLevel,
- blk_w, is_stencil, iview->descriptor);
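+	/* Build both the sampled-image and the storage-image descriptors up
+	 * front, so either can be copied into a descriptor set later.
+	 */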
+ radv_image_view_make_descriptor(iview, device, &pCreateInfo->components, false);
+ radv_image_view_make_descriptor(iview, device, &pCreateInfo->components, true);
}
bool radv_layout_has_htile(const struct radv_image *image,
VkImageLayout layout,
unsigned queue_mask)
{
- return image->surface.htile_size &&
+ if (radv_image_is_tc_compat_htile(image))
+ return layout != VK_IMAGE_LAYOUT_GENERAL;
+
+ return radv_image_has_htile(image) &&
(layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) &&
queue_mask == (1u << RADV_QUEUE_GENERAL);
VkImageLayout layout,
unsigned queue_mask)
{
- return image->surface.htile_size &&
+ if (radv_image_is_tc_compat_htile(image))
+ return layout != VK_IMAGE_LAYOUT_GENERAL;
+
+ return radv_image_has_htile(image) &&
(layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) &&
queue_mask == (1u << RADV_QUEUE_GENERAL);
queue_mask == (1u << RADV_QUEUE_GENERAL);
}
+bool radv_layout_dcc_compressed(const struct radv_image *image,
+ VkImageLayout layout,
+ unsigned queue_mask)
+{
+ /* Don't compress compute transfer dst, as image stores are not supported. */
+ if (layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL &&
+ (queue_mask & (1u << RADV_QUEUE_COMPUTE)))
+ return false;
+
+ return radv_image_has_dcc(image) && layout != VK_IMAGE_LAYOUT_GENERAL;
+}
+
unsigned radv_image_queue_family_mask(const struct radv_image *image, uint32_t family, uint32_t queue_family)
{
if (!image->exclusive)
return image->queue_family_mask;
- if (family == VK_QUEUE_FAMILY_EXTERNAL_KHX)
+ if (family == VK_QUEUE_FAMILY_EXTERNAL_KHR)
return (1u << RADV_MAX_QUEUE_FAMILIES) - 1u;
if (family == VK_QUEUE_FAMILY_IGNORED)
return 1u << queue_family;
const VkAllocationCallbacks *pAllocator,
VkImage *pImage)
{
+#ifdef ANDROID
+ const VkNativeBufferANDROID *gralloc_info =
+ vk_find_struct_const(pCreateInfo->pNext, NATIVE_BUFFER_ANDROID);
+
+ if (gralloc_info)
+ return radv_image_from_gralloc(device, pCreateInfo, gralloc_info,
+ pAllocator, pImage);
+#endif
+
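+	/* The WSI layer tags swapchain images; scanout selects display-capable
+	 * tiling via RADEON_SURF_SCANOUT in radv_init_surface().
+	 */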
+ const struct wsi_image_create_info *wsi_info =
+ vk_find_struct_const(pCreateInfo->pNext, WSI_IMAGE_CREATE_INFO_MESA);
+ bool scanout = wsi_info && wsi_info->scanout;
+
return radv_image_create(device,
&(struct radv_image_create_info) {
.vk_info = pCreateInfo,
- .scanout = false,
- },
+ .scanout = scanout,
+ },
pAllocator,
pImage);
}
if (image->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)
device->ws->buffer_destroy(image->bo);
+ if (image->owned_memory != VK_NULL_HANDLE)
+ radv_FreeMemory(_device, image->owned_memory, pAllocator);
+
vk_free2(&device->alloc, pAllocator, image);
}
void radv_GetImageSubresourceLayout(
- VkDevice device,
+ VkDevice _device,
VkImage _image,
const VkImageSubresource* pSubresource,
VkSubresourceLayout* pLayout)
{
RADV_FROM_HANDLE(radv_image, image, _image);
+ RADV_FROM_HANDLE(radv_device, device, _device);
int level = pSubresource->mipLevel;
int layer = pSubresource->arrayLayer;
struct radeon_surf *surface = &image->surface;
- pLayout->offset = surface->level[level].offset + surface->level[level].slice_size * layer;
- pLayout->rowPitch = surface->level[level].nblk_x * surface->bpe;
- pLayout->arrayPitch = surface->level[level].slice_size;
- pLayout->depthPitch = surface->level[level].slice_size;
- pLayout->size = surface->level[level].slice_size;
- if (image->type == VK_IMAGE_TYPE_3D)
- pLayout->size *= u_minify(image->info.depth, level);
+ if (device->physical_device->rad_info.chip_class >= GFX9) {
+ pLayout->offset = surface->u.gfx9.offset[level] + surface->u.gfx9.surf_slice_size * layer;
+ pLayout->rowPitch = surface->u.gfx9.surf_pitch * surface->bpe;
+ pLayout->arrayPitch = surface->u.gfx9.surf_slice_size;
+ pLayout->depthPitch = surface->u.gfx9.surf_slice_size;
+ pLayout->size = surface->u.gfx9.surf_slice_size;
+ if (image->type == VK_IMAGE_TYPE_3D)
+ pLayout->size *= u_minify(image->info.depth, level);
+ } else {
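+		/* slice_size_dw is stored in dwords; multiply by 4 for bytes. */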
+ pLayout->offset = surface->u.legacy.level[level].offset + (uint64_t)surface->u.legacy.level[level].slice_size_dw * 4 * layer;
+ pLayout->rowPitch = surface->u.legacy.level[level].nblk_x * surface->bpe;
+ pLayout->arrayPitch = (uint64_t)surface->u.legacy.level[level].slice_size_dw * 4;
+ pLayout->depthPitch = (uint64_t)surface->u.legacy.level[level].slice_size_dw * 4;
+ pLayout->size = (uint64_t)surface->u.legacy.level[level].slice_size_dw * 4;
+ if (image->type == VK_IMAGE_TYPE_3D)
+ pLayout->size *= u_minify(image->info.depth, level);
+ }
}
view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (view == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- radv_image_view_init(view, device, pCreateInfo, NULL, ~0);
+ radv_image_view_init(view, device, pCreateInfo);
*pView = radv_image_view_to_handle(view);
void radv_buffer_view_init(struct radv_buffer_view *view,
struct radv_device *device,
- const VkBufferViewCreateInfo* pCreateInfo,
- struct radv_cmd_buffer *cmd_buffer)
+ const VkBufferViewCreateInfo* pCreateInfo)
{
RADV_FROM_HANDLE(radv_buffer, buffer, pCreateInfo->buffer);
view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!view)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- radv_buffer_view_init(view, device, pCreateInfo, NULL);
+ radv_buffer_view_init(view, device, pCreateInfo);
*pView = radv_buffer_view_to_handle(view);