anv: pCreateInfo->pApplicationInfo parameter to vkCreateInstance may be NULL
[mesa.git] / src/vulkan/anv_formats.c
index b739b0fb3123027c0bd2abda76895022dfcc055e..09cd8b9ddf9fe35e38cd2175403efff2df2cb360 100644
 
 #include "gen7_pack.h"
 
-#define fmt(__vk_fmt, __hw_fmt, ...) \
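+/* A swizzle remaps the channels of the hardware format back to the VK
+ * component order: each entry selects a source channel (0 = R, 1 = G,
+ * 2 = B, 3 = A), so RGBA is the identity mapping and BGRA swaps red and
+ * blue.
+ */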
+#define RGBA ((struct anv_format_swizzle) { 0, 1, 2, 3 })
+#define BGRA ((struct anv_format_swizzle) { 2, 1, 0, 3 })
+
+#define swiz_fmt(__vk_fmt, __hw_fmt, __swizzle, ...)     \
    [__vk_fmt] = { \
       .vk_format = __vk_fmt, \
       .name = #__vk_fmt, \
-      .surface_format = __hw_fmt, \
+      .isl_format = __hw_fmt, \
       .isl_layout = &isl_format_layouts[__hw_fmt], \
+      .swizzle = __swizzle, \
       __VA_ARGS__ \
    }
 
+#define fmt(__vk_fmt, __hw_fmt, ...) \
+   swiz_fmt(__vk_fmt, __hw_fmt, RGBA, __VA_ARGS__)
+
+/* HINT: For array formats, the ISL name should match the VK name.  For
+ * packed formats, they should have the channels in reverse order from each
+ * other.  The reason for this is that, for packed formats, the ISL (and
+ * bspec) names are in LSB -> MSB order while VK formats are MSB -> LSB.
+ */
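+/* For example, the array format VK_FORMAT_R8G8B8A8_UNORM and the packed
+ * format VK_FORMAT_A8B8G8R8_UNORM_PACK32 both map to
+ * ISL_FORMAT_R8G8B8A8_UNORM in the table below.
+ */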
 static const struct anv_format anv_formats[] = {
-   fmt(VK_FORMAT_UNDEFINED,               ISL_FORMAT_RAW,                    .num_channels = 1),
+   fmt(VK_FORMAT_UNDEFINED,               ISL_FORMAT_RAW),
    fmt(VK_FORMAT_R4G4_UNORM_PACK8,        ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_R4G4B4A4_UNORM_PACK16,   ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_B4G4R4A4_UNORM_PACK16,   ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_R5G6B5_UNORM_PACK16,     ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_B5G6R5_UNORM_PACK16,     ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_R5G5B5A1_UNORM_PACK16,   ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_R4G4B4A4_UNORM_PACK16,   ISL_FORMAT_A4B4G4R4_UNORM),
+   swiz_fmt(VK_FORMAT_B4G4R4A4_UNORM_PACK16,   ISL_FORMAT_A4B4G4R4_UNORM,  BGRA),
+   fmt(VK_FORMAT_R5G6B5_UNORM_PACK16,     ISL_FORMAT_B5G6R5_UNORM),
+   swiz_fmt(VK_FORMAT_B5G6R5_UNORM_PACK16,     ISL_FORMAT_B5G6R5_UNORM, BGRA),
+   fmt(VK_FORMAT_R5G5B5A1_UNORM_PACK16,   ISL_FORMAT_A1B5G5R5_UNORM),
    fmt(VK_FORMAT_B5G5R5A1_UNORM_PACK16,   ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_A1R5G5B5_UNORM_PACK16,   ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_R8_UNORM,                ISL_FORMAT_R8_UNORM,               .num_channels = 1),
-   fmt(VK_FORMAT_R8_SNORM,                ISL_FORMAT_R8_SNORM,               .num_channels = 1),
-   fmt(VK_FORMAT_R8_USCALED,              ISL_FORMAT_R8_USCALED,             .num_channels = 1),
-   fmt(VK_FORMAT_R8_SSCALED,              ISL_FORMAT_R8_SSCALED,             .num_channels = 1),
-   fmt(VK_FORMAT_R8_UINT,                 ISL_FORMAT_R8_UINT,                .num_channels = 1),
-   fmt(VK_FORMAT_R8_SINT,                 ISL_FORMAT_R8_SINT,                .num_channels = 1),
+   fmt(VK_FORMAT_A1R5G5B5_UNORM_PACK16,   ISL_FORMAT_B5G5R5A1_UNORM),
+   fmt(VK_FORMAT_R8_UNORM,                ISL_FORMAT_R8_UNORM),
+   fmt(VK_FORMAT_R8_SNORM,                ISL_FORMAT_R8_SNORM),
+   fmt(VK_FORMAT_R8_USCALED,              ISL_FORMAT_R8_USCALED),
+   fmt(VK_FORMAT_R8_SSCALED,              ISL_FORMAT_R8_SSCALED),
+   fmt(VK_FORMAT_R8_UINT,                 ISL_FORMAT_R8_UINT),
+   fmt(VK_FORMAT_R8_SINT,                 ISL_FORMAT_R8_SINT),
    fmt(VK_FORMAT_R8_SRGB,                 ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_R8G8_UNORM,              ISL_FORMAT_R8G8_UNORM,             .num_channels = 2),
-   fmt(VK_FORMAT_R8G8_SNORM,              ISL_FORMAT_R8G8_SNORM,             .num_channels = 2),
-   fmt(VK_FORMAT_R8G8_USCALED,            ISL_FORMAT_R8G8_USCALED,           .num_channels = 2),
-   fmt(VK_FORMAT_R8G8_SSCALED,            ISL_FORMAT_R8G8_SSCALED,           .num_channels = 2),
-   fmt(VK_FORMAT_R8G8_UINT,               ISL_FORMAT_R8G8_UINT,              .num_channels = 2),
-   fmt(VK_FORMAT_R8G8_SINT,               ISL_FORMAT_R8G8_SINT,              .num_channels = 2),
+   fmt(VK_FORMAT_R8G8_UNORM,              ISL_FORMAT_R8G8_UNORM),
+   fmt(VK_FORMAT_R8G8_SNORM,              ISL_FORMAT_R8G8_SNORM),
+   fmt(VK_FORMAT_R8G8_USCALED,            ISL_FORMAT_R8G8_USCALED),
+   fmt(VK_FORMAT_R8G8_SSCALED,            ISL_FORMAT_R8G8_SSCALED),
+   fmt(VK_FORMAT_R8G8_UINT,               ISL_FORMAT_R8G8_UINT),
+   fmt(VK_FORMAT_R8G8_SINT,               ISL_FORMAT_R8G8_SINT),
    fmt(VK_FORMAT_R8G8_SRGB,               ISL_FORMAT_UNSUPPORTED), /* L8A8_UNORM_SRGB */
-   fmt(VK_FORMAT_R8G8B8_UNORM,            ISL_FORMAT_R8G8B8X8_UNORM,         .num_channels = 3),
-   fmt(VK_FORMAT_R8G8B8_SNORM,            ISL_FORMAT_R8G8B8_SNORM,           .num_channels = 3),
-   fmt(VK_FORMAT_R8G8B8_USCALED,          ISL_FORMAT_R8G8B8_USCALED,         .num_channels = 3),
-   fmt(VK_FORMAT_R8G8B8_SSCALED,          ISL_FORMAT_R8G8B8_SSCALED,         .num_channels = 3),
-   fmt(VK_FORMAT_R8G8B8_UINT,             ISL_FORMAT_R8G8B8_UINT,            .num_channels = 3),
-   fmt(VK_FORMAT_R8G8B8_SINT,             ISL_FORMAT_R8G8B8_SINT,            .num_channels = 3),
+   fmt(VK_FORMAT_R8G8B8_UNORM,            ISL_FORMAT_R8G8B8_UNORM),
+   fmt(VK_FORMAT_R8G8B8_SNORM,            ISL_FORMAT_R8G8B8_SNORM),
+   fmt(VK_FORMAT_R8G8B8_USCALED,          ISL_FORMAT_R8G8B8_USCALED),
+   fmt(VK_FORMAT_R8G8B8_SSCALED,          ISL_FORMAT_R8G8B8_SSCALED),
+   fmt(VK_FORMAT_R8G8B8_UINT,             ISL_FORMAT_R8G8B8_UINT),
+   fmt(VK_FORMAT_R8G8B8_SINT,             ISL_FORMAT_R8G8B8_SINT),
    fmt(VK_FORMAT_R8G8B8_SRGB,             ISL_FORMAT_UNSUPPORTED), /* B8G8R8A8_UNORM_SRGB */
-   fmt(VK_FORMAT_R8G8B8A8_UNORM,          ISL_FORMAT_R8G8B8A8_UNORM,         .num_channels = 4),
-   fmt(VK_FORMAT_R8G8B8A8_SNORM,          ISL_FORMAT_R8G8B8A8_SNORM,         .num_channels = 4),
-   fmt(VK_FORMAT_R8G8B8A8_USCALED,        ISL_FORMAT_R8G8B8A8_USCALED,       .num_channels = 4),
-   fmt(VK_FORMAT_R8G8B8A8_SSCALED,        ISL_FORMAT_R8G8B8A8_SSCALED,       .num_channels = 4),
-   fmt(VK_FORMAT_R8G8B8A8_UINT,           ISL_FORMAT_R8G8B8A8_UINT,          .num_channels = 4),
-   fmt(VK_FORMAT_R8G8B8A8_SINT,           ISL_FORMAT_R8G8B8A8_SINT,          .num_channels = 4),
-   fmt(VK_FORMAT_R8G8B8A8_SRGB,           ISL_FORMAT_R8G8B8A8_UNORM_SRGB,    .num_channels = 4),
-   fmt(VK_FORMAT_A2R10G10B10_UNORM_PACK32, ISL_FORMAT_B10G10R10A2_UNORM,      .num_channels = 4),
-   fmt(VK_FORMAT_A2R10G10B10_SNORM_PACK32, ISL_FORMAT_B10G10R10A2_SNORM,      .num_channels = 4),
-   fmt(VK_FORMAT_A2R10G10B10_USCALED_PACK32, ISL_FORMAT_B10G10R10A2_USCALED,    .num_channels = 4),
-   fmt(VK_FORMAT_A2R10G10B10_SSCALED_PACK32, ISL_FORMAT_B10G10R10A2_SSCALED,    .num_channels = 4),
-   fmt(VK_FORMAT_A2R10G10B10_UINT_PACK32, ISL_FORMAT_B10G10R10A2_UINT,       .num_channels = 4),
-   fmt(VK_FORMAT_A2R10G10B10_SINT_PACK32, ISL_FORMAT_B10G10R10A2_SINT,       .num_channels = 4),
-   fmt(VK_FORMAT_A2B10G10R10_UNORM_PACK32, ISL_FORMAT_R10G10B10A2_UNORM,      .num_channels = 4),
-   fmt(VK_FORMAT_A2B10G10R10_SNORM_PACK32, ISL_FORMAT_R10G10B10A2_SNORM,      .num_channels = 4),
-   fmt(VK_FORMAT_A2B10G10R10_USCALED_PACK32, ISL_FORMAT_R10G10B10A2_USCALED,    .num_channels = 4),
-   fmt(VK_FORMAT_A2B10G10R10_SSCALED_PACK32, ISL_FORMAT_R10G10B10A2_SSCALED,    .num_channels = 4),
-   fmt(VK_FORMAT_A2B10G10R10_UINT_PACK32, ISL_FORMAT_R10G10B10A2_UINT,       .num_channels = 4),
-   fmt(VK_FORMAT_A2B10G10R10_SINT_PACK32, ISL_FORMAT_R10G10B10A2_SINT,       .num_channels = 4),
-   fmt(VK_FORMAT_R16_UNORM,               ISL_FORMAT_R16_UNORM,              .num_channels = 1),
-   fmt(VK_FORMAT_R16_SNORM,               ISL_FORMAT_R16_SNORM,              .num_channels = 1),
-   fmt(VK_FORMAT_R16_USCALED,             ISL_FORMAT_R16_USCALED,            .num_channels = 1),
-   fmt(VK_FORMAT_R16_SSCALED,             ISL_FORMAT_R16_SSCALED,            .num_channels = 1),
-   fmt(VK_FORMAT_R16_UINT,                ISL_FORMAT_R16_UINT,               .num_channels = 1),
-   fmt(VK_FORMAT_R16_SINT,                ISL_FORMAT_R16_SINT,               .num_channels = 1),
-   fmt(VK_FORMAT_R16_SFLOAT,              ISL_FORMAT_R16_FLOAT,              .num_channels = 1),
-   fmt(VK_FORMAT_R16G16_UNORM,            ISL_FORMAT_R16G16_UNORM,           .num_channels = 2),
-   fmt(VK_FORMAT_R16G16_SNORM,            ISL_FORMAT_R16G16_SNORM,           .num_channels = 2),
-   fmt(VK_FORMAT_R16G16_USCALED,          ISL_FORMAT_R16G16_USCALED,         .num_channels = 2),
-   fmt(VK_FORMAT_R16G16_SSCALED,          ISL_FORMAT_R16G16_SSCALED,         .num_channels = 2),
-   fmt(VK_FORMAT_R16G16_UINT,             ISL_FORMAT_R16G16_UINT,            .num_channels = 2),
-   fmt(VK_FORMAT_R16G16_SINT,             ISL_FORMAT_R16G16_SINT,            .num_channels = 2),
-   fmt(VK_FORMAT_R16G16_SFLOAT,           ISL_FORMAT_R16G16_FLOAT,           .num_channels = 2),
-   fmt(VK_FORMAT_R16G16B16_UNORM,         ISL_FORMAT_R16G16B16_UNORM,        .num_channels = 3),
-   fmt(VK_FORMAT_R16G16B16_SNORM,         ISL_FORMAT_R16G16B16_SNORM,        .num_channels = 3),
-   fmt(VK_FORMAT_R16G16B16_USCALED,       ISL_FORMAT_R16G16B16_USCALED,      .num_channels = 3),
-   fmt(VK_FORMAT_R16G16B16_SSCALED,       ISL_FORMAT_R16G16B16_SSCALED,      .num_channels = 3),
-   fmt(VK_FORMAT_R16G16B16_UINT,          ISL_FORMAT_R16G16B16_UINT,         .num_channels = 3),
-   fmt(VK_FORMAT_R16G16B16_SINT,          ISL_FORMAT_R16G16B16_SINT,         .num_channels = 3),
-   fmt(VK_FORMAT_R16G16B16_SFLOAT,        ISL_FORMAT_R16G16B16_FLOAT,        .num_channels = 3),
-   fmt(VK_FORMAT_R16G16B16A16_UNORM,      ISL_FORMAT_R16G16B16A16_UNORM,     .num_channels = 4),
-   fmt(VK_FORMAT_R16G16B16A16_SNORM,      ISL_FORMAT_R16G16B16A16_SNORM,     .num_channels = 4),
-   fmt(VK_FORMAT_R16G16B16A16_USCALED,    ISL_FORMAT_R16G16B16A16_USCALED,   .num_channels = 4),
-   fmt(VK_FORMAT_R16G16B16A16_SSCALED,    ISL_FORMAT_R16G16B16A16_SSCALED,   .num_channels = 4),
-   fmt(VK_FORMAT_R16G16B16A16_UINT,       ISL_FORMAT_R16G16B16A16_UINT,      .num_channels = 4),
-   fmt(VK_FORMAT_R16G16B16A16_SINT,       ISL_FORMAT_R16G16B16A16_SINT,      .num_channels = 4),
-   fmt(VK_FORMAT_R16G16B16A16_SFLOAT,     ISL_FORMAT_R16G16B16A16_FLOAT,     .num_channels = 4),
-   fmt(VK_FORMAT_R32_UINT,                ISL_FORMAT_R32_UINT,               .num_channels = 1,),
-   fmt(VK_FORMAT_R32_SINT,                ISL_FORMAT_R32_SINT,               .num_channels = 1,),
-   fmt(VK_FORMAT_R32_SFLOAT,              ISL_FORMAT_R32_FLOAT,              .num_channels = 1,),
-   fmt(VK_FORMAT_R32G32_UINT,             ISL_FORMAT_R32G32_UINT,            .num_channels = 2,),
-   fmt(VK_FORMAT_R32G32_SINT,             ISL_FORMAT_R32G32_SINT,            .num_channels = 2,),
-   fmt(VK_FORMAT_R32G32_SFLOAT,           ISL_FORMAT_R32G32_FLOAT,           .num_channels = 2,),
-   fmt(VK_FORMAT_R32G32B32_UINT,          ISL_FORMAT_R32G32B32_UINT,         .num_channels = 3,),
-   fmt(VK_FORMAT_R32G32B32_SINT,          ISL_FORMAT_R32G32B32_SINT,         .num_channels = 3,),
-   fmt(VK_FORMAT_R32G32B32_SFLOAT,        ISL_FORMAT_R32G32B32_FLOAT,        .num_channels = 3,),
-   fmt(VK_FORMAT_R32G32B32A32_UINT,       ISL_FORMAT_R32G32B32A32_UINT,      .num_channels = 4,),
-   fmt(VK_FORMAT_R32G32B32A32_SINT,       ISL_FORMAT_R32G32B32A32_SINT,      .num_channels = 4,),
-   fmt(VK_FORMAT_R32G32B32A32_SFLOAT,     ISL_FORMAT_R32G32B32A32_FLOAT,     .num_channels = 4,),
-   fmt(VK_FORMAT_R64_SFLOAT,              ISL_FORMAT_R64_FLOAT,              .num_channels = 1),
-   fmt(VK_FORMAT_R64G64_SFLOAT,           ISL_FORMAT_R64G64_FLOAT,           .num_channels = 2),
-   fmt(VK_FORMAT_R64G64B64_SFLOAT,        ISL_FORMAT_R64G64B64_FLOAT,        .num_channels = 3),
-   fmt(VK_FORMAT_R64G64B64A64_SFLOAT,     ISL_FORMAT_R64G64B64A64_FLOAT,     .num_channels = 4),
-   fmt(VK_FORMAT_B10G11R11_UFLOAT_PACK32, ISL_FORMAT_R11G11B10_FLOAT,        .num_channels = 3),
-   fmt(VK_FORMAT_E5B9G9R9_UFLOAT_PACK32,  ISL_FORMAT_R9G9B9E5_SHAREDEXP,     .num_channels = 3),
-
-   fmt(VK_FORMAT_D16_UNORM,               ISL_FORMAT_R16_UNORM,              .num_channels = 1, .depth_format = D16_UNORM),
-   fmt(VK_FORMAT_X8_D24_UNORM_PACK32,     ISL_FORMAT_R24_UNORM_X8_TYPELESS,  .num_channels = 1, .depth_format = D24_UNORM_X8_UINT),
-   fmt(VK_FORMAT_D32_SFLOAT,              ISL_FORMAT_R32_FLOAT,              .num_channels = 1, .depth_format = D32_FLOAT),
-   fmt(VK_FORMAT_S8_UINT,                 ISL_FORMAT_R8_UINT,                .num_channels = 1,                                       .has_stencil = true),
-   fmt(VK_FORMAT_D16_UNORM_S8_UINT,       ISL_FORMAT_R16_UNORM,              .num_channels = 2, .depth_format = D16_UNORM,            .has_stencil = true),
-   fmt(VK_FORMAT_D24_UNORM_S8_UINT,       ISL_FORMAT_R24_UNORM_X8_TYPELESS,  .num_channels = 2, .depth_format = D24_UNORM_X8_UINT,    .has_stencil = true),
-   fmt(VK_FORMAT_D32_SFLOAT_S8_UINT,      ISL_FORMAT_R32_FLOAT,              .num_channels = 2, .depth_format = D32_FLOAT,            .has_stencil = true),
-
-   fmt(VK_FORMAT_BC1_RGB_UNORM_BLOCK,     ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_BC1_RGB_SRGB_BLOCK,      ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_BC1_RGBA_UNORM_BLOCK,    ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_BC1_RGBA_SRGB_BLOCK,     ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_BC2_UNORM_BLOCK,         ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_BC2_SRGB_BLOCK,          ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_BC3_UNORM_BLOCK,         ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_BC3_SRGB_BLOCK,          ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_BC4_UNORM_BLOCK,         ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_BC4_SNORM_BLOCK,         ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_BC5_UNORM_BLOCK,         ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_BC5_SNORM_BLOCK,         ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_BC6H_UFLOAT_BLOCK,       ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_BC6H_SFLOAT_BLOCK,       ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_BC7_UNORM_BLOCK,         ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_BC7_SRGB_BLOCK,          ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_R8G8B8A8_UNORM,          ISL_FORMAT_R8G8B8A8_UNORM),
+   fmt(VK_FORMAT_R8G8B8A8_SNORM,          ISL_FORMAT_R8G8B8A8_SNORM),
+   fmt(VK_FORMAT_R8G8B8A8_USCALED,        ISL_FORMAT_R8G8B8A8_USCALED),
+   fmt(VK_FORMAT_R8G8B8A8_SSCALED,        ISL_FORMAT_R8G8B8A8_SSCALED),
+   fmt(VK_FORMAT_R8G8B8A8_UINT,           ISL_FORMAT_R8G8B8A8_UINT),
+   fmt(VK_FORMAT_R8G8B8A8_SINT,           ISL_FORMAT_R8G8B8A8_SINT),
+   fmt(VK_FORMAT_R8G8B8A8_SRGB,           ISL_FORMAT_R8G8B8A8_UNORM_SRGB),
+   fmt(VK_FORMAT_A8B8G8R8_UNORM_PACK32,   ISL_FORMAT_R8G8B8A8_UNORM),
+   fmt(VK_FORMAT_A8B8G8R8_SNORM_PACK32,   ISL_FORMAT_R8G8B8A8_SNORM),
+   fmt(VK_FORMAT_A8B8G8R8_USCALED_PACK32, ISL_FORMAT_R8G8B8A8_USCALED),
+   fmt(VK_FORMAT_A8B8G8R8_SSCALED_PACK32, ISL_FORMAT_R8G8B8A8_SSCALED),
+   fmt(VK_FORMAT_A8B8G8R8_UINT_PACK32,    ISL_FORMAT_R8G8B8A8_UINT),
+   fmt(VK_FORMAT_A8B8G8R8_SINT_PACK32,    ISL_FORMAT_R8G8B8A8_SINT),
+   fmt(VK_FORMAT_A8B8G8R8_SRGB_PACK32,    ISL_FORMAT_R8G8B8A8_UNORM_SRGB),
+   fmt(VK_FORMAT_A2R10G10B10_UNORM_PACK32, ISL_FORMAT_B10G10R10A2_UNORM),
+   fmt(VK_FORMAT_A2R10G10B10_SNORM_PACK32, ISL_FORMAT_B10G10R10A2_SNORM),
+   fmt(VK_FORMAT_A2R10G10B10_USCALED_PACK32, ISL_FORMAT_B10G10R10A2_USCALED),
+   fmt(VK_FORMAT_A2R10G10B10_SSCALED_PACK32, ISL_FORMAT_B10G10R10A2_SSCALED),
+   fmt(VK_FORMAT_A2R10G10B10_UINT_PACK32, ISL_FORMAT_B10G10R10A2_UINT),
+   fmt(VK_FORMAT_A2R10G10B10_SINT_PACK32, ISL_FORMAT_B10G10R10A2_SINT),
+   fmt(VK_FORMAT_A2B10G10R10_UNORM_PACK32, ISL_FORMAT_R10G10B10A2_UNORM),
+   fmt(VK_FORMAT_A2B10G10R10_SNORM_PACK32, ISL_FORMAT_R10G10B10A2_SNORM),
+   fmt(VK_FORMAT_A2B10G10R10_USCALED_PACK32, ISL_FORMAT_R10G10B10A2_USCALED),
+   fmt(VK_FORMAT_A2B10G10R10_SSCALED_PACK32, ISL_FORMAT_R10G10B10A2_SSCALED),
+   fmt(VK_FORMAT_A2B10G10R10_UINT_PACK32, ISL_FORMAT_R10G10B10A2_UINT),
+   fmt(VK_FORMAT_A2B10G10R10_SINT_PACK32, ISL_FORMAT_R10G10B10A2_SINT),
+   fmt(VK_FORMAT_R16_UNORM,               ISL_FORMAT_R16_UNORM),
+   fmt(VK_FORMAT_R16_SNORM,               ISL_FORMAT_R16_SNORM),
+   fmt(VK_FORMAT_R16_USCALED,             ISL_FORMAT_R16_USCALED),
+   fmt(VK_FORMAT_R16_SSCALED,             ISL_FORMAT_R16_SSCALED),
+   fmt(VK_FORMAT_R16_UINT,                ISL_FORMAT_R16_UINT),
+   fmt(VK_FORMAT_R16_SINT,                ISL_FORMAT_R16_SINT),
+   fmt(VK_FORMAT_R16_SFLOAT,              ISL_FORMAT_R16_FLOAT),
+   fmt(VK_FORMAT_R16G16_UNORM,            ISL_FORMAT_R16G16_UNORM),
+   fmt(VK_FORMAT_R16G16_SNORM,            ISL_FORMAT_R16G16_SNORM),
+   fmt(VK_FORMAT_R16G16_USCALED,          ISL_FORMAT_R16G16_USCALED),
+   fmt(VK_FORMAT_R16G16_SSCALED,          ISL_FORMAT_R16G16_SSCALED),
+   fmt(VK_FORMAT_R16G16_UINT,             ISL_FORMAT_R16G16_UINT),
+   fmt(VK_FORMAT_R16G16_SINT,             ISL_FORMAT_R16G16_SINT),
+   fmt(VK_FORMAT_R16G16_SFLOAT,           ISL_FORMAT_R16G16_FLOAT),
+   fmt(VK_FORMAT_R16G16B16_UNORM,         ISL_FORMAT_R16G16B16_UNORM),
+   fmt(VK_FORMAT_R16G16B16_SNORM,         ISL_FORMAT_R16G16B16_SNORM),
+   fmt(VK_FORMAT_R16G16B16_USCALED,       ISL_FORMAT_R16G16B16_USCALED),
+   fmt(VK_FORMAT_R16G16B16_SSCALED,       ISL_FORMAT_R16G16B16_SSCALED),
+   fmt(VK_FORMAT_R16G16B16_UINT,          ISL_FORMAT_R16G16B16_UINT),
+   fmt(VK_FORMAT_R16G16B16_SINT,          ISL_FORMAT_R16G16B16_SINT),
+   fmt(VK_FORMAT_R16G16B16_SFLOAT,        ISL_FORMAT_R16G16B16_FLOAT),
+   fmt(VK_FORMAT_R16G16B16A16_UNORM,      ISL_FORMAT_R16G16B16A16_UNORM),
+   fmt(VK_FORMAT_R16G16B16A16_SNORM,      ISL_FORMAT_R16G16B16A16_SNORM),
+   fmt(VK_FORMAT_R16G16B16A16_USCALED,    ISL_FORMAT_R16G16B16A16_USCALED),
+   fmt(VK_FORMAT_R16G16B16A16_SSCALED,    ISL_FORMAT_R16G16B16A16_SSCALED),
+   fmt(VK_FORMAT_R16G16B16A16_UINT,       ISL_FORMAT_R16G16B16A16_UINT),
+   fmt(VK_FORMAT_R16G16B16A16_SINT,       ISL_FORMAT_R16G16B16A16_SINT),
+   fmt(VK_FORMAT_R16G16B16A16_SFLOAT,     ISL_FORMAT_R16G16B16A16_FLOAT),
+   fmt(VK_FORMAT_R32_UINT,                ISL_FORMAT_R32_UINT),
+   fmt(VK_FORMAT_R32_SINT,                ISL_FORMAT_R32_SINT),
+   fmt(VK_FORMAT_R32_SFLOAT,              ISL_FORMAT_R32_FLOAT),
+   fmt(VK_FORMAT_R32G32_UINT,             ISL_FORMAT_R32G32_UINT),
+   fmt(VK_FORMAT_R32G32_SINT,             ISL_FORMAT_R32G32_SINT),
+   fmt(VK_FORMAT_R32G32_SFLOAT,           ISL_FORMAT_R32G32_FLOAT),
+   fmt(VK_FORMAT_R32G32B32_UINT,          ISL_FORMAT_R32G32B32_UINT),
+   fmt(VK_FORMAT_R32G32B32_SINT,          ISL_FORMAT_R32G32B32_SINT),
+   fmt(VK_FORMAT_R32G32B32_SFLOAT,        ISL_FORMAT_R32G32B32_FLOAT),
+   fmt(VK_FORMAT_R32G32B32A32_UINT,       ISL_FORMAT_R32G32B32A32_UINT),
+   fmt(VK_FORMAT_R32G32B32A32_SINT,       ISL_FORMAT_R32G32B32A32_SINT),
+   fmt(VK_FORMAT_R32G32B32A32_SFLOAT,     ISL_FORMAT_R32G32B32A32_FLOAT),
+   fmt(VK_FORMAT_R64_UINT,                ISL_FORMAT_R64_PASSTHRU),
+   fmt(VK_FORMAT_R64_SINT,                ISL_FORMAT_R64_PASSTHRU),
+   fmt(VK_FORMAT_R64_SFLOAT,              ISL_FORMAT_R64_FLOAT),
+   fmt(VK_FORMAT_R64G64_UINT,             ISL_FORMAT_R64G64_PASSTHRU),
+   fmt(VK_FORMAT_R64G64_SINT,             ISL_FORMAT_R64G64_PASSTHRU),
+   fmt(VK_FORMAT_R64G64_SFLOAT,           ISL_FORMAT_R64G64_FLOAT),
+   fmt(VK_FORMAT_R64G64B64_UINT,          ISL_FORMAT_R64G64B64_PASSTHRU),
+   fmt(VK_FORMAT_R64G64B64_SINT,          ISL_FORMAT_R64G64B64_PASSTHRU),
+   fmt(VK_FORMAT_R64G64B64_SFLOAT,        ISL_FORMAT_R64G64B64_FLOAT),
+   fmt(VK_FORMAT_R64G64B64A64_UINT,       ISL_FORMAT_R64G64B64A64_PASSTHRU),
+   fmt(VK_FORMAT_R64G64B64A64_SINT,       ISL_FORMAT_R64G64B64A64_PASSTHRU),
+   fmt(VK_FORMAT_R64G64B64A64_SFLOAT,     ISL_FORMAT_R64G64B64A64_FLOAT),
+   fmt(VK_FORMAT_B10G11R11_UFLOAT_PACK32, ISL_FORMAT_R11G11B10_FLOAT),
+   fmt(VK_FORMAT_E5B9G9R9_UFLOAT_PACK32,  ISL_FORMAT_R9G9B9E5_SHAREDEXP),
+
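+   /* Depth formats map to the equivalent color format used to sample the
+    * depth data; the stencil aspect is always handled separately as R8_UINT
+    * (see anv_get_isl_format).
+    */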
+   fmt(VK_FORMAT_D16_UNORM,               ISL_FORMAT_R16_UNORM,               .has_depth = true),
+   fmt(VK_FORMAT_X8_D24_UNORM_PACK32,     ISL_FORMAT_R24_UNORM_X8_TYPELESS,   .has_depth = true),
+   fmt(VK_FORMAT_D32_SFLOAT,              ISL_FORMAT_R32_FLOAT,               .has_depth = true),
+   fmt(VK_FORMAT_S8_UINT,                 ISL_FORMAT_R8_UINT,                                      .has_stencil = true),
+   fmt(VK_FORMAT_D16_UNORM_S8_UINT,       ISL_FORMAT_R16_UNORM,               .has_depth = true,   .has_stencil = true),
+   fmt(VK_FORMAT_D24_UNORM_S8_UINT,       ISL_FORMAT_R24_UNORM_X8_TYPELESS,   .has_depth = true,   .has_stencil = true),
+   fmt(VK_FORMAT_D32_SFLOAT_S8_UINT,      ISL_FORMAT_R32_FLOAT,               .has_depth = true,   .has_stencil = true),
+
+   fmt(VK_FORMAT_BC1_RGB_UNORM_BLOCK,     ISL_FORMAT_DXT1_RGB),
+   fmt(VK_FORMAT_BC1_RGB_SRGB_BLOCK,      ISL_FORMAT_DXT1_RGB_SRGB),
+   fmt(VK_FORMAT_BC1_RGBA_UNORM_BLOCK,    ISL_FORMAT_BC1_UNORM),
+   fmt(VK_FORMAT_BC1_RGBA_SRGB_BLOCK,     ISL_FORMAT_BC1_UNORM_SRGB),
+   fmt(VK_FORMAT_BC2_UNORM_BLOCK,         ISL_FORMAT_BC2_UNORM),
+   fmt(VK_FORMAT_BC2_SRGB_BLOCK,          ISL_FORMAT_BC2_UNORM_SRGB),
+   fmt(VK_FORMAT_BC3_UNORM_BLOCK,         ISL_FORMAT_BC3_UNORM),
+   fmt(VK_FORMAT_BC3_SRGB_BLOCK,          ISL_FORMAT_BC3_UNORM_SRGB),
+   fmt(VK_FORMAT_BC4_UNORM_BLOCK,         ISL_FORMAT_BC4_UNORM),
+   fmt(VK_FORMAT_BC4_SNORM_BLOCK,         ISL_FORMAT_BC4_SNORM),
+   fmt(VK_FORMAT_BC5_UNORM_BLOCK,         ISL_FORMAT_BC5_UNORM),
+   fmt(VK_FORMAT_BC5_SNORM_BLOCK,         ISL_FORMAT_BC5_SNORM),
+   fmt(VK_FORMAT_BC6H_UFLOAT_BLOCK,       ISL_FORMAT_BC6H_UF16),
+   fmt(VK_FORMAT_BC6H_SFLOAT_BLOCK,       ISL_FORMAT_BC6H_SF16),
+   fmt(VK_FORMAT_BC7_UNORM_BLOCK,         ISL_FORMAT_BC7_UNORM),
+   fmt(VK_FORMAT_BC7_SRGB_BLOCK,          ISL_FORMAT_BC7_UNORM_SRGB),
    fmt(VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK, ISL_FORMAT_ETC2_RGB8),
    fmt(VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK,  ISL_FORMAT_ETC2_SRGB8),
    fmt(VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK, ISL_FORMAT_ETC2_RGB8_PTA),
@@ -201,13 +228,13 @@ static const struct anv_format anv_formats[] = {
    fmt(VK_FORMAT_B8G8R8_UINT,             ISL_FORMAT_UNSUPPORTED),
    fmt(VK_FORMAT_B8G8R8_SINT,             ISL_FORMAT_UNSUPPORTED),
    fmt(VK_FORMAT_B8G8R8_SRGB,             ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_B8G8R8A8_UNORM,          ISL_FORMAT_B8G8R8A8_UNORM,         .num_channels = 4),
+   fmt(VK_FORMAT_B8G8R8A8_UNORM,          ISL_FORMAT_B8G8R8A8_UNORM),
    fmt(VK_FORMAT_B8G8R8A8_SNORM,          ISL_FORMAT_UNSUPPORTED),
    fmt(VK_FORMAT_B8G8R8A8_USCALED,        ISL_FORMAT_UNSUPPORTED),
    fmt(VK_FORMAT_B8G8R8A8_SSCALED,        ISL_FORMAT_UNSUPPORTED),
    fmt(VK_FORMAT_B8G8R8A8_UINT,           ISL_FORMAT_UNSUPPORTED),
    fmt(VK_FORMAT_B8G8R8A8_SINT,           ISL_FORMAT_UNSUPPORTED),
-   fmt(VK_FORMAT_B8G8R8A8_SRGB,           ISL_FORMAT_B8G8R8A8_UNORM_SRGB,    .num_channels = 4),
+   fmt(VK_FORMAT_B8G8R8A8_SRGB,           ISL_FORMAT_B8G8R8A8_UNORM_SRGB),
 };
 
 #undef fmt
@@ -222,19 +249,43 @@ anv_format_for_vk_format(VkFormat format)
  * Exactly one bit must be set in \a aspect.
  */
 enum isl_format
-anv_get_isl_format(VkFormat format, VkImageAspectFlags aspect)
+anv_get_isl_format(VkFormat format, VkImageAspectFlags aspect,
+                   VkImageTiling tiling, struct anv_format_swizzle *swizzle)
 {
    const struct anv_format *anv_fmt = &anv_formats[format];
 
+   if (swizzle)
+      *swizzle = anv_fmt->swizzle;
+
    switch (aspect) {
    case VK_IMAGE_ASPECT_COLOR_BIT:
-      return anv_fmt->surface_format;
+      if (anv_fmt->isl_format == ISL_FORMAT_UNSUPPORTED) {
+         return ISL_FORMAT_UNSUPPORTED;
+      } else if (tiling == VK_IMAGE_TILING_OPTIMAL &&
+                 !util_is_power_of_two(anv_fmt->isl_layout->bs)) {
+         /* Tiled formats *must* be power-of-two because we need up upload
+          * them with the render pipeline.  For 3-channel formats, we fix
+          * this by switching them over to RGBX or RGBA formats under the
+          * hood.
+          */
+         enum isl_format rgbx = isl_format_rgb_to_rgbx(anv_fmt->isl_format);
+         if (rgbx != ISL_FORMAT_UNSUPPORTED)
+            return rgbx;
+         else
+            return isl_format_rgb_to_rgba(anv_fmt->isl_format);
+      } else {
+         return anv_fmt->isl_format;
+      }
+
    case VK_IMAGE_ASPECT_DEPTH_BIT:
-      assert(anv_fmt->depth_format != 0);
-      return anv_fmt->surface_format;
+   case (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT):
+      assert(anv_fmt->has_depth);
+      return anv_fmt->isl_format;
+
    case VK_IMAGE_ASPECT_STENCIL_BIT:
       assert(anv_fmt->has_stencil);
       return ISL_FORMAT_R8_UINT;
+
    default:
       unreachable("bad VkImageAspect");
       return ISL_FORMAT_UNSUPPORTED;
@@ -253,74 +304,118 @@ void anv_validate_GetPhysicalDeviceFormatProperties(
    anv_GetPhysicalDeviceFormatProperties(physicalDevice, _format, pFormatProperties);
 }
 
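+/* Returns the format features supported for image use on the given hardware
+ * generation.  `base` is the format from the table above while `actual` may
+ * be the RGBX/RGBA substitute used for tiled 3-channel formats; a swizzle
+ * that moves alpha disables render-target and blend support.
+ */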
+static VkFormatFeatureFlags
+get_image_format_properties(int gen, enum isl_format base,
+                            enum isl_format actual,
+                            struct anv_format_swizzle swizzle)
+{
+   const struct brw_surface_format_info *info = &surface_formats[actual];
+
+   if (actual == ISL_FORMAT_UNSUPPORTED || !info->exists)
+      return 0;
+
+   VkFormatFeatureFlags flags = 0;
+   if (info->sampling <= gen) {
+      flags |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
+               VK_FORMAT_FEATURE_BLIT_SRC_BIT;
+
+      if (info->filtering <= gen)
+         flags |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
+   }
+
+   /* We can render to swizzled formats.  However, if the alpha channel is
+    * moved, then blending won't work correctly.  The PRM tells us
+    * straight-up not to render to such a surface.
+    */
+   if (info->render_target <= gen && swizzle.a == 3) {
+      flags |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
+               VK_FORMAT_FEATURE_BLIT_DST_BIT;
+   }
+
+   if (info->alpha_blend <= gen && swizzle.a == 3)
+      flags |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT;
+
+   /* Load/store is determined based on the base format.  This prevents RGB
+    * formats from showing up as load/store capable.
+    */
+   if (isl_is_storage_image_format(base))
+      flags |= VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT;
+
+   if (base == ISL_FORMAT_R32_SINT || base == ISL_FORMAT_R32_UINT)
+      flags |= VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT;
+
+   return flags;
+}
+
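+/* Returns the format features supported for texel-buffer and vertex-buffer
+ * use on the given hardware generation.
+ */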
+static VkFormatFeatureFlags
+get_buffer_format_properties(int gen, enum isl_format format)
+{
+   const struct brw_surface_format_info *info = &surface_formats[format];
+
+   if (format == ISL_FORMAT_UNSUPPORTED || !info->exists)
+      return 0;
+
+   VkFormatFeatureFlags flags = 0;
+   if (info->sampling <= gen && !isl_format_is_compressed(format))
+      flags |= VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
+
+   if (info->input_vb <= gen)
+      flags |= VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT;
+
+   if (isl_is_storage_image_format(format))
+      flags |= VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT;
+
+   if (format == ISL_FORMAT_R32_SINT || format == ISL_FORMAT_R32_UINT)
+      flags |= VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT;
+
+   return flags;
+}
+
 static void
 anv_physical_device_get_format_properties(struct anv_physical_device *physical_device,
-                                          const struct anv_format *format,
+                                          VkFormat format,
                                           VkFormatProperties *out_properties)
 {
-   const struct brw_surface_format_info *info;
-   int gen;
-   VkFormatFeatureFlags flags;
-
-   assert(format != NULL);
-
-   gen = physical_device->info->gen * 10;
+   int gen = physical_device->info->gen * 10;
    if (physical_device->info->is_haswell)
       gen += 5;
 
-   if (format->surface_format== ISL_FORMAT_UNSUPPORTED)
-      goto unsupported;
-
-   uint32_t linear = 0, tiled = 0, buffer = 0;
-   if (anv_format_is_depth_or_stencil(format)) {
+   VkFormatFeatureFlags linear = 0, tiled = 0, buffer = 0;
+   if (anv_format_is_depth_or_stencil(&anv_formats[format])) {
       tiled |= VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
       if (physical_device->info->gen >= 8) {
          tiled |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
          tiled |= VK_FORMAT_FEATURE_BLIT_SRC_BIT;
       }
-      if (format->depth_format) {
+      if (anv_formats[format].has_depth) {
          tiled |= VK_FORMAT_FEATURE_BLIT_DST_BIT;
       }
    } else {
-      /* The surface_formats table only contains color formats */
-      info = &surface_formats[format->surface_format];
-      if (!info->exists)
-         goto unsupported;
-
-      if (info->sampling <= gen) {
-         flags = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
-                 VK_FORMAT_FEATURE_BLIT_SRC_BIT;
-         linear |= flags;
-         tiled |= flags;
-
-         if (!isl_format_is_compressed(format->surface_format))
-            buffer |= VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
-      }
-      if (info->render_target <= gen) {
-         flags = VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
-                 VK_FORMAT_FEATURE_BLIT_DST_BIT;
-         linear |= flags;
-         tiled |= flags;
-      }
-      if (info->alpha_blend <= gen) {
-         linear |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT;
-         tiled |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT;
-      }
-      if (info->input_vb <= gen) {
-         buffer |= VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT;
-      }
-
-      if (isl_is_storage_image_format(format->surface_format)) {
-         tiled |= VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT;
-         linear |= VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT;
-         buffer |= VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT;
-      }
-
-      if (format->surface_format == ISL_FORMAT_R32_SINT &&
-          format->surface_format == ISL_FORMAT_R32_UINT) {
-         tiled |= VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT;
-         linear |= VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT;
-         buffer |= VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT;
+      enum isl_format linear_fmt, tiled_fmt;
+      struct anv_format_swizzle linear_swizzle, tiled_swizzle;
+      linear_fmt = anv_get_isl_format(format, VK_IMAGE_ASPECT_COLOR_BIT,
+                                      VK_IMAGE_TILING_LINEAR, &linear_swizzle);
+      tiled_fmt = anv_get_isl_format(format, VK_IMAGE_ASPECT_COLOR_BIT,
+                                     VK_IMAGE_TILING_OPTIMAL, &tiled_swizzle);
+
+      linear = get_image_format_properties(gen, linear_fmt, linear_fmt,
+                                           linear_swizzle);
+      tiled = get_image_format_properties(gen, linear_fmt, tiled_fmt,
+                                          tiled_swizzle);
+      buffer = get_buffer_format_properties(gen, linear_fmt);
+
+      /* XXX: We handle 3-channel formats by switching them out for RGBX or
+       * RGBA formats behind the scenes.  This works fine for textures
+       * because the upload process will fill in the extra channel.
+       * We could also support it for render targets, but it will take
+       * substantially more work and we have enough RGBX formats to handle
+       * what most clients will want.
+       */
+      if (linear_fmt != ISL_FORMAT_UNSUPPORTED &&
+          !util_is_power_of_two(isl_format_layouts[linear_fmt].bs) &&
+          isl_format_rgb_to_rgbx(linear_fmt) == ISL_FORMAT_UNSUPPORTED) {
+         tiled &= ~VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT &
+                  ~VK_FORMAT_FEATURE_BLIT_DST_BIT;
       }
    }
 
@@ -329,11 +424,6 @@ anv_physical_device_get_format_properties(struct anv_physical_device *physical_d
    out_properties->bufferFeatures = buffer;
 
    return;
-
- unsupported:
-   out_properties->linearTilingFeatures = 0;
-   out_properties->optimalTilingFeatures = 0;
-   out_properties->bufferFeatures = 0;
 }
 
 
@@ -346,26 +436,26 @@ void anv_GetPhysicalDeviceFormatProperties(
 
    anv_physical_device_get_format_properties(
                physical_device,
-               anv_format_for_vk_format(format),
+               format,
                pFormatProperties);
 }
 
 VkResult anv_GetPhysicalDeviceImageFormatProperties(
     VkPhysicalDevice                            physicalDevice,
-    VkFormat                                    _format,
+    VkFormat                                    format,
     VkImageType                                 type,
     VkImageTiling                               tiling,
     VkImageUsageFlags                           usage,
-    VkImageCreateFlags                          flags,
+    VkImageCreateFlags                          createFlags,
     VkImageFormatProperties*                    pImageFormatProperties)
 {
    ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
-   const struct anv_format *format = anv_format_for_vk_format(_format);
    VkFormatProperties format_props;
    VkFormatFeatureFlags format_feature_flags;
    VkExtent3D maxExtent;
    uint32_t maxMipLevels;
    uint32_t maxArraySize;
+   VkSampleCountFlags sampleCounts = VK_SAMPLE_COUNT_1_BIT;
 
    anv_physical_device_get_format_properties(physical_device, format,
                                              &format_props);
@@ -390,6 +480,7 @@ VkResult anv_GetPhysicalDeviceImageFormatProperties(
       maxExtent.depth = 1;
       maxMipLevels = 15; /* log2(maxWidth) + 1 */
       maxArraySize = 2048;
+      sampleCounts = VK_SAMPLE_COUNT_1_BIT;
       break;
    case VK_IMAGE_TYPE_2D:
       /* FINISHME: Does this really differ for cube maps? The documentation
@@ -410,6 +501,15 @@ VkResult anv_GetPhysicalDeviceImageFormatProperties(
       break;
    }
 
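+   /* Multisampling is only advertised for tiled 2D color or depth/stencil
+    * attachments that are neither cube-compatible nor used for storage.
+    */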
+   if (tiling == VK_IMAGE_TILING_OPTIMAL &&
+       type == VK_IMAGE_TYPE_2D &&
+       (format_feature_flags & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
+                                VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) &&
+       !(createFlags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) &&
+       !(usage & VK_IMAGE_USAGE_STORAGE_BIT)) {
+      sampleCounts = isl_device_get_sample_counts(&physical_device->isl_dev);
+   }
+
    if (usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) {
       /* Meta implements transfers by sampling from the source image. */
       if (!(format_feature_flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
@@ -417,8 +517,9 @@ VkResult anv_GetPhysicalDeviceImageFormatProperties(
       }
    }
 
+#if 0
    if (usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) {
-      if (format->has_stencil) {
+      if (anv_format_for_vk_format(format)->has_stencil) {
          /* Not yet implemented because copying to a W-tiled surface is crazy
           * hard.
           */
@@ -427,6 +528,7 @@ VkResult anv_GetPhysicalDeviceImageFormatProperties(
          goto unsupported;
       }
    }
+#endif
 
    if (usage & VK_IMAGE_USAGE_SAMPLED_BIT) {
       if (!(format_feature_flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
@@ -466,9 +568,7 @@ VkResult anv_GetPhysicalDeviceImageFormatProperties(
       .maxExtent = maxExtent,
       .maxMipLevels = maxMipLevels,
       .maxArrayLayers = maxArraySize,
-
-      /* FINISHME: Support multisampling */
-      .sampleCounts = VK_SAMPLE_COUNT_1_BIT,
+      .sampleCounts = sampleCounts,
 
       /* FINISHME: Accurately calculate
        * VkImageFormatProperties::maxResourceSize.