#include <assert.h>
#include "isl.h"
-#include "common/gen_device_info.h"
+#include "isl_priv.h"
+#include "dev/gen_device_info.h"
+
+#include "main/macros.h" /* Needed for MAX3 and MAX2 for format_rgb9e5 */
+#include "util/format_srgb.h"
+#include "util/format_rgb9e5.h"
+#include "util/format_r11g11b10f.h"
+
+/* Header-only format conversion include */
+#include "main/format_utils.h"
struct surface_format_info {
bool exists;
uint8_t input_vb;
uint8_t streamed_output_vb;
uint8_t color_processing;
- uint8_t lossless_compression;
+ uint8_t typed_write;
+ uint8_t typed_read;
+ uint8_t ccs_e;
};
/* This macro allows us to write the table almost as it appears in the PRM,
* while restructuring it to turn it into the C code we want.
*/
-#define SF(sampl, filt, shad, ck, rt, ab, vb, so, color, ccs_e, sf) \
- [ISL_FORMAT_##sf] = { true, sampl, filt, shad, ck, rt, ab, vb, so, color, ccs_e},
+#define SF(sampl, filt, shad, ck, rt, ab, vb, so, color, tw, tr, ccs_e, sf) \
+ [ISL_FORMAT_##sf] = { true, sampl, filt, shad, ck, rt, ab, vb, so, color, tw, tr, ccs_e},
#define Y 0
#define x 255
* - Render Target Surface Types [SKL+]
*/
static const struct surface_format_info format_info[] = {
-/* smpl filt shad CK RT AB VB SO color ccs_e */
- SF( Y, 50, x, x, Y, Y, Y, Y, x, 90, R32G32B32A32_FLOAT)
- SF( Y, x, x, x, Y, x, Y, Y, x, 90, R32G32B32A32_SINT)
- SF( Y, x, x, x, Y, x, Y, Y, x, 90, R32G32B32A32_UINT)
- SF( x, x, x, x, x, x, Y, x, x, x, R32G32B32A32_UNORM)
- SF( x, x, x, x, x, x, Y, x, x, x, R32G32B32A32_SNORM)
- SF( x, x, x, x, x, x, Y, x, x, x, R64G64_FLOAT)
- SF( Y, 50, x, x, x, x, x, x, x, x, R32G32B32X32_FLOAT)
- SF( x, x, x, x, x, x, Y, x, x, x, R32G32B32A32_SSCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R32G32B32A32_USCALED)
- SF( x, x, x, x, x, x, 75, x, x, x, R32G32B32A32_SFIXED)
- SF( x, x, x, x, x, x, 80, x, x, x, R64G64_PASSTHRU)
- SF( Y, 50, x, x, x, x, Y, Y, x, x, R32G32B32_FLOAT)
- SF( Y, x, x, x, x, x, Y, Y, x, x, R32G32B32_SINT)
- SF( Y, x, x, x, x, x, Y, Y, x, x, R32G32B32_UINT)
- SF( x, x, x, x, x, x, Y, x, x, x, R32G32B32_UNORM)
- SF( x, x, x, x, x, x, Y, x, x, x, R32G32B32_SNORM)
- SF( x, x, x, x, x, x, Y, x, x, x, R32G32B32_SSCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R32G32B32_USCALED)
- SF( x, x, x, x, x, x, 75, x, x, x, R32G32B32_SFIXED)
- SF( Y, Y, x, x, Y, 45, Y, x, 60, 90, R16G16B16A16_UNORM)
- SF( Y, Y, x, x, Y, 60, Y, x, x, 90, R16G16B16A16_SNORM)
- SF( Y, x, x, x, Y, x, Y, x, x, 90, R16G16B16A16_SINT)
- SF( Y, x, x, x, Y, x, Y, x, x, 90, R16G16B16A16_UINT)
- SF( Y, Y, x, x, Y, Y, Y, x, x, 90, R16G16B16A16_FLOAT)
- SF( Y, 50, x, x, Y, Y, Y, Y, x, 90, R32G32_FLOAT)
- SF( Y, 70, x, x, Y, Y, Y, Y, x, x, R32G32_FLOAT_LD)
- SF( Y, x, x, x, Y, x, Y, Y, x, 90, R32G32_SINT)
- SF( Y, x, x, x, Y, x, Y, Y, x, 90, R32G32_UINT)
- SF( Y, 50, Y, x, x, x, x, x, x, x, R32_FLOAT_X8X24_TYPELESS)
- SF( Y, x, x, x, x, x, x, x, x, x, X32_TYPELESS_G8X24_UINT)
- SF( Y, 50, x, x, x, x, x, x, x, x, L32A32_FLOAT)
- SF( x, x, x, x, x, x, Y, x, x, x, R32G32_UNORM)
- SF( x, x, x, x, x, x, Y, x, x, x, R32G32_SNORM)
- SF( x, x, x, x, x, x, Y, x, x, x, R64_FLOAT)
- SF( Y, Y, x, x, x, x, x, x, x, x, R16G16B16X16_UNORM)
- SF( Y, Y, x, x, x, x, x, x, x, 90, R16G16B16X16_FLOAT)
- SF( Y, 50, x, x, x, x, x, x, x, x, A32X32_FLOAT)
- SF( Y, 50, x, x, x, x, x, x, x, x, L32X32_FLOAT)
- SF( Y, 50, x, x, x, x, x, x, x, x, I32X32_FLOAT)
- SF( x, x, x, x, x, x, Y, x, x, x, R16G16B16A16_SSCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R16G16B16A16_USCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R32G32_SSCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R32G32_USCALED)
- SF( x, x, x, x, x, x, 75, x, x, x, R32G32_SFIXED)
- SF( x, x, x, x, x, x, 80, x, x, x, R64_PASSTHRU)
- SF( Y, Y, x, Y, Y, Y, Y, x, 60, 90, B8G8R8A8_UNORM)
- SF( Y, Y, x, x, Y, Y, x, x, x, x, B8G8R8A8_UNORM_SRGB)
-/* smpl filt shad CK RT AB VB SO color ccs_e */
- SF( Y, Y, x, x, Y, Y, Y, x, 60, x, R10G10B10A2_UNORM)
- SF( Y, Y, x, x, x, x, x, x, 60, x, R10G10B10A2_UNORM_SRGB)
- SF( Y, x, x, x, Y, x, Y, x, x, x, R10G10B10A2_UINT)
- SF( Y, Y, x, x, x, x, Y, x, x, x, R10G10B10_SNORM_A2_UNORM)
- SF( Y, Y, x, x, Y, Y, Y, x, 60, 90, R8G8B8A8_UNORM)
- SF( Y, Y, x, x, Y, Y, x, x, 60, x, R8G8B8A8_UNORM_SRGB)
- SF( Y, Y, x, x, Y, 60, Y, x, x, 90, R8G8B8A8_SNORM)
- SF( Y, x, x, x, Y, x, Y, x, x, 90, R8G8B8A8_SINT)
- SF( Y, x, x, x, Y, x, Y, x, x, 90, R8G8B8A8_UINT)
- SF( Y, Y, x, x, Y, 45, Y, x, x, 90, R16G16_UNORM)
- SF( Y, Y, x, x, Y, 60, Y, x, x, 90, R16G16_SNORM)
- SF( Y, x, x, x, Y, x, Y, x, x, 90, R16G16_SINT)
- SF( Y, x, x, x, Y, x, Y, x, x, 90, R16G16_UINT)
- SF( Y, Y, x, x, Y, Y, Y, x, x, 90, R16G16_FLOAT)
- SF( Y, Y, x, x, Y, Y, 75, x, 60, x, B10G10R10A2_UNORM)
- SF( Y, Y, x, x, Y, Y, x, x, 60, x, B10G10R10A2_UNORM_SRGB)
- SF( Y, Y, x, x, Y, Y, Y, x, x, x, R11G11B10_FLOAT)
- SF( Y, x, x, x, Y, x, Y, Y, x, 90, R32_SINT)
- SF( Y, x, x, x, Y, x, Y, Y, x, 90, R32_UINT)
- SF( Y, 50, Y, x, Y, Y, Y, Y, x, 90, R32_FLOAT)
- SF( Y, 50, Y, x, x, x, x, x, x, x, R24_UNORM_X8_TYPELESS)
- SF( Y, x, x, x, x, x, x, x, x, x, X24_TYPELESS_G8_UINT)
- SF( Y, Y, x, x, x, x, x, x, x, x, L16A16_UNORM)
- SF( Y, 50, Y, x, x, x, x, x, x, x, I24X8_UNORM)
- SF( Y, 50, Y, x, x, x, x, x, x, x, L24X8_UNORM)
- SF( Y, 50, Y, x, x, x, x, x, x, x, A24X8_UNORM)
- SF( Y, 50, Y, x, x, x, x, x, x, x, I32_FLOAT)
- SF( Y, 50, Y, x, x, x, x, x, x, x, L32_FLOAT)
- SF( Y, 50, Y, x, x, x, x, x, x, x, A32_FLOAT)
- SF( Y, Y, x, Y, 80, 80, x, x, 60, 90, B8G8R8X8_UNORM)
- SF( Y, Y, x, x, 80, 80, x, x, x, x, B8G8R8X8_UNORM_SRGB)
- SF( Y, Y, x, x, x, x, x, x, x, x, R8G8B8X8_UNORM)
- SF( Y, Y, x, x, x, x, x, x, x, x, R8G8B8X8_UNORM_SRGB)
- SF( Y, Y, x, x, x, x, x, x, x, x, R9G9B9E5_SHAREDEXP)
- SF( Y, Y, x, x, x, x, x, x, x, x, B10G10R10X2_UNORM)
- SF( Y, Y, x, x, x, x, x, x, x, x, L16A16_FLOAT)
- SF( x, x, x, x, x, x, Y, x, x, x, R32_UNORM)
- SF( x, x, x, x, x, x, Y, x, x, x, R32_SNORM)
-/* smpl filt shad CK RT AB VB SO color ccs_e */
- SF( x, x, x, x, x, x, Y, x, x, x, R10G10B10X2_USCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R8G8B8A8_SSCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R8G8B8A8_USCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R16G16_SSCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R16G16_USCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R32_SSCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R32_USCALED)
- SF( Y, Y, x, Y, Y, Y, x, x, x, x, B5G6R5_UNORM)
- SF( Y, Y, x, x, Y, Y, x, x, x, x, B5G6R5_UNORM_SRGB)
- SF( Y, Y, x, Y, Y, Y, x, x, x, x, B5G5R5A1_UNORM)
- SF( Y, Y, x, x, Y, Y, x, x, x, x, B5G5R5A1_UNORM_SRGB)
- SF( Y, Y, x, Y, Y, Y, x, x, x, x, B4G4R4A4_UNORM)
- SF( Y, Y, x, x, Y, Y, x, x, x, x, B4G4R4A4_UNORM_SRGB)
- SF( Y, Y, x, x, Y, Y, Y, x, x, x, R8G8_UNORM)
- SF( Y, Y, x, Y, Y, 60, Y, x, x, x, R8G8_SNORM)
- SF( Y, x, x, x, Y, x, Y, x, x, x, R8G8_SINT)
- SF( Y, x, x, x, Y, x, Y, x, x, x, R8G8_UINT)
- SF( Y, Y, Y, x, Y, 45, Y, x, 70, x, R16_UNORM)
- SF( Y, Y, x, x, Y, 60, Y, x, x, x, R16_SNORM)
- SF( Y, x, x, x, Y, x, Y, x, x, x, R16_SINT)
- SF( Y, x, x, x, Y, x, Y, x, x, x, R16_UINT)
- SF( Y, Y, x, x, Y, Y, Y, x, x, x, R16_FLOAT)
- SF(50, 50, x, x, x, x, x, x, x, x, A8P8_UNORM_PALETTE0)
- SF(50, 50, x, x, x, x, x, x, x, x, A8P8_UNORM_PALETTE1)
- SF( Y, Y, Y, x, x, x, x, x, x, x, I16_UNORM)
- SF( Y, Y, Y, x, x, x, x, x, x, x, L16_UNORM)
- SF( Y, Y, Y, x, x, x, x, x, x, x, A16_UNORM)
- SF( Y, Y, x, Y, x, x, x, x, x, x, L8A8_UNORM)
- SF( Y, Y, Y, x, x, x, x, x, x, x, I16_FLOAT)
- SF( Y, Y, Y, x, x, x, x, x, x, x, L16_FLOAT)
- SF( Y, Y, Y, x, x, x, x, x, x, x, A16_FLOAT)
- SF(45, 45, x, x, x, x, x, x, x, x, L8A8_UNORM_SRGB)
- SF( Y, Y, x, Y, x, x, x, x, x, x, R5G5_SNORM_B6_UNORM)
- SF( x, x, x, x, Y, Y, x, x, x, x, B5G5R5X1_UNORM)
- SF( x, x, x, x, Y, Y, x, x, x, x, B5G5R5X1_UNORM_SRGB)
- SF( x, x, x, x, x, x, Y, x, x, x, R8G8_SSCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R8G8_USCALED)
-/* smpl filt shad CK RT AB VB SO color ccs_e */
- SF( x, x, x, x, x, x, Y, x, x, x, R16_SSCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R16_USCALED)
- SF(50, 50, x, x, x, x, x, x, x, x, P8A8_UNORM_PALETTE0)
- SF(50, 50, x, x, x, x, x, x, x, x, P8A8_UNORM_PALETTE1)
- SF( x, x, x, x, x, x, x, x, x, x, A1B5G5R5_UNORM)
+/* smpl filt shad CK RT AB VB SO color TW TR ccs_e */
+ SF( Y, 50, x, x, Y, Y, Y, Y, x, 70, 90, 90, R32G32B32A32_FLOAT)
+ SF( Y, x, x, x, Y, x, Y, Y, x, 70, 90, 90, R32G32B32A32_SINT)
+ SF( Y, x, x, x, Y, x, Y, Y, x, 70, 90, 90, R32G32B32A32_UINT)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R32G32B32A32_UNORM)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R32G32B32A32_SNORM)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R64G64_FLOAT)
+ SF( Y, 50, x, x, 100, 100, x, x, x, x, x, 100, R32G32B32X32_FLOAT)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R32G32B32A32_SSCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R32G32B32A32_USCALED)
+ SF( x, x, x, x, x, x, 75, x, x, x, x, x, R32G32B32A32_SFIXED)
+ SF( x, x, x, x, x, x, 80, x, x, x, x, x, R64G64_PASSTHRU)
+ SF( Y, 50, x, x, x, x, Y, Y, x, x, x, x, R32G32B32_FLOAT)
+ SF( Y, x, x, x, x, x, Y, Y, x, x, x, x, R32G32B32_SINT)
+ SF( Y, x, x, x, x, x, Y, Y, x, x, x, x, R32G32B32_UINT)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R32G32B32_UNORM)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R32G32B32_SNORM)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R32G32B32_SSCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R32G32B32_USCALED)
+ SF( x, x, x, x, x, x, 75, x, x, x, x, x, R32G32B32_SFIXED)
+ SF( Y, Y, x, x, Y, 45, Y, x, 60, 70, 110, 90, R16G16B16A16_UNORM)
+ SF( Y, Y, x, x, Y, 60, Y, x, x, 70, 110, 90, R16G16B16A16_SNORM)
+ SF( Y, x, x, x, Y, x, Y, x, x, 70, 90, 90, R16G16B16A16_SINT)
+ SF( Y, x, x, x, Y, x, Y, x, x, 70, 75, 90, R16G16B16A16_UINT)
+ SF( Y, Y, x, x, Y, Y, Y, x, x, 70, 90, 90, R16G16B16A16_FLOAT)
+ SF( Y, 50, x, x, Y, Y, Y, Y, x, 70, 90, 90, R32G32_FLOAT)
+ SF( Y, 70, x, x, Y, Y, Y, Y, x, x, x, x, R32G32_FLOAT_LD)
+ SF( Y, x, x, x, Y, x, Y, Y, x, 70, 90, 90, R32G32_SINT)
+ SF( Y, x, x, x, Y, x, Y, Y, x, 70, 90, 90, R32G32_UINT)
+ SF( Y, 50, Y, x, x, x, x, x, x, x, x, x, R32_FLOAT_X8X24_TYPELESS)
+ SF( Y, x, x, x, x, x, x, x, x, x, x, x, X32_TYPELESS_G8X24_UINT)
+ SF( Y, 50, x, x, x, x, x, x, x, x, x, x, L32A32_FLOAT)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R32G32_UNORM)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R32G32_SNORM)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R64_FLOAT)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, R16G16B16X16_UNORM)
+ SF( Y, Y, x, x, 90, 90, x, x, x, x, x, 90, R16G16B16X16_FLOAT)
+ SF( Y, 50, x, x, x, x, x, x, x, x, x, x, A32X32_FLOAT)
+ SF( Y, 50, x, x, x, x, x, x, x, x, x, x, L32X32_FLOAT)
+ SF( Y, 50, x, x, x, x, x, x, x, x, x, x, I32X32_FLOAT)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R16G16B16A16_SSCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R16G16B16A16_USCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R32G32_SSCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R32G32_USCALED)
+ SF( x, x, x, x, x, x, 75, x, x, x, x, x, R32G32_SFIXED)
+ SF( x, x, x, x, x, x, 80, x, x, x, x, x, R64_PASSTHRU)
+ SF( Y, Y, x, Y, Y, Y, Y, x, 60, 70, x, 90, B8G8R8A8_UNORM)
+ SF( Y, Y, x, x, Y, Y, x, x, x, x, x, 100, B8G8R8A8_UNORM_SRGB)
+/* smpl filt shad CK RT AB VB SO color TW TR ccs_e */
+ SF( Y, Y, x, x, Y, Y, Y, x, 60, 70, x, 100, R10G10B10A2_UNORM)
+ SF( Y, Y, x, x, x, x, x, x, 60, x, x, 120, R10G10B10A2_UNORM_SRGB)
+ SF( Y, x, x, x, Y, x, Y, x, x, 70, x, 100, R10G10B10A2_UINT)
+ SF( Y, Y, x, x, x, x, Y, x, x, x, x, x, R10G10B10_SNORM_A2_UNORM)
+ SF( Y, Y, x, x, Y, Y, Y, x, 60, 70, 110, 90, R8G8B8A8_UNORM)
+ SF( Y, Y, x, x, Y, Y, x, x, 60, x, x, 100, R8G8B8A8_UNORM_SRGB)
+ SF( Y, Y, x, x, Y, 60, Y, x, x, 70, 110, 90, R8G8B8A8_SNORM)
+ SF( Y, x, x, x, Y, x, Y, x, x, 70, 90, 90, R8G8B8A8_SINT)
+ SF( Y, x, x, x, Y, x, Y, x, x, 70, 75, 90, R8G8B8A8_UINT)
+ SF( Y, Y, x, x, Y, 45, Y, x, x, 70, 110, 90, R16G16_UNORM)
+ SF( Y, Y, x, x, Y, 60, Y, x, x, 70, 110, 90, R16G16_SNORM)
+ SF( Y, x, x, x, Y, x, Y, x, x, 70, 90, 90, R16G16_SINT)
+ SF( Y, x, x, x, Y, x, Y, x, x, 70, 75, 90, R16G16_UINT)
+ SF( Y, Y, x, x, Y, Y, Y, x, x, 70, 90, 90, R16G16_FLOAT)
+ SF( Y, Y, x, x, Y, Y, 75, x, 60, 70, x, 100, B10G10R10A2_UNORM)
+ SF( Y, Y, x, x, Y, Y, x, x, 60, x, x, 100, B10G10R10A2_UNORM_SRGB)
+ SF( Y, Y, x, x, Y, Y, Y, x, x, 70, x, 100, R11G11B10_FLOAT)
+ SF(120, 120, x, x, 120, 120, x, x, x, x, x, 120, R10G10B10_FLOAT_A2_UNORM)
+ SF( Y, x, x, x, Y, x, Y, Y, x, 70, 70, 90, R32_SINT)
+ SF( Y, x, x, x, Y, x, Y, Y, x, 70, 70, 90, R32_UINT)
+ SF( Y, 50, Y, x, Y, Y, Y, Y, x, 70, 70, 90, R32_FLOAT)
+ SF( Y, 50, Y, x, x, x, x, x, x, x, x, 120, R24_UNORM_X8_TYPELESS)
+ SF( Y, x, x, x, x, x, x, x, x, x, x, x, X24_TYPELESS_G8_UINT)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, L16A16_UNORM)
+ SF( Y, 50, Y, x, x, x, x, x, x, x, x, x, I24X8_UNORM)
+ SF( Y, 50, Y, x, x, x, x, x, x, x, x, x, L24X8_UNORM)
+ SF( Y, 50, Y, x, x, x, x, x, x, x, x, x, A24X8_UNORM)
+ SF( Y, 50, Y, x, x, x, x, x, x, x, x, x, I32_FLOAT)
+ SF( Y, 50, Y, x, x, x, x, x, x, x, x, x, L32_FLOAT)
+ SF( Y, 50, Y, x, x, x, x, x, x, x, x, x, A32_FLOAT)
+ SF( Y, Y, x, Y, 80, 80, x, x, 60, x, x, 90, B8G8R8X8_UNORM)
+ SF( Y, Y, x, x, 80, 80, x, x, x, x, x, 100, B8G8R8X8_UNORM_SRGB)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, R8G8B8X8_UNORM)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, R8G8B8X8_UNORM_SRGB)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, R9G9B9E5_SHAREDEXP)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, B10G10R10X2_UNORM)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, L16A16_FLOAT)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R32_UNORM)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R32_SNORM)
+/* smpl filt shad CK RT AB VB SO color TW TR ccs_e */
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R10G10B10X2_USCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R8G8B8A8_SSCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R8G8B8A8_USCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R16G16_SSCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R16G16_USCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R32_SSCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R32_USCALED)
+ SF( Y, Y, x, Y, Y, Y, x, x, x, 70, x, 120, B5G6R5_UNORM)
+ SF( Y, Y, x, x, Y, Y, x, x, x, x, x, 120, B5G6R5_UNORM_SRGB)
+ SF( Y, Y, x, Y, Y, Y, x, x, x, 70, x, 120, B5G5R5A1_UNORM)
+ SF( Y, Y, x, x, Y, Y, x, x, x, x, x, 120, B5G5R5A1_UNORM_SRGB)
+ SF( Y, Y, x, Y, Y, Y, x, x, x, 70, x, 120, B4G4R4A4_UNORM)
+ SF( Y, Y, x, x, Y, Y, x, x, x, x, x, 120, B4G4R4A4_UNORM_SRGB)
+ SF( Y, Y, x, x, Y, Y, Y, x, x, 70, 110, 120, R8G8_UNORM)
+ SF( Y, Y, x, Y, Y, 60, Y, x, x, 70, 110, 120, R8G8_SNORM)
+ SF( Y, x, x, x, Y, x, Y, x, x, 70, 90, 120, R8G8_SINT)
+ SF( Y, x, x, x, Y, x, Y, x, x, 70, 75, 120, R8G8_UINT)
+ SF( Y, Y, Y, x, Y, 45, Y, x, 70, 70, 110, 120, R16_UNORM)
+ SF( Y, Y, x, x, Y, 60, Y, x, x, 70, 110, 120, R16_SNORM)
+ SF( Y, x, x, x, Y, x, Y, x, x, 70, 90, 120, R16_SINT)
+ SF( Y, x, x, x, Y, x, Y, x, x, 70, 75, 120, R16_UINT)
+ SF( Y, Y, x, x, Y, Y, Y, x, x, 70, 90, 120, R16_FLOAT)
+ SF( 50, 50, x, x, x, x, x, x, x, x, x, x, A8P8_UNORM_PALETTE0)
+ SF( 50, 50, x, x, x, x, x, x, x, x, x, x, A8P8_UNORM_PALETTE1)
+ SF( Y, Y, Y, x, x, x, x, x, x, x, x, x, I16_UNORM)
+ SF( Y, Y, Y, x, x, x, x, x, x, x, x, x, L16_UNORM)
+ SF( Y, Y, Y, x, x, x, x, x, x, x, x, x, A16_UNORM)
+ SF( Y, Y, x, Y, x, x, x, x, x, x, x, x, L8A8_UNORM)
+ SF( Y, Y, Y, x, x, x, x, x, x, x, x, x, I16_FLOAT)
+ SF( Y, Y, Y, x, x, x, x, x, x, x, x, x, L16_FLOAT)
+ SF( Y, Y, Y, x, x, x, x, x, x, x, x, x, A16_FLOAT)
+ SF( 45, 45, x, x, x, x, x, x, x, x, x, x, L8A8_UNORM_SRGB)
+ SF( Y, Y, x, Y, x, x, x, x, x, x, x, x, R5G5_SNORM_B6_UNORM)
+ SF( x, x, x, x, Y, Y, x, x, x, 70, x, 120, B5G5R5X1_UNORM)
+ SF( x, x, x, x, Y, Y, x, x, x, x, x, 120, B5G5R5X1_UNORM_SRGB)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R8G8_SSCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R8G8_USCALED)
+/* smpl filt shad CK RT AB VB SO color TW TR ccs_e */
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R16_SSCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R16_USCALED)
+ SF( 50, 50, x, x, x, x, x, x, x, x, x, x, P8A8_UNORM_PALETTE0)
+ SF( 50, 50, x, x, x, x, x, x, x, x, x, x, P8A8_UNORM_PALETTE1)
+ SF(120, 120, x, x, 120, 120, x, x, x, x, x, 120, A1B5G5R5_UNORM)
/* According to the PRM, A4B4G4R4_UNORM isn't supported until Sky Lake
- * but empirical testing indicates that it works just fine on Broadwell.
+ * but empirical testing indicates that at least sampling works just fine
+ * on Broadwell.
+ */
+ SF( 80, 80, x, x, 90, 120, x, x, x, x, x, 120, A4B4G4R4_UNORM)
+ SF( 90, x, x, x, x, x, x, x, x, x, x, x, L8A8_UINT)
+ SF( 90, x, x, x, x, x, x, x, x, x, x, x, L8A8_SINT)
+ SF( Y, Y, x, 45, Y, Y, Y, x, x, 70, 110, 120, R8_UNORM)
+ SF( Y, Y, x, x, Y, 60, Y, x, x, 70, 110, 120, R8_SNORM)
+ SF( Y, x, x, x, Y, x, Y, x, x, 70, 90, 120, R8_SINT)
+ SF( Y, x, x, x, Y, x, Y, x, x, 70, 75, 120, R8_UINT)
+ SF( Y, Y, x, Y, Y, Y, x, x, x, 70, 110, 120, A8_UNORM)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, I8_UNORM)
+ SF( Y, Y, x, Y, x, x, x, x, x, x, x, x, L8_UNORM)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, P4A4_UNORM_PALETTE0)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, A4P4_UNORM_PALETTE0)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R8_SSCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R8_USCALED)
+ SF( 45, 45, x, x, x, x, x, x, x, x, x, x, P8_UNORM_PALETTE0)
+ SF( 45, 45, x, x, x, x, x, x, x, x, x, x, L8_UNORM_SRGB)
+ SF( 45, 45, x, x, x, x, x, x, x, x, x, x, P8_UNORM_PALETTE1)
+ SF( 45, 45, x, x, x, x, x, x, x, x, x, x, P4A4_UNORM_PALETTE1)
+ SF( 45, 45, x, x, x, x, x, x, x, x, x, x, A4P4_UNORM_PALETTE1)
+ SF( x, x, x, x, x, x, x, x, x, x, x, x, Y8_UNORM)
+ SF( 90, x, x, x, x, x, x, x, x, x, x, x, L8_UINT)
+ SF( 90, x, x, x, x, x, x, x, x, x, x, x, L8_SINT)
+ SF( 90, x, x, x, x, x, x, x, x, x, x, x, I8_UINT)
+ SF( 90, x, x, x, x, x, x, x, x, x, x, x, I8_SINT)
+ SF( 45, 45, x, x, x, x, x, x, x, x, x, x, DXT1_RGB_SRGB)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, R1_UNORM)
+ SF( Y, Y, x, Y, Y, x, x, x, 60, x, x, x, YCRCB_NORMAL)
+ SF( Y, Y, x, Y, Y, x, x, x, 60, x, x, x, YCRCB_SWAPUVY)
+ SF( 45, 45, x, x, x, x, x, x, x, x, x, x, P2_UNORM_PALETTE0)
+ SF( 45, 45, x, x, x, x, x, x, x, x, x, x, P2_UNORM_PALETTE1)
+ SF( Y, Y, x, Y, x, x, x, x, x, x, x, x, BC1_UNORM)
+ SF( Y, Y, x, Y, x, x, x, x, x, x, x, x, BC2_UNORM)
+ SF( Y, Y, x, Y, x, x, x, x, x, x, x, x, BC3_UNORM)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, BC4_UNORM)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, BC5_UNORM)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, BC1_UNORM_SRGB)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, BC2_UNORM_SRGB)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, BC3_UNORM_SRGB)
+ SF( Y, x, x, x, x, x, x, x, x, x, x, x, MONO8)
+ SF( Y, Y, x, x, Y, x, x, x, 60, x, x, x, YCRCB_SWAPUV)
+ SF( Y, Y, x, x, Y, x, x, x, 60, x, x, x, YCRCB_SWAPY)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, DXT1_RGB)
+/* smpl filt shad CK RT AB VB SO color TW TR ccs_e */
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, FXT1)
+ SF( 75, 75, x, x, x, x, Y, x, x, x, x, x, R8G8B8_UNORM)
+ SF( 75, 75, x, x, x, x, Y, x, x, x, x, x, R8G8B8_SNORM)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R8G8B8_SSCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R8G8B8_USCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R64G64B64A64_FLOAT)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R64G64B64_FLOAT)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, BC4_SNORM)
+ SF( Y, Y, x, x, x, x, x, x, x, x, x, x, BC5_SNORM)
+ SF( 50, 50, x, x, x, x, 60, x, x, x, x, x, R16G16B16_FLOAT)
+ SF( 75, 75, x, x, x, x, Y, x, x, x, x, x, R16G16B16_UNORM)
+ SF( 75, 75, x, x, x, x, Y, x, x, x, x, x, R16G16B16_SNORM)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R16G16B16_SSCALED)
+ SF( x, x, x, x, x, x, Y, x, x, x, x, x, R16G16B16_USCALED)
+ SF( 70, 70, x, x, x, x, x, x, x, x, x, x, BC6H_SF16)
+ SF( 70, 70, x, x, x, x, x, x, x, x, x, x, BC7_UNORM)
+ SF( 70, 70, x, x, x, x, x, x, x, x, x, x, BC7_UNORM_SRGB)
+ SF( 70, 70, x, x, x, x, x, x, x, x, x, x, BC6H_UF16)
+ SF( x, x, x, x, x, x, x, x, x, x, x, x, PLANAR_420_8)
+ /* The format enum for R8G8B8_UNORM_SRGB first shows up in the HSW PRM but
+ * empirical testing indicates that it doesn't actually sRGB decode and
+ * acts identical to R8G8B8_UNORM. It does work on gen8+.
*/
- SF(80, 80, x, x, 80, x, x, x, x, x, A4B4G4R4_UNORM)
- SF(90, x, x, x, x, x, x, x, x, x, L8A8_UINT)
- SF(90, x, x, x, x, x, x, x, x, x, L8A8_SINT)
- SF( Y, Y, x, 45, Y, Y, Y, x, x, x, R8_UNORM)
- SF( Y, Y, x, x, Y, 60, Y, x, x, x, R8_SNORM)
- SF( Y, x, x, x, Y, x, Y, x, x, x, R8_SINT)
- SF( Y, x, x, x, Y, x, Y, x, x, x, R8_UINT)
- SF( Y, Y, x, Y, Y, Y, x, x, x, x, A8_UNORM)
- SF( Y, Y, x, x, x, x, x, x, x, x, I8_UNORM)
- SF( Y, Y, x, Y, x, x, x, x, x, x, L8_UNORM)
- SF( Y, Y, x, x, x, x, x, x, x, x, P4A4_UNORM_PALETTE0)
- SF( Y, Y, x, x, x, x, x, x, x, x, A4P4_UNORM_PALETTE0)
- SF( x, x, x, x, x, x, Y, x, x, x, R8_SSCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R8_USCALED)
- SF(45, 45, x, x, x, x, x, x, x, x, P8_UNORM_PALETTE0)
- SF(45, 45, x, x, x, x, x, x, x, x, L8_UNORM_SRGB)
- SF(45, 45, x, x, x, x, x, x, x, x, P8_UNORM_PALETTE1)
- SF(45, 45, x, x, x, x, x, x, x, x, P4A4_UNORM_PALETTE1)
- SF(45, 45, x, x, x, x, x, x, x, x, A4P4_UNORM_PALETTE1)
- SF( x, x, x, x, x, x, x, x, x, x, Y8_UNORM)
- SF(90, x, x, x, x, x, x, x, x, x, L8_UINT)
- SF(90, x, x, x, x, x, x, x, x, x, L8_SINT)
- SF(90, x, x, x, x, x, x, x, x, x, I8_UINT)
- SF(90, x, x, x, x, x, x, x, x, x, I8_SINT)
- SF(45, 45, x, x, x, x, x, x, x, x, DXT1_RGB_SRGB)
- SF( Y, Y, x, x, x, x, x, x, x, x, R1_UNORM)
- SF( Y, Y, x, Y, Y, x, x, x, 60, x, YCRCB_NORMAL)
- SF( Y, Y, x, Y, Y, x, x, x, 60, x, YCRCB_SWAPUVY)
- SF(45, 45, x, x, x, x, x, x, x, x, P2_UNORM_PALETTE0)
- SF(45, 45, x, x, x, x, x, x, x, x, P2_UNORM_PALETTE1)
- SF( Y, Y, x, Y, x, x, x, x, x, x, BC1_UNORM)
- SF( Y, Y, x, Y, x, x, x, x, x, x, BC2_UNORM)
- SF( Y, Y, x, Y, x, x, x, x, x, x, BC3_UNORM)
- SF( Y, Y, x, x, x, x, x, x, x, x, BC4_UNORM)
- SF( Y, Y, x, x, x, x, x, x, x, x, BC5_UNORM)
- SF( Y, Y, x, x, x, x, x, x, x, x, BC1_UNORM_SRGB)
- SF( Y, Y, x, x, x, x, x, x, x, x, BC2_UNORM_SRGB)
- SF( Y, Y, x, x, x, x, x, x, x, x, BC3_UNORM_SRGB)
- SF( Y, x, x, x, x, x, x, x, x, x, MONO8)
- SF( Y, Y, x, x, Y, x, x, x, 60, x, YCRCB_SWAPUV)
- SF( Y, Y, x, x, Y, x, x, x, 60, x, YCRCB_SWAPY)
- SF( Y, Y, x, x, x, x, x, x, x, x, DXT1_RGB)
-/* smpl filt shad CK RT AB VB SO color ccs_e */
- SF( Y, Y, x, x, x, x, x, x, x, x, FXT1)
- SF(75, 75, x, x, x, x, Y, x, x, x, R8G8B8_UNORM)
- SF(75, 75, x, x, x, x, Y, x, x, x, R8G8B8_SNORM)
- SF( x, x, x, x, x, x, Y, x, x, x, R8G8B8_SSCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R8G8B8_USCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R64G64B64A64_FLOAT)
- SF( x, x, x, x, x, x, Y, x, x, x, R64G64B64_FLOAT)
- SF( Y, Y, x, x, x, x, x, x, x, x, BC4_SNORM)
- SF( Y, Y, x, x, x, x, x, x, x, x, BC5_SNORM)
- SF(50, 50, x, x, x, x, 60, x, x, x, R16G16B16_FLOAT)
- SF(75, 75, x, x, x, x, Y, x, x, x, R16G16B16_UNORM)
- SF(75, 75, x, x, x, x, Y, x, x, x, R16G16B16_SNORM)
- SF( x, x, x, x, x, x, Y, x, x, x, R16G16B16_SSCALED)
- SF( x, x, x, x, x, x, Y, x, x, x, R16G16B16_USCALED)
- SF(70, 70, x, x, x, x, x, x, x, x, BC6H_SF16)
- SF(70, 70, x, x, x, x, x, x, x, x, BC7_UNORM)
- SF(70, 70, x, x, x, x, x, x, x, x, BC7_UNORM_SRGB)
- SF(70, 70, x, x, x, x, x, x, x, x, BC6H_UF16)
- SF( x, x, x, x, x, x, x, x, x, x, PLANAR_420_8)
- SF(75, 75, x, x, x, x, x, x, x, x, R8G8B8_UNORM_SRGB)
- SF(80, 80, x, x, x, x, x, x, x, x, ETC1_RGB8)
- SF(80, 80, x, x, x, x, x, x, x, x, ETC2_RGB8)
- SF(80, 80, x, x, x, x, x, x, x, x, EAC_R11)
- SF(80, 80, x, x, x, x, x, x, x, x, EAC_RG11)
- SF(80, 80, x, x, x, x, x, x, x, x, EAC_SIGNED_R11)
- SF(80, 80, x, x, x, x, x, x, x, x, EAC_SIGNED_RG11)
- SF(80, 80, x, x, x, x, x, x, x, x, ETC2_SRGB8)
- SF(90, x, x, x, x, x, 75, x, x, x, R16G16B16_UINT)
- SF(90, x, x, x, x, x, 75, x, x, x, R16G16B16_SINT)
- SF( x, x, x, x, x, x, 75, x, x, x, R32_SFIXED)
- SF( x, x, x, x, x, x, 75, x, x, x, R10G10B10A2_SNORM)
- SF( x, x, x, x, x, x, 75, x, x, x, R10G10B10A2_USCALED)
- SF( x, x, x, x, x, x, 75, x, x, x, R10G10B10A2_SSCALED)
- SF( x, x, x, x, x, x, 75, x, x, x, R10G10B10A2_SINT)
- SF( x, x, x, x, x, x, 75, x, x, x, B10G10R10A2_SNORM)
- SF( x, x, x, x, x, x, 75, x, x, x, B10G10R10A2_USCALED)
- SF( x, x, x, x, x, x, 75, x, x, x, B10G10R10A2_SSCALED)
- SF( x, x, x, x, x, x, 75, x, x, x, B10G10R10A2_UINT)
- SF( x, x, x, x, x, x, 75, x, x, x, B10G10R10A2_SINT)
- SF( x, x, x, x, x, x, 80, x, x, x, R64G64B64A64_PASSTHRU)
- SF( x, x, x, x, x, x, 80, x, x, x, R64G64B64_PASSTHRU)
- SF(80, 80, x, x, x, x, x, x, x, x, ETC2_RGB8_PTA)
- SF(80, 80, x, x, x, x, x, x, x, x, ETC2_SRGB8_PTA)
- SF(80, 80, x, x, x, x, x, x, x, x, ETC2_EAC_RGBA8)
- SF(80, 80, x, x, x, x, x, x, x, x, ETC2_EAC_SRGB8_A8)
- SF(90, x, x, x, x, x, 75, x, x, x, R8G8B8_UINT)
- SF(90, x, x, x, x, x, 75, x, x, x, R8G8B8_SINT)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_4X4_FLT16)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_5X4_FLT16)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_5X5_FLT16)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_6X5_FLT16)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_6X6_FLT16)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_8X5_FLT16)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_8X6_FLT16)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_8X8_FLT16)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_10X5_FLT16)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_10X6_FLT16)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_10X8_FLT16)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_10X10_FLT16)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_12X10_FLT16)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_12X12_FLT16)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_4X4_U8SRGB)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_5X4_U8SRGB)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_5X5_U8SRGB)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_6X5_U8SRGB)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_6X6_U8SRGB)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_8X5_U8SRGB)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_8X6_U8SRGB)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_8X8_U8SRGB)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_10X5_U8SRGB)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_10X6_U8SRGB)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_10X8_U8SRGB)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_10X10_U8SRGB)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_12X10_U8SRGB)
- SF(90, 90, x, x, x, x, x, x, x, x, ASTC_LDR_2D_12X12_U8SRGB)
+ SF( 80, 80, x, x, x, x, x, x, x, x, x, x, R8G8B8_UNORM_SRGB)
+ SF( 80, 80, x, x, x, x, x, x, x, x, x, x, ETC1_RGB8)
+ SF( 80, 80, x, x, x, x, x, x, x, x, x, x, ETC2_RGB8)
+ SF( 80, 80, x, x, x, x, x, x, x, x, x, x, EAC_R11)
+ SF( 80, 80, x, x, x, x, x, x, x, x, x, x, EAC_RG11)
+ SF( 80, 80, x, x, x, x, x, x, x, x, x, x, EAC_SIGNED_R11)
+ SF( 80, 80, x, x, x, x, x, x, x, x, x, x, EAC_SIGNED_RG11)
+ SF( 80, 80, x, x, x, x, x, x, x, x, x, x, ETC2_SRGB8)
+ SF( 90, x, x, x, x, x, 75, x, x, x, x, x, R16G16B16_UINT)
+ SF( 90, x, x, x, x, x, 75, x, x, x, x, x, R16G16B16_SINT)
+ SF( x, x, x, x, x, x, 75, x, x, x, x, x, R32_SFIXED)
+ SF( x, x, x, x, x, x, 75, x, x, x, x, x, R10G10B10A2_SNORM)
+ SF( x, x, x, x, x, x, 75, x, x, x, x, x, R10G10B10A2_USCALED)
+ SF( x, x, x, x, x, x, 75, x, x, x, x, x, R10G10B10A2_SSCALED)
+ SF( x, x, x, x, x, x, 75, x, x, x, x, x, R10G10B10A2_SINT)
+ SF( x, x, x, x, x, x, 75, x, x, x, x, x, B10G10R10A2_SNORM)
+ SF( x, x, x, x, x, x, 75, x, x, x, x, x, B10G10R10A2_USCALED)
+ SF( x, x, x, x, x, x, 75, x, x, x, x, x, B10G10R10A2_SSCALED)
+ SF( x, x, x, x, x, x, 75, x, x, x, x, x, B10G10R10A2_UINT)
+ SF( x, x, x, x, x, x, 75, x, x, x, x, x, B10G10R10A2_SINT)
+ SF( x, x, x, x, x, x, 80, x, x, x, x, x, R64G64B64A64_PASSTHRU)
+ SF( x, x, x, x, x, x, 80, x, x, x, x, x, R64G64B64_PASSTHRU)
+ SF( 80, 80, x, x, x, x, x, x, x, x, x, x, ETC2_RGB8_PTA)
+ SF( 80, 80, x, x, x, x, x, x, x, x, x, x, ETC2_SRGB8_PTA)
+ SF( 80, 80, x, x, x, x, x, x, x, x, x, x, ETC2_EAC_RGBA8)
+ SF( 80, 80, x, x, x, x, x, x, x, x, x, x, ETC2_EAC_SRGB8_A8)
+ SF( 90, x, x, x, x, x, 75, x, x, x, x, x, R8G8B8_UINT)
+ SF( 90, x, x, x, x, x, 75, x, x, x, x, x, R8G8B8_SINT)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_4X4_FLT16)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_5X4_FLT16)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_5X5_FLT16)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_6X5_FLT16)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_6X6_FLT16)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_8X5_FLT16)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_8X6_FLT16)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_8X8_FLT16)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_10X5_FLT16)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_10X6_FLT16)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_10X8_FLT16)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_10X10_FLT16)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_12X10_FLT16)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_12X12_FLT16)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_4X4_U8SRGB)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_5X4_U8SRGB)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_5X5_U8SRGB)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_6X5_U8SRGB)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_6X6_U8SRGB)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_8X5_U8SRGB)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_8X6_U8SRGB)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_8X8_U8SRGB)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_10X5_U8SRGB)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_10X6_U8SRGB)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_10X8_U8SRGB)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_10X10_U8SRGB)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_12X10_U8SRGB)
+ SF( 90, 90, x, x, x, x, x, x, x, x, x, x, ASTC_LDR_2D_12X12_U8SRGB)
+ SF(100, 100, x, x, x, x, x, x, x, x, x, x, ASTC_HDR_2D_4X4_FLT16)
+ SF(100, 100, x, x, x, x, x, x, x, x, x, x, ASTC_HDR_2D_5X4_FLT16)
+ SF(100, 100, x, x, x, x, x, x, x, x, x, x, ASTC_HDR_2D_5X5_FLT16)
+ SF(100, 100, x, x, x, x, x, x, x, x, x, x, ASTC_HDR_2D_6X5_FLT16)
+ SF(100, 100, x, x, x, x, x, x, x, x, x, x, ASTC_HDR_2D_6X6_FLT16)
+ SF(100, 100, x, x, x, x, x, x, x, x, x, x, ASTC_HDR_2D_8X5_FLT16)
+ SF(100, 100, x, x, x, x, x, x, x, x, x, x, ASTC_HDR_2D_8X6_FLT16)
+ SF(100, 100, x, x, x, x, x, x, x, x, x, x, ASTC_HDR_2D_8X8_FLT16)
+ SF(100, 100, x, x, x, x, x, x, x, x, x, x, ASTC_HDR_2D_10X5_FLT16)
+ SF(100, 100, x, x, x, x, x, x, x, x, x, x, ASTC_HDR_2D_10X6_FLT16)
+ SF(100, 100, x, x, x, x, x, x, x, x, x, x, ASTC_HDR_2D_10X8_FLT16)
+ SF(100, 100, x, x, x, x, x, x, x, x, x, x, ASTC_HDR_2D_10X10_FLT16)
+ SF(100, 100, x, x, x, x, x, x, x, x, x, x, ASTC_HDR_2D_12X10_FLT16)
+ SF(100, 100, x, x, x, x, x, x, x, x, x, x, ASTC_HDR_2D_12X12_FLT16)
};
#undef x
#undef Y
+
+/**
+ * Translate a gallium pipe_format into the corresponding isl_format.
+ *
+ * Any pipe format without an explicit entry in the table below maps to
+ * ISL_FORMAT_UNSUPPORTED.  Several remappings are deliberate: depth and
+ * stencil formats become their color-typed R* equivalents, and the legacy
+ * I/L/A formats are mapped onto red(-green) formats — see the inline
+ * comments for the reasoning in each group.
+ */
+enum isl_format
+isl_format_for_pipe_format(enum pipe_format pf)
+{
+   static const enum isl_format table[PIPE_FORMAT_COUNT] = {
+      /* GNU "range designator" extension: default every entry to
+       * UNSUPPORTED so missing formats fail cleanly instead of mapping
+       * to ISL format 0.
+       */
+      [0 ... PIPE_FORMAT_COUNT-1] = ISL_FORMAT_UNSUPPORTED,
+
+      [PIPE_FORMAT_B8G8R8A8_UNORM] = ISL_FORMAT_B8G8R8A8_UNORM,
+      [PIPE_FORMAT_B8G8R8X8_UNORM] = ISL_FORMAT_B8G8R8X8_UNORM,
+      [PIPE_FORMAT_B5G5R5A1_UNORM] = ISL_FORMAT_B5G5R5A1_UNORM,
+      [PIPE_FORMAT_B4G4R4A4_UNORM] = ISL_FORMAT_B4G4R4A4_UNORM,
+      [PIPE_FORMAT_B5G6R5_UNORM] = ISL_FORMAT_B5G6R5_UNORM,
+      [PIPE_FORMAT_R10G10B10A2_UNORM] = ISL_FORMAT_R10G10B10A2_UNORM,
+
+      [PIPE_FORMAT_Z16_UNORM] = ISL_FORMAT_R16_UNORM,
+      [PIPE_FORMAT_Z32_UNORM] = ISL_FORMAT_R32_UNORM,
+      [PIPE_FORMAT_Z32_FLOAT] = ISL_FORMAT_R32_FLOAT,
+
+      /* We translate the combined depth/stencil formats to depth only here */
+      [PIPE_FORMAT_Z24_UNORM_S8_UINT] = ISL_FORMAT_R24_UNORM_X8_TYPELESS,
+      [PIPE_FORMAT_Z24X8_UNORM] = ISL_FORMAT_R24_UNORM_X8_TYPELESS,
+      [PIPE_FORMAT_Z32_FLOAT_S8X24_UINT] = ISL_FORMAT_R32_FLOAT,
+
+      [PIPE_FORMAT_S8_UINT] = ISL_FORMAT_R8_UINT,
+      [PIPE_FORMAT_X24S8_UINT] = ISL_FORMAT_R8_UINT,
+      [PIPE_FORMAT_X32_S8X24_UINT] = ISL_FORMAT_R8_UINT,
+
+      [PIPE_FORMAT_R64_FLOAT] = ISL_FORMAT_R64_FLOAT,
+      [PIPE_FORMAT_R64G64_FLOAT] = ISL_FORMAT_R64G64_FLOAT,
+      [PIPE_FORMAT_R64G64B64_FLOAT] = ISL_FORMAT_R64G64B64_FLOAT,
+      [PIPE_FORMAT_R64G64B64A64_FLOAT] = ISL_FORMAT_R64G64B64A64_FLOAT,
+      [PIPE_FORMAT_R32_FLOAT] = ISL_FORMAT_R32_FLOAT,
+      [PIPE_FORMAT_R32G32_FLOAT] = ISL_FORMAT_R32G32_FLOAT,
+      [PIPE_FORMAT_R32G32B32_FLOAT] = ISL_FORMAT_R32G32B32_FLOAT,
+      [PIPE_FORMAT_R32G32B32A32_FLOAT] = ISL_FORMAT_R32G32B32A32_FLOAT,
+      [PIPE_FORMAT_R32_UNORM] = ISL_FORMAT_R32_UNORM,
+      [PIPE_FORMAT_R32G32_UNORM] = ISL_FORMAT_R32G32_UNORM,
+      [PIPE_FORMAT_R32G32B32_UNORM] = ISL_FORMAT_R32G32B32_UNORM,
+      [PIPE_FORMAT_R32G32B32A32_UNORM] = ISL_FORMAT_R32G32B32A32_UNORM,
+      [PIPE_FORMAT_R32_USCALED] = ISL_FORMAT_R32_USCALED,
+      [PIPE_FORMAT_R32G32_USCALED] = ISL_FORMAT_R32G32_USCALED,
+      [PIPE_FORMAT_R32G32B32_USCALED] = ISL_FORMAT_R32G32B32_USCALED,
+      [PIPE_FORMAT_R32G32B32A32_USCALED] = ISL_FORMAT_R32G32B32A32_USCALED,
+      [PIPE_FORMAT_R32_SNORM] = ISL_FORMAT_R32_SNORM,
+      [PIPE_FORMAT_R32G32_SNORM] = ISL_FORMAT_R32G32_SNORM,
+      [PIPE_FORMAT_R32G32B32_SNORM] = ISL_FORMAT_R32G32B32_SNORM,
+      [PIPE_FORMAT_R32G32B32A32_SNORM] = ISL_FORMAT_R32G32B32A32_SNORM,
+      [PIPE_FORMAT_R32_SSCALED] = ISL_FORMAT_R32_SSCALED,
+      [PIPE_FORMAT_R32G32_SSCALED] = ISL_FORMAT_R32G32_SSCALED,
+      [PIPE_FORMAT_R32G32B32_SSCALED] = ISL_FORMAT_R32G32B32_SSCALED,
+      [PIPE_FORMAT_R32G32B32A32_SSCALED] = ISL_FORMAT_R32G32B32A32_SSCALED,
+      [PIPE_FORMAT_R16_UNORM] = ISL_FORMAT_R16_UNORM,
+      [PIPE_FORMAT_R16G16_UNORM] = ISL_FORMAT_R16G16_UNORM,
+      [PIPE_FORMAT_R16G16B16_UNORM] = ISL_FORMAT_R16G16B16_UNORM,
+      [PIPE_FORMAT_R16G16B16A16_UNORM] = ISL_FORMAT_R16G16B16A16_UNORM,
+      [PIPE_FORMAT_R16_USCALED] = ISL_FORMAT_R16_USCALED,
+      [PIPE_FORMAT_R16G16_USCALED] = ISL_FORMAT_R16G16_USCALED,
+      [PIPE_FORMAT_R16G16B16_USCALED] = ISL_FORMAT_R16G16B16_USCALED,
+      [PIPE_FORMAT_R16G16B16A16_USCALED] = ISL_FORMAT_R16G16B16A16_USCALED,
+      [PIPE_FORMAT_R16_SNORM] = ISL_FORMAT_R16_SNORM,
+      [PIPE_FORMAT_R16G16_SNORM] = ISL_FORMAT_R16G16_SNORM,
+      [PIPE_FORMAT_R16G16B16_SNORM] = ISL_FORMAT_R16G16B16_SNORM,
+      [PIPE_FORMAT_R16G16B16A16_SNORM] = ISL_FORMAT_R16G16B16A16_SNORM,
+      [PIPE_FORMAT_R16_SSCALED] = ISL_FORMAT_R16_SSCALED,
+      [PIPE_FORMAT_R16G16_SSCALED] = ISL_FORMAT_R16G16_SSCALED,
+      [PIPE_FORMAT_R16G16B16_SSCALED] = ISL_FORMAT_R16G16B16_SSCALED,
+      [PIPE_FORMAT_R16G16B16A16_SSCALED] = ISL_FORMAT_R16G16B16A16_SSCALED,
+      [PIPE_FORMAT_R8_UNORM] = ISL_FORMAT_R8_UNORM,
+      [PIPE_FORMAT_R8G8_UNORM] = ISL_FORMAT_R8G8_UNORM,
+      [PIPE_FORMAT_R8G8B8_UNORM] = ISL_FORMAT_R8G8B8_UNORM,
+      [PIPE_FORMAT_R8G8B8A8_UNORM] = ISL_FORMAT_R8G8B8A8_UNORM,
+      [PIPE_FORMAT_R8_USCALED] = ISL_FORMAT_R8_USCALED,
+      [PIPE_FORMAT_R8G8_USCALED] = ISL_FORMAT_R8G8_USCALED,
+      [PIPE_FORMAT_R8G8B8_USCALED] = ISL_FORMAT_R8G8B8_USCALED,
+      [PIPE_FORMAT_R8G8B8A8_USCALED] = ISL_FORMAT_R8G8B8A8_USCALED,
+      [PIPE_FORMAT_R8_SNORM] = ISL_FORMAT_R8_SNORM,
+      [PIPE_FORMAT_R8G8_SNORM] = ISL_FORMAT_R8G8_SNORM,
+      [PIPE_FORMAT_R8G8B8_SNORM] = ISL_FORMAT_R8G8B8_SNORM,
+      [PIPE_FORMAT_R8G8B8A8_SNORM] = ISL_FORMAT_R8G8B8A8_SNORM,
+      [PIPE_FORMAT_R8_SSCALED] = ISL_FORMAT_R8_SSCALED,
+      [PIPE_FORMAT_R8G8_SSCALED] = ISL_FORMAT_R8G8_SSCALED,
+      [PIPE_FORMAT_R8G8B8_SSCALED] = ISL_FORMAT_R8G8B8_SSCALED,
+      [PIPE_FORMAT_R8G8B8A8_SSCALED] = ISL_FORMAT_R8G8B8A8_SSCALED,
+      [PIPE_FORMAT_R32_FIXED] = ISL_FORMAT_R32_SFIXED,
+      [PIPE_FORMAT_R32G32_FIXED] = ISL_FORMAT_R32G32_SFIXED,
+      [PIPE_FORMAT_R32G32B32_FIXED] = ISL_FORMAT_R32G32B32_SFIXED,
+      [PIPE_FORMAT_R32G32B32A32_FIXED] = ISL_FORMAT_R32G32B32A32_SFIXED,
+      [PIPE_FORMAT_R16_FLOAT] = ISL_FORMAT_R16_FLOAT,
+      [PIPE_FORMAT_R16G16_FLOAT] = ISL_FORMAT_R16G16_FLOAT,
+      [PIPE_FORMAT_R16G16B16_FLOAT] = ISL_FORMAT_R16G16B16_FLOAT,
+      [PIPE_FORMAT_R16G16B16A16_FLOAT] = ISL_FORMAT_R16G16B16A16_FLOAT,
+
+      [PIPE_FORMAT_R8G8B8_SRGB] = ISL_FORMAT_R8G8B8_UNORM_SRGB,
+      [PIPE_FORMAT_B8G8R8A8_SRGB] = ISL_FORMAT_B8G8R8A8_UNORM_SRGB,
+      [PIPE_FORMAT_B8G8R8X8_SRGB] = ISL_FORMAT_B8G8R8X8_UNORM_SRGB,
+      [PIPE_FORMAT_R8G8B8A8_SRGB] = ISL_FORMAT_R8G8B8A8_UNORM_SRGB,
+
+      [PIPE_FORMAT_DXT1_RGB] = ISL_FORMAT_BC1_UNORM,
+      [PIPE_FORMAT_DXT1_RGBA] = ISL_FORMAT_BC1_UNORM,
+      [PIPE_FORMAT_DXT3_RGBA] = ISL_FORMAT_BC2_UNORM,
+      [PIPE_FORMAT_DXT5_RGBA] = ISL_FORMAT_BC3_UNORM,
+
+      [PIPE_FORMAT_DXT1_SRGB] = ISL_FORMAT_BC1_UNORM_SRGB,
+      [PIPE_FORMAT_DXT1_SRGBA] = ISL_FORMAT_BC1_UNORM_SRGB,
+      [PIPE_FORMAT_DXT3_SRGBA] = ISL_FORMAT_BC2_UNORM_SRGB,
+      [PIPE_FORMAT_DXT5_SRGBA] = ISL_FORMAT_BC3_UNORM_SRGB,
+
+      [PIPE_FORMAT_RGTC1_UNORM] = ISL_FORMAT_BC4_UNORM,
+      [PIPE_FORMAT_RGTC1_SNORM] = ISL_FORMAT_BC4_SNORM,
+      [PIPE_FORMAT_RGTC2_UNORM] = ISL_FORMAT_BC5_UNORM,
+      [PIPE_FORMAT_RGTC2_SNORM] = ISL_FORMAT_BC5_SNORM,
+
+      [PIPE_FORMAT_R10G10B10A2_USCALED] = ISL_FORMAT_R10G10B10A2_USCALED,
+      [PIPE_FORMAT_R11G11B10_FLOAT] = ISL_FORMAT_R11G11B10_FLOAT,
+      [PIPE_FORMAT_R9G9B9E5_FLOAT] = ISL_FORMAT_R9G9B9E5_SHAREDEXP,
+      [PIPE_FORMAT_R1_UNORM] = ISL_FORMAT_R1_UNORM,
+      [PIPE_FORMAT_R10G10B10X2_USCALED] = ISL_FORMAT_R10G10B10X2_USCALED,
+      [PIPE_FORMAT_B10G10R10A2_UNORM] = ISL_FORMAT_B10G10R10A2_UNORM,
+      [PIPE_FORMAT_R8G8B8X8_UNORM] = ISL_FORMAT_R8G8B8X8_UNORM,
+
+      /* Just use red formats for these - they're actually renderable,
+       * and faster to sample than the legacy L/I/A/LA formats.
+       */
+      [PIPE_FORMAT_I8_UNORM] = ISL_FORMAT_R8_UNORM,
+      [PIPE_FORMAT_I8_UINT] = ISL_FORMAT_R8_UINT,
+      [PIPE_FORMAT_I8_SINT] = ISL_FORMAT_R8_SINT,
+      [PIPE_FORMAT_I8_SNORM] = ISL_FORMAT_R8_SNORM,
+      [PIPE_FORMAT_I16_UINT] = ISL_FORMAT_R16_UINT,
+      [PIPE_FORMAT_I16_UNORM] = ISL_FORMAT_R16_UNORM,
+      [PIPE_FORMAT_I16_SINT] = ISL_FORMAT_R16_SINT,
+      [PIPE_FORMAT_I16_SNORM] = ISL_FORMAT_R16_SNORM,
+      [PIPE_FORMAT_I16_FLOAT] = ISL_FORMAT_R16_FLOAT,
+      [PIPE_FORMAT_I32_UINT] = ISL_FORMAT_R32_UINT,
+      [PIPE_FORMAT_I32_SINT] = ISL_FORMAT_R32_SINT,
+      [PIPE_FORMAT_I32_FLOAT] = ISL_FORMAT_R32_FLOAT,
+
+      [PIPE_FORMAT_L8_UINT] = ISL_FORMAT_R8_UINT,
+      [PIPE_FORMAT_L8_UNORM] = ISL_FORMAT_R8_UNORM,
+      [PIPE_FORMAT_L8_SINT] = ISL_FORMAT_R8_SINT,
+      [PIPE_FORMAT_L8_SNORM] = ISL_FORMAT_R8_SNORM,
+      [PIPE_FORMAT_L16_UINT] = ISL_FORMAT_R16_UINT,
+      [PIPE_FORMAT_L16_UNORM] = ISL_FORMAT_R16_UNORM,
+      [PIPE_FORMAT_L16_SINT] = ISL_FORMAT_R16_SINT,
+      [PIPE_FORMAT_L16_SNORM] = ISL_FORMAT_R16_SNORM,
+      [PIPE_FORMAT_L16_FLOAT] = ISL_FORMAT_R16_FLOAT,
+      [PIPE_FORMAT_L32_UINT] = ISL_FORMAT_R32_UINT,
+      [PIPE_FORMAT_L32_SINT] = ISL_FORMAT_R32_SINT,
+      [PIPE_FORMAT_L32_FLOAT] = ISL_FORMAT_R32_FLOAT,
+
+      /* We also map alpha and luminance-alpha formats to red as well,
+       * though most of these (other than A8_UNORM) will be non-renderable.
+       */
+      [PIPE_FORMAT_A8_UINT] = ISL_FORMAT_R8_UINT,
+      [PIPE_FORMAT_A8_UNORM] = ISL_FORMAT_R8_UNORM,
+      [PIPE_FORMAT_A8_SINT] = ISL_FORMAT_R8_SINT,
+      [PIPE_FORMAT_A8_SNORM] = ISL_FORMAT_R8_SNORM,
+      [PIPE_FORMAT_A16_UINT] = ISL_FORMAT_R16_UINT,
+      [PIPE_FORMAT_A16_UNORM] = ISL_FORMAT_R16_UNORM,
+      [PIPE_FORMAT_A16_SINT] = ISL_FORMAT_R16_SINT,
+      [PIPE_FORMAT_A16_SNORM] = ISL_FORMAT_R16_SNORM,
+      [PIPE_FORMAT_A16_FLOAT] = ISL_FORMAT_R16_FLOAT,
+      [PIPE_FORMAT_A32_UINT] = ISL_FORMAT_R32_UINT,
+      [PIPE_FORMAT_A32_SINT] = ISL_FORMAT_R32_SINT,
+      [PIPE_FORMAT_A32_FLOAT] = ISL_FORMAT_R32_FLOAT,
+
+      [PIPE_FORMAT_L8A8_UINT] = ISL_FORMAT_R8G8_UINT,
+      [PIPE_FORMAT_L8A8_UNORM] = ISL_FORMAT_R8G8_UNORM,
+      [PIPE_FORMAT_L8A8_SINT] = ISL_FORMAT_R8G8_SINT,
+      [PIPE_FORMAT_L8A8_SNORM] = ISL_FORMAT_R8G8_SNORM,
+      [PIPE_FORMAT_L16A16_UINT] = ISL_FORMAT_R16G16_UINT,
+      [PIPE_FORMAT_L16A16_UNORM] = ISL_FORMAT_R16G16_UNORM,
+      [PIPE_FORMAT_L16A16_SINT] = ISL_FORMAT_R16G16_SINT,
+      [PIPE_FORMAT_L16A16_SNORM] = ISL_FORMAT_R16G16_SNORM,
+      [PIPE_FORMAT_L16A16_FLOAT] = ISL_FORMAT_R16G16_FLOAT,
+      [PIPE_FORMAT_L32A32_UINT] = ISL_FORMAT_R32G32_UINT,
+      [PIPE_FORMAT_L32A32_SINT] = ISL_FORMAT_R32G32_SINT,
+      [PIPE_FORMAT_L32A32_FLOAT] = ISL_FORMAT_R32G32_FLOAT,
+
+      /* Sadly, we have to use luminance[-alpha] formats for sRGB decoding. */
+      [PIPE_FORMAT_R8_SRGB] = ISL_FORMAT_L8_UNORM_SRGB,
+      [PIPE_FORMAT_L8_SRGB] = ISL_FORMAT_L8_UNORM_SRGB,
+      [PIPE_FORMAT_L8A8_SRGB] = ISL_FORMAT_L8A8_UNORM_SRGB,
+
+      [PIPE_FORMAT_R10G10B10A2_SSCALED] = ISL_FORMAT_R10G10B10A2_SSCALED,
+      [PIPE_FORMAT_R10G10B10A2_SNORM] = ISL_FORMAT_R10G10B10A2_SNORM,
+
+      [PIPE_FORMAT_B10G10R10A2_USCALED] = ISL_FORMAT_B10G10R10A2_USCALED,
+      [PIPE_FORMAT_B10G10R10A2_SSCALED] = ISL_FORMAT_B10G10R10A2_SSCALED,
+      [PIPE_FORMAT_B10G10R10A2_SNORM] = ISL_FORMAT_B10G10R10A2_SNORM,
+
+      [PIPE_FORMAT_R8_UINT] = ISL_FORMAT_R8_UINT,
+      [PIPE_FORMAT_R8G8_UINT] = ISL_FORMAT_R8G8_UINT,
+      [PIPE_FORMAT_R8G8B8_UINT] = ISL_FORMAT_R8G8B8_UINT,
+      [PIPE_FORMAT_R8G8B8A8_UINT] = ISL_FORMAT_R8G8B8A8_UINT,
+
+      [PIPE_FORMAT_R8_SINT] = ISL_FORMAT_R8_SINT,
+      [PIPE_FORMAT_R8G8_SINT] = ISL_FORMAT_R8G8_SINT,
+      [PIPE_FORMAT_R8G8B8_SINT] = ISL_FORMAT_R8G8B8_SINT,
+      [PIPE_FORMAT_R8G8B8A8_SINT] = ISL_FORMAT_R8G8B8A8_SINT,
+
+      [PIPE_FORMAT_R16_UINT] = ISL_FORMAT_R16_UINT,
+      [PIPE_FORMAT_R16G16_UINT] = ISL_FORMAT_R16G16_UINT,
+      [PIPE_FORMAT_R16G16B16_UINT] = ISL_FORMAT_R16G16B16_UINT,
+      [PIPE_FORMAT_R16G16B16A16_UINT] = ISL_FORMAT_R16G16B16A16_UINT,
+
+      [PIPE_FORMAT_R16_SINT] = ISL_FORMAT_R16_SINT,
+      [PIPE_FORMAT_R16G16_SINT] = ISL_FORMAT_R16G16_SINT,
+      [PIPE_FORMAT_R16G16B16_SINT] = ISL_FORMAT_R16G16B16_SINT,
+      [PIPE_FORMAT_R16G16B16A16_SINT] = ISL_FORMAT_R16G16B16A16_SINT,
+
+      [PIPE_FORMAT_R32_UINT] = ISL_FORMAT_R32_UINT,
+      [PIPE_FORMAT_R32G32_UINT] = ISL_FORMAT_R32G32_UINT,
+      [PIPE_FORMAT_R32G32B32_UINT] = ISL_FORMAT_R32G32B32_UINT,
+      [PIPE_FORMAT_R32G32B32A32_UINT] = ISL_FORMAT_R32G32B32A32_UINT,
+
+      [PIPE_FORMAT_R32_SINT] = ISL_FORMAT_R32_SINT,
+      [PIPE_FORMAT_R32G32_SINT] = ISL_FORMAT_R32G32_SINT,
+      [PIPE_FORMAT_R32G32B32_SINT] = ISL_FORMAT_R32G32B32_SINT,
+      [PIPE_FORMAT_R32G32B32A32_SINT] = ISL_FORMAT_R32G32B32A32_SINT,
+
+      [PIPE_FORMAT_B10G10R10A2_UINT] = ISL_FORMAT_B10G10R10A2_UINT,
+
+      [PIPE_FORMAT_ETC1_RGB8] = ISL_FORMAT_ETC1_RGB8,
+
+      [PIPE_FORMAT_R8G8B8X8_SRGB] = ISL_FORMAT_R8G8B8X8_UNORM_SRGB,
+      [PIPE_FORMAT_B10G10R10X2_UNORM] = ISL_FORMAT_B10G10R10X2_UNORM,
+      [PIPE_FORMAT_R16G16B16X16_UNORM] = ISL_FORMAT_R16G16B16X16_UNORM,
+      [PIPE_FORMAT_R16G16B16X16_FLOAT] = ISL_FORMAT_R16G16B16X16_FLOAT,
+      [PIPE_FORMAT_R32G32B32X32_FLOAT] = ISL_FORMAT_R32G32B32X32_FLOAT,
+
+      [PIPE_FORMAT_R10G10B10A2_UINT] = ISL_FORMAT_R10G10B10A2_UINT,
+
+      [PIPE_FORMAT_B5G6R5_SRGB] = ISL_FORMAT_B5G6R5_UNORM_SRGB,
+
+      [PIPE_FORMAT_BPTC_RGBA_UNORM] = ISL_FORMAT_BC7_UNORM,
+      [PIPE_FORMAT_BPTC_SRGBA] = ISL_FORMAT_BC7_UNORM_SRGB,
+      [PIPE_FORMAT_BPTC_RGB_FLOAT] = ISL_FORMAT_BC6H_SF16,
+      [PIPE_FORMAT_BPTC_RGB_UFLOAT] = ISL_FORMAT_BC6H_UF16,
+
+      [PIPE_FORMAT_ETC2_RGB8] = ISL_FORMAT_ETC2_RGB8,
+      [PIPE_FORMAT_ETC2_SRGB8] = ISL_FORMAT_ETC2_SRGB8,
+      [PIPE_FORMAT_ETC2_RGB8A1] = ISL_FORMAT_ETC2_RGB8_PTA,
+      [PIPE_FORMAT_ETC2_SRGB8A1] = ISL_FORMAT_ETC2_SRGB8_PTA,
+      [PIPE_FORMAT_ETC2_RGBA8] = ISL_FORMAT_ETC2_EAC_RGBA8,
+      [PIPE_FORMAT_ETC2_SRGBA8] = ISL_FORMAT_ETC2_EAC_SRGB8_A8,
+      [PIPE_FORMAT_ETC2_R11_UNORM] = ISL_FORMAT_EAC_R11,
+      [PIPE_FORMAT_ETC2_R11_SNORM] = ISL_FORMAT_EAC_SIGNED_R11,
+      [PIPE_FORMAT_ETC2_RG11_UNORM] = ISL_FORMAT_EAC_RG11,
+      [PIPE_FORMAT_ETC2_RG11_SNORM] = ISL_FORMAT_EAC_SIGNED_RG11,
+
+      [PIPE_FORMAT_FXT1_RGB] = ISL_FORMAT_FXT1,
+      [PIPE_FORMAT_FXT1_RGBA] = ISL_FORMAT_FXT1,
+
+      [PIPE_FORMAT_ASTC_4x4] = ISL_FORMAT_ASTC_LDR_2D_4X4_FLT16,
+      [PIPE_FORMAT_ASTC_5x4] = ISL_FORMAT_ASTC_LDR_2D_5X4_FLT16,
+      [PIPE_FORMAT_ASTC_5x5] = ISL_FORMAT_ASTC_LDR_2D_5X5_FLT16,
+      [PIPE_FORMAT_ASTC_6x5] = ISL_FORMAT_ASTC_LDR_2D_6X5_FLT16,
+      [PIPE_FORMAT_ASTC_6x6] = ISL_FORMAT_ASTC_LDR_2D_6X6_FLT16,
+      [PIPE_FORMAT_ASTC_8x5] = ISL_FORMAT_ASTC_LDR_2D_8X5_FLT16,
+      [PIPE_FORMAT_ASTC_8x6] = ISL_FORMAT_ASTC_LDR_2D_8X6_FLT16,
+      [PIPE_FORMAT_ASTC_8x8] = ISL_FORMAT_ASTC_LDR_2D_8X8_FLT16,
+      [PIPE_FORMAT_ASTC_10x5] = ISL_FORMAT_ASTC_LDR_2D_10X5_FLT16,
+      [PIPE_FORMAT_ASTC_10x6] = ISL_FORMAT_ASTC_LDR_2D_10X6_FLT16,
+      [PIPE_FORMAT_ASTC_10x8] = ISL_FORMAT_ASTC_LDR_2D_10X8_FLT16,
+      [PIPE_FORMAT_ASTC_10x10] = ISL_FORMAT_ASTC_LDR_2D_10X10_FLT16,
+      [PIPE_FORMAT_ASTC_12x10] = ISL_FORMAT_ASTC_LDR_2D_12X10_FLT16,
+      [PIPE_FORMAT_ASTC_12x12] = ISL_FORMAT_ASTC_LDR_2D_12X12_FLT16,
+
+      [PIPE_FORMAT_ASTC_4x4_SRGB] = ISL_FORMAT_ASTC_LDR_2D_4X4_U8SRGB,
+      [PIPE_FORMAT_ASTC_5x4_SRGB] = ISL_FORMAT_ASTC_LDR_2D_5X4_U8SRGB,
+      [PIPE_FORMAT_ASTC_5x5_SRGB] = ISL_FORMAT_ASTC_LDR_2D_5X5_U8SRGB,
+      [PIPE_FORMAT_ASTC_6x5_SRGB] = ISL_FORMAT_ASTC_LDR_2D_6X5_U8SRGB,
+      [PIPE_FORMAT_ASTC_6x6_SRGB] = ISL_FORMAT_ASTC_LDR_2D_6X6_U8SRGB,
+      [PIPE_FORMAT_ASTC_8x5_SRGB] = ISL_FORMAT_ASTC_LDR_2D_8X5_U8SRGB,
+      [PIPE_FORMAT_ASTC_8x6_SRGB] = ISL_FORMAT_ASTC_LDR_2D_8X6_U8SRGB,
+      [PIPE_FORMAT_ASTC_8x8_SRGB] = ISL_FORMAT_ASTC_LDR_2D_8X8_U8SRGB,
+      [PIPE_FORMAT_ASTC_10x5_SRGB] = ISL_FORMAT_ASTC_LDR_2D_10X5_U8SRGB,
+      [PIPE_FORMAT_ASTC_10x6_SRGB] = ISL_FORMAT_ASTC_LDR_2D_10X6_U8SRGB,
+      [PIPE_FORMAT_ASTC_10x8_SRGB] = ISL_FORMAT_ASTC_LDR_2D_10X8_U8SRGB,
+      [PIPE_FORMAT_ASTC_10x10_SRGB] = ISL_FORMAT_ASTC_LDR_2D_10X10_U8SRGB,
+      [PIPE_FORMAT_ASTC_12x10_SRGB] = ISL_FORMAT_ASTC_LDR_2D_12X10_U8SRGB,
+      [PIPE_FORMAT_ASTC_12x12_SRGB] = ISL_FORMAT_ASTC_LDR_2D_12X12_U8SRGB,
+
+      [PIPE_FORMAT_A1B5G5R5_UNORM] = ISL_FORMAT_A1B5G5R5_UNORM,
+
+      /* We support these so that we know the API expects no alpha channel.
+       * Otherwise, the state tracker would just give us a format with alpha
+       * and we wouldn't know to override the swizzle to 1.
+       */
+      [PIPE_FORMAT_R16G16B16X16_UINT] = ISL_FORMAT_R16G16B16A16_UINT,
+      [PIPE_FORMAT_R16G16B16X16_SINT] = ISL_FORMAT_R16G16B16A16_SINT,
+      [PIPE_FORMAT_R32G32B32X32_UINT] = ISL_FORMAT_R32G32B32A32_UINT,
+      [PIPE_FORMAT_R32G32B32X32_SINT] = ISL_FORMAT_R32G32B32A32_SINT,
+      [PIPE_FORMAT_R10G10B10X2_SNORM] = ISL_FORMAT_R10G10B10A2_SNORM,
+   };
+   /* An out-of-range enum would index past the end of the table. */
+   assert(pf < PIPE_FORMAT_COUNT);
+   return table[pf];
+}
+
+/* Encode the hardware generation as gen*10, plus 5 for the "half gens"
+ * (G4x and Haswell), so callers can compare directly against the
+ * minimum-gen columns stored in format_info[].
+ */
static unsigned
format_gen(const struct gen_device_info *devinfo)
{
   return devinfo->gen * 10 + (devinfo->is_g4x || devinfo->is_haswell) * 5;
}
+/**
+ * Returns true if @format has an entry in format_info[] at all.
+ * ISL_FORMAT_UNSUPPORTED and out-of-range values are invalid queries;
+ * formats past the end of the table simply do not exist.
+ */
+static bool
+format_info_exists(enum isl_format format)
+{
+   assert(format != ISL_FORMAT_UNSUPPORTED);
+   assert(format < ISL_NUM_FORMATS);
+   return format < ARRAY_SIZE(format_info) && format_info[format].exists;
+}
+
bool
isl_format_supports_rendering(const struct gen_device_info *devinfo,
enum isl_format format)
{
- if (!format_info[format].exists)
+ if (!format_info_exists(format))
return false;
return format_gen(devinfo) >= format_info[format].render_target;
isl_format_supports_alpha_blending(const struct gen_device_info *devinfo,
enum isl_format format)
{
- if (!format_info[format].exists)
+ if (!format_info_exists(format))
return false;
return format_gen(devinfo) >= format_info[format].alpha_blend;
isl_format_supports_sampling(const struct gen_device_info *devinfo,
enum isl_format format)
{
- if (!format_info[format].exists)
+ if (!format_info_exists(format))
return false;
if (devinfo->is_baytrail) {
return true;
} else if (devinfo->is_cherryview) {
const struct isl_format_layout *fmtl = isl_format_get_layout(format);
- /* Support for ASTC exists on Cherry View even though big-core
+ /* Support for ASTC LDR exists on Cherry View even though big-core
* GPUs didn't get it until Skylake.
*/
+ if (fmtl->txc == ISL_TXC_ASTC)
+ return format < ISL_FORMAT_ASTC_HDR_2D_4X4_FLT16;
+ } else if (gen_device_info_is_9lp(devinfo)) {
+ const struct isl_format_layout *fmtl = isl_format_get_layout(format);
+ /* Support for ASTC HDR exists on Broxton even though big-core
+ * GPUs didn't get it until Cannonlake.
+ */
if (fmtl->txc == ISL_TXC_ASTC)
return true;
}
isl_format_supports_filtering(const struct gen_device_info *devinfo,
enum isl_format format)
{
- if (!format_info[format].exists)
+ if (!format_info_exists(format))
return false;
if (devinfo->is_baytrail) {
return true;
} else if (devinfo->is_cherryview) {
const struct isl_format_layout *fmtl = isl_format_get_layout(format);
- /* Support for ASTC exists on Cherry View even though big-core
+ /* Support for ASTC LDR exists on Cherry View even though big-core
* GPUs didn't get it until Skylake.
*/
+ if (fmtl->txc == ISL_TXC_ASTC)
+ return format < ISL_FORMAT_ASTC_HDR_2D_4X4_FLT16;
+ } else if (gen_device_info_is_9lp(devinfo)) {
+ const struct isl_format_layout *fmtl = isl_format_get_layout(format);
+ /* Support for ASTC HDR exists on Broxton even though big-core
+ * GPUs didn't get it until Cannonlake.
+ */
if (fmtl->txc == ISL_TXC_ASTC)
return true;
}
isl_format_supports_vertex_fetch(const struct gen_device_info *devinfo,
enum isl_format format)
{
- if (!format_info[format].exists)
+ if (!format_info_exists(format))
return false;
/* For vertex fetch, Bay Trail supports the same set of formats as Haswell
return format_gen(devinfo) >= format_info[format].input_vb;
}
+/**
+ * Returns true if the given format can support typed writes.
+ *
+ * The typed_write column in format_info[] stores the first hardware
+ * generation (in format_gen() encoding) that supports the format.
+ */
+bool
+isl_format_supports_typed_writes(const struct gen_device_info *devinfo,
+                                 enum isl_format format)
+{
+   if (!format_info_exists(format))
+      return false;
+
+   return format_gen(devinfo) >= format_info[format].typed_write;
+}
+
+
+/**
+ * Returns true if the given format can support typed reads with format
+ * conversion fully handled by hardware. On Sky Lake, all formats which are
+ * supported for typed writes also support typed reads but some of them return
+ * the raw image data and don't provide format conversion.
+ *
+ * For anyone looking to find this data in the PRM, the easiest way to find
+ * format tables is to search for R11G11B10. There are only a few
+ * occurrences.
+ *
+ * The typed_read column in format_info[] stores the first hardware
+ * generation (in format_gen() encoding) with full conversion support.
+ */
+bool
+isl_format_supports_typed_reads(const struct gen_device_info *devinfo,
+                                enum isl_format format)
+{
+   if (!format_info_exists(format))
+      return false;
+
+   return format_gen(devinfo) >= format_info[format].typed_read;
+}
+
+/**
+ * Returns true if the given format can support single-sample fast clears.
+ * This function only checks the format.  In order to determine if a surface
+ * supports CCS_D, several other factors need to be considered such as tiling
+ * and sample count.  See isl_surf_get_ccs_surf for details.
+ */
bool
-isl_format_supports_lossless_compression(const struct gen_device_info *devinfo,
-                                         enum isl_format format)
+isl_format_supports_ccs_d(const struct gen_device_info *devinfo,
+                          enum isl_format format)
{
-   if (!format_info[format].exists)
+   /* Clear-only compression was first added on Ivy Bridge and was last
+    * implemented on Ice lake (see BSpec: 43862).
+    */
+   if (devinfo->gen < 7 || devinfo->gen > 11)
+      return false;
+
+   /* CCS_D requires the format to be renderable. */
+   if (!isl_format_supports_rendering(devinfo, format))
      return false;
-   return format_gen(devinfo) >= format_info[format].lossless_compression;
+   const struct isl_format_layout *fmtl = isl_format_get_layout(format);
+
+   /* Only 32, 64, and 128 bits-per-block color formats qualify. */
+   return fmtl->bpb == 32 || fmtl->bpb == 64 || fmtl->bpb == 128;
+}
+
+/**
+ * Returns true if the given format can support single-sample color
+ * compression.  This function only checks the format.  In order to determine
+ * if a surface supports CCS_E, several other factors need to be considered
+ * such as tiling and sample count.  See isl_surf_get_ccs_surf for details.
+ */
+bool
+isl_format_supports_ccs_e(const struct gen_device_info *devinfo,
+                          enum isl_format format)
+{
+   if (!format_info_exists(format))
+      return false;
+
+   /* For simplicity, only report that a format supports CCS_E if blorp can
+    * perform bit-for-bit copies with an image of that format while compressed.
+    * Unfortunately, R11G11B10_FLOAT is in a compression class of its own and
+    * there is no way to copy to/from it which doesn't potentially lose data
+    * if one of the bit patterns being copied isn't valid finite floats.
+    */
+   if (format == ISL_FORMAT_R11G11B10_FLOAT)
+      return false;
+
+   return format_gen(devinfo) >= format_info[format].ccs_e;
}
bool
* - any compressed texture format (BC*)
* - any YCRCB* format
*
- * The restriction on the format's size is removed on Broadwell. Also,
- * there is an exception for HiZ which we treat as a compressed format and
- * is allowed to be multisampled on Broadwell and earlier.
+ * The restriction on the format's size is removed on Broadwell. Moreover,
+ * empirically it looks that even IvyBridge can handle multisampled surfaces
+ * with format sizes all the way to 128-bits (RGBA32F, RGBA32I, RGBA32UI).
+ *
+ * Also, there is an exception for HiZ which we treat as a compressed
+ * format and is allowed to be multisampled on Broadwell and earlier.
*/
if (format == ISL_FORMAT_HIZ) {
/* On SKL+, HiZ is always single-sampled even when the primary surface
* is multisampled. See also isl_surf_get_hiz_surf().
*/
return devinfo->gen <= 8;
- } else if (devinfo->gen < 8 && isl_format_get_layout(format)->bpb > 64) {
+ } else if (devinfo->gen < 7 && isl_format_get_layout(format)->bpb > 64) {
return false;
} else if (isl_format_is_compressed(format)) {
return false;
}
}
-static inline bool
+/**
+ * Returns true if the two formats are "CCS_E compatible" meaning that you can
+ * render in one format with CCS_E enabled and then texture using the other
+ * format without needing a resolve.
+ *
+ * Two formats are compatible when both support CCS_E and they share an
+ * identical per-channel bit layout (after the A8_UNORM canonicalization
+ * below).
+ *
+ * Note: Even if the formats are compatible, special care must be taken if a
+ * clear color is involved because the encoding of the clear color is heavily
+ * format-dependent.
+ */
+bool
+isl_formats_are_ccs_e_compatible(const struct gen_device_info *devinfo,
+                                 enum isl_format format1,
+                                 enum isl_format format2)
+{
+   /* They must support CCS_E */
+   if (!isl_format_supports_ccs_e(devinfo, format1) ||
+       !isl_format_supports_ccs_e(devinfo, format2))
+      return false;
+
+   /* Gen12 added CCS_E support for A8_UNORM, A8_UNORM and R8_UNORM share the
+    * same aux map format encoding so they are definitely compatible.
+    */
+   if (format1 == ISL_FORMAT_A8_UNORM)
+      format1 = ISL_FORMAT_R8_UNORM;
+
+   if (format2 == ISL_FORMAT_A8_UNORM)
+      format2 = ISL_FORMAT_R8_UNORM;
+
+   const struct isl_format_layout *fmtl1 = isl_format_get_layout(format1);
+   const struct isl_format_layout *fmtl2 = isl_format_get_layout(format2);
+
+   /* The compression used by CCS is not dependent on the actual data encoding
+    * of the format but only depends on the bit-layout of the channels.
+    */
+   return fmtl1->channels.r.bits == fmtl2->channels.r.bits &&
+          fmtl1->channels.g.bits == fmtl2->channels.g.bits &&
+          fmtl1->channels.b.bits == fmtl2->channels.b.bits &&
+          fmtl1->channels.a.bits == fmtl2->channels.a.bits;
+}
+
+static bool
isl_format_has_channel_type(enum isl_format fmt, enum isl_base_type type)
{
const struct isl_format_layout *fmtl = isl_format_get_layout(fmt);
return isl_format_has_channel_type(fmt, ISL_SINT);
}
+/**
+ * Returns true if the format produces a value for the given color
+ * component (0=R, 1=G, 2=B, 3=A).
+ *
+ * Intensity bits count toward all four components and luminance bits
+ * toward the three color components, reflecting how those channels are
+ * broadcast on unpack.
+ */
+bool
+isl_format_has_color_component(enum isl_format fmt, int component)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(fmt);
+   const uint8_t intensity = fmtl->channels.i.bits;
+   const uint8_t luminance = fmtl->channels.l.bits;
+
+   switch (component) {
+   case 0:
+      return (fmtl->channels.r.bits + intensity + luminance) > 0;
+   case 1:
+      return (fmtl->channels.g.bits + intensity + luminance) > 0;
+   case 2:
+      return (fmtl->channels.b.bits + intensity + luminance) > 0;
+   case 3:
+      return (fmtl->channels.a.bits + intensity) > 0;
+   default:
+      assert(!"Invalid color component: must be 0..3");
+      return false;
+   }
+}
+
unsigned
isl_format_get_num_channels(enum isl_format fmt)
{
return ISL_FORMAT_UNSUPPORTED;
}
}
+
+/**
+ * Map an RGBX format to the RGBA format with the same bit layout.
+ *
+ * Asserts that the input really is an RGBX format; an unhandled RGBX
+ * format trips the assert in the default case and is returned unchanged
+ * in non-debug builds.
+ */
+enum isl_format
+isl_format_rgbx_to_rgba(enum isl_format rgbx)
+{
+   assert(isl_format_is_rgbx(rgbx));
+
+   switch (rgbx) {
+   case ISL_FORMAT_R32G32B32X32_FLOAT:
+      return ISL_FORMAT_R32G32B32A32_FLOAT;
+   case ISL_FORMAT_R16G16B16X16_UNORM:
+      return ISL_FORMAT_R16G16B16A16_UNORM;
+   case ISL_FORMAT_R16G16B16X16_FLOAT:
+      return ISL_FORMAT_R16G16B16A16_FLOAT;
+   case ISL_FORMAT_B8G8R8X8_UNORM:
+      return ISL_FORMAT_B8G8R8A8_UNORM;
+   case ISL_FORMAT_B8G8R8X8_UNORM_SRGB:
+      return ISL_FORMAT_B8G8R8A8_UNORM_SRGB;
+   case ISL_FORMAT_R8G8B8X8_UNORM:
+      return ISL_FORMAT_R8G8B8A8_UNORM;
+   case ISL_FORMAT_R8G8B8X8_UNORM_SRGB:
+      return ISL_FORMAT_R8G8B8A8_UNORM_SRGB;
+   case ISL_FORMAT_B10G10R10X2_UNORM:
+      return ISL_FORMAT_B10G10R10A2_UNORM;
+   case ISL_FORMAT_B5G5R5X1_UNORM:
+      return ISL_FORMAT_B5G5R5A1_UNORM;
+   case ISL_FORMAT_B5G5R5X1_UNORM_SRGB:
+      return ISL_FORMAT_B5G5R5A1_UNORM_SRGB;
+   default:
+      assert(!"Invalid RGBX format");
+      return rgbx;
+   }
+}
+
+/**
+ * Pack channel @i of @value into @data_out according to @layout.
+ *
+ * sRGB encoding is only defined for UNORM channels; alpha/intensity must
+ * be packed with ISL_COLORSPACE_LINEAR.  Bits are ORed into data_out, so
+ * the caller must have zeroed it first.
+ */
+static inline void
+pack_channel(const union isl_color_value *value, unsigned i,
+             const struct isl_channel_layout *layout,
+             enum isl_colorspace colorspace,
+             uint32_t data_out[4])
+{
+   if (layout->type == ISL_VOID)
+      return;
+
+   if (colorspace == ISL_COLORSPACE_SRGB)
+      assert(layout->type == ISL_UNORM);
+
+   uint32_t packed;
+   switch (layout->type) {
+   case ISL_UNORM:
+      if (colorspace == ISL_COLORSPACE_SRGB) {
+         if (layout->bits == 8) {
+            packed = util_format_linear_float_to_srgb_8unorm(value->f32[i]);
+         } else {
+            float srgb = util_format_linear_to_srgb_float(value->f32[i]);
+            packed = _mesa_float_to_unorm(srgb, layout->bits);
+         }
+      } else {
+         packed = _mesa_float_to_unorm(value->f32[i], layout->bits);
+      }
+      break;
+   case ISL_SNORM:
+      packed = _mesa_float_to_snorm(value->f32[i], layout->bits);
+      break;
+   case ISL_SFLOAT:
+      assert(layout->bits == 16 || layout->bits == 32);
+      if (layout->bits == 16) {
+         packed = _mesa_float_to_half(value->f32[i]);
+      } else {
+         packed = value->u32[i];
+      }
+      break;
+   case ISL_UINT:
+      packed = MIN(value->u32[i], MAX_UINT(layout->bits));
+      break;
+   case ISL_SINT:
+      /* Clamp in signed arithmetic: reading u32 here would compare a
+       * negative input against the negative MIN_INT as a huge unsigned
+       * value and break the clamp.
+       */
+      packed = MIN(MAX(value->i32[i], MIN_INT(layout->bits)),
+                   MAX_INT(layout->bits));
+      break;
+
+   default:
+      unreachable("Invalid channel type");
+   }
+
+   unsigned dword = layout->start_bit / 32;
+   unsigned bit = layout->start_bit % 32;
+   assert(bit + layout->bits <= 32);
+   data_out[dword] |= (packed & MAX_UINT(layout->bits)) << bit;
+}
+
+/**
+ * Take an isl_color_value and pack it into the actual bits as specified by
+ * the isl_format.  This function is very slow for a format conversion
+ * function but should be fine for a single pixel worth of data.
+ *
+ * @data_out must hold at least bpb bits rounded up to whole dwords;
+ * compressed and YCbCr formats are not supported.
+ */
+void
+isl_color_value_pack(const union isl_color_value *value,
+                     enum isl_format format,
+                     uint32_t *data_out)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(format);
+   assert(fmtl->colorspace == ISL_COLORSPACE_LINEAR ||
+          fmtl->colorspace == ISL_COLORSPACE_SRGB);
+   assert(!isl_format_is_compressed(format));
+
+   /* Zero all output dwords first; pack_channel() ORs bits in. */
+   memset(data_out, 0, isl_align(fmtl->bpb, 32) / 8);
+
+   /* The two shared-exponent/packed-float formats have no per-channel
+    * layout and use dedicated helpers.
+    */
+   if (format == ISL_FORMAT_R9G9B9E5_SHAREDEXP) {
+      data_out[0] = float3_to_rgb9e5(value->f32);
+      return;
+   } else if (format == ISL_FORMAT_R11G11B10_FLOAT) {
+      data_out[0] = float3_to_r11g11b10f(value->f32);
+      return;
+   }
+
+   pack_channel(value, 0, &fmtl->channels.r, fmtl->colorspace, data_out);
+   pack_channel(value, 1, &fmtl->channels.g, fmtl->colorspace, data_out);
+   pack_channel(value, 2, &fmtl->channels.b, fmtl->colorspace, data_out);
+   pack_channel(value, 3, &fmtl->channels.a, ISL_COLORSPACE_LINEAR, data_out);
+   pack_channel(value, 0, &fmtl->channels.l, fmtl->colorspace, data_out);
+   pack_channel(value, 0, &fmtl->channels.i, ISL_COLORSPACE_LINEAR, data_out);
+   assert(fmtl->channels.p.bits == 0);
+}
+
+/** Extend an N-bit signed integer to 32 bits */
+static inline int32_t
+sign_extend(int32_t x, unsigned bits)
+{
+   if (bits < 32) {
+      unsigned shift = 32 - bits;
+      /* Do the left shift in unsigned arithmetic: left-shifting a negative
+       * signed value is undefined behavior in C.  The arithmetic right
+       * shift of the signed result then replicates the sign bit.
+       */
+      return (int32_t)((uint32_t)x << shift) >> shift;
+   } else {
+      return x;
+   }
+}
+
+/**
+ * Unpack one channel of packed pixel data from @data_in and store it into
+ * @value starting at component @start, replicated across @count components
+ * (used to broadcast luminance across RGB and intensity across RGBA).
+ */
+static inline void
+unpack_channel(union isl_color_value *value,
+               unsigned start, unsigned count,
+               const struct isl_channel_layout *layout,
+               enum isl_colorspace colorspace,
+               const uint32_t *data_in)
+{
+   if (layout->type == ISL_VOID)
+      return;
+
+   unsigned dword = layout->start_bit / 32;
+   unsigned bit = layout->start_bit % 32;
+   assert(bit + layout->bits <= 32);
+   uint32_t packed = (data_in[dword] >> bit) & MAX_UINT(layout->bits);
+
+   union {
+      uint32_t u32;
+      float f32;
+   } unpacked;
+
+   if (colorspace == ISL_COLORSPACE_SRGB)
+      assert(layout->type == ISL_UNORM);
+
+   switch (layout->type) {
+   case ISL_UNORM:
+      /* Note: the original had a redundant _mesa_unorm_to_float() call
+       * here whose result was overwritten by both branches below.
+       */
+      if (colorspace == ISL_COLORSPACE_SRGB) {
+         if (layout->bits == 8) {
+            unpacked.f32 = util_format_srgb_8unorm_to_linear_float(packed);
+         } else {
+            float srgb = _mesa_unorm_to_float(packed, layout->bits);
+            unpacked.f32 = util_format_srgb_to_linear_float(srgb);
+         }
+      } else {
+         unpacked.f32 = _mesa_unorm_to_float(packed, layout->bits);
+      }
+      break;
+   case ISL_SNORM:
+      unpacked.f32 = _mesa_snorm_to_float(sign_extend(packed, layout->bits),
+                                          layout->bits);
+      break;
+   case ISL_SFLOAT:
+      assert(layout->bits == 16 || layout->bits == 32);
+      if (layout->bits == 16) {
+         unpacked.f32 = _mesa_half_to_float(packed);
+      } else {
+         unpacked.u32 = packed;
+      }
+      break;
+   case ISL_UINT:
+      unpacked.u32 = packed;
+      break;
+   case ISL_SINT:
+      unpacked.u32 = sign_extend(packed, layout->bits);
+      break;
+
+   default:
+      unreachable("Invalid channel type");
+   }
+
+   for (unsigned i = 0; i < count; i++)
+      value->u32[start + i] = unpacked.u32;
+}
+
+/**
+ * Unpack an isl_color_value from the actual bits as specified by
+ * the isl_format.  This function is very slow for a format conversion
+ * function but should be fine for a single pixel worth of data.
+ */
+void
+isl_color_value_unpack(union isl_color_value *value,
+                       enum isl_format format,
+                       const uint32_t data_in[4])
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(format);
+   assert(fmtl->colorspace == ISL_COLORSPACE_LINEAR ||
+          fmtl->colorspace == ISL_COLORSPACE_SRGB);
+   assert(!isl_format_is_compressed(format));
+
+   /* Default to opaque black.  Integer formats get an integer 1 alpha;
+    * everything else gets 1.0f.
+    */
+   memset(value, 0, sizeof(*value));
+   if (isl_format_has_int_channel(format)) {
+      value->u32[3] = 1u;
+   } else {
+      value->f32[3] = 1.0f;
+   }
+
+   /* Shared-exponent/packed-float formats have no per-channel layout and
+    * use dedicated helpers.
+    */
+   if (format == ISL_FORMAT_R9G9B9E5_SHAREDEXP) {
+      rgb9e5_to_float3(data_in[0], value->f32);
+      return;
+   } else if (format == ISL_FORMAT_R11G11B10_FLOAT) {
+      r11g11b10f_to_float3(data_in[0], value->f32);
+      return;
+   }
+
+   unpack_channel(value, 0, 1, &fmtl->channels.r, fmtl->colorspace, data_in);
+   unpack_channel(value, 1, 1, &fmtl->channels.g, fmtl->colorspace, data_in);
+   unpack_channel(value, 2, 1, &fmtl->channels.b, fmtl->colorspace, data_in);
+   unpack_channel(value, 3, 1, &fmtl->channels.a, ISL_COLORSPACE_LINEAR, data_in);
+   unpack_channel(value, 0, 3, &fmtl->channels.l, fmtl->colorspace, data_in);
+   unpack_channel(value, 0, 4, &fmtl->channels.i, ISL_COLORSPACE_LINEAR, data_in);
+   assert(fmtl->channels.p.bits == 0);
+}