-/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
-
/*
* Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
*
*/
#include "pipe/p_defines.h"
-#include "util/u_format.h"
+#include "util/format/u_format.h"
#include "fd4_format.h"
boolean present;
};
-#define RB4_NONE ~0
-
/* vertex + texture */
#define VT(pipe, fmt, rbfmt, swapfmt) \
[PIPE_FORMAT_ ## pipe] = { \
#define _T(pipe, fmt, rbfmt, swapfmt) \
[PIPE_FORMAT_ ## pipe] = { \
.present = 1, \
- .vtx = ~0, \
+ .vtx = VFMT4_NONE, \
.tex = TFMT4_ ## fmt, \
.rb = RB4_ ## rbfmt, \
.swap = swapfmt \
[PIPE_FORMAT_ ## pipe] = { \
.present = 1, \
.vtx = VFMT4_ ## fmt, \
- .tex = ~0, \
+ .tex = TFMT4_NONE, \
.rb = RB4_ ## rbfmt, \
.swap = swapfmt \
}
static struct fd4_format formats[PIPE_FORMAT_COUNT] = {
/* 8-bit */
VT(R8_UNORM, 8_UNORM, R8_UNORM, WZYX),
- VT(R8_SNORM, 8_SNORM, NONE, WZYX),
- VT(R8_UINT, 8_UINT, NONE, WZYX),
- VT(R8_SINT, 8_SINT, NONE, WZYX),
+ VT(R8_SNORM, 8_SNORM, R8_SNORM, WZYX),
+ VT(R8_UINT, 8_UINT, R8_UINT, WZYX),
+ VT(R8_SINT, 8_SINT, R8_SINT, WZYX),
V_(R8_USCALED, 8_UINT, NONE, WZYX),
- V_(R8_SSCALED, 8_UINT, NONE, WZYX),
+ V_(R8_SSCALED, 8_SINT, NONE, WZYX),
_T(A8_UNORM, 8_UNORM, A8_UNORM, WZYX),
_T(L8_UNORM, 8_UNORM, R8_UNORM, WZYX),
_T(S8_UINT, 8_UINT, R8_UNORM, WZYX),
/* 16-bit */
- V_(R16_UNORM, 16_UNORM, NONE, WZYX),
- V_(R16_SNORM, 16_SNORM, NONE, WZYX),
- VT(R16_UINT, 16_UINT, R16_UINT, WZYX),
- VT(R16_SINT, 16_SINT, R16_SINT, WZYX),
- V_(R16_USCALED, 16_UINT, NONE, WZYX),
- V_(R16_SSCALED, 16_UINT, NONE, WZYX),
- VT(R16_FLOAT, 16_FLOAT, R16_FLOAT,WZYX),
-
- _T(A16_UINT, 16_UINT, NONE, WZYX),
- _T(A16_SINT, 16_SINT, NONE, WZYX),
- _T(L16_UINT, 16_UINT, NONE, WZYX),
- _T(L16_SINT, 16_SINT, NONE, WZYX),
- _T(I16_UINT, 16_UINT, NONE, WZYX),
- _T(I16_SINT, 16_SINT, NONE, WZYX),
+ VT(R16_UNORM, 16_UNORM, R16_UNORM, WZYX),
+ VT(R16_SNORM, 16_SNORM, R16_SNORM, WZYX),
+ VT(R16_UINT, 16_UINT, R16_UINT, WZYX),
+ VT(R16_SINT, 16_SINT, R16_SINT, WZYX),
+ V_(R16_USCALED, 16_UINT, NONE, WZYX),
+ V_(R16_SSCALED, 16_SINT, NONE, WZYX),
+ VT(R16_FLOAT, 16_FLOAT, R16_FLOAT, WZYX),
+
+ _T(A16_UNORM, 16_UNORM, NONE, WZYX),
+ _T(A16_SNORM, 16_SNORM, NONE, WZYX),
+ _T(A16_UINT, 16_UINT, NONE, WZYX),
+ _T(A16_SINT, 16_SINT, NONE, WZYX),
+ _T(L16_UNORM, 16_UNORM, NONE, WZYX),
+ _T(L16_SNORM, 16_SNORM, NONE, WZYX),
+ _T(L16_UINT, 16_UINT, NONE, WZYX),
+ _T(L16_SINT, 16_SINT, NONE, WZYX),
+ _T(I16_UNORM, 16_UNORM, NONE, WZYX),
+ _T(I16_SNORM, 16_SNORM, NONE, WZYX),
+ _T(I16_UINT, 16_UINT, NONE, WZYX),
+ _T(I16_SINT, 16_SINT, NONE, WZYX),
VT(R8G8_UNORM, 8_8_UNORM, R8G8_UNORM, WZYX),
VT(R8G8_SNORM, 8_8_SNORM, R8G8_SNORM, WZYX),
_T(L8A8_UINT, 8_8_UINT, NONE, WZYX),
_T(L8A8_SINT, 8_8_SINT, NONE, WZYX),
+ _T(B5G6R5_UNORM, 5_6_5_UNORM, R5G6B5_UNORM, WXYZ),
_T(B5G5R5A1_UNORM, 5_5_5_1_UNORM, R5G5B5A1_UNORM, WXYZ),
_T(B5G5R5X1_UNORM, 5_5_5_1_UNORM, R5G5B5A1_UNORM, WXYZ),
_T(B4G4R4A4_UNORM, 4_4_4_4_UNORM, R4G4B4A4_UNORM, WXYZ),
VT(R32_UINT, 32_UINT, R32_UINT, WZYX),
VT(R32_SINT, 32_SINT, R32_SINT, WZYX),
V_(R32_USCALED, 32_UINT, NONE, WZYX),
- V_(R32_SSCALED, 32_UINT, NONE, WZYX),
+ V_(R32_SSCALED, 32_SINT, NONE, WZYX),
VT(R32_FLOAT, 32_FLOAT, R32_FLOAT,WZYX),
V_(R32_FIXED, 32_FIXED, NONE, WZYX),
_T(I32_UINT, 32_UINT, NONE, WZYX),
_T(I32_SINT, 32_SINT, NONE, WZYX),
- V_(R16G16_UNORM, 16_16_UNORM, NONE, WZYX),
- V_(R16G16_SNORM, 16_16_SNORM, NONE, WZYX),
- VT(R16G16_UINT, 16_16_UINT, R16G16_UINT, WZYX),
- VT(R16G16_SINT, 16_16_SINT, R16G16_SINT, WZYX),
- V_(R16G16_USCALED, 16_16_UINT, NONE, WZYX),
- V_(R16G16_SSCALED, 16_16_SINT, NONE, WZYX),
- VT(R16G16_FLOAT, 16_16_FLOAT, R16G16_FLOAT,WZYX),
+ VT(R16G16_UNORM, 16_16_UNORM, R16G16_UNORM, WZYX),
+ VT(R16G16_SNORM, 16_16_SNORM, R16G16_SNORM, WZYX),
+ VT(R16G16_UINT, 16_16_UINT, R16G16_UINT, WZYX),
+ VT(R16G16_SINT, 16_16_SINT, R16G16_SINT, WZYX),
+ V_(R16G16_USCALED, 16_16_UINT, NONE, WZYX),
+ V_(R16G16_SSCALED, 16_16_SINT, NONE, WZYX),
+ VT(R16G16_FLOAT, 16_16_FLOAT, R16G16_FLOAT, WZYX),
- _T(L16A16_UINT, 16_16_UINT, NONE, WZYX),
- _T(L16A16_SINT, 16_16_SINT, NONE, WZYX),
+ _T(L16A16_UNORM, 16_16_UNORM, NONE, WZYX),
+ _T(L16A16_SNORM, 16_16_SNORM, NONE, WZYX),
+ _T(L16A16_UINT, 16_16_UINT, NONE, WZYX),
+ _T(L16A16_SINT, 16_16_SINT, NONE, WZYX),
VT(R8G8B8A8_UNORM, 8_8_8_8_UNORM, R8G8B8A8_UNORM, WZYX),
_T(R8G8B8X8_UNORM, 8_8_8_8_UNORM, R8G8B8A8_UNORM, WZYX),
VT(B10G10R10A2_UNORM, 10_10_10_2_UNORM, R10G10B10A2_UNORM, WXYZ),
_T(B10G10R10X2_UNORM, 10_10_10_2_UNORM, R10G10B10A2_UNORM, WXYZ),
V_(R10G10B10A2_SNORM, 10_10_10_2_SNORM, NONE, WZYX),
- V_(R10G10B10A2_UINT, 10_10_10_2_UINT, NONE, WZYX),
+ V_(B10G10R10A2_SNORM, 10_10_10_2_SNORM, NONE, WXYZ),
+ VT(R10G10B10A2_UINT, 10_10_10_2_UINT, R10G10B10A2_UINT, WZYX),
+ VT(B10G10R10A2_UINT, 10_10_10_2_UINT, R10G10B10A2_UINT, WXYZ),
V_(R10G10B10A2_USCALED, 10_10_10_2_UINT, NONE, WZYX),
+ V_(B10G10R10A2_USCALED, 10_10_10_2_UINT, NONE, WXYZ),
V_(R10G10B10A2_SSCALED, 10_10_10_2_SINT, NONE, WZYX),
+ V_(B10G10R10A2_SSCALED, 10_10_10_2_SINT, NONE, WXYZ),
- _T(R11G11B10_FLOAT, 11_11_10_FLOAT, R11G11B10_FLOAT, WZYX),
+ VT(R11G11B10_FLOAT, 11_11_10_FLOAT, R11G11B10_FLOAT, WZYX),
_T(R9G9B9E5_FLOAT, 9_9_9_E5_FLOAT, NONE, WZYX),
- _T(Z24X8_UNORM, X8Z24_UNORM, R8G8B8A8_UNORM, WZYX),
- _T(Z24_UNORM_S8_UINT, X8Z24_UNORM, R8G8B8A8_UNORM, WZYX),
- _T(Z32_FLOAT, 32_FLOAT, R8G8B8A8_UNORM, WZYX),
- _T(Z32_FLOAT_S8X24_UINT, 32_FLOAT,R8G8B8A8_UNORM, WZYX),
+ _T(Z16_UNORM, 16_UNORM, R8G8_UNORM, WZYX),
+ _T(Z24X8_UNORM, X8Z24_UNORM, R8G8B8A8_UNORM, WZYX),
+ _T(X24S8_UINT, 8_8_8_8_UINT, R8G8B8A8_UINT, XYZW),
+ _T(Z24_UNORM_S8_UINT, X8Z24_UNORM, R8G8B8A8_UNORM, WZYX),
+ _T(Z32_FLOAT, 32_FLOAT, R8G8B8A8_UNORM, WZYX),
+ _T(Z32_FLOAT_S8X24_UINT, 32_FLOAT, R8G8B8A8_UNORM, WZYX),
+ _T(X32_S8X24_UINT, 8_UINT, R8_UINT, WZYX),
/* 48-bit */
V_(R16G16B16_UNORM, 16_16_16_UNORM, NONE, WZYX),
V_(R16G16B16_FLOAT, 16_16_16_FLOAT, NONE, WZYX),
/* 64-bit */
- V_(R16G16B16A16_UNORM, 16_16_16_16_UNORM, NONE, WZYX),
- V_(R16G16B16A16_SNORM, 16_16_16_16_SNORM, NONE, WZYX),
+ VT(R16G16B16A16_UNORM, 16_16_16_16_UNORM, R16G16B16A16_UNORM, WZYX),
+ VT(R16G16B16X16_UNORM, 16_16_16_16_UNORM, R16G16B16A16_UNORM, WZYX),
+ VT(R16G16B16A16_SNORM, 16_16_16_16_SNORM, R16G16B16A16_SNORM, WZYX),
+ VT(R16G16B16X16_SNORM, 16_16_16_16_SNORM, R16G16B16A16_SNORM, WZYX),
VT(R16G16B16A16_UINT, 16_16_16_16_UINT, R16G16B16A16_UINT, WZYX),
_T(R16G16B16X16_UINT, 16_16_16_16_UINT, R16G16B16A16_UINT, WZYX),
VT(R16G16B16A16_SINT, 16_16_16_16_SINT, R16G16B16A16_SINT, WZYX),
_T(L32A32_SINT, 32_32_SINT, NONE, WZYX),
/* 96-bit */
- V_(R32G32B32_UINT, 32_32_32_UINT, NONE, WZYX),
- V_(R32G32B32_SINT, 32_32_32_SINT, NONE, WZYX),
+ VT(R32G32B32_UINT, 32_32_32_UINT, NONE, WZYX),
+ VT(R32G32B32_SINT, 32_32_32_SINT, NONE, WZYX),
V_(R32G32B32_USCALED, 32_32_32_UINT, NONE, WZYX),
V_(R32G32B32_SSCALED, 32_32_32_SINT, NONE, WZYX),
- V_(R32G32B32_FLOAT, 32_32_32_FLOAT, NONE, WZYX),
+ VT(R32G32B32_FLOAT, 32_32_32_FLOAT, NONE, WZYX),
V_(R32G32B32_FIXED, 32_32_32_FIXED, NONE, WZYX),
/* 128-bit */
VT(R32G32B32A32_FLOAT, 32_32_32_32_FLOAT, R32G32B32A32_FLOAT, WZYX),
_T(R32G32B32X32_FLOAT, 32_32_32_32_FLOAT, R32G32B32A32_FLOAT, WZYX),
V_(R32G32B32A32_FIXED, 32_32_32_32_FIXED, NONE, WZYX),
+
+ /* compressed */
+ _T(ETC1_RGB8, ETC1, NONE, WZYX),
+ _T(ETC2_RGB8, ETC2_RGB8, NONE, WZYX),
+ _T(ETC2_SRGB8, ETC2_RGB8, NONE, WZYX),
+ _T(ETC2_RGB8A1, ETC2_RGB8A1, NONE, WZYX),
+ _T(ETC2_SRGB8A1, ETC2_RGB8A1, NONE, WZYX),
+ _T(ETC2_RGBA8, ETC2_RGBA8, NONE, WZYX),
+ _T(ETC2_SRGBA8, ETC2_RGBA8, NONE, WZYX),
+ _T(ETC2_R11_UNORM, ETC2_R11_UNORM, NONE, WZYX),
+ _T(ETC2_R11_SNORM, ETC2_R11_SNORM, NONE, WZYX),
+ _T(ETC2_RG11_UNORM, ETC2_RG11_UNORM, NONE, WZYX),
+ _T(ETC2_RG11_SNORM, ETC2_RG11_SNORM, NONE, WZYX),
+
+ _T(DXT1_RGB, DXT1, NONE, WZYX),
+ _T(DXT1_SRGB, DXT1, NONE, WZYX),
+ _T(DXT1_RGBA, DXT1, NONE, WZYX),
+ _T(DXT1_SRGBA, DXT1, NONE, WZYX),
+ _T(DXT3_RGBA, DXT3, NONE, WZYX),
+ _T(DXT3_SRGBA, DXT3, NONE, WZYX),
+ _T(DXT5_RGBA, DXT5, NONE, WZYX),
+ _T(DXT5_SRGBA, DXT5, NONE, WZYX),
+
+ _T(BPTC_RGBA_UNORM, BPTC, NONE, WZYX),
+ _T(BPTC_SRGBA, BPTC, NONE, WZYX),
+ _T(BPTC_RGB_FLOAT, BPTC_FLOAT, NONE, WZYX),
+ _T(BPTC_RGB_UFLOAT, BPTC_UFLOAT, NONE, WZYX),
+
+ _T(RGTC1_UNORM, RGTC1_UNORM, NONE, WZYX),
+ _T(RGTC1_SNORM, RGTC1_SNORM, NONE, WZYX),
+ _T(RGTC2_UNORM, RGTC2_UNORM, NONE, WZYX),
+ _T(RGTC2_SNORM, RGTC2_SNORM, NONE, WZYX),
+ _T(LATC1_UNORM, RGTC1_UNORM, NONE, WZYX),
+ _T(LATC1_SNORM, RGTC1_SNORM, NONE, WZYX),
+ _T(LATC2_UNORM, RGTC2_UNORM, NONE, WZYX),
+ _T(LATC2_SNORM, RGTC2_SNORM, NONE, WZYX),
+
+ _T(ASTC_4x4, ASTC_4x4, NONE, WZYX),
+ _T(ASTC_5x4, ASTC_5x4, NONE, WZYX),
+ _T(ASTC_5x5, ASTC_5x5, NONE, WZYX),
+ _T(ASTC_6x5, ASTC_6x5, NONE, WZYX),
+ _T(ASTC_6x6, ASTC_6x6, NONE, WZYX),
+ _T(ASTC_8x5, ASTC_8x5, NONE, WZYX),
+ _T(ASTC_8x6, ASTC_8x6, NONE, WZYX),
+ _T(ASTC_8x8, ASTC_8x8, NONE, WZYX),
+ _T(ASTC_10x5, ASTC_10x5, NONE, WZYX),
+ _T(ASTC_10x6, ASTC_10x6, NONE, WZYX),
+ _T(ASTC_10x8, ASTC_10x8, NONE, WZYX),
+ _T(ASTC_10x10, ASTC_10x10, NONE, WZYX),
+ _T(ASTC_12x10, ASTC_12x10, NONE, WZYX),
+ _T(ASTC_12x12, ASTC_12x12, NONE, WZYX),
+
+ _T(ASTC_4x4_SRGB, ASTC_4x4, NONE, WZYX),
+ _T(ASTC_5x4_SRGB, ASTC_5x4, NONE, WZYX),
+ _T(ASTC_5x5_SRGB, ASTC_5x5, NONE, WZYX),
+ _T(ASTC_6x5_SRGB, ASTC_6x5, NONE, WZYX),
+ _T(ASTC_6x6_SRGB, ASTC_6x6, NONE, WZYX),
+ _T(ASTC_8x5_SRGB, ASTC_8x5, NONE, WZYX),
+ _T(ASTC_8x6_SRGB, ASTC_8x6, NONE, WZYX),
+ _T(ASTC_8x8_SRGB, ASTC_8x8, NONE, WZYX),
+ _T(ASTC_10x5_SRGB, ASTC_10x5, NONE, WZYX),
+ _T(ASTC_10x6_SRGB, ASTC_10x6, NONE, WZYX),
+ _T(ASTC_10x8_SRGB, ASTC_10x8, NONE, WZYX),
+ _T(ASTC_10x10_SRGB, ASTC_10x10, NONE, WZYX),
+ _T(ASTC_12x10_SRGB, ASTC_12x10, NONE, WZYX),
+ _T(ASTC_12x12_SRGB, ASTC_12x12, NONE, WZYX),
};
/* convert pipe format to vertex buffer format: */
fd4_pipe2vtx(enum pipe_format format)
{
if (!formats[format].present)
- return ~0;
+ return VFMT4_NONE;
return formats[format].vtx;
}
fd4_pipe2tex(enum pipe_format format)
{
if (!formats[format].present)
- return ~0;
+ return TFMT4_NONE;
return formats[format].tex;
}
fd4_pipe2color(enum pipe_format format)
{
if (!formats[format].present)
- return ~0;
+ return RB4_NONE;
return formats[format].rb;
}
if (format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT)
format = PIPE_FORMAT_Z32_FLOAT;
- switch (util_format_get_blocksizebits(format)) {
+ if (util_format_description(format)->layout == UTIL_FORMAT_LAYOUT_ASTC)
+ return TFETCH4_16_BYTE;
+
+ switch (util_format_get_blocksizebits(format) / util_format_get_blockwidth(format)) {
case 8: return TFETCH4_1_BYTE;
case 16: return TFETCH4_2_BYTE;
case 32: return TFETCH4_4_BYTE;
case 64: return TFETCH4_8_BYTE;
+	case 96: return TFETCH4_1_BYTE; /* 96-bit (32_32_32) formats are vertex-only here (rb/tex NONE), so the fetch size is presumably never used for texturing — confirm */
case 128: return TFETCH4_16_BYTE;
default:
debug_printf("Unknown block size for format %s: %d\n",
}
}
-/* we need to special case a bit the depth/stencil restore, because we are
- * using the texture sampler to blit into the depth/stencil buffer, *not*
- * into a color buffer. Otherwise fd4_tex_swiz() will do the wrong thing,
- * as it is assuming that you are sampling into normal render target..
- *
- * TODO looks like we can probably share w/ a3xx..
- */
-enum pipe_format
-fd4_gmem_restore_format(enum pipe_format format)
-{
- switch (format) {
- case PIPE_FORMAT_Z24X8_UNORM:
- case PIPE_FORMAT_Z24_UNORM_S8_UINT:
- return PIPE_FORMAT_R8G8B8A8_UNORM;
- case PIPE_FORMAT_Z16_UNORM:
- return PIPE_FORMAT_R8G8_UNORM;
- case PIPE_FORMAT_S8_UINT:
- return PIPE_FORMAT_R8_UNORM;
- default:
- return format;
- }
-}
-
enum a4xx_depth_format
fd4_pipe2depth(enum pipe_format format)
{
{
switch (swiz) {
default:
- case PIPE_SWIZZLE_RED: return A4XX_TEX_X;
- case PIPE_SWIZZLE_GREEN: return A4XX_TEX_Y;
- case PIPE_SWIZZLE_BLUE: return A4XX_TEX_Z;
- case PIPE_SWIZZLE_ALPHA: return A4XX_TEX_W;
- case PIPE_SWIZZLE_ZERO: return A4XX_TEX_ZERO;
- case PIPE_SWIZZLE_ONE: return A4XX_TEX_ONE;
+ case PIPE_SWIZZLE_X: return A4XX_TEX_X;
+ case PIPE_SWIZZLE_Y: return A4XX_TEX_Y;
+ case PIPE_SWIZZLE_Z: return A4XX_TEX_Z;
+ case PIPE_SWIZZLE_W: return A4XX_TEX_W;
+ case PIPE_SWIZZLE_0: return A4XX_TEX_ZERO;
+ case PIPE_SWIZZLE_1: return A4XX_TEX_ONE;
}
}