[enable_gallium_nouveau="$enableval"],
[enable_gallium_nouveau=no])
if test "x$enable_gallium_nouveau" = xyes; then
- GALLIUM_WINSYS_DRM_DIRS="$GALLIUM_WINSYS_DRM_DIRS nouveau"
- GALLIUM_DRIVERS_DIRS="$GALLIUM_DRIVERS_DIRS nouveau nv30 nv40 nv50"
+ GALLIUM_DRIVERS_DIRS="$GALLIUM_DRIVERS_DIRS nouveau nvfx nv50"
+ gallium_check_st "nouveau/drm" "dri-nouveau" "egl-nouveau" "xorg-nouveau"
fi
+dnl
+dnl Gallium G3DVL configuration
+dnl
+AC_ARG_ENABLE([gallium-g3dvl],
+ [AS_HELP_STRING([--enable-gallium-g3dvl],
+ [build gallium g3dvl @<:@default=disabled@:>@])],
+ [enable_gallium_g3dvl="$enableval"],
+ [enable_gallium_g3dvl=no])
+if test "x$enable_gallium_g3dvl" = xyes; then
+ vl_winsys_dirs=""
+ for dir in $GALLIUM_WINSYS_DIRS; do
+ vl_winsys_dirs="$vl_winsys_dirs g3dvl/$dir"
+ done
+ # Hack: the g3dvl dri state tracker lives in winsys/g3dvl/dri
+ # and must be built before the drm bits
+ if test "$mesa_driver" = dri; then
+ vl_winsys_dirs="g3dvl/dri $vl_winsys_dirs"
+ fi
+ GALLIUM_WINSYS_DIRS="$GALLIUM_WINSYS_DIRS $vl_winsys_dirs"
+fi
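+dnl For example, with the dri driver and GALLIUM_WINSYS_DIRS="drm xlib", the
+dnl block above appends "g3dvl/dri g3dvl/drm g3dvl/xlib" to GALLIUM_WINSYS_DIRS.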
+
dnl
dnl Gallium swrast configuration
dnl
util/u_simple_shaders.c \
util/u_snprintf.c \
util/u_surface.c \
+ util/u_surfaces.c \
util/u_texture.c \
util/u_tile.c \
- util/u_timed_winsys.c \
+ util/u_transfer.c \
+ util/u_resource.c \
util/u_upload_mgr.c \
- util/u_simple_screen.c \
- target-helpers/wrap_screen.c
-
- # Disabling until pipe-video branch gets merged in
- #vl/vl_bitstream_parser.c \
- #vl/vl_mpeg12_mc_renderer.c \
- #vl/vl_compositor.c \
- #vl/vl_csc.c \
- #vl/vl_shader_build.c \
++ target-helpers/wrap_screen.c \
+ vl/vl_bitstream_parser.c \
+ vl/vl_mpeg12_mc_renderer.c \
+ vl/vl_compositor.c \
+ vl/vl_csc.c
GALLIVM_SOURCES = \
- gallivm/lp_bld_alpha.c \
gallivm/lp_bld_arit.c \
- gallivm/lp_bld_blend_aos.c \
- gallivm/lp_bld_blend_logicop.c \
- gallivm/lp_bld_blend_soa.c \
gallivm/lp_bld_const.c \
gallivm/lp_bld_conv.c \
gallivm/lp_bld_debug.c \
'util/u_simple_shaders.c',
'util/u_snprintf.c',
'util/u_surface.c',
+ 'util/u_surfaces.c',
'util/u_texture.c',
'util/u_tile.c',
- 'util/u_timed_winsys.c',
+ 'util/u_transfer.c',
'util/u_upload_mgr.c',
- 'util/u_simple_screen.c',
- # Disabling until pipe-video branch gets merged in
- #'vl/vl_bitstream_parser.c',
- #'vl/vl_mpeg12_mc_renderer.c',
- #'vl/vl_compositor.c',
- #'vl/vl_csc.c',
- #'vl/vl_shader_build.c',
+ 'vl/vl_bitstream_parser.c',
+ 'vl/vl_mpeg12_mc_renderer.c',
+ 'vl/vl_compositor.c',
+ 'vl/vl_csc.c',
+ 'target-helpers/wrap_screen.c',
]
- if drawllvm:
+ if env['llvm']:
source += [
- 'gallivm/lp_bld_alpha.c',
'gallivm/lp_bld_arit.c',
- 'gallivm/lp_bld_blend_aos.c',
- 'gallivm/lp_bld_blend_logicop.c',
- 'gallivm/lp_bld_blend_soa.c',
'gallivm/lp_bld_const.c',
'gallivm/lp_bld_conv.c',
'gallivm/lp_bld_debug.c',
PIPE_FORMAT_R8G8_SSCALED , plain, 1, 1, s8 , s8 , , , xy01, rgb
PIPE_FORMAT_R8G8B8_SSCALED , plain, 1, 1, s8 , s8 , s8 , , xyz1, rgb
PIPE_FORMAT_R8G8B8A8_SSCALED , plain, 1, 1, s8 , s8 , s8 , s8 , xyzw, rgb
+
+ # GL-specific vertex buffer element formats
+ # A.k.a. GL_FIXED
+ PIPE_FORMAT_R32_FIXED , plain, 1, 1, h32 , , , , x001, rgb
+ PIPE_FORMAT_R32G32_FIXED , plain, 1, 1, h32 , h32 , , , xy01, rgb
+ PIPE_FORMAT_R32G32B32_FIXED , plain, 1, 1, h32 , h32 , h32 , , xyz1, rgb
+ PIPE_FORMAT_R32G32B32A32_FIXED , plain, 1, 1, h32 , h32 , h32 , h32 , xyzw, rgb
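+ # (here h32 is a 32-bit signed fixed-point channel, i.e. GL's 16.16 GL_FIXED)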
+
+ # D3D9-specific vertex buffer element formats
+ # See also:
+ # - http://msdn.microsoft.com/en-us/library/bb172533.aspx
+ # A.k.a. D3DDECLTYPE_UDEC3
+ PIPE_FORMAT_R10G10B10X2_USCALED , plain, 1, 1, u10 , u10 , u10 , x2 , xyz1, rgb
+ # A.k.a. D3DDECLTYPE_DEC3N
+ PIPE_FORMAT_R10G10B10X2_SNORM , plain, 1, 1, sn10, sn10, sn10 , x2 , xyz1, rgb
++
+PIPE_FORMAT_YV12 , subsampled, 1, 1, x8 , x8 , x8 , x8 , xyzw, yuv
+PIPE_FORMAT_YV16 , subsampled, 1, 1, x8 , x8 , x8 , x8 , xyzw, yuv
+PIPE_FORMAT_IYUV , subsampled, 1, 1, x8 , x8 , x8 , x8 , xyzw, yuv
+PIPE_FORMAT_NV12 , subsampled, 1, 1, x8 , x8 , x8 , x8 , xyzw, yuv
+PIPE_FORMAT_NV21 , subsampled, 1, 1, x8 , x8 , x8 , x8 , xyzw, yuv
+PIPE_FORMAT_IA44 , subsampled, 1, 1, x8 , x8 , x8 , x8 , xyzw, yuv
+PIPE_FORMAT_AI44 , subsampled, 1, 1, x8 , x8 , x8 , x8 , xyzw, yuv
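+# XXX: the YUV entries above are placeholders; their actual layouts (planar
+# or otherwise) are not expressible in this table yet, and the corresponding
+# pack/unpack code is stubbed for now.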
--- /dev/null
+ /**************************************************************************
+ *
+ * Copyright 2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ **************************************************************************/
+
+
+ /**
+ * @file
+ * YUV and subsampled RGB format conversion.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+ #include "util/u_format_yuv.h"
+
+
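+ /*
+ * Byte layout of each 32-bit, two-pixel group (after the little-endian
+ * load and optional byteswap below):
+ *   R8G8_B8G8: [r, g0, b, g1]    G8R8_G8B8: [g0, r, g1, b]
+ *   UYVY:      [u, y0, v, y1]    YUYV:      [y0, u, y1, v]
+ * The shared components (r/b, or u/v) apply to both pixels of the pair.
+ */
+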
+ void
+ util_format_r8g8_b8g8_unorm_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height)
+ {
+ unsigned x, y;
+
+ for (y = 0; y < height; y += 1) {
+ float *dst = dst_row;
+ const uint32_t *src = (const uint32_t *)src_row;
+ uint32_t value;
+ float r, g0, g1, b;
+
+ for (x = 0; x + 1 < width; x += 2) {
+ value = *src++;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ r = ubyte_to_float((value >> 0) & 0xff);
+ g0 = ubyte_to_float((value >> 8) & 0xff);
+ b = ubyte_to_float((value >> 16) & 0xff);
+ g1 = ubyte_to_float((value >> 24) & 0xff);
+
+ dst[0] = r; /* r */
+ dst[1] = g0; /* g */
+ dst[2] = b; /* b */
+ dst[3] = 1.0f; /* a */
+ dst += 4;
+
+ dst[0] = r; /* r */
+ dst[1] = g1; /* g */
+ dst[2] = b; /* b */
+ dst[3] = 1.0f; /* a */
+ dst += 4;
+ }
+
+ if (x < width) {
+ value = *src;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ r = ubyte_to_float((value >> 0) & 0xff);
+ g0 = ubyte_to_float((value >> 8) & 0xff);
+ b = ubyte_to_float((value >> 16) & 0xff);
+ g1 = ubyte_to_float((value >> 24) & 0xff);
+
+ dst[0] = r; /* r */
+ dst[1] = g0; /* g */
+ dst[2] = b; /* b */
+ dst[3] = 1.0f; /* a */
+ }
+
+ src_row += src_stride/sizeof(*src_row);
+ dst_row += dst_stride/sizeof(*dst_row);
+ }
+ }
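+
+ /* Throughout this file the trailing "if (x < width)" blocks handle an odd
+ * width by reading/writing the final lone pixel with the same pair layout. */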
+
+
+ void
+ util_format_r8g8_b8g8_unorm_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height)
+ {
+ unsigned x, y;
+
+ for (y = 0; y < height; y += 1) {
+ uint8_t *dst = dst_row;
+ const uint32_t *src = (const uint32_t *)src_row;
+ uint32_t value;
+ uint8_t r, g0, g1, b;
+
+ for (x = 0; x + 1 < width; x += 2) {
+ value = *src++;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ r = (value >> 0) & 0xff;
+ g0 = (value >> 8) & 0xff;
+ b = (value >> 16) & 0xff;
+ g1 = (value >> 24) & 0xff;
+
+ dst[0] = r; /* r */
+ dst[1] = g0; /* g */
+ dst[2] = b; /* b */
+ dst[3] = 0xff; /* a */
+ dst += 4;
+
+ dst[0] = r; /* r */
+ dst[1] = g1; /* g */
+ dst[2] = b; /* b */
+ dst[3] = 0xff; /* a */
+ dst += 4;
+ }
+
+ if (x < width) {
+ value = *src;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ r = (value >> 0) & 0xff;
+ g0 = (value >> 8) & 0xff;
+ b = (value >> 16) & 0xff;
+ g1 = (value >> 24) & 0xff;
+
+ dst[0] = r; /* r */
+ dst[1] = g0; /* g */
+ dst[2] = b; /* b */
+ dst[3] = 0xff; /* a */
+ }
+
+ src_row += src_stride/sizeof(*src_row);
+ dst_row += dst_stride/sizeof(*dst_row);
+ }
+ }
+
+
+ void
+ util_format_r8g8_b8g8_unorm_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height)
+ {
+ unsigned x, y;
+
+ for (y = 0; y < height; y += 1) {
+ const float *src = src_row;
+ uint32_t *dst = (uint32_t *)dst_row;
+ float r, g0, g1, b;
+ uint32_t value;
+
+ for (x = 0; x + 1 < width; x += 2) {
+ r = 0.5f*(src[0] + src[4]);
+ g0 = src[1];
+ g1 = src[5];
+ b = 0.5f*(src[2] + src[6]);
+
+ value = float_to_ubyte(r);
+ value |= float_to_ubyte(g0) << 8;
+ value |= float_to_ubyte(b) << 16;
+ value |= float_to_ubyte(g1) << 24;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ *dst++ = value;
+
+ src += 8;
+ }
+
+ if (x < width) {
+ r = src[0];
+ g0 = src[1];
+ g1 = 0;
+ b = src[2];
+
+ value = float_to_ubyte(r);
+ value |= float_to_ubyte(g0) << 8;
+ value |= float_to_ubyte(b) << 16;
+ value |= float_to_ubyte(g1) << 24;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ *dst = value;
+ }
+
+ dst_row += dst_stride/sizeof(*dst_row);
+ src_row += src_stride/sizeof(*src_row);
+ }
+ }
+
+
+ void
+ util_format_r8g8_b8g8_unorm_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height)
+ {
+ unsigned x, y;
+
+ for (y = 0; y < height; y += 1) {
+ const uint8_t *src = src_row;
+ uint32_t *dst = (uint32_t *)dst_row;
+ uint32_t r, g0, g1, b;
+ uint32_t value;
+
+ for (x = 0; x + 1 < width; x += 2) {
+ r = (src[0] + src[4] + 1) >> 1;
+ g0 = src[1];
+ g1 = src[5];
+ b = (src[2] + src[6] + 1) >> 1;
+
+ value = r;
+ value |= g0 << 8;
+ value |= b << 16;
+ value |= g1 << 24;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ *dst++ = value;
+
+ src += 8;
+ }
+
+ if (x < width) {
+ r = src[0];
+ g0 = src[1];
+ g1 = 0;
+ b = src[2];
+
+ value = r;
+ value |= g0 << 8;
+ value |= b << 16;
+ value |= g1 << 24;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ *dst = value;
+ }
+
+ dst_row += dst_stride/sizeof(*dst_row);
+ src_row += src_stride/sizeof(*src_row);
+ }
+ }
+
+
+ void
+ util_format_r8g8_b8g8_unorm_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j)
+ {
+ assert(i < 2);
+ assert(j < 1);
+
+ dst[0] = ubyte_to_float(src[0]); /* r */
+ dst[1] = ubyte_to_float(src[1 + 2*i]); /* g */
+ dst[2] = ubyte_to_float(src[2]); /* b */
+ dst[3] = 1.0f; /* a */
+ }
+
+
+ void
+ util_format_g8r8_g8b8_unorm_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height)
+ {
+ unsigned x, y;
+
+ for (y = 0; y < height; y += 1) {
+ float *dst = dst_row;
+ const uint32_t *src = (const uint32_t *)src_row;
+ uint32_t value;
+ float r, g0, g1, b;
+
+ for (x = 0; x + 1 < width; x += 2) {
+ value = *src++;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ g0 = ubyte_to_float((value >> 0) & 0xff);
+ r = ubyte_to_float((value >> 8) & 0xff);
+ g1 = ubyte_to_float((value >> 16) & 0xff);
+ b = ubyte_to_float((value >> 24) & 0xff);
+
+ dst[0] = r; /* r */
+ dst[1] = g0; /* g */
+ dst[2] = b; /* b */
+ dst[3] = 1.0f; /* a */
+ dst += 4;
+
+ dst[0] = r; /* r */
+ dst[1] = g1; /* g */
+ dst[2] = b; /* b */
+ dst[3] = 1.0f; /* a */
+ dst += 4;
+ }
+
+ if (x < width) {
+ value = *src;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ g0 = ubyte_to_float((value >> 0) & 0xff);
+ r = ubyte_to_float((value >> 8) & 0xff);
+ g1 = ubyte_to_float((value >> 16) & 0xff);
+ b = ubyte_to_float((value >> 24) & 0xff);
+
+ dst[0] = r; /* r */
+ dst[1] = g0; /* g */
+ dst[2] = b; /* b */
+ dst[3] = 1.0f; /* a */
+ }
+
+ src_row += src_stride/sizeof(*src_row);
+ dst_row += dst_stride/sizeof(*dst_row);
+ }
+ }
+
+
+ void
+ util_format_g8r8_g8b8_unorm_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height)
+ {
+ unsigned x, y;
+
+ for (y = 0; y < height; y += 1) {
+ uint8_t *dst = dst_row;
+ const uint32_t *src = (const uint32_t *)src_row;
+ uint32_t value;
+ uint8_t r, g0, g1, b;
+
+ for (x = 0; x + 1 < width; x += 2) {
+ value = *src++;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ g0 = (value >> 0) & 0xff;
+ r = (value >> 8) & 0xff;
+ g1 = (value >> 16) & 0xff;
+ b = (value >> 24) & 0xff;
+
+ dst[0] = r; /* r */
+ dst[1] = g0; /* g */
+ dst[2] = b; /* b */
+ dst[3] = 0xff; /* a */
+ dst += 4;
+
+ dst[0] = r; /* r */
+ dst[1] = g1; /* g */
+ dst[2] = b; /* b */
+ dst[3] = 0xff; /* a */
+ dst += 4;
+ }
+
+ if (x < width) {
+ value = *src;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ g0 = (value >> 0) & 0xff;
+ r = (value >> 8) & 0xff;
+ g1 = (value >> 16) & 0xff;
+ b = (value >> 24) & 0xff;
+
+ dst[0] = r; /* r */
+ dst[1] = g0; /* g */
+ dst[2] = b; /* b */
+ dst[3] = 0xff; /* a */
+ }
+
+ src_row += src_stride/sizeof(*src_row);
+ dst_row += dst_stride/sizeof(*dst_row);
+ }
+ }
+
+
+ void
+ util_format_g8r8_g8b8_unorm_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height)
+ {
+ unsigned x, y;
+
+ for (y = 0; y < height; y += 1) {
+ const float *src = src_row;
+ uint32_t *dst = (uint32_t *)dst_row;
+ float r, g0, g1, b;
+ uint32_t value;
+
+ for (x = 0; x + 1 < width; x += 2) {
+ r = 0.5f*(src[0] + src[4]);
+ g0 = src[1];
+ g1 = src[5];
+ b = 0.5f*(src[2] + src[6]);
+
+ value = float_to_ubyte(g0);
+ value |= float_to_ubyte(r) << 8;
+ value |= float_to_ubyte(g1) << 16;
+ value |= float_to_ubyte(b) << 24;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ *dst++ = value;
+
+ src += 8;
+ }
+
+ if (x < width) {
+ r = src[0];
+ g0 = src[1];
+ g1 = 0;
+ b = src[2];
+
+ value = float_to_ubyte(g0);
+ value |= float_to_ubyte(r) << 8;
+ value |= float_to_ubyte(g1) << 16;
+ value |= float_to_ubyte(b) << 24;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ *dst = value;
+ }
+
+ dst_row += dst_stride/sizeof(*dst_row);
+ src_row += src_stride/sizeof(*src_row);
+ }
+ }
+
+
+ void
+ util_format_g8r8_g8b8_unorm_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height)
+ {
+ unsigned x, y;
+
+ for (y = 0; y < height; y += 1) {
+ const uint8_t *src = src_row;
+ uint32_t *dst = (uint32_t *)dst_row;
+ uint32_t r, g0, g1, b;
+ uint32_t value;
+
+ for (x = 0; x + 1 < width; x += 2) {
+ r = (src[0] + src[4] + 1) >> 1;
+ g0 = src[1];
+ g1 = src[5];
+ b = (src[2] + src[6] + 1) >> 1;
+
+ value = g0;
+ value |= r << 8;
+ value |= g1 << 16;
+ value |= b << 24;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ *dst++ = value;
+
+ src += 8;
+ }
+
+ if (x < width) {
+ r = src[0];
+ g0 = src[1];
+ g1 = 0;
+ b = src[2];
+
+ value = g0;
+ value |= r << 8;
+ value |= g1 << 16;
+ value |= b << 24;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ *dst = value;
+ }
+
+ dst_row += dst_stride/sizeof(*dst_row);
+ src_row += src_stride/sizeof(*src_row);
+ }
+ }
+
+
+ void
+ util_format_g8r8_g8b8_unorm_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j)
+ {
+ assert(i < 2);
+ assert(j < 1);
+
+ dst[0] = ubyte_to_float(src[1]); /* r */
+ dst[1] = ubyte_to_float(src[0 + 2*i]); /* g */
+ dst[2] = ubyte_to_float(src[3]); /* b */
+ dst[3] = 1.0f; /* a */
+ }
+
+
+ void
+ util_format_uyvy_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height)
+ {
+ unsigned x, y;
+
+ for (y = 0; y < height; y += 1) {
+ float *dst = dst_row;
+ const uint32_t *src = (const uint32_t *)src_row;
+ uint32_t value;
+ uint8_t y0, y1, u, v;
+
+ for (x = 0; x + 1 < width; x += 2) {
+ value = *src++;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ u = (value >> 0) & 0xff;
+ y0 = (value >> 8) & 0xff;
+ v = (value >> 16) & 0xff;
+ y1 = (value >> 24) & 0xff;
+
+ util_format_yuv_to_rgb_float(y0, u, v, &dst[0], &dst[1], &dst[2]);
+ dst[3] = 1.0f; /* a */
+ dst += 4;
+
+ util_format_yuv_to_rgb_float(y1, u, v, &dst[0], &dst[1], &dst[2]);
+ dst[3] = 1.0f; /* a */
+ dst += 4;
+ }
+
+ if (x < width) {
+ value = *src;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ u = (value >> 0) & 0xff;
+ y0 = (value >> 8) & 0xff;
+ v = (value >> 16) & 0xff;
+ y1 = (value >> 24) & 0xff;
+
+ util_format_yuv_to_rgb_float(y0, u, v, &dst[0], &dst[1], &dst[2]);
+ dst[3] = 1.0f; /* a */
+ }
+
+ src_row += src_stride/sizeof(*src_row);
+ dst_row += dst_stride/sizeof(*dst_row);
+ }
+ }
+
+
+ void
+ util_format_uyvy_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height)
+ {
+ unsigned x, y;
+
+ for (y = 0; y < height; y += 1) {
+ uint8_t *dst = dst_row;
+ const uint32_t *src = (const uint32_t *)src_row;
+ uint32_t value;
+ uint8_t y0, y1, u, v;
+
+ for (x = 0; x + 1 < width; x += 2) {
+ value = *src++;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ u = (value >> 0) & 0xff;
+ y0 = (value >> 8) & 0xff;
+ v = (value >> 16) & 0xff;
+ y1 = (value >> 24) & 0xff;
+
+ util_format_yuv_to_rgb_8unorm(y0, u, v, &dst[0], &dst[1], &dst[2]);
+ dst[3] = 0xff; /* a */
+ dst += 4;
+
+ util_format_yuv_to_rgb_8unorm(y1, u, v, &dst[0], &dst[1], &dst[2]);
+ dst[3] = 0xff; /* a */
+ dst += 4;
+ }
+
+ if (x < width) {
+ value = *src;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ u = (value >> 0) & 0xff;
+ y0 = (value >> 8) & 0xff;
+ v = (value >> 16) & 0xff;
+ y1 = (value >> 24) & 0xff;
+
+ util_format_yuv_to_rgb_8unorm(y0, u, v, &dst[0], &dst[1], &dst[2]);
+ dst[3] = 0xff; /* a */
+ }
+
+ src_row += src_stride/sizeof(*src_row);
+ dst_row += dst_stride/sizeof(*dst_row);
+ }
+ }
+
+
+ void
+ util_format_uyvy_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height)
+ {
+ unsigned x, y;
+
+ for (y = 0; y < height; y += 1) {
+ const float *src = src_row;
+ uint32_t *dst = (uint32_t *)dst_row;
+ uint8_t y0, y1, u, v;
+ uint32_t value;
+
+ for (x = 0; x + 1 < width; x += 2) {
+ uint8_t y0, y1, u0, u1, v0, v1, u, v;
+
+ util_format_rgb_float_to_yuv(src[0], src[1], src[2],
+ &y0, &u0, &v0);
+ util_format_rgb_float_to_yuv(src[4], src[5], src[6],
+ &y1, &u1, &v1);
+
+ u = (u0 + u1 + 1) >> 1;
+ v = (v0 + v1 + 1) >> 1;
+
+ value = u;
+ value |= y0 << 8;
+ value |= v << 16;
+ value |= y1 << 24;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ *dst++ = value;
+
+ src += 8;
+ }
+
+ if (x < width) {
+ util_format_rgb_float_to_yuv(src[0], src[1], src[2],
+ &y0, &u, &v);
+ y1 = 0;
+
+ value = u;
+ value |= y0 << 8;
+ value |= v << 16;
+ value |= y1 << 24;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ *dst = value;
+ }
+
+ dst_row += dst_stride/sizeof(*dst_row);
+ src_row += src_stride/sizeof(*src_row);
+ }
+ }
+
+
+ void
+ util_format_uyvy_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height)
+ {
+ unsigned x, y;
+
+ for (y = 0; y < height; y += 1) {
+ const uint8_t *src = src_row;
+ uint32_t *dst = (uint32_t *)dst_row;
+ uint8_t y0, y1, u, v;
+ uint32_t value;
+
+ for (x = 0; x + 1 < width; x += 2) {
+ uint8_t y0, y1, u0, u1, v0, v1, u, v;
+
+ util_format_rgb_8unorm_to_yuv(src[0], src[1], src[2],
+ &y0, &u0, &v0);
+ util_format_rgb_8unorm_to_yuv(src[4], src[5], src[6],
+ &y1, &u1, &v1);
+
+ u = (u0 + u1 + 1) >> 1;
+ v = (v0 + v1 + 1) >> 1;
+
+ value = u;
+ value |= y0 << 8;
+ value |= v << 16;
+ value |= y1 << 24;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ *dst++ = value;
+
+ src += 8;
+ }
+
+ if (x < width) {
+ util_format_rgb_8unorm_to_yuv(src[0], src[1], src[2],
+ &y0, &u, &v);
+ y1 = 0;
+
+ value = u;
+ value |= y0 << 8;
+ value |= v << 16;
+ value |= y1 << 24;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ *dst = value;
+ }
+
+ dst_row += dst_stride/sizeof(*dst_row);
+ src_row += src_stride/sizeof(*src_row);
+ }
+ }
+
+
+ void
+ util_format_uyvy_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j)
+ {
+ uint8_t y, u, v;
+
+ assert(i < 2);
+ assert(j < 1);
+
+ y = src[1 + i*2];
+ u = src[0];
+ v = src[2];
+
+ util_format_yuv_to_rgb_float(y, u, v, &dst[0], &dst[1], &dst[2]);
+
+ dst[3] = 1.0f;
+ }
+
+
+ void
+ util_format_yuyv_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height)
+ {
+ unsigned x, y;
+
+ for (y = 0; y < height; y += 1) {
+ float *dst = dst_row;
+ const uint32_t *src = (const uint32_t *)src_row;
+ uint32_t value;
+ uint8_t y0, y1, u, v;
+
+ for (x = 0; x + 1 < width; x += 2) {
+ value = *src++;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ y0 = (value >> 0) & 0xff;
+ u = (value >> 8) & 0xff;
+ y1 = (value >> 16) & 0xff;
+ v = (value >> 24) & 0xff;
+
+ util_format_yuv_to_rgb_float(y0, u, v, &dst[0], &dst[1], &dst[2]);
+ dst[3] = 1.0f; /* a */
+ dst += 4;
+
+ util_format_yuv_to_rgb_float(y1, u, v, &dst[0], &dst[1], &dst[2]);
+ dst[3] = 1.0f; /* a */
+ dst += 4;
+ }
+
+ if (x < width) {
+ value = *src;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ y0 = (value >> 0) & 0xff;
+ u = (value >> 8) & 0xff;
+ y1 = (value >> 16) & 0xff;
+ v = (value >> 24) & 0xff;
+
+ util_format_yuv_to_rgb_float(y0, u, v, &dst[0], &dst[1], &dst[2]);
+ dst[3] = 1.0f; /* a */
+ }
+
+ src_row += src_stride/sizeof(*src_row);
+ dst_row += dst_stride/sizeof(*dst_row);
+ }
+ }
+
+
+ void
+ util_format_yuyv_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height)
+ {
+ unsigned x, y;
+
+ for (y = 0; y < height; y += 1) {
+ uint8_t *dst = dst_row;
+ const uint32_t *src = (const uint32_t *)src_row;
+ uint32_t value;
+ uint8_t y0, y1, u, v;
+
+ for (x = 0; x + 1 < width; x += 2) {
+ value = *src++;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ y0 = (value >> 0) & 0xff;
+ u = (value >> 8) & 0xff;
+ y1 = (value >> 16) & 0xff;
+ v = (value >> 24) & 0xff;
+
+ util_format_yuv_to_rgb_8unorm(y0, u, v, &dst[0], &dst[1], &dst[2]);
+ dst[3] = 0xff; /* a */
+ dst += 4;
+
+ util_format_yuv_to_rgb_8unorm(y1, u, v, &dst[0], &dst[1], &dst[2]);
+ dst[3] = 0xff; /* a */
+ dst += 4;
+ }
+
+ if (x < width) {
+ value = *src;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ y0 = (value >> 0) & 0xff;
+ u = (value >> 8) & 0xff;
+ y1 = (value >> 16) & 0xff;
+ v = (value >> 24) & 0xff;
+
+ util_format_yuv_to_rgb_8unorm(y0, u, v, &dst[0], &dst[1], &dst[2]);
+ dst[3] = 0xff; /* a */
+ }
+
+ src_row += src_stride/sizeof(*src_row);
+ dst_row += dst_stride/sizeof(*dst_row);
+ }
+ }
+
+
+ void
+ util_format_yuyv_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height)
+ {
+ unsigned x, y;
+
+ for (y = 0; y < height; y += 1) {
+ const float *src = src_row;
+ uint32_t *dst = (uint32_t *)dst_row;
+ uint8_t y0, y1, u, v;
+ uint32_t value;
+
+ for (x = 0; x + 1 < width; x += 2) {
+ uint8_t y0, y1, u0, u1, v0, v1, u, v;
+
+ util_format_rgb_float_to_yuv(src[0], src[1], src[2],
+ &y0, &u0, &v0);
+ util_format_rgb_float_to_yuv(src[4], src[5], src[6],
+ &y1, &u1, &v1);
+
+ u = (u0 + u1 + 1) >> 1;
+ v = (v0 + v1 + 1) >> 1;
+
+ value = y0;
+ value |= u << 8;
+ value |= y1 << 16;
+ value |= v << 24;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ *dst++ = value;
+
+ src += 8;
+ }
+
+ if (x < width) {
+ util_format_rgb_float_to_yuv(src[0], src[1], src[2],
+ &y0, &u, &v);
+ y1 = 0;
+
+ value = y0;
+ value |= u << 8;
+ value |= y1 << 16;
+ value |= v << 24;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ *dst = value;
+ }
+
+ dst_row += dst_stride/sizeof(*dst_row);
+ src_row += src_stride/sizeof(*src_row);
+ }
+ }
+
+
+ void
+ util_format_yuyv_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height)
+ {
+ unsigned x, y;
+
+ for (y = 0; y < height; y += 1) {
+ const uint8_t *src = src_row;
+ uint32_t *dst = (uint32_t *)dst_row;
+ uint8_t y0, y1, u, v;
+ uint32_t value;
+
+ for (x = 0; x + 1 < width; x += 2) {
+ uint8_t y0, y1, u0, u1, v0, v1, u, v;
+
+ util_format_rgb_8unorm_to_yuv(src[0], src[1], src[2],
+ &y0, &u0, &v0);
+ util_format_rgb_8unorm_to_yuv(src[4], src[5], src[6],
+ &y1, &u1, &v1);
+
+ u = (u0 + u1 + 1) >> 1;
+ v = (v0 + v1 + 1) >> 1;
+
+ value = y0;
+ value |= u << 8;
+ value |= y1 << 16;
+ value |= v << 24;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ *dst++ = value;
+
+ src += 8;
+ }
+
+ if (x < width) {
+ util_format_rgb_8unorm_to_yuv(src[0], src[1], src[2],
+ &y0, &u, &v);
+ y1 = 0;
+
+ value = y0;
+ value |= u << 8;
+ value |= y1 << 16;
+ value |= v << 24;
+
+ #ifdef PIPE_ARCH_BIG_ENDIAN
+ value = util_bswap32(value);
+ #endif
+
+ *dst = value;
+ }
+
+ dst_row += dst_stride/sizeof(*dst_row);
+ src_row += src_stride/sizeof(*src_row);
+ }
+ }
+
+
+ void
+ util_format_yuyv_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j)
+ {
+ uint8_t y, u, v;
+
+ assert(i < 2);
+ assert(j < 1);
+
+ y = src[0 + i*2];
+ u = src[1];
+ v = src[3];
+
+ util_format_yuv_to_rgb_float(y, u, v, &dst[0], &dst[1], &dst[2]);
+
+ dst[3] = 1.0f;
+ }
++
++/* XXX: Stubbed for now */
++void
++util_format_yv12_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_yv12_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_yv12_unpack_rgba_float(float *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_yv12_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
++ const float *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_yv12_fetch_rgba_float(float *dst, const uint8_t *src,
++ unsigned i, unsigned j) {}
++void
++util_format_yv16_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_yv16_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_yv16_unpack_rgba_float(float *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_yv16_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
++ const float *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_yv16_fetch_rgba_float(float *dst, const uint8_t *src,
++ unsigned i, unsigned j) {}
++void
++util_format_iyuv_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_iyuv_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_iyuv_unpack_rgba_float(float *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_iyuv_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
++ const float *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_iyuv_fetch_rgba_float(float *dst, const uint8_t *src,
++ unsigned i, unsigned j) {}
++void
++util_format_nv12_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_nv12_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_nv12_unpack_rgba_float(float *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_nv12_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
++ const float *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_nv12_fetch_rgba_float(float *dst, const uint8_t *src,
++ unsigned i, unsigned j) {}
++void
++util_format_nv21_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_nv21_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_nv21_unpack_rgba_float(float *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_nv21_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
++ const float *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_nv21_fetch_rgba_float(float *dst, const uint8_t *src,
++ unsigned i, unsigned j) {}
++void
++util_format_ia44_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_ia44_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_ia44_unpack_rgba_float(float *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_ia44_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
++ const float *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_ia44_fetch_rgba_float(float *dst, const uint8_t *src,
++ unsigned i, unsigned j) {}
++void
++util_format_ai44_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_ai44_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_ai44_unpack_rgba_float(float *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_ai44_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
++ const float *src_row, unsigned src_stride,
++ unsigned width, unsigned height) {}
++void
++util_format_ai44_fetch_rgba_float(float *dst, const uint8_t *src,
++ unsigned i, unsigned j) {}
--- /dev/null
+ /**************************************************************************
+ *
+ * Copyright 2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ **************************************************************************/
+
+
+ /**
+ * @file
+ * YUV colorspace conversion.
+ *
+ * @author Brian Paul <brianp@vmware.com>
+ * @author Michal Krol <michal@vmware.com>
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ *
+ * See also:
+ * - http://www.fourcc.org/fccyvrgb.php
+ * - http://msdn.microsoft.com/en-us/library/ms893078
+ * - http://en.wikipedia.org/wiki/YUV
+ */
+
+
+ #ifndef U_FORMAT_YUV_H_
+ #define U_FORMAT_YUV_H_
+
+
+ #include "pipe/p_compiler.h"
+ #include "u_math.h"
+
+
+ /*
+ * TODO: Ensure we use consistent and correct floating-point formulas, with
+ * enough precision in the coefficients.
+ */
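+
+ /*
+ * The coefficients below follow ITU-R BT.601 with studio swing: Y in
+ * [16,235] and U/V in [16,240]. For example, white (1,1,1) maps to
+ * y = (0.257 + 0.504 + 0.098) * 255 + 16 = 235, with u = v = 128.
+ */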
+
+ static INLINE void
+ util_format_rgb_float_to_yuv(float r, float g, float b,
+ uint8_t *y, uint8_t *u, uint8_t *v)
+ {
+ const float _r = CLAMP(r, 0.0f, 1.0f);
+ const float _g = CLAMP(g, 0.0f, 1.0f);
+ const float _b = CLAMP(b, 0.0f, 1.0f);
+
+ const float scale = 255.0f;
+
+ const int _y = scale * ( (0.257f * _r) + (0.504f * _g) + (0.098f * _b));
+ const int _u = scale * (-(0.148f * _r) - (0.291f * _g) + (0.439f * _b));
+ const int _v = scale * ( (0.439f * _r) - (0.368f * _g) - (0.071f * _b));
+
+ *y = _y + 16;
+ *u = _u + 128;
+ *v = _v + 128;
+ }
+
+
+ static INLINE void
+ util_format_yuv_to_rgb_float(uint8_t y, uint8_t u, uint8_t v,
+ float *r, float *g, float *b)
+ {
+ const int _y = y - 16;
+ const int _u = u - 128;
+ const int _v = v - 128;
+
+ const float y_factor = 255.0f / 219.0f;
+
+ const float scale = 1.0f / 255.0f;
+
+ *r = scale * (y_factor * _y + 1.596f * _v);
+ *g = scale * (y_factor * _y - 0.391f * _u - 0.813f * _v);
+ *b = scale * (y_factor * _y + 2.018f * _u );
+ }
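+
+ /* y_factor = 255/219 expands the studio-swing luma range [16,235] back to
+ * full range before the final 1/255 normalization. */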
+
+
+ static INLINE void
+ util_format_rgb_8unorm_to_yuv(uint8_t r, uint8_t g, uint8_t b,
+ uint8_t *y, uint8_t *u, uint8_t *v)
+ {
+ *y = (( 66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
+ *u = (( -38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
+ *v = (( 112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
+ }
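+
+ /* The integer coefficients above and below are the float coefficients
+ * scaled by 256 and rounded, e.g. 0.257 * 256 ~= 66 and 1.596 * 256 ~= 409;
+ * the "+ 128" term rounds to nearest before the >> 8. */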
+
+
+ static INLINE void
+ util_format_yuv_to_rgb_8unorm(uint8_t y, uint8_t u, uint8_t v,
+ uint8_t *r, uint8_t *g, uint8_t *b)
+ {
+ const int _y = y - 16;
+ const int _u = u - 128;
+ const int _v = v - 128;
+
+ const int _r = (298 * _y + 409 * _v + 128) >> 8;
+ const int _g = (298 * _y - 100 * _u - 208 * _v + 128) >> 8;
+ const int _b = (298 * _y + 516 * _u + 128) >> 8;
+
+ *r = CLAMP(_r, 0, 255);
+ *g = CLAMP(_g, 0, 255);
+ *b = CLAMP(_b, 0, 255);
+ }
+
+
+
+ void
+ util_format_uyvy_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ util_format_uyvy_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ util_format_uyvy_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ util_format_uyvy_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ util_format_uyvy_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j);
+
+ void
+ util_format_yuyv_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ util_format_yuyv_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ util_format_yuyv_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ util_format_yuyv_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ util_format_yuyv_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j);
+
++/* XXX: Stubbed for now */
++void
++util_format_yv12_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_yv12_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_yv12_unpack_rgba_float(float *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_yv12_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
++ const float *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_yv12_fetch_rgba_float(float *dst, const uint8_t *src,
++ unsigned i, unsigned j);
++void
++util_format_yv16_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_yv16_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_yv16_unpack_rgba_float(float *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_yv16_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
++ const float *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_yv16_fetch_rgba_float(float *dst, const uint8_t *src,
++ unsigned i, unsigned j);
++void
++util_format_iyuv_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_iyuv_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_iyuv_unpack_rgba_float(float *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_iyuv_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
++ const float *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_iyuv_fetch_rgba_float(float *dst, const uint8_t *src,
++ unsigned i, unsigned j);
++void
++util_format_nv12_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_nv12_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_nv12_unpack_rgba_float(float *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_nv12_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
++ const float *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_nv12_fetch_rgba_float(float *dst, const uint8_t *src,
++ unsigned i, unsigned j);
++void
++util_format_nv21_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_nv21_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_nv21_unpack_rgba_float(float *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_nv21_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
++ const float *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_nv21_fetch_rgba_float(float *dst, const uint8_t *src,
++ unsigned i, unsigned j);
++void
++util_format_ia44_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_ia44_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_ia44_unpack_rgba_float(float *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_ia44_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
++ const float *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_ia44_fetch_rgba_float(float *dst, const uint8_t *src,
++ unsigned i, unsigned j);
++void
++util_format_ai44_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_ai44_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_ai44_unpack_rgba_float(float *dst_row, unsigned dst_stride,
++ const uint8_t *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_ai44_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
++ const float *src_row, unsigned src_stride,
++ unsigned width, unsigned height);
++void
++util_format_ai44_fetch_rgba_float(float *dst, const uint8_t *src,
++ unsigned i, unsigned j);
++
+
+ void
+ util_format_r8g8_b8g8_unorm_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ util_format_r8g8_b8g8_unorm_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ util_format_r8g8_b8g8_unorm_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ util_format_r8g8_b8g8_unorm_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ util_format_r8g8_b8g8_unorm_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j);
+
+ void
+ util_format_g8r8_g8b8_unorm_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ util_format_g8r8_g8b8_unorm_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ util_format_g8r8_g8b8_unorm_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ util_format_g8r8_g8b8_unorm_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ util_format_g8r8_g8b8_unorm_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j);
+
+
+
+ #endif /* U_FORMAT_YUV_H_ */
--- /dev/null
- 1,
- PIPE_BUFFER_USAGE_VERTEX,
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vl_compositor.h"
+#include <assert.h>
+#include <pipe/p_context.h>
+#include <util/u_inlines.h>
+#include <util/u_memory.h>
+#include <tgsi/tgsi_ureg.h>
+#include "vl_csc.h"
+
+struct vertex_shader_consts
+{
+ struct vertex4f dst_scale;
+ struct vertex4f dst_trans;
+ struct vertex4f src_scale;
+ struct vertex4f src_trans;
+};
+
+struct fragment_shader_consts
+{
+ float matrix[16];
+};
+
+static bool
+u_video_rects_equal(struct pipe_video_rect *a, struct pipe_video_rect *b)
+{
+ assert(a && b);
+
+ if (a->x != b->x)
+ return false;
+ if (a->y != b->y)
+ return false;
+ if (a->w != b->w)
+ return false;
+ if (a->h != b->h)
+ return false;
+
+ return true;
+}
+
+static bool
+create_vert_shader(struct vl_compositor *c)
+{
+ struct ureg_program *shader;
+ struct ureg_src vpos, vtex;
+ struct ureg_dst o_vpos, o_vtex;
+
+ shader = ureg_create(TGSI_PROCESSOR_VERTEX);
+ if (!shader)
+ return false;
+
+ vpos = ureg_DECL_vs_input(shader, 0);
+ vtex = ureg_DECL_vs_input(shader, 1);
+ o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, 0);
+ o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, 1);
+
+ /*
+ * o_vpos = vpos
+ * o_vtex = vtex
+ */
+ ureg_MOV(shader, o_vpos, vpos);
+ ureg_MOV(shader, o_vtex, vtex);
+
+ ureg_END(shader);
+
+ c->vertex_shader = ureg_create_shader_and_destroy(shader, c->pipe);
+ if (!c->vertex_shader)
+ return false;
+
+ return true;
+}
+
+static bool
+create_frag_shader(struct vl_compositor *c)
+{
+ struct ureg_program *shader;
+ struct ureg_src tc;
+ struct ureg_src csc[4];
+ struct ureg_src sampler;
+ struct ureg_dst texel;
+ struct ureg_dst fragment;
+ unsigned i;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return false;
+
+ tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, 1, TGSI_INTERPOLATE_LINEAR);
+ for (i = 0; i < 4; ++i)
+ csc[i] = ureg_DECL_constant(shader, i);
+ sampler = ureg_DECL_sampler(shader, 0);
+ texel = ureg_DECL_temporary(shader);
+ fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+
+ /*
+ * texel = tex(tc, sampler)
+ * fragment = csc * texel
+ */
+ ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
+ for (i = 0; i < 4; ++i)
+ ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));
+
+ ureg_release_temporary(shader, texel);
+ ureg_END(shader);
+
+ c->fragment_shader = ureg_create_shader_and_destroy(shader, c->pipe);
+ if (!c->fragment_shader)
+ return false;
+
+ return true;
+}
+
+static bool
+init_pipe_state(struct vl_compositor *c)
+{
+ struct pipe_sampler_state sampler;
+
+ assert(c);
+
+ c->fb_state.nr_cbufs = 1;
+ c->fb_state.zsbuf = NULL;
+
+ /* Zero-init so the fields left commented out below aren't stack garbage */
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
+ sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
+ sampler.compare_func = PIPE_FUNC_ALWAYS;
+ sampler.normalized_coords = 1;
+ /*sampler.lod_bias = ;*/
+ /*sampler.min_lod = ;*/
+ /*sampler.max_lod = ;*/
+ /*sampler.border_color[i] = ;*/
+ /*sampler.max_anisotropy = ;*/
+ c->sampler = c->pipe->create_sampler_state(c->pipe, &sampler);
+
+ return true;
+}
+
+static void cleanup_pipe_state(struct vl_compositor *c)
+{
+ assert(c);
+
+ c->pipe->delete_sampler_state(c->pipe, c->sampler);
+}
+
+static bool
+init_shaders(struct vl_compositor *c)
+{
+ assert(c);
+
+ if (!create_vert_shader(c))
+ return false;
+ if (!create_frag_shader(c))
+ return false;
+
+ return true;
+}
+
+static void cleanup_shaders(struct vl_compositor *c)
+{
+ assert(c);
+
+ c->pipe->delete_vs_state(c->pipe, c->vertex_shader);
+ c->pipe->delete_fs_state(c->pipe, c->fragment_shader);
+}
+
+static bool
+init_buffers(struct vl_compositor *c)
+{
+ struct fragment_shader_consts fsc;
++ struct pipe_vertex_element vertex_elems[2];
+
+ assert(c);
+
+ /*
+ * Create our vertex buffer and vertex buffer elements
+ */
+ c->vertex_buf.stride = sizeof(struct vertex4f);
+ c->vertex_buf.max_index = (VL_COMPOSITOR_MAX_LAYERS + 2) * 6 - 1;
+ c->vertex_buf.buffer_offset = 0;
++ /* XXX: Create with DYNAMIC or STREAM */
+ c->vertex_buf.buffer = pipe_buffer_create
+ (
+ c->pipe->screen,
- c->vertex_elems[0].src_offset = 0;
- c->vertex_elems[0].instance_divisor = 0;
- c->vertex_elems[0].vertex_buffer_index = 0;
- c->vertex_elems[0].nr_components = 2;
- c->vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
- c->vertex_elems[1].src_offset = sizeof(struct vertex2f);
- c->vertex_elems[1].instance_divisor = 0;
- c->vertex_elems[1].vertex_buffer_index = 0;
- c->vertex_elems[1].nr_components = 2;
- c->vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
++ PIPE_BIND_VERTEX_BUFFER,
+ sizeof(struct vertex4f) * (VL_COMPOSITOR_MAX_LAYERS + 2) * 6
+ );
+
- 1,
- PIPE_BUFFER_USAGE_CONSTANT,
++ vertex_elems[0].src_offset = 0;
++ vertex_elems[0].instance_divisor = 0;
++ vertex_elems[0].vertex_buffer_index = 0;
++ vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
++ vertex_elems[1].src_offset = sizeof(struct vertex2f);
++ vertex_elems[1].instance_divisor = 0;
++ vertex_elems[1].vertex_buffer_index = 0;
++ vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
++ c->vertex_elems_state = c->pipe->create_vertex_elements_state(c->pipe, 2, vertex_elems);
+
+ /*
+ * Create our fragment shader's constant buffer
+ * Const buffer contains the color conversion matrix and bias vectors
+ */
++ /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
+ c->fs_const_buf = pipe_buffer_create
+ (
+ c->pipe->screen,
- pipe_buffer_reference(&c->vertex_buf.buffer, NULL);
- pipe_buffer_reference(&c->fs_const_buf, NULL);
++ PIPE_BIND_CONSTANT_BUFFER,
+ sizeof(struct fragment_shader_consts)
+ );
+
+ vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY, NULL, true, fsc.matrix);
+
+ vl_compositor_set_csc_matrix(c, fsc.matrix);
+
+ return true;
+}
+
+static void
+cleanup_buffers(struct vl_compositor *c)
+{
+ assert(c);
+
- vb = pipe_buffer_map(c->pipe->screen, c->vertex_buf.buffer,
- PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD);
++ c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
++ pipe_resource_reference(&c->vertex_buf.buffer, NULL);
++ pipe_resource_reference(&c->fs_const_buf, NULL);
+}
+
+bool vl_compositor_init(struct vl_compositor *compositor, struct pipe_context *pipe)
+{
+ unsigned i;
+
+ assert(compositor);
+
+ memset(compositor, 0, sizeof(struct vl_compositor));
+
+ compositor->pipe = pipe;
+
+ if (!init_pipe_state(compositor))
+ return false;
+ if (!init_shaders(compositor)) {
+ cleanup_pipe_state(compositor);
+ return false;
+ }
+ if (!init_buffers(compositor)) {
+ cleanup_shaders(compositor);
+ cleanup_pipe_state(compositor);
+ return false;
+ }
+
+ compositor->fb_state.width = 0;
+ compositor->fb_state.height = 0;
+ compositor->bg = NULL;
+ compositor->dirty_bg = false;
+ for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i)
+ compositor->layers[i] = NULL;
+ compositor->dirty_layers = 0;
+
+ return true;
+}
+
+void vl_compositor_cleanup(struct vl_compositor *compositor)
+{
+ assert(compositor);
+
+ cleanup_buffers(compositor);
+ cleanup_shaders(compositor);
+ cleanup_pipe_state(compositor);
+}
+
+void vl_compositor_set_background(struct vl_compositor *compositor,
+ struct pipe_surface *bg, struct pipe_video_rect *bg_src_rect)
+{
+ assert(compositor);
+ assert((bg && bg_src_rect) || (!bg && !bg_src_rect));
+
+ if (compositor->bg != bg ||
+ !u_video_rects_equal(&compositor->bg_src_rect, bg_src_rect)) {
+ pipe_surface_reference(&compositor->bg, bg);
+ /*if (!u_video_rects_equal(&compositor->bg_src_rect, bg_src_rect))*/
+ compositor->bg_src_rect = *bg_src_rect;
+ compositor->dirty_bg = true;
+ }
+}
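+
+/* XXX: passing a NULL bg when none is currently set reaches
+ * u_video_rects_equal() above with a NULL rect and trips its assert;
+ * vl_compositor_set_layers() below has the same problem for NULL layers. */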
+
+void vl_compositor_set_layers(struct vl_compositor *compositor,
+ struct pipe_surface *layers[],
+ struct pipe_video_rect *src_rects[],
+ struct pipe_video_rect *dst_rects[],
+ unsigned num_layers)
+{
+ unsigned i;
+
+ assert(compositor);
+ assert(num_layers <= VL_COMPOSITOR_MAX_LAYERS);
+
+ for (i = 0; i < num_layers; ++i)
+ {
+ assert((layers[i] && src_rects[i] && dst_rects[i]) ||
+ (!layers[i] && !src_rects[i] && !dst_rects[i]));
+
+ if (compositor->layers[i] != layers[i] ||
+ !u_video_rects_equal(&compositor->layer_src_rects[i], src_rects[i]) ||
+ !u_video_rects_equal(&compositor->layer_dst_rects[i], dst_rects[i]))
+ {
+ pipe_surface_reference(&compositor->layers[i], layers[i]);
+ /*if (!u_video_rects_equal(&compositor->layer_src_rects[i], src_rects[i]))*/
+ compositor->layer_src_rects[i] = *src_rects[i];
+ /*if (!u_video_rects_equal(&compositor->layer_dst_rects[i], dst_rects[i]))*/
+ compositor->layer_dst_rects[i] = *dst_rects[i];
+ compositor->dirty_layers |= 1 << i;
+ }
+ }
+
+ for (; i < VL_COMPOSITOR_MAX_LAYERS; ++i)
+ pipe_surface_reference(&compositor->layers[i], NULL);
+}
+
+static void gen_rect_verts(unsigned pos,
+ struct pipe_video_rect *src_rect,
+ struct vertex2f *src_inv_size,
+ struct pipe_video_rect *dst_rect,
+ struct vertex2f *dst_inv_size,
+ struct vertex4f *vb)
+{
+ assert(pos < VL_COMPOSITOR_MAX_LAYERS + 2);
+ assert(src_rect);
+ assert(src_inv_size);
+ assert((dst_rect && dst_inv_size) /*|| (!dst_rect && !dst_inv_size)*/);
+ assert(vb);
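+
+ /* XXX: the dirty_bg path in gen_data() passes NULL dst_rect/dst_inv_size;
+ * that trips the assert above and would dereference NULL below, so
+ * backgrounds still need explicit handling here. */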
+
+ vb[pos * 6 + 0].x = dst_rect->x * dst_inv_size->x;
+ vb[pos * 6 + 0].y = dst_rect->y * dst_inv_size->y;
+ vb[pos * 6 + 0].z = src_rect->x * src_inv_size->x;
+ vb[pos * 6 + 0].w = src_rect->y * src_inv_size->y;
+
+ vb[pos * 6 + 1].x = dst_rect->x * dst_inv_size->x;
+ vb[pos * 6 + 1].y = (dst_rect->y + dst_rect->h) * dst_inv_size->y;
+ vb[pos * 6 + 1].z = src_rect->x * src_inv_size->x;
+ vb[pos * 6 + 1].w = (src_rect->y + src_rect->h) * src_inv_size->y;
+
+ vb[pos * 6 + 2].x = (dst_rect->x + dst_rect->w) * dst_inv_size->x;
+ vb[pos * 6 + 2].y = dst_rect->y * dst_inv_size->y;
+ vb[pos * 6 + 2].z = (src_rect->x + src_rect->w) * src_inv_size->x;
+ vb[pos * 6 + 2].w = src_rect->y * src_inv_size->y;
+
+ vb[pos * 6 + 3].x = (dst_rect->x + dst_rect->w) * dst_inv_size->x;
+ vb[pos * 6 + 3].y = dst_rect->y * dst_inv_size->y;
+ vb[pos * 6 + 3].z = (src_rect->x + src_rect->w) * src_inv_size->x;
+ vb[pos * 6 + 3].w = src_rect->y * src_inv_size->y;
+
+ vb[pos * 6 + 4].x = dst_rect->x * dst_inv_size->x;
+ vb[pos * 6 + 4].y = (dst_rect->y + dst_rect->h) * dst_inv_size->y;
+ vb[pos * 6 + 4].z = src_rect->x * src_inv_size->x;
+ vb[pos * 6 + 4].w = (src_rect->y + src_rect->h) * src_inv_size->y;
+
+ vb[pos * 6 + 5].x = (dst_rect->x + dst_rect->w) * dst_inv_size->x;
+ vb[pos * 6 + 5].y = (dst_rect->y + dst_rect->h) * dst_inv_size->y;
+ vb[pos * 6 + 5].z = (src_rect->x + src_rect->w) * src_inv_size->x;
+ vb[pos * 6 + 5].w = (src_rect->y + src_rect->h) * src_inv_size->y;
+}
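+
+/* The six vertices emitted above are the two triangles of the destination
+ * quad: (TL, BL, TR) and (TR, BL, BR); entries 3 and 4 duplicate 2 and 1. */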
+
+static unsigned gen_data(struct vl_compositor *c,
+ struct pipe_surface *src_surface,
+ struct pipe_video_rect *src_rect,
+ struct pipe_video_rect *dst_rect,
+ struct pipe_surface **textures)
+{
+ void *vb;
++ struct pipe_transfer *buf_transfer;
+ unsigned num_rects = 0;
+ unsigned i;
+
+ assert(c);
+ assert(src_surface);
+ assert(src_rect);
+ assert(dst_rect);
+ assert(textures);
+
- pipe_buffer_unmap(c->pipe->screen, c->vertex_buf.buffer);
++ vb = pipe_buffer_map(c->pipe, c->vertex_buf.buffer,
++ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
++ &buf_transfer);
+
+ if (!vb)
+ return 0;
+
+ if (c->dirty_bg) {
+ struct vertex2f bg_inv_size = {1.0f / c->bg->width, 1.0f / c->bg->height};
+ gen_rect_verts(num_rects, &c->bg_src_rect, &bg_inv_size, NULL, NULL, vb);
+ textures[num_rects] = c->bg;
+ ++num_rects;
+ c->dirty_bg = false;
+ }
+
+ {
+ struct vertex2f src_inv_size = { 1.0f / src_surface->width, 1.0f / src_surface->height};
+ gen_rect_verts(num_rects, src_rect, &src_inv_size, dst_rect, &c->fb_inv_size, vb);
+ textures[num_rects] = src_surface;
+ ++num_rects;
+ }
+
+ for (i = 0; c->dirty_layers > 0; i++) {
+ assert(i < VL_COMPOSITOR_MAX_LAYERS);
+
+ if (c->dirty_layers & (1 << i)) {
+ struct vertex2f layer_inv_size = {1.0f / c->layers[i]->width, 1.0f / c->layers[i]->height};
+ gen_rect_verts(num_rects, &c->layer_src_rects[i], &layer_inv_size,
+ &c->layer_dst_rects[i], &c->fb_inv_size, vb);
+ textures[num_rects] = c->layers[i];
+ ++num_rects;
+ c->dirty_layers &= ~(1 << i);
+ }
+ }
+
- c->pipe->set_fragment_sampler_textures(c->pipe, 1, &src_surfaces[i]->texture);
++ pipe_buffer_unmap(c->pipe, c->vertex_buf.buffer, buf_transfer);
+
+ return num_rects;
+}
+
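+/*
+ * Note that rect i written by gen_data occupies vertices [i * 6, i * 6 + 6)
+ * in the vertex buffer, which is what lets draw_layers below issue one
+ * draw_arrays(PIPE_PRIM_TRIANGLES, i * 6, 6) call per source texture.
+ */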
+static void draw_layers(struct vl_compositor *c,
+ struct pipe_surface *src_surface,
+ struct pipe_video_rect *src_rect,
+ struct pipe_video_rect *dst_rect)
+{
+ unsigned num_rects;
+ struct pipe_surface *src_surfaces[VL_COMPOSITOR_MAX_LAYERS + 2];
+ unsigned i;
+
+ assert(c);
+ assert(src_surface);
+ assert(src_rect);
+ assert(dst_rect);
+
+ num_rects = gen_data(c, src_surface, src_rect, dst_rect, src_surfaces);
+
+ for (i = 0; i < num_rects; ++i) {
- compositor->pipe->set_vertex_elements(compositor->pipe, 2, compositor->vertex_elems);
++ //c->pipe->set_fragment_sampler_views(c->pipe, 1, &src_surfaces[i]->texture);
+ c->pipe->draw_arrays(c->pipe, PIPE_PRIM_TRIANGLES, i * 6, 6);
+ }
+}
+
+void vl_compositor_render(struct vl_compositor *compositor,
+ struct pipe_surface *src_surface,
+ enum pipe_mpeg12_picture_type picture_type,
+ /*unsigned num_past_surfaces,
+ struct pipe_surface *past_surfaces,
+ unsigned num_future_surfaces,
+ struct pipe_surface *future_surfaces,*/
+ struct pipe_video_rect *src_area,
+ struct pipe_surface *dst_surface,
+ struct pipe_video_rect *dst_area,
+ struct pipe_fence_handle **fence)
+{
+ assert(compositor);
+ assert(src_surface);
+ assert(src_area);
+ assert(dst_surface);
+ assert(dst_area);
+ assert(picture_type == PIPE_MPEG12_PICTURE_TYPE_FRAME);
+
+ if (compositor->fb_state.width != dst_surface->width) {
+ compositor->fb_inv_size.x = 1.0f / dst_surface->width;
+ compositor->fb_state.width = dst_surface->width;
+ }
+ if (compositor->fb_state.height != dst_surface->height) {
+ compositor->fb_inv_size.y = 1.0f / dst_surface->height;
+ compositor->fb_state.height = dst_surface->height;
+ }
+
+ compositor->fb_state.cbufs[0] = dst_surface;
+
+ compositor->viewport.scale[0] = compositor->fb_state.width;
+ compositor->viewport.scale[1] = compositor->fb_state.height;
+ compositor->viewport.scale[2] = 1;
+ compositor->viewport.scale[3] = 1;
+ compositor->viewport.translate[0] = 0;
+ compositor->viewport.translate[1] = 0;
+ compositor->viewport.translate[2] = 0;
+ compositor->viewport.translate[3] = 0;
+
+ compositor->pipe->set_framebuffer_state(compositor->pipe, &compositor->fb_state);
+ compositor->pipe->set_viewport_state(compositor->pipe, &compositor->viewport);
+ compositor->pipe->bind_fragment_sampler_states(compositor->pipe, 1, &compositor->sampler);
+ compositor->pipe->bind_vs_state(compositor->pipe, compositor->vertex_shader);
+ compositor->pipe->bind_fs_state(compositor->pipe, compositor->fragment_shader);
+ compositor->pipe->set_vertex_buffers(compositor->pipe, 1, &compositor->vertex_buf);
- pipe_buffer_map(compositor->pipe->screen, compositor->fs_const_buf,
- PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD),
++ compositor->pipe->bind_vertex_elements_state(compositor->pipe, compositor->vertex_elems_state);
+ compositor->pipe->set_constant_buffer(compositor->pipe, PIPE_SHADER_FRAGMENT, 0, compositor->fs_const_buf);
+
+ draw_layers(compositor, src_surface, src_area, dst_area);
+
+ assert(!compositor->dirty_bg && !compositor->dirty_layers);
+ compositor->pipe->flush(compositor->pipe, PIPE_FLUSH_RENDER_CACHE, fence);
+}
+
+void vl_compositor_set_csc_matrix(struct vl_compositor *compositor, const float *mat)
+{
++ struct pipe_transfer *buf_transfer;
++
+ assert(compositor);
+
+ memcpy
+ (
- pipe_buffer_unmap(compositor->pipe->screen, compositor->fs_const_buf);
++ pipe_buffer_map(compositor->pipe, compositor->fs_const_buf,
++ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
++ &buf_transfer),
+ mat,
+ sizeof(struct fragment_shader_consts)
+ );
+
++ pipe_buffer_unmap(compositor->pipe, compositor->fs_const_buf,
++ buf_transfer);
+}
--- /dev/null
- struct pipe_vertex_element vertex_elems[2];
- struct pipe_buffer *fs_const_buf;
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef vl_compositor_h
+#define vl_compositor_h
+
+#include <pipe/p_compiler.h>
+#include <pipe/p_state.h>
+#include <pipe/p_video_state.h>
+#include "vl_types.h"
+
+struct pipe_context;
+
+#define VL_COMPOSITOR_MAX_LAYERS 16
+
+struct vl_compositor
+{
+ struct pipe_context *pipe;
+
+ struct pipe_framebuffer_state fb_state;
+ struct vertex2f fb_inv_size;
+ void *sampler;
++ struct pipe_sampler_view *sampler_view;
+ void *vertex_shader;
+ void *fragment_shader;
+ struct pipe_viewport_state viewport;
+ struct pipe_vertex_buffer vertex_buf;
++ void *vertex_elems_state;
++ struct pipe_resource *fs_const_buf;
+
+ struct pipe_surface *bg;
+ struct pipe_video_rect bg_src_rect;
+ bool dirty_bg;
+ struct pipe_surface *layers[VL_COMPOSITOR_MAX_LAYERS];
+ struct pipe_video_rect layer_src_rects[VL_COMPOSITOR_MAX_LAYERS];
+ struct pipe_video_rect layer_dst_rects[VL_COMPOSITOR_MAX_LAYERS];
+ unsigned dirty_layers;
+};
+
+bool vl_compositor_init(struct vl_compositor *compositor, struct pipe_context *pipe);
+
+void vl_compositor_cleanup(struct vl_compositor *compositor);
+
+void vl_compositor_set_background(struct vl_compositor *compositor,
+ struct pipe_surface *bg, struct pipe_video_rect *bg_src_rect);
+
+void vl_compositor_set_layers(struct vl_compositor *compositor,
+ struct pipe_surface *layers[],
+ struct pipe_video_rect *src_rects[],
+ struct pipe_video_rect *dst_rects[],
+ unsigned num_layers);
+
+void vl_compositor_render(struct vl_compositor *compositor,
+ struct pipe_surface *src_surface,
+ enum pipe_mpeg12_picture_type picture_type,
+ /*unsigned num_past_surfaces,
+ struct pipe_surface *past_surfaces,
+ unsigned num_future_surfaces,
+ struct pipe_surface *future_surfaces,*/
+ struct pipe_video_rect *src_area,
+ struct pipe_surface *dst_surface,
+ struct pipe_video_rect *dst_area,
+ struct pipe_fence_handle **fence);
+
+void vl_compositor_set_csc_matrix(struct vl_compositor *compositor, const float *mat);
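+
+/*
+ * Rough usage sketch (not taken from a real caller; pipe, decoded, drawable,
+ * identity_csc, the rects and fence are placeholders):
+ *
+ *   struct vl_compositor comp;
+ *   if (vl_compositor_init(&comp, pipe)) {
+ *      vl_compositor_set_csc_matrix(&comp, identity_csc);
+ *      vl_compositor_render(&comp, decoded, PIPE_MPEG12_PICTURE_TYPE_FRAME,
+ *                           &src_area, drawable, &dst_area, &fence);
+ *      vl_compositor_cleanup(&comp);
+ *   }
+ */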
+
+#endif /* vl_compositor_h */
--- /dev/null
- r->tex_transfer[i] = r->pipe->screen->get_tex_transfer
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vl_mpeg12_mc_renderer.h"
+#include <assert.h>
+#include <pipe/p_context.h>
+#include <util/u_inlines.h>
+#include <util/u_format.h>
+#include <util/u_math.h>
+#include <util/u_memory.h>
++#include <util/u_sampler.h>
+#include <tgsi/tgsi_ureg.h>
+
+#define DEFAULT_BUF_ALIGNMENT 1
+#define MACROBLOCK_WIDTH 16
+#define MACROBLOCK_HEIGHT 16
+#define BLOCK_WIDTH 8
+#define BLOCK_HEIGHT 8
+#define ZERO_BLOCK_NIL -1.0f
+#define ZERO_BLOCK_IS_NIL(zb) ((zb).x < 0.0f)
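+/* Presumably rescales samples from the R16_SNORM coefficient textures
+   (read back as value / 32767) so the 9-bit signed IDCT range maps to
+   roughly [-1, 1]: sample * (32767 / 255) == value / 255 */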
+#define SCALE_FACTOR_16_TO_9 (32767.0f / 255.0f)
+
+struct vertex_shader_consts
+{
+ struct vertex4f denorm;
+};
+
+struct fragment_shader_consts
+{
+ struct vertex4f multiplier;
+ struct vertex4f div;
+};
+
+struct vert_stream_0
+{
+ struct vertex2f pos;
+ struct vertex2f luma_tc;
+ struct vertex2f cb_tc;
+ struct vertex2f cr_tc;
+};
+
+enum MACROBLOCK_TYPE
+{
+ MACROBLOCK_TYPE_INTRA,
+ MACROBLOCK_TYPE_FWD_FRAME_PRED,
+ MACROBLOCK_TYPE_FWD_FIELD_PRED,
+ MACROBLOCK_TYPE_BKWD_FRAME_PRED,
+ MACROBLOCK_TYPE_BKWD_FIELD_PRED,
+ MACROBLOCK_TYPE_BI_FRAME_PRED,
+ MACROBLOCK_TYPE_BI_FIELD_PRED,
+
+ NUM_MACROBLOCK_TYPES
+};
+
+static bool
+create_intra_vert_shader(struct vl_mpeg12_mc_renderer *r)
+{
+ struct ureg_program *shader;
+ struct ureg_src vpos, vtex[3];
+ struct ureg_dst o_vpos, o_vtex[3];
+ unsigned i;
+
+ shader = ureg_create(TGSI_PROCESSOR_VERTEX);
+ if (!shader)
+ return false;
+
+ vpos = ureg_DECL_vs_input(shader, 0);
+ for (i = 0; i < 3; ++i)
+ vtex[i] = ureg_DECL_vs_input(shader, i + 1);
+ o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, 0);
+ for (i = 0; i < 3; ++i)
+ o_vtex[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, i + 1);
+
+ /*
+ * o_vpos = vpos
+ * o_vtex[0..2] = vtex[0..2]
+ */
+ ureg_MOV(shader, o_vpos, vpos);
+ for (i = 0; i < 3; ++i)
+ ureg_MOV(shader, o_vtex[i], vtex[i]);
+
+ ureg_END(shader);
+
+ r->i_vs = ureg_create_shader_and_destroy(shader, r->pipe);
+ if (!r->i_vs)
+ return false;
+
+ return true;
+}
+
+static bool
+create_intra_frag_shader(struct vl_mpeg12_mc_renderer *r)
+{
+ struct ureg_program *shader;
+ struct ureg_src tc[3];
+ struct ureg_src sampler[3];
+ struct ureg_dst texel, temp;
+ struct ureg_dst fragment;
+ unsigned i;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return false;
+
+ for (i = 0; i < 3; ++i) {
+ tc[i] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, i + 1, TGSI_INTERPOLATE_LINEAR);
+ sampler[i] = ureg_DECL_sampler(shader, i);
+ }
+ texel = ureg_DECL_temporary(shader);
+ temp = ureg_DECL_temporary(shader);
+ fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+
+ /*
+ * texel.r = tex(tc[0], sampler[0])
+ * texel.g = tex(tc[1], sampler[1])
+ * texel.b = tex(tc[2], sampler[2])
+ * fragment = texel * scale
+ */
+ for (i = 0; i < 3; ++i) {
+ /* Nouveau can't writemask tex dst regs (yet?), do in two steps */
+ ureg_TEX(shader, temp, TGSI_TEXTURE_2D, tc[i], sampler[i]);
+ ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), ureg_scalar(ureg_src(temp), TGSI_SWIZZLE_X));
+ }
+ ureg_MUL(shader, fragment, ureg_src(texel), ureg_scalar(ureg_imm1f(shader, SCALE_FACTOR_16_TO_9), TGSI_SWIZZLE_X));
+
+ ureg_release_temporary(shader, texel);
+ ureg_release_temporary(shader, temp);
+ ureg_END(shader);
+
+ r->i_fs = ureg_create_shader_and_destroy(shader, r->pipe);
+ if (!r->i_fs)
+ return false;
+
+ return true;
+}
+
+static bool
+create_frame_pred_vert_shader(struct vl_mpeg12_mc_renderer *r)
+{
+ struct ureg_program *shader;
+ struct ureg_src vpos, vtex[4];
+ struct ureg_dst o_vpos, o_vtex[4];
+ unsigned i;
+
+ shader = ureg_create(TGSI_PROCESSOR_VERTEX);
+ if (!shader)
+ return false;
+
+ vpos = ureg_DECL_vs_input(shader, 0);
+ for (i = 0; i < 4; ++i)
+ vtex[i] = ureg_DECL_vs_input(shader, i + 1);
+ o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, 0);
+ for (i = 0; i < 4; ++i)
+ o_vtex[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, i + 1);
+
+ /*
+ * o_vpos = vpos
+ * o_vtex[0..2] = vtex[0..2]
+ * o_vtex[3] = vpos + vtex[3] // Apply motion vector
+ */
+ ureg_MOV(shader, o_vpos, vpos);
+ for (i = 0; i < 3; ++i)
+ ureg_MOV(shader, o_vtex[i], vtex[i]);
+ ureg_ADD(shader, o_vtex[3], vpos, vtex[3]);
+
+ ureg_END(shader);
+
+ r->p_vs[0] = ureg_create_shader_and_destroy(shader, r->pipe);
+ if (!r->p_vs[0])
+ return false;
+
+ return true;
+}
+
+#if 0
+static void
+create_field_pred_vert_shader(struct vl_mpeg12_mc_renderer *r)
+{
+ assert(false);
+}
+#endif
+
+static bool
+create_frame_pred_frag_shader(struct vl_mpeg12_mc_renderer *r)
+{
+ struct ureg_program *shader;
+ struct ureg_src tc[4];
+ struct ureg_src sampler[4];
+ struct ureg_dst texel, ref;
+ struct ureg_dst fragment;
+ unsigned i;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return false;
+
+ for (i = 0; i < 4; ++i) {
+ tc[i] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, i + 1, TGSI_INTERPOLATE_LINEAR);
+ sampler[i] = ureg_DECL_sampler(shader, i);
+ }
+ texel = ureg_DECL_temporary(shader);
+ ref = ureg_DECL_temporary(shader);
+ fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+
+ /*
+ * texel.r = tex(tc[0], sampler[0])
+ * texel.g = tex(tc[1], sampler[1])
+ * texel.b = tex(tc[2], sampler[2])
+ * ref = tex(tc[3], sampler[3])
+ * fragment = texel * scale + ref
+ */
+ for (i = 0; i < 3; ++i) {
+ /* Nouveau can't writemask tex dst regs (yet?), do in two steps */
+ ureg_TEX(shader, ref, TGSI_TEXTURE_2D, tc[i], sampler[i]);
+ ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), ureg_scalar(ureg_src(ref), TGSI_SWIZZLE_X));
+ }
+ ureg_TEX(shader, ref, TGSI_TEXTURE_2D, tc[3], sampler[3]);
+ ureg_MAD(shader, fragment, ureg_src(texel), ureg_scalar(ureg_imm1f(shader, SCALE_FACTOR_16_TO_9), TGSI_SWIZZLE_X), ureg_src(ref));
+
+ ureg_release_temporary(shader, texel);
+ ureg_release_temporary(shader, ref);
+ ureg_END(shader);
+
+ r->p_fs[0] = ureg_create_shader_and_destroy(shader, r->pipe);
+ if (!r->p_fs[0])
+ return false;
+
+ return true;
+}
+
+#if 0
+static void
+create_field_pred_frag_shader(struct vl_mpeg12_mc_renderer *r)
+{
+ assert(false);
+}
+#endif
+
+static bool
+create_frame_bi_pred_vert_shader(struct vl_mpeg12_mc_renderer *r)
+{
+ struct ureg_program *shader;
+ struct ureg_src vpos, vtex[5];
+ struct ureg_dst o_vpos, o_vtex[5];
+ unsigned i;
+
+ shader = ureg_create(TGSI_PROCESSOR_VERTEX);
+ if (!shader)
+ return false;
+
+ vpos = ureg_DECL_vs_input(shader, 0);
+ for (i = 0; i < 4; ++i)
+ vtex[i] = ureg_DECL_vs_input(shader, i + 1);
+ /* Skip input 5 */
+ vtex[4] = ureg_DECL_vs_input(shader, 6);
+ o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, 0);
+ for (i = 0; i < 5; ++i)
+ o_vtex[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, i + 1);
+
+ /*
+ * o_vpos = vpos
+ * o_vtex[0..2] = vtex[0..2]
+ * o_vtex[3..4] = vpos + vtex[3..4] // Apply motion vector
+ */
+ ureg_MOV(shader, o_vpos, vpos);
+ for (i = 0; i < 3; ++i)
+ ureg_MOV(shader, o_vtex[i], vtex[i]);
+ for (i = 3; i < 5; ++i)
+ ureg_ADD(shader, o_vtex[i], vpos, vtex[i]);
+
+ ureg_END(shader);
+
+ r->b_vs[0] = ureg_create_shader_and_destroy(shader, r->pipe);
+ if (!r->b_vs[0])
+ return false;
+
+ return true;
+}
+
+#if 0
+static void
+create_field_bi_pred_vert_shader(struct vl_mpeg12_mc_renderer *r)
+{
+ assert(false);
+}
+#endif
+
+static bool
+create_frame_bi_pred_frag_shader(struct vl_mpeg12_mc_renderer *r)
+{
+ struct ureg_program *shader;
+ struct ureg_src tc[5];
+ struct ureg_src sampler[5];
+ struct ureg_dst texel, ref[2];
+ struct ureg_dst fragment;
+ unsigned i;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return false;
+
+ for (i = 0; i < 5; ++i) {
+ tc[i] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, i + 1, TGSI_INTERPOLATE_LINEAR);
+ sampler[i] = ureg_DECL_sampler(shader, i);
+ }
+ texel = ureg_DECL_temporary(shader);
+ ref[0] = ureg_DECL_temporary(shader);
+ ref[1] = ureg_DECL_temporary(shader);
+ fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+
+ /*
+ * texel.r = tex(tc[0], sampler[0])
+ * texel.g = tex(tc[1], sampler[1])
+ * texel.b = tex(tc[2], sampler[2])
+ * ref[0..1] = tex(tc[3..4], sampler[3..4])
+ * ref[0] = lerp(ref[0], ref[1], 0.5)
+ * fragment = texel * scale + ref[0]
+ */
+ for (i = 0; i < 3; ++i) {
+ /* Nouveau can't writemask tex dst regs (yet?), do in two steps */
+ ureg_TEX(shader, ref[0], TGSI_TEXTURE_2D, tc[i], sampler[i]);
+ ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), ureg_scalar(ureg_src(ref[0]), TGSI_SWIZZLE_X));
+ }
+ ureg_TEX(shader, ref[0], TGSI_TEXTURE_2D, tc[3], sampler[3]);
+ ureg_TEX(shader, ref[1], TGSI_TEXTURE_2D, tc[4], sampler[4]);
+ ureg_LRP(shader, ref[0], ureg_scalar(ureg_imm1f(shader, 0.5f), TGSI_SWIZZLE_X), ureg_src(ref[0]), ureg_src(ref[1]));
+
+ ureg_MAD(shader, fragment, ureg_src(texel), ureg_scalar(ureg_imm1f(shader, SCALE_FACTOR_16_TO_9), TGSI_SWIZZLE_X), ureg_src(ref[0]));
+
+ ureg_release_temporary(shader, texel);
+ ureg_release_temporary(shader, ref[0]);
+ ureg_release_temporary(shader, ref[1]);
+ ureg_END(shader);
+
+ r->b_fs[0] = ureg_create_shader_and_destroy(shader, r->pipe);
+ if (!r->b_fs[0])
+ return false;
+
+ return true;
+}
+
+#if 0
+static void
+create_field_bi_pred_frag_shader(struct vl_mpeg12_mc_renderer *r)
+{
+ assert(false);
+}
+#endif
+
+static void
+xfer_buffers_map(struct vl_mpeg12_mc_renderer *r)
+{
+ unsigned i;
+
+ assert(r);
+
+ for (i = 0; i < 3; ++i) {
- r->pipe->screen, r->textures.all[i],
- 0, 0, 0, PIPE_TRANSFER_WRITE, 0, 0,
- r->textures.all[i]->width0, r->textures.all[i]->height0
++ struct pipe_box rect =
++ {
++ 0, 0, 0,
++ r->textures.all[i]->width0,
++ r->textures.all[i]->height0,
++ 0
++ };
++
++ r->tex_transfer[i] = r->pipe->get_transfer
+ (
- r->texels[i] = r->pipe->screen->transfer_map(r->pipe->screen, r->tex_transfer[i]);
++ r->pipe, r->textures.all[i],
++ u_subresource(0, 0),
++ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
++ &rect
+ );
+
- r->pipe->screen->transfer_unmap(r->pipe->screen, r->tex_transfer[i]);
- r->pipe->screen->tex_transfer_destroy(r->tex_transfer[i]);
++ r->texels[i] = r->pipe->transfer_map(r->pipe, r->tex_transfer[i]);
+ }
+}
+
+static void
+xfer_buffers_unmap(struct vl_mpeg12_mc_renderer *r)
+{
+ unsigned i;
+
+ assert(r);
+
+ for (i = 0; i < 3; ++i) {
- struct pipe_texture template;
++ r->pipe->transfer_unmap(r->pipe, r->tex_transfer[i]);
++ r->pipe->transfer_destroy(r->pipe, r->tex_transfer[i]);
+ }
+}
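+
+/*
+ * The coefficient textures stay mapped while macroblocks are being grabbed:
+ * xfer_buffers_map() is called from init and again after each flush(), and
+ * xfer_buffers_unmap() right before the flush, so grab_blocks() can memcpy
+ * DCT data straight into the transfer without a map per macroblock.
+ */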
+
+static bool
+init_pipe_state(struct vl_mpeg12_mc_renderer *r)
+{
+ struct pipe_sampler_state sampler;
+ unsigned filters[5];
+ unsigned i;
+
+ assert(r);
+
+ r->viewport.scale[0] = r->pot_buffers ?
+ util_next_power_of_two(r->picture_width) : r->picture_width;
+ r->viewport.scale[1] = r->pot_buffers ?
+ util_next_power_of_two(r->picture_height) : r->picture_height;
+ r->viewport.scale[2] = 1;
+ r->viewport.scale[3] = 1;
+ r->viewport.translate[0] = 0;
+ r->viewport.translate[1] = 0;
+ r->viewport.translate[2] = 0;
+ r->viewport.translate[3] = 0;
+
+ r->fb_state.width = r->pot_buffers ?
+ util_next_power_of_two(r->picture_width) : r->picture_width;
+ r->fb_state.height = r->pot_buffers ?
+ util_next_power_of_two(r->picture_height) : r->picture_height;
+ r->fb_state.nr_cbufs = 1;
+ r->fb_state.zsbuf = NULL;
+
+ /* Luma filter */
+ filters[0] = PIPE_TEX_FILTER_NEAREST;
+ /* Chroma filters */
+ if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_444 ||
+ r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE) {
+ filters[1] = PIPE_TEX_FILTER_NEAREST;
+ filters[2] = PIPE_TEX_FILTER_NEAREST;
+ }
+ else {
+ filters[1] = PIPE_TEX_FILTER_LINEAR;
+ filters[2] = PIPE_TEX_FILTER_LINEAR;
+ }
+ /* Fwd, bkwd ref filters */
+ filters[3] = PIPE_TEX_FILTER_LINEAR;
+ filters[4] = PIPE_TEX_FILTER_LINEAR;
+
+ for (i = 0; i < 5; ++i) {
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.min_img_filter = filters[i];
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.mag_img_filter = filters[i];
+ sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
+ sampler.compare_func = PIPE_FUNC_ALWAYS;
+ sampler.normalized_coords = 1;
+ /*sampler.shadow_ambient = ; */
+ /*sampler.lod_bias = ; */
+ sampler.min_lod = 0;
+ /*sampler.max_lod = ; */
+ /*sampler.border_color[i] = ; */
+ /*sampler.max_anisotropy = ; */
+ r->samplers.all[i] = r->pipe->create_sampler_state(r->pipe, &sampler);
+ }
+
+ return true;
+}
+
+static void
+cleanup_pipe_state(struct vl_mpeg12_mc_renderer *r)
+{
+ unsigned i;
+
+ assert(r);
+
+ for (i = 0; i < 5; ++i)
+ r->pipe->delete_sampler_state(r->pipe, r->samplers.all[i]);
+}
+
+static bool
+init_shaders(struct vl_mpeg12_mc_renderer *r)
+{
+ assert(r);
+
+ create_intra_vert_shader(r);
+ create_intra_frag_shader(r);
+ create_frame_pred_vert_shader(r);
+ create_frame_pred_frag_shader(r);
+ create_frame_bi_pred_vert_shader(r);
+ create_frame_bi_pred_frag_shader(r);
+
+ return true;
+}
+
+static void
+cleanup_shaders(struct vl_mpeg12_mc_renderer *r)
+{
+ assert(r);
+
+ r->pipe->delete_vs_state(r->pipe, r->i_vs);
+ r->pipe->delete_fs_state(r->pipe, r->i_fs);
+ r->pipe->delete_vs_state(r->pipe, r->p_vs[0]);
+ r->pipe->delete_fs_state(r->pipe, r->p_fs[0]);
+ r->pipe->delete_vs_state(r->pipe, r->b_vs[0]);
+ r->pipe->delete_fs_state(r->pipe, r->b_fs[0]);
+}
+
+static bool
+init_buffers(struct vl_mpeg12_mc_renderer *r)
+{
- memset(&template, 0, sizeof(struct pipe_texture));
++ struct pipe_resource template;
++ struct pipe_vertex_element vertex_elems[8];
++ struct pipe_sampler_view sampler_view;
+
+ const unsigned mbw =
+ align(r->picture_width, MACROBLOCK_WIDTH) / MACROBLOCK_WIDTH;
+ const unsigned mbh =
+ align(r->picture_height, MACROBLOCK_HEIGHT) / MACROBLOCK_HEIGHT;
+
+ unsigned i;
+
+ assert(r);
+
+ r->macroblocks_per_batch =
+ mbw * (r->bufmode == VL_MPEG12_MC_RENDERER_BUFFER_PICTURE ? mbh : 1);
+ r->num_macroblocks = 0;
+ r->macroblock_buf = MALLOC(r->macroblocks_per_batch * sizeof(struct pipe_mpeg12_macroblock));
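+
+ /* e.g. a 720x480 picture gives mbw = 45 and mbh = 30, so in
+ BUFFER_PICTURE mode one batch covers 45 * 30 = 1350 macroblocks */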
+
- template.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER | PIPE_TEXTURE_USAGE_DYNAMIC;
++ memset(&template, 0, sizeof(struct pipe_resource));
+ template.target = PIPE_TEXTURE_2D;
+ /* TODO: Accommodate HW that can't do this, and cases where this isn't precise enough */
+ template.format = PIPE_FORMAT_R16_SNORM;
+ template.last_level = 0;
+ template.width0 = r->pot_buffers ?
+ util_next_power_of_two(r->picture_width) : r->picture_width;
+ template.height0 = r->pot_buffers ?
+ util_next_power_of_two(r->picture_height) : r->picture_height;
+ template.depth0 = 1;
- r->textures.individual.y = r->pipe->screen->texture_create(r->pipe->screen, &template);
++ template.usage = PIPE_USAGE_DYNAMIC;
++ template.bind = PIPE_BIND_SAMPLER_VIEW;
++ template.flags = 0;
+
- r->pipe->screen->texture_create(r->pipe->screen, &template);
++ r->textures.individual.y = r->pipe->screen->resource_create(r->pipe->screen, &template);
+
+ if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
+ template.width0 = r->pot_buffers ?
+ util_next_power_of_two(r->picture_width / 2) :
+ r->picture_width / 2;
+ template.height0 = r->pot_buffers ?
+ util_next_power_of_two(r->picture_height / 2) :
+ r->picture_height / 2;
+ }
+ else if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422)
+ template.height0 = r->pot_buffers ?
+ util_next_power_of_two(r->picture_height / 2) :
+ r->picture_height / 2;
+
+ r->textures.individual.cb =
- r->pipe->screen->texture_create(r->pipe->screen, &template);
++ r->pipe->screen->resource_create(r->pipe->screen, &template);
+ r->textures.individual.cr =
- DEFAULT_BUF_ALIGNMENT,
- PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_DISCARD,
++ r->pipe->screen->resource_create(r->pipe->screen, &template);
++
++ for (i = 0; i < 3; ++i) {
++ u_sampler_view_default_template(&sampler_view,
++ r->textures.all[i],
++ r->textures.all[i]->format);
++ r->sampler_views.all[i] = r->pipe->create_sampler_view(r->pipe, r->textures.all[i], &sampler_view);
++ }
+
+ r->vertex_bufs.individual.ycbcr.stride = sizeof(struct vertex2f) * 4;
+ r->vertex_bufs.individual.ycbcr.max_index = 24 * r->macroblocks_per_batch - 1;
+ r->vertex_bufs.individual.ycbcr.buffer_offset = 0;
++ /* XXX: Create with usage DYNAMIC or STREAM */
+ r->vertex_bufs.individual.ycbcr.buffer = pipe_buffer_create
+ (
+ r->pipe->screen,
- DEFAULT_BUF_ALIGNMENT,
- PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_DISCARD,
++ PIPE_BIND_VERTEX_BUFFER,
+ sizeof(struct vertex2f) * 4 * 24 * r->macroblocks_per_batch
+ );
+
+ for (i = 1; i < 3; ++i) {
+ r->vertex_bufs.all[i].stride = sizeof(struct vertex2f) * 2;
+ r->vertex_bufs.all[i].max_index = 24 * r->macroblocks_per_batch - 1;
+ r->vertex_bufs.all[i].buffer_offset = 0;
++ /* XXX: Create with usage DYNAMIC or STREAM */
+ r->vertex_bufs.all[i].buffer = pipe_buffer_create
+ (
+ r->pipe->screen,
- r->vertex_elems[0].src_offset = 0;
- r->vertex_elems[0].instance_divisor = 0;
- r->vertex_elems[0].vertex_buffer_index = 0;
- r->vertex_elems[0].nr_components = 2;
- r->vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
++ PIPE_BIND_VERTEX_BUFFER,
+ sizeof(struct vertex2f) * 2 * 24 * r->macroblocks_per_batch
+ );
+ }
+
+ /* Position element */
- r->vertex_elems[1].src_offset = sizeof(struct vertex2f);
- r->vertex_elems[1].instance_divisor = 0;
- r->vertex_elems[1].vertex_buffer_index = 0;
- r->vertex_elems[1].nr_components = 2;
- r->vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
++ vertex_elems[0].src_offset = 0;
++ vertex_elems[0].instance_divisor = 0;
++ vertex_elems[0].vertex_buffer_index = 0;
++ vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /* Luma texcoord element */
- r->vertex_elems[2].src_offset = sizeof(struct vertex2f) * 2;
- r->vertex_elems[2].instance_divisor = 0;
- r->vertex_elems[2].vertex_buffer_index = 0;
- r->vertex_elems[2].nr_components = 2;
- r->vertex_elems[2].src_format = PIPE_FORMAT_R32G32_FLOAT;
++ vertex_elems[1].src_offset = sizeof(struct vertex2f);
++ vertex_elems[1].instance_divisor = 0;
++ vertex_elems[1].vertex_buffer_index = 0;
++ vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /* Chroma Cb texcoord element */
- r->vertex_elems[3].src_offset = sizeof(struct vertex2f) * 3;
- r->vertex_elems[3].instance_divisor = 0;
- r->vertex_elems[3].vertex_buffer_index = 0;
- r->vertex_elems[3].nr_components = 2;
- r->vertex_elems[3].src_format = PIPE_FORMAT_R32G32_FLOAT;
++ vertex_elems[2].src_offset = sizeof(struct vertex2f) * 2;
++ vertex_elems[2].instance_divisor = 0;
++ vertex_elems[2].vertex_buffer_index = 0;
++ vertex_elems[2].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /* Chroma Cr texcoord element */
- r->vertex_elems[4].src_offset = 0;
- r->vertex_elems[4].instance_divisor = 0;
- r->vertex_elems[4].vertex_buffer_index = 1;
- r->vertex_elems[4].nr_components = 2;
- r->vertex_elems[4].src_format = PIPE_FORMAT_R32G32_FLOAT;
++ vertex_elems[3].src_offset = sizeof(struct vertex2f) * 3;
++ vertex_elems[3].instance_divisor = 0;
++ vertex_elems[3].vertex_buffer_index = 0;
++ vertex_elems[3].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /* First ref surface top field texcoord element */
- r->vertex_elems[5].src_offset = sizeof(struct vertex2f);
- r->vertex_elems[5].instance_divisor = 0;
- r->vertex_elems[5].vertex_buffer_index = 1;
- r->vertex_elems[5].nr_components = 2;
- r->vertex_elems[5].src_format = PIPE_FORMAT_R32G32_FLOAT;
++ vertex_elems[4].src_offset = 0;
++ vertex_elems[4].instance_divisor = 0;
++ vertex_elems[4].vertex_buffer_index = 1;
++ vertex_elems[4].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /* First ref surface bottom field texcoord element */
- r->vertex_elems[6].src_offset = 0;
- r->vertex_elems[6].instance_divisor = 0;
- r->vertex_elems[6].vertex_buffer_index = 2;
- r->vertex_elems[6].nr_components = 2;
- r->vertex_elems[6].src_format = PIPE_FORMAT_R32G32_FLOAT;
++ vertex_elems[5].src_offset = sizeof(struct vertex2f);
++ vertex_elems[5].instance_divisor = 0;
++ vertex_elems[5].vertex_buffer_index = 1;
++ vertex_elems[5].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /* Second ref surface top field texcoord element */
- r->vertex_elems[7].src_offset = sizeof(struct vertex2f);
- r->vertex_elems[7].instance_divisor = 0;
- r->vertex_elems[7].vertex_buffer_index = 2;
- r->vertex_elems[7].nr_components = 2;
- r->vertex_elems[7].src_format = PIPE_FORMAT_R32G32_FLOAT;
++ vertex_elems[6].src_offset = 0;
++ vertex_elems[6].instance_divisor = 0;
++ vertex_elems[6].vertex_buffer_index = 2;
++ vertex_elems[6].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /* Second ref surface bottom field texcoord element */
- DEFAULT_BUF_ALIGNMENT,
- PIPE_BUFFER_USAGE_CONSTANT | PIPE_BUFFER_USAGE_DISCARD,
++ vertex_elems[7].src_offset = sizeof(struct vertex2f);
++ vertex_elems[7].instance_divisor = 0;
++ vertex_elems[7].vertex_buffer_index = 2;
++ vertex_elems[7].src_format = PIPE_FORMAT_R32G32_FLOAT;
++
++ r->vertex_elems_state.individual.i = r->pipe->create_vertex_elements_state(r->pipe, 4, vertex_elems);
++ r->vertex_elems_state.individual.p = r->pipe->create_vertex_elements_state(r->pipe, 6, vertex_elems);
++ r->vertex_elems_state.individual.b = r->pipe->create_vertex_elements_state(r->pipe, 8, vertex_elems);
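++
++ /* The three state objects share one element array: the intra path reads
++ elements 0-3 (pos + Y/Cb/Cr texcoords, all from stream 0), frame pred
++ adds elements 4-5 (first ref offsets from stream 1), and bi pred adds
++ elements 6-7 (second ref offsets from stream 2). */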
+
+ r->vs_const_buf = pipe_buffer_create
+ (
+ r->pipe->screen,
- pipe_buffer_reference(&r->vs_const_buf, NULL);
++ PIPE_BIND_CONSTANT_BUFFER,
+ sizeof(struct vertex_shader_consts)
+ );
+
+ return true;
+}
+
+static void
+cleanup_buffers(struct vl_mpeg12_mc_renderer *r)
+{
+ unsigned i;
+
+ assert(r);
+
- for (i = 0; i < 3; ++i)
- pipe_buffer_reference(&r->vertex_bufs.all[i].buffer, NULL);
-
- for (i = 0; i < 3; ++i)
- pipe_texture_reference(&r->textures.all[i], NULL);
++ pipe_resource_reference(&r->vs_const_buf, NULL);
+
- r->pipe->screen,
++ for (i = 0; i < 3; ++i) {
++ r->pipe->sampler_view_destroy(r->pipe, r->sampler_views.all[i]);
++ r->pipe->delete_vertex_elements_state(r->pipe, r->vertex_elems_state.all[i]);
++ pipe_resource_reference(&r->vertex_bufs.all[i].buffer, NULL);
++ pipe_resource_reference(&r->textures.all[i], NULL);
++ }
+
+ FREE(r->macroblock_buf);
+}
+
+static enum MACROBLOCK_TYPE
+get_macroblock_type(struct pipe_mpeg12_macroblock *mb)
+{
+ assert(mb);
+
+ switch (mb->mb_type) {
+ case PIPE_MPEG12_MACROBLOCK_TYPE_INTRA:
+ return MACROBLOCK_TYPE_INTRA;
+ case PIPE_MPEG12_MACROBLOCK_TYPE_FWD:
+ return mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME ?
+ MACROBLOCK_TYPE_FWD_FRAME_PRED : MACROBLOCK_TYPE_FWD_FIELD_PRED;
+ case PIPE_MPEG12_MACROBLOCK_TYPE_BKWD:
+ return mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME ?
+ MACROBLOCK_TYPE_BKWD_FRAME_PRED : MACROBLOCK_TYPE_BKWD_FIELD_PRED;
+ case PIPE_MPEG12_MACROBLOCK_TYPE_BI:
+ return mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME ?
+ MACROBLOCK_TYPE_BI_FRAME_PRED : MACROBLOCK_TYPE_BI_FIELD_PRED;
+ default:
+ assert(0);
+ }
+
+ /* Unreachable */
+ return -1;
+}
+
+static void
+gen_block_verts(struct vert_stream_0 *vb, unsigned cbp, unsigned mbx, unsigned mby,
+ const struct vertex2f *unit, const struct vertex2f *half, const struct vertex2f *offset,
+ unsigned luma_mask, unsigned cb_mask, unsigned cr_mask,
+ bool use_zeroblocks, struct vertex2f *zero_blocks)
+{
+ struct vertex2f v;
+
+ assert(vb);
+ assert(unit && half && offset);
+ assert(zero_blocks || !use_zeroblocks);
+
+ /* Generate vertices for two triangles covering a block */
+ v.x = mbx * unit->x + offset->x;
+ v.y = mby * unit->y + offset->y;
+
+ vb[0].pos.x = v.x;
+ vb[0].pos.y = v.y;
+ vb[1].pos.x = v.x;
+ vb[1].pos.y = v.y + half->y;
+ vb[2].pos.x = v.x + half->x;
+ vb[2].pos.y = v.y;
+ vb[3].pos.x = v.x + half->x;
+ vb[3].pos.y = v.y;
+ vb[4].pos.x = v.x;
+ vb[4].pos.y = v.y + half->y;
+ vb[5].pos.x = v.x + half->x;
+ vb[5].pos.y = v.y + half->y;
+
+ /* Generate texcoords for the triangles, either pointing to the correct area
+ on the luma/chroma texture, or, if zero blocks are being used, to the zero
+ block when the appropriate CBP bits aren't set (i.e. no data for this
+ channel is defined for this block) */
+
+ if (!use_zeroblocks || cbp & luma_mask) {
+ v.x = mbx * unit->x + offset->x;
+ v.y = mby * unit->y + offset->y;
+ }
+ else {
+ v.x = zero_blocks[0].x;
+ v.y = zero_blocks[0].y;
+ }
+
+ vb[0].luma_tc.x = v.x;
+ vb[0].luma_tc.y = v.y;
+ vb[1].luma_tc.x = v.x;
+ vb[1].luma_tc.y = v.y + half->y;
+ vb[2].luma_tc.x = v.x + half->x;
+ vb[2].luma_tc.y = v.y;
+ vb[3].luma_tc.x = v.x + half->x;
+ vb[3].luma_tc.y = v.y;
+ vb[4].luma_tc.x = v.x;
+ vb[4].luma_tc.y = v.y + half->y;
+ vb[5].luma_tc.x = v.x + half->x;
+ vb[5].luma_tc.y = v.y + half->y;
+
+ if (!use_zeroblocks || cbp & cb_mask) {
+ v.x = mbx * unit->x + offset->x;
+ v.y = mby * unit->y + offset->y;
+ }
+ else {
+ v.x = zero_blocks[1].x;
+ v.y = zero_blocks[1].y;
+ }
+
+ vb[0].cb_tc.x = v.x;
+ vb[0].cb_tc.y = v.y;
+ vb[1].cb_tc.x = v.x;
+ vb[1].cb_tc.y = v.y + half->y;
+ vb[2].cb_tc.x = v.x + half->x;
+ vb[2].cb_tc.y = v.y;
+ vb[3].cb_tc.x = v.x + half->x;
+ vb[3].cb_tc.y = v.y;
+ vb[4].cb_tc.x = v.x;
+ vb[4].cb_tc.y = v.y + half->y;
+ vb[5].cb_tc.x = v.x + half->x;
+ vb[5].cb_tc.y = v.y + half->y;
+
+ if (!use_zeroblocks || cbp & cr_mask) {
+ v.x = mbx * unit->x + offset->x;
+ v.y = mby * unit->y + offset->y;
+ }
+ else {
+ v.x = zero_blocks[2].x;
+ v.y = zero_blocks[2].y;
+ }
+
+ vb[0].cr_tc.x = v.x;
+ vb[0].cr_tc.y = v.y;
+ vb[1].cr_tc.x = v.x;
+ vb[1].cr_tc.y = v.y + half->y;
+ vb[2].cr_tc.x = v.x + half->x;
+ vb[2].cr_tc.y = v.y;
+ vb[3].cr_tc.x = v.x + half->x;
+ vb[3].cr_tc.y = v.y;
+ vb[4].cr_tc.x = v.x;
+ vb[4].cr_tc.y = v.y + half->y;
+ vb[5].cr_tc.x = v.x + half->x;
+ vb[5].cr_tc.y = v.y + half->y;
+}
+
+static void
+gen_macroblock_verts(struct vl_mpeg12_mc_renderer *r,
+ struct pipe_mpeg12_macroblock *mb, unsigned pos,
+ struct vert_stream_0 *ycbcr_vb, struct vertex2f **ref_vb)
+{
+ struct vertex2f mo_vec[2];
+
+ unsigned i;
+
+ assert(r);
+ assert(mb);
+ assert(ycbcr_vb);
+ assert(pos < r->macroblocks_per_batch);
+
+ mo_vec[1].x = 0;
+ mo_vec[1].y = 0;
+
+ switch (mb->mb_type) {
+ case PIPE_MPEG12_MACROBLOCK_TYPE_BI:
+ {
+ struct vertex2f *vb;
+
+ assert(ref_vb && ref_vb[1]);
+
+ vb = ref_vb[1] + pos * 2 * 24;
+
+ mo_vec[0].x = mb->pmv[0][1][0] * 0.5f * r->surface_tex_inv_size.x;
+ mo_vec[0].y = mb->pmv[0][1][1] * 0.5f * r->surface_tex_inv_size.y;
+
+ if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
+ for (i = 0; i < 24 * 2; i += 2) {
+ vb[i].x = mo_vec[0].x;
+ vb[i].y = mo_vec[0].y;
+ }
+ }
+ else {
+ mo_vec[1].x = mb->pmv[1][1][0] * 0.5f * r->surface_tex_inv_size.x;
+ mo_vec[1].y = mb->pmv[1][1][1] * 0.5f * r->surface_tex_inv_size.y;
+
+ for (i = 0; i < 24 * 2; i += 2) {
+ vb[i].x = mo_vec[0].x;
+ vb[i].y = mo_vec[0].y;
+ vb[i + 1].x = mo_vec[1].x;
+ vb[i + 1].y = mo_vec[1].y;
+ }
+ }
+
+ /* fall-through */
+ }
+ case PIPE_MPEG12_MACROBLOCK_TYPE_FWD:
+ case PIPE_MPEG12_MACROBLOCK_TYPE_BKWD:
+ {
+ struct vertex2f *vb;
+
+ assert(ref_vb && ref_vb[0]);
+
+ vb = ref_vb[0] + pos * 2 * 24;
+
+ if (mb->mb_type == PIPE_MPEG12_MACROBLOCK_TYPE_BKWD) {
+ mo_vec[0].x = mb->pmv[0][1][0] * 0.5f * r->surface_tex_inv_size.x;
+ mo_vec[0].y = mb->pmv[0][1][1] * 0.5f * r->surface_tex_inv_size.y;
+
+ if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FIELD) {
+ mo_vec[1].x = mb->pmv[1][1][0] * 0.5f * r->surface_tex_inv_size.x;
+ mo_vec[1].y = mb->pmv[1][1][1] * 0.5f * r->surface_tex_inv_size.y;
+ }
+ }
+ else {
+ mo_vec[0].x = mb->pmv[0][0][0] * 0.5f * r->surface_tex_inv_size.x;
+ mo_vec[0].y = mb->pmv[0][0][1] * 0.5f * r->surface_tex_inv_size.y;
+
+ if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FIELD) {
+ mo_vec[1].x = mb->pmv[1][0][0] * 0.5f * r->surface_tex_inv_size.x;
+ mo_vec[1].y = mb->pmv[1][0][1] * 0.5f * r->surface_tex_inv_size.y;
+ }
+ }
+
+ if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
+ for (i = 0; i < 24 * 2; i += 2) {
+ vb[i].x = mo_vec[0].x;
+ vb[i].y = mo_vec[0].y;
+ }
+ }
+ else {
+ for (i = 0; i < 24 * 2; i += 2) {
+ vb[i].x = mo_vec[0].x;
+ vb[i].y = mo_vec[0].y;
+ vb[i + 1].x = mo_vec[1].x;
+ vb[i + 1].y = mo_vec[1].y;
+ }
+ }
+
+ /* fall-through */
+ }
+ case PIPE_MPEG12_MACROBLOCK_TYPE_INTRA:
+ {
+ const struct vertex2f unit =
+ {
+ r->surface_tex_inv_size.x * MACROBLOCK_WIDTH,
+ r->surface_tex_inv_size.y * MACROBLOCK_HEIGHT
+ };
+ const struct vertex2f half =
+ {
+ r->surface_tex_inv_size.x * (MACROBLOCK_WIDTH / 2),
+ r->surface_tex_inv_size.y * (MACROBLOCK_HEIGHT / 2)
+ };
+ const struct vertex2f offsets[2][2] =
+ {
+ {
+ {0, 0}, {0, half.y}
+ },
+ {
+ {half.x, 0}, {half.x, half.y}
+ }
+ };
+ const bool use_zb = r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE;
+
+ struct vert_stream_0 *vb = ycbcr_vb + pos * 24;
+
+ gen_block_verts(vb, mb->cbp, mb->mbx, mb->mby,
+ &unit, &half, &offsets[0][0],
+ 32, 2, 1, use_zb, r->zero_block);
+
+ gen_block_verts(vb + 6, mb->cbp, mb->mbx, mb->mby,
+ &unit, &half, &offsets[1][0],
+ 16, 2, 1, use_zb, r->zero_block);
+
+ gen_block_verts(vb + 12, mb->cbp, mb->mbx, mb->mby,
+ &unit, &half, &offsets[0][1],
+ 8, 2, 1, use_zb, r->zero_block);
+
+ gen_block_verts(vb + 18, mb->cbp, mb->mbx, mb->mby,
+ &unit, &half, &offsets[1][1],
+ 4, 2, 1, use_zb, r->zero_block);
+
+ break;
+ }
+ default:
+ assert(0);
+ }
+}
+
+static void
+gen_macroblock_stream(struct vl_mpeg12_mc_renderer *r,
+ unsigned *num_macroblocks)
+{
+ unsigned offset[NUM_MACROBLOCK_TYPES];
+ struct vert_stream_0 *ycbcr_vb;
+ struct vertex2f *ref_vb[2];
++ struct pipe_transfer *buf_transfer[3];
+ unsigned i;
+
+ assert(r);
+ assert(num_macroblocks);
+
+ for (i = 0; i < r->num_macroblocks; ++i) {
+ enum MACROBLOCK_TYPE mb_type = get_macroblock_type(&r->macroblock_buf[i]);
+ ++num_macroblocks[mb_type];
+ }
+
+ offset[0] = 0;
+
+ for (i = 1; i < NUM_MACROBLOCK_TYPES; ++i)
+ offset[i] = offset[i - 1] + num_macroblocks[i - 1];
+
+ ycbcr_vb = (struct vert_stream_0 *)pipe_buffer_map
+ (
- PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
++ r->pipe,
+ r->vertex_bufs.individual.ycbcr.buffer,
- r->pipe->screen,
++ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
++ &buf_transfer[0]
+ );
+
+ for (i = 0; i < 2; ++i)
+ ref_vb[i] = (struct vertex2f *)pipe_buffer_map
+ (
- PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
++ r->pipe,
+ r->vertex_bufs.individual.ref[i].buffer,
- pipe_buffer_unmap(r->pipe->screen, r->vertex_bufs.individual.ycbcr.buffer);
++ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
++ &buf_transfer[i + 1]
+ );
+
+ for (i = 0; i < r->num_macroblocks; ++i) {
+ enum MACROBLOCK_TYPE mb_type = get_macroblock_type(&r->macroblock_buf[i]);
+
+ gen_macroblock_verts(r, &r->macroblock_buf[i], offset[mb_type],
+ ycbcr_vb, ref_vb);
+
+ ++offset[mb_type];
+ }
+
- pipe_buffer_unmap(r->pipe->screen, r->vertex_bufs.individual.ref[i].buffer);
++ pipe_buffer_unmap(r->pipe, r->vertex_bufs.individual.ycbcr.buffer, buf_transfer[0]);
+ for (i = 0; i < 2; ++i)
- r->pipe->screen, r->vs_const_buf,
- PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
++ pipe_buffer_unmap(r->pipe, r->vertex_bufs.individual.ref[i].buffer, buf_transfer[i + 1]);
+}
+
+static void
+flush(struct vl_mpeg12_mc_renderer *r)
+{
+ unsigned num_macroblocks[NUM_MACROBLOCK_TYPES] = { 0 };
+ unsigned vb_start = 0;
+ struct vertex_shader_consts *vs_consts;
++ struct pipe_transfer *buf_transfer;
+ unsigned i;
+
+ assert(r);
+ assert(r->num_macroblocks == r->macroblocks_per_batch);
+
+ gen_macroblock_stream(r, num_macroblocks);
+
+ r->fb_state.cbufs[0] = r->surface;
+
+ r->pipe->set_framebuffer_state(r->pipe, &r->fb_state);
+ r->pipe->set_viewport_state(r->pipe, &r->viewport);
+
+ vs_consts = pipe_buffer_map
+ (
- pipe_buffer_unmap(r->pipe->screen, r->vs_const_buf);
++ r->pipe, r->vs_const_buf,
++ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
++ &buf_transfer
+ );
+
+ vs_consts->denorm.x = r->surface->width;
+ vs_consts->denorm.y = r->surface->height;
+
- r->pipe->set_vertex_elements(r->pipe, 4, r->vertex_elems);
- r->pipe->set_fragment_sampler_textures(r->pipe, 3, r->textures.all);
++ pipe_buffer_unmap(r->pipe, r->vs_const_buf, buf_transfer);
+
+ r->pipe->set_constant_buffer(r->pipe, PIPE_SHADER_VERTEX, 0,
+ r->vs_const_buf);
+
+ if (num_macroblocks[MACROBLOCK_TYPE_INTRA] > 0) {
+ r->pipe->set_vertex_buffers(r->pipe, 1, r->vertex_bufs.all);
- if (num_macroblocks[MACROBLOCK_TYPE_FWD_FRAME_PRED] > 0) {
++ r->pipe->bind_vertex_elements_state(r->pipe, r->vertex_elems_state.individual.i);
++ r->pipe->set_fragment_sampler_views(r->pipe, 3, r->sampler_views.all);
+ r->pipe->bind_fragment_sampler_states(r->pipe, 3, r->samplers.all);
+ r->pipe->bind_vs_state(r->pipe, r->i_vs);
+ r->pipe->bind_fs_state(r->pipe, r->i_fs);
+
+ r->pipe->draw_arrays(r->pipe, PIPE_PRIM_TRIANGLES, vb_start,
+ num_macroblocks[MACROBLOCK_TYPE_INTRA] * 24);
+ vb_start += num_macroblocks[MACROBLOCK_TYPE_INTRA] * 24;
+ }
+
- r->pipe->set_vertex_elements(r->pipe, 6, r->vertex_elems);
++ if (false /*num_macroblocks[MACROBLOCK_TYPE_FWD_FRAME_PRED] > 0*/) {
+ r->pipe->set_vertex_buffers(r->pipe, 2, r->vertex_bufs.all);
- r->pipe->set_fragment_sampler_textures(r->pipe, 4, r->textures.all);
++ r->pipe->bind_vertex_elements_state(r->pipe, r->vertex_elems_state.individual.p);
+ r->textures.individual.ref[0] = r->past->texture;
- r->pipe->set_vertex_elements(r->pipe, 6, r->vertex_elems);
++ r->pipe->set_fragment_sampler_views(r->pipe, 4, r->sampler_views.all);
+ r->pipe->bind_fragment_sampler_states(r->pipe, 4, r->samplers.all);
+ r->pipe->bind_vs_state(r->pipe, r->p_vs[0]);
+ r->pipe->bind_fs_state(r->pipe, r->p_fs[0]);
+
+ r->pipe->draw_arrays(r->pipe, PIPE_PRIM_TRIANGLES, vb_start,
+ num_macroblocks[MACROBLOCK_TYPE_FWD_FRAME_PRED] * 24);
+ vb_start += num_macroblocks[MACROBLOCK_TYPE_FWD_FRAME_PRED] * 24;
+ }
+
+ if (false /*num_macroblocks[MACROBLOCK_TYPE_FWD_FIELD_PRED] > 0 */ ) {
+ r->pipe->set_vertex_buffers(r->pipe, 2, r->vertex_bufs.all);
- r->pipe->set_fragment_sampler_textures(r->pipe, 4, r->textures.all);
++ r->pipe->bind_vertex_elements_state(r->pipe, r->vertex_elems_state.individual.p);
+ r->textures.individual.ref[0] = r->past->texture;
- if (num_macroblocks[MACROBLOCK_TYPE_BKWD_FRAME_PRED] > 0) {
++ r->pipe->set_fragment_sampler_views(r->pipe, 4, r->sampler_views.all);
+ r->pipe->bind_fragment_sampler_states(r->pipe, 4, r->samplers.all);
+ r->pipe->bind_vs_state(r->pipe, r->p_vs[1]);
+ r->pipe->bind_fs_state(r->pipe, r->p_fs[1]);
+
+ r->pipe->draw_arrays(r->pipe, PIPE_PRIM_TRIANGLES, vb_start,
+ num_macroblocks[MACROBLOCK_TYPE_FWD_FIELD_PRED] * 24);
+ vb_start += num_macroblocks[MACROBLOCK_TYPE_FWD_FIELD_PRED] * 24;
+ }
+
- r->pipe->set_vertex_elements(r->pipe, 6, r->vertex_elems);
++ if (false /*num_macroblocks[MACROBLOCK_TYPE_BKWD_FRAME_PRED] > 0*/) {
+ r->pipe->set_vertex_buffers(r->pipe, 2, r->vertex_bufs.all);
- r->pipe->set_fragment_sampler_textures(r->pipe, 4, r->textures.all);
++ r->pipe->bind_vertex_elements_state(r->pipe, r->vertex_elems_state.individual.p);
+ r->textures.individual.ref[0] = r->future->texture;
- if (false /*num_macroblocks[MACROBLOCK_TYPE_BKWD_FIELD_PRED] > 0 */ ) {
++ r->pipe->set_fragment_sampler_views(r->pipe, 4, r->sampler_views.all);
+ r->pipe->bind_fragment_sampler_states(r->pipe, 4, r->samplers.all);
+ r->pipe->bind_vs_state(r->pipe, r->p_vs[0]);
+ r->pipe->bind_fs_state(r->pipe, r->p_fs[0]);
+
+ r->pipe->draw_arrays(r->pipe, PIPE_PRIM_TRIANGLES, vb_start,
+ num_macroblocks[MACROBLOCK_TYPE_BKWD_FRAME_PRED] * 24);
+ vb_start += num_macroblocks[MACROBLOCK_TYPE_BKWD_FRAME_PRED] * 24;
+ }
+
- r->pipe->set_vertex_elements(r->pipe, 6, r->vertex_elems);
++ if (false /*num_macroblocks[MACROBLOCK_TYPE_BKWD_FIELD_PRED] > 0*/ ) {
+ r->pipe->set_vertex_buffers(r->pipe, 2, r->vertex_bufs.all);
- r->pipe->set_fragment_sampler_textures(r->pipe, 4, r->textures.all);
++ r->pipe->bind_vertex_elements_state(r->pipe, r->vertex_elems_state.individual.p);
+ r->textures.individual.ref[0] = r->future->texture;
- if (num_macroblocks[MACROBLOCK_TYPE_BI_FRAME_PRED] > 0) {
++ r->pipe->set_fragment_sampler_views(r->pipe, 4, r->sampler_views.all);
+ r->pipe->bind_fragment_sampler_states(r->pipe, 4, r->samplers.all);
+ r->pipe->bind_vs_state(r->pipe, r->p_vs[1]);
+ r->pipe->bind_fs_state(r->pipe, r->p_fs[1]);
+
+ r->pipe->draw_arrays(r->pipe, PIPE_PRIM_TRIANGLES, vb_start,
+ num_macroblocks[MACROBLOCK_TYPE_BKWD_FIELD_PRED] * 24);
+ vb_start += num_macroblocks[MACROBLOCK_TYPE_BKWD_FIELD_PRED] * 24;
+ }
+
- r->pipe->set_vertex_elements(r->pipe, 8, r->vertex_elems);
++ if (false /*num_macroblocks[MACROBLOCK_TYPE_BI_FRAME_PRED] > 0*/) {
+ r->pipe->set_vertex_buffers(r->pipe, 3, r->vertex_bufs.all);
- r->pipe->set_fragment_sampler_textures(r->pipe, 5, r->textures.all);
++ r->pipe->bind_vertex_elements_state(r->pipe, r->vertex_elems_state.individual.b);
+ r->textures.individual.ref[0] = r->past->texture;
+ r->textures.individual.ref[1] = r->future->texture;
- r->pipe->set_vertex_elements(r->pipe, 8, r->vertex_elems);
++ r->pipe->set_fragment_sampler_views(r->pipe, 5, r->sampler_views.all);
+ r->pipe->bind_fragment_sampler_states(r->pipe, 5, r->samplers.all);
+ r->pipe->bind_vs_state(r->pipe, r->b_vs[0]);
+ r->pipe->bind_fs_state(r->pipe, r->b_fs[0]);
+
+ r->pipe->draw_arrays(r->pipe, PIPE_PRIM_TRIANGLES, vb_start,
+ num_macroblocks[MACROBLOCK_TYPE_BI_FRAME_PRED] * 24);
+ vb_start += num_macroblocks[MACROBLOCK_TYPE_BI_FRAME_PRED] * 24;
+ }
+
+ if (false /*num_macroblocks[MACROBLOCK_TYPE_BI_FIELD_PRED] > 0 */ ) {
+ r->pipe->set_vertex_buffers(r->pipe, 3, r->vertex_bufs.all);
- r->pipe->set_fragment_sampler_textures(r->pipe, 5, r->textures.all);
++ r->pipe->bind_vertex_elements_state(r->pipe, r->vertex_elems_state.individual.b);
+ r->textures.individual.ref[0] = r->past->texture;
+ r->textures.individual.ref[1] = r->future->texture;
- tex_pitch = r->tex_transfer[0]->stride / util_format_get_blocksize(r->tex_transfer[0]->texture->format);
++ r->pipe->set_fragment_sampler_views(r->pipe, 5, r->sampler_views.all);
+ r->pipe->bind_fragment_sampler_states(r->pipe, 5, r->samplers.all);
+ r->pipe->bind_vs_state(r->pipe, r->b_vs[1]);
+ r->pipe->bind_fs_state(r->pipe, r->b_fs[1]);
+
+ r->pipe->draw_arrays(r->pipe, PIPE_PRIM_TRIANGLES, vb_start,
+ num_macroblocks[MACROBLOCK_TYPE_BI_FIELD_PRED] * 24);
+ vb_start += num_macroblocks[MACROBLOCK_TYPE_BI_FIELD_PRED] * 24;
+ }
+
+ r->pipe->flush(r->pipe, PIPE_FLUSH_RENDER_CACHE, r->fence);
+
+ if (r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE)
+ for (i = 0; i < 3; ++i)
+ r->zero_block[i].x = ZERO_BLOCK_NIL;
+
+ r->num_macroblocks = 0;
+}
+
+static void
+grab_frame_coded_block(short *src, short *dst, unsigned dst_pitch)
+{
+ unsigned y;
+
+ assert(src);
+ assert(dst);
+
+ for (y = 0; y < BLOCK_HEIGHT; ++y)
+ memcpy(dst + y * dst_pitch, src + y * BLOCK_WIDTH, BLOCK_WIDTH * 2);
+}
+
+static void
+grab_field_coded_block(short *src, short *dst, unsigned dst_pitch)
+{
+ unsigned y;
+
+ assert(src);
+ assert(dst);
+
+ for (y = 0; y < BLOCK_HEIGHT; ++y)
+ memcpy(dst + y * dst_pitch * 2, src + y * BLOCK_WIDTH, BLOCK_WIDTH * 2);
+}
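+
+/*
+ * Frame-coded blocks land as 8 contiguous rows; field-coded blocks are
+ * written with a stride of dst_pitch * 2 so the two fields of a macroblock
+ * interleave into alternating picture lines, e.g. a top block fills even
+ * rows 0, 2, ..., 14 and the matching bottom-field block fills the odd rows.
+ */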
+
+static void
+fill_zero_block(short *dst, unsigned dst_pitch)
+{
+ unsigned y;
+
+ assert(dst);
+
+ for (y = 0; y < BLOCK_HEIGHT; ++y)
+ memset(dst + y * dst_pitch, 0, BLOCK_WIDTH * 2);
+}
+
+static void
+grab_blocks(struct vl_mpeg12_mc_renderer *r, unsigned mbx, unsigned mby,
+ enum pipe_mpeg12_dct_type dct_type, unsigned cbp, short *blocks)
+{
+ unsigned tex_pitch;
+ short *texels;
+ unsigned tb = 0, sb = 0;
+ unsigned mbpx = mbx * MACROBLOCK_WIDTH, mbpy = mby * MACROBLOCK_HEIGHT;
+ unsigned x, y;
+
+ assert(r);
+ assert(blocks);
+
- tex_pitch = r->tex_transfer[tb + 1]->stride / util_format_get_blocksize(r->tex_transfer[tb + 1]->texture->format);
++ tex_pitch = r->tex_transfer[0]->stride / util_format_get_blocksize(r->tex_transfer[0]->resource->format);
+ texels = r->texels[0] + mbpy * tex_pitch + mbpx;
+
+ for (y = 0; y < 2; ++y) {
+ for (x = 0; x < 2; ++x, ++tb) {
+ if ((cbp >> (5 - tb)) & 1) {
+ if (dct_type == PIPE_MPEG12_DCT_TYPE_FRAME) {
+ grab_frame_coded_block(blocks + sb * BLOCK_WIDTH * BLOCK_HEIGHT,
+ texels + y * tex_pitch * BLOCK_WIDTH +
+ x * BLOCK_WIDTH, tex_pitch);
+ }
+ else {
+ grab_field_coded_block(blocks + sb * BLOCK_WIDTH * BLOCK_HEIGHT,
+ texels + y * tex_pitch + x * BLOCK_WIDTH,
+ tex_pitch);
+ }
+
+ ++sb;
+ }
+ else if (r->eb_handling != VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_NONE) {
+ if (r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ALL ||
+ ZERO_BLOCK_IS_NIL(r->zero_block[0])) {
+ fill_zero_block(texels + y * tex_pitch * BLOCK_WIDTH + x * BLOCK_WIDTH, tex_pitch);
+ if (r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE) {
+ r->zero_block[0].x = (mbpx + x * 8) * r->surface_tex_inv_size.x;
+ r->zero_block[0].y = (mbpy + y * 8) * r->surface_tex_inv_size.y;
+ }
+ }
+ }
+ }
+ }
+
+ /* TODO: Implement 422, 444 */
+ assert(r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
+
+ mbpx /= 2;
+ mbpy /= 2;
+
+ for (tb = 0; tb < 2; ++tb) {
++ tex_pitch = r->tex_transfer[tb + 1]->stride / util_format_get_blocksize(r->tex_transfer[tb + 1]->resource->format);
+ texels = r->texels[tb + 1] + mbpy * tex_pitch + mbpx;
+
+ if ((cbp >> (1 - tb)) & 1) {
+ grab_frame_coded_block(blocks + sb * BLOCK_WIDTH * BLOCK_HEIGHT, texels, tex_pitch);
+ ++sb;
+ }
+ else if (r->eb_handling != VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_NONE) {
+ if (r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ALL ||
+ ZERO_BLOCK_IS_NIL(r->zero_block[tb + 1])) {
+ fill_zero_block(texels, tex_pitch);
+ if (r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE) {
+ r->zero_block[tb + 1].x = (mbpx << 1) * r->surface_tex_inv_size.x;
+ r->zero_block[tb + 1].y = (mbpy << 1) * r->surface_tex_inv_size.y;
+ }
+ }
+ }
+ }
+}
+
+static void
+grab_macroblock(struct vl_mpeg12_mc_renderer *r,
+ struct pipe_mpeg12_macroblock *mb)
+{
+ assert(r);
+ assert(mb);
+ assert(mb->blocks);
+ assert(r->num_macroblocks < r->macroblocks_per_batch);
+
+ memcpy(&r->macroblock_buf[r->num_macroblocks], mb,
+ sizeof(struct pipe_mpeg12_macroblock));
+
+ grab_blocks(r, mb->mbx, mb->mby, mb->dct_type, mb->cbp, mb->blocks);
+
+ ++r->num_macroblocks;
+}
+
+bool
+vl_mpeg12_mc_renderer_init(struct vl_mpeg12_mc_renderer *renderer,
+ struct pipe_context *pipe,
+ unsigned picture_width,
+ unsigned picture_height,
+ enum pipe_video_chroma_format chroma_format,
+ enum VL_MPEG12_MC_RENDERER_BUFFER_MODE bufmode,
+ enum VL_MPEG12_MC_RENDERER_EMPTY_BLOCK eb_handling,
+ bool pot_buffers)
+{
+ unsigned i;
+
+ assert(renderer);
+ assert(pipe);
+ /* TODO: Implement other policies */
+ assert(bufmode == VL_MPEG12_MC_RENDERER_BUFFER_PICTURE);
+ /* TODO: Implement this */
+ /* XXX: XFER_ALL sampling issue at block edges when using bilinear filtering */
+ assert(eb_handling != VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_NONE);
+ /* TODO: Non-POT buffers are untested and probably don't work without changes to texcoord generation, the vertex shader, etc. */
+ assert(pot_buffers);
+
+ memset(renderer, 0, sizeof(struct vl_mpeg12_mc_renderer));
+
+ renderer->pipe = pipe;
+ renderer->picture_width = picture_width;
+ renderer->picture_height = picture_height;
+ renderer->chroma_format = chroma_format;
+ renderer->bufmode = bufmode;
+ renderer->eb_handling = eb_handling;
+ renderer->pot_buffers = pot_buffers;
+
+ if (!init_pipe_state(renderer))
+ return false;
+ if (!init_shaders(renderer)) {
+ cleanup_pipe_state(renderer);
+ return false;
+ }
+ if (!init_buffers(renderer)) {
+ cleanup_shaders(renderer);
+ cleanup_pipe_state(renderer);
+ return false;
+ }
+
+ renderer->surface = NULL;
+ renderer->past = NULL;
+ renderer->future = NULL;
+ for (i = 0; i < 3; ++i)
+ renderer->zero_block[i].x = ZERO_BLOCK_NIL;
+ renderer->num_macroblocks = 0;
+
+ xfer_buffers_map(renderer);
+
+ return true;
+}
+
+void
+vl_mpeg12_mc_renderer_cleanup(struct vl_mpeg12_mc_renderer *renderer)
+{
+ assert(renderer);
+
+ xfer_buffers_unmap(renderer);
+
+ cleanup_pipe_state(renderer);
+ cleanup_shaders(renderer);
+ cleanup_buffers(renderer);
+
+ pipe_surface_reference(&renderer->surface, NULL);
+ pipe_surface_reference(&renderer->past, NULL);
+ pipe_surface_reference(&renderer->future, NULL);
+}
+
+void
+vl_mpeg12_mc_renderer_render_macroblocks(struct vl_mpeg12_mc_renderer
+ *renderer,
+ struct pipe_surface *surface,
+ struct pipe_surface *past,
+ struct pipe_surface *future,
+ unsigned num_macroblocks,
+ struct pipe_mpeg12_macroblock
+ *mpeg12_macroblocks,
+ struct pipe_fence_handle **fence)
+{
+ bool new_surface = false;
+
+ assert(renderer);
+ assert(surface);
+ assert(num_macroblocks);
+ assert(mpeg12_macroblocks);
+
+ if (renderer->surface) {
+ if (surface != renderer->surface) {
+ if (renderer->num_macroblocks > 0) {
+ xfer_buffers_unmap(renderer);
+ flush(renderer);
+ }
+
+ new_surface = true;
+ }
+
+ /* If the surface we're rendering hasn't changed, the ref frames shouldn't change. */
+ assert(surface != renderer->surface || renderer->past == past);
+ assert(surface != renderer->surface || renderer->future == future);
+ }
+ else
+ new_surface = true;
+
+ if (new_surface) {
+ pipe_surface_reference(&renderer->surface, surface);
+ pipe_surface_reference(&renderer->past, past);
+ pipe_surface_reference(&renderer->future, future);
+ renderer->fence = fence;
+ renderer->surface_tex_inv_size.x = 1.0f / surface->width;
+ renderer->surface_tex_inv_size.y = 1.0f / surface->height;
+ }
+
+ while (num_macroblocks) {
+ unsigned left_in_batch = renderer->macroblocks_per_batch - renderer->num_macroblocks;
+ unsigned num_to_submit = MIN2(num_macroblocks, left_in_batch);
+ unsigned i;
+
+ for (i = 0; i < num_to_submit; ++i) {
+ assert(mpeg12_macroblocks[i].base.codec == PIPE_VIDEO_CODEC_MPEG12);
+ grab_macroblock(renderer, &mpeg12_macroblocks[i]);
+ }
+
+ num_macroblocks -= num_to_submit;
+
+ if (renderer->num_macroblocks == renderer->macroblocks_per_batch) {
+ xfer_buffers_unmap(renderer);
+ flush(renderer);
+ xfer_buffers_map(renderer);
+ /* Next time we get this surface it may have new ref frames */
+ pipe_surface_reference(&renderer->surface, NULL);
+ pipe_surface_reference(&renderer->past, NULL);
+ pipe_surface_reference(&renderer->future, NULL);
+ }
+ }
+}
--- /dev/null
- struct pipe_buffer *vs_const_buf;
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef vl_mpeg12_mc_renderer_h
+#define vl_mpeg12_mc_renderer_h
+
+#include <pipe/p_compiler.h>
+#include <pipe/p_state.h>
+#include <pipe/p_video_state.h>
+#include "vl_types.h"
+
+struct pipe_context;
+struct pipe_macroblock;
+
+/* A slice is video-width (rounded up to a multiple of macroblock width) x macroblock height */
+enum VL_MPEG12_MC_RENDERER_BUFFER_MODE
+{
+ VL_MPEG12_MC_RENDERER_BUFFER_SLICE, /* Saves memory at the cost of smaller batches */
+ VL_MPEG12_MC_RENDERER_BUFFER_PICTURE /* Larger batches, more memory */
+};
+
+enum VL_MPEG12_MC_RENDERER_EMPTY_BLOCK
+{
+ VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ALL, /* Waste of memory bandwidth */
+ VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE, /* Can only do point-filtering when interpolating subsampled chroma channels */
+ VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_NONE /* Needs conditional texel fetch! */
+};
+
+struct vl_mpeg12_mc_renderer
+{
+ struct pipe_context *pipe;
+ unsigned picture_width;
+ unsigned picture_height;
+ enum pipe_video_chroma_format chroma_format;
+ enum VL_MPEG12_MC_RENDERER_BUFFER_MODE bufmode;
+ enum VL_MPEG12_MC_RENDERER_EMPTY_BLOCK eb_handling;
+ bool pot_buffers;
+ unsigned macroblocks_per_batch;
+
+ struct pipe_viewport_state viewport;
- struct pipe_vertex_element vertex_elems[8];
++ struct pipe_resource *vs_const_buf;
+ struct pipe_framebuffer_state fb_state;
- struct pipe_texture *all[5];
- struct { struct pipe_texture *y, *cb, *cr, *ref[2]; } individual;
++ union
++ {
++ void *all[3];
++ struct { void *i, *p, *b; } individual;
++ } vertex_elems_state;
+
+ union
+ {
+ void *all[5];
+ struct { void *y, *cb, *cr, *ref[2]; } individual;
+ } samplers;
+
++ union
++ {
++ struct pipe_sampler_view *all[5];
++ struct { struct pipe_sampler_view *y, *cb, *cr, *ref[2]; } individual;
++ } sampler_views;
++
+ void *i_vs, *p_vs[2], *b_vs[2];
+ void *i_fs, *p_fs[2], *b_fs[2];
+
+ union
+ {
++ struct pipe_resource *all[5];
++ struct { struct pipe_resource *y, *cb, *cr, *ref[2]; } individual;
+ } textures;
+
+ union
+ {
+ struct pipe_vertex_buffer all[3];
+ struct { struct pipe_vertex_buffer ycbcr, ref[2]; } individual;
+ } vertex_bufs;
+
+ struct pipe_surface *surface, *past, *future;
+ struct pipe_fence_handle **fence;
+ unsigned num_macroblocks;
+ struct pipe_mpeg12_macroblock *macroblock_buf;
+ struct pipe_transfer *tex_transfer[3];
+ short *texels[3];
+ struct vertex2f surface_tex_inv_size;
+ struct vertex2f zero_block[3];
+};
+
+bool vl_mpeg12_mc_renderer_init(struct vl_mpeg12_mc_renderer *renderer,
+ struct pipe_context *pipe,
+ unsigned picture_width,
+ unsigned picture_height,
+ enum pipe_video_chroma_format chroma_format,
+ enum VL_MPEG12_MC_RENDERER_BUFFER_MODE bufmode,
+ enum VL_MPEG12_MC_RENDERER_EMPTY_BLOCK eb_handling,
+ bool pot_buffers);
+
+void vl_mpeg12_mc_renderer_cleanup(struct vl_mpeg12_mc_renderer *renderer);
+
+void vl_mpeg12_mc_renderer_render_macroblocks(struct vl_mpeg12_mc_renderer *renderer,
+ struct pipe_surface *surface,
+ struct pipe_surface *past,
+ struct pipe_surface *future,
+ unsigned num_macroblocks,
+ struct pipe_mpeg12_macroblock *mpeg12_macroblocks,
+ struct pipe_fence_handle **fence);
+
+#endif /* vl_mpeg12_mc_renderer_h */
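For orientation, here is a minimal usage sketch of the interface declared above, in the spirit of the G3DVL state trackers later in this patch. Everything except the renderer itself ('pipe', the three surfaces, 'mbs'/'num_mbs', 'fence', and the 720x480 size) is an assumed placeholder, and a real caller would keep the renderer alive across frames rather than tearing it down per frame:

    /* Hedged sketch: decode one MPEG-2 frame's macroblocks with the MC
     * renderer, using picture-sized batches and one-block transfers for
     * empty blocks. Illustrative only. */
    static bool
    render_frame(struct pipe_context *pipe,
                 struct pipe_surface *surface,
                 struct pipe_surface *past,
                 struct pipe_surface *future,
                 unsigned num_mbs,
                 struct pipe_mpeg12_macroblock *mbs,
                 struct pipe_fence_handle **fence)
    {
       struct vl_mpeg12_mc_renderer mc;

       if (!vl_mpeg12_mc_renderer_init(&mc, pipe, 720, 480,
                                       PIPE_VIDEO_CHROMA_FORMAT_420,
                                       VL_MPEG12_MC_RENDERER_BUFFER_PICTURE,
                                       VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE,
                                       true /* pot_buffers */))
          return false;

       /* Batches are flushed internally whenever macroblocks_per_batch fills. */
       vl_mpeg12_mc_renderer_render_macroblocks(&mc, surface, past, future,
                                                num_mbs, mbs, fence);

       vl_mpeg12_mc_renderer_cleanup(&mc);
       return true;
    }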
--- /dev/null
+ #ifndef __NVFX_STATE_H__
+ #define __NVFX_STATE_H__
+
+ #include "pipe/p_state.h"
++#include "pipe/p_video_state.h"
+ #include "tgsi/tgsi_scan.h"
+ #include "nouveau/nouveau_statebuf.h"
+
+ struct nvfx_vertex_program_exec {
+ uint32_t data[4];
+ boolean has_branch_offset;
+ int const_index;
+ };
+
+ struct nvfx_vertex_program_data {
+ int index; /* immediates == -1 */
+ float value[4];
+ };
+
+ struct nvfx_vertex_program {
+ struct pipe_shader_state pipe;
+
+ struct draw_vertex_shader *draw;
+
+ boolean translated;
+
+ struct pipe_clip_state ucp;
+
+ struct nvfx_vertex_program_exec *insns;
+ unsigned nr_insns;
+ struct nvfx_vertex_program_data *consts;
+ unsigned nr_consts;
+
+ struct nouveau_resource *exec;
+ unsigned exec_start;
+ struct nouveau_resource *data;
+ unsigned data_start;
+ unsigned data_start_min;
+
+ uint32_t ir;
+ uint32_t or;
+ uint32_t clip_ctrl;
+ };
+
+ struct nvfx_fragment_program_data {
+ unsigned offset;
+ unsigned index;
+ };
+
+ struct nvfx_fragment_program_bo {
+ struct nvfx_fragment_program_bo* next;
+ struct nouveau_bo* bo;
+ char insn[] __attribute__((aligned(16)));
+ };
+
+ struct nvfx_fragment_program {
+ struct pipe_shader_state pipe;
+ struct tgsi_shader_info info;
+
+ boolean translated;
+ unsigned samplers;
+
+ uint32_t *insn;
+ int insn_len;
+
+ struct nvfx_fragment_program_data *consts;
+ unsigned nr_consts;
+
+ uint32_t fp_control;
+
+ unsigned bo_prog_idx;
+ unsigned prog_size;
+ unsigned progs_per_bo;
+ struct nvfx_fragment_program_bo* fpbo;
+ };
+
+
+ #endif
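An orientation note on the fragment-program fields above: each nvfx_fragment_program_bo packs progs_per_bo copies of the shader, prog_size bytes apiece, in its flexible insn array, and bo_prog_idx selects the copy currently in use. A hedged sketch of the addressing this layout implies (the helper name is illustrative, not something this patch defines):

    /* Sketch: locate the current copy of the program inside the active BO. */
    static inline char *
    nvfx_fp_current_insn(struct nvfx_fragment_program *fp)
    {
       return fp->fpbo->insn + fp->bo_prog_idx * fp->prog_size;
    }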
};
-
/** cast wrappers */
- static INLINE struct softpipe_texture *
- softpipe_texture(struct pipe_texture *pt)
+ static INLINE struct softpipe_resource *
+ softpipe_resource(struct pipe_resource *pt)
{
- return (struct softpipe_texture *) pt;
+ return (struct softpipe_resource *) pt;
}
static INLINE struct softpipe_transfer *
#define PIPE_REFERENCED_FOR_READ (1 << 0)
#define PIPE_REFERENCED_FOR_WRITE (1 << 1)
-
+enum pipe_video_codec
+{
+ PIPE_VIDEO_CODEC_UNKNOWN = 0,
+ PIPE_VIDEO_CODEC_MPEG12, /**< MPEG1, MPEG2 */
+ PIPE_VIDEO_CODEC_MPEG4, /**< DIVX, XVID */
+ PIPE_VIDEO_CODEC_VC1, /**< WMV */
+ PIPE_VIDEO_CODEC_MPEG4_AVC /**< H.264 */
+};
+
+enum pipe_video_profile
+{
+ PIPE_VIDEO_PROFILE_MPEG1,
+ PIPE_VIDEO_PROFILE_MPEG2_SIMPLE,
+ PIPE_VIDEO_PROFILE_MPEG2_MAIN,
+ PIPE_VIDEO_PROFILE_MPEG4_SIMPLE,
+ PIPE_VIDEO_PROFILE_MPEG4_ADVANCED_SIMPLE,
+ PIPE_VIDEO_PROFILE_VC1_SIMPLE,
+ PIPE_VIDEO_PROFILE_VC1_MAIN,
+ PIPE_VIDEO_PROFILE_VC1_ADVANCED,
+ PIPE_VIDEO_PROFILE_MPEG4_AVC_BASELINE,
+ PIPE_VIDEO_PROFILE_MPEG4_AVC_MAIN,
+ PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH
+};
+
+
#ifdef __cplusplus
}
#endif
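The profile values above group naturally by codec, which callers need when choosing a decoder path. A worked sketch of that mapping (illustrative; this patch defines no such helper):

    static enum pipe_video_codec
    profile_to_codec(enum pipe_video_profile profile)
    {
       switch (profile) {
       case PIPE_VIDEO_PROFILE_MPEG1:
       case PIPE_VIDEO_PROFILE_MPEG2_SIMPLE:
       case PIPE_VIDEO_PROFILE_MPEG2_MAIN:
          return PIPE_VIDEO_CODEC_MPEG12;
       case PIPE_VIDEO_PROFILE_MPEG4_SIMPLE:
       case PIPE_VIDEO_PROFILE_MPEG4_ADVANCED_SIMPLE:
          return PIPE_VIDEO_CODEC_MPEG4;
       case PIPE_VIDEO_PROFILE_VC1_SIMPLE:
       case PIPE_VIDEO_PROFILE_VC1_MAIN:
       case PIPE_VIDEO_PROFILE_VC1_ADVANCED:
          return PIPE_VIDEO_CODEC_VC1;
       case PIPE_VIDEO_PROFILE_MPEG4_AVC_BASELINE:
       case PIPE_VIDEO_PROFILE_MPEG4_AVC_MAIN:
       case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH:
          return PIPE_VIDEO_CODEC_MPEG4_AVC;
       default:
          return PIPE_VIDEO_CODEC_UNKNOWN;
       }
    }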
*/
enum pipe_format {
- PIPE_FORMAT_NONE = 0,
- PIPE_FORMAT_B8G8R8A8_UNORM = 1,
- PIPE_FORMAT_B8G8R8X8_UNORM = 2,
- PIPE_FORMAT_A8R8G8B8_UNORM = 3,
- PIPE_FORMAT_X8R8G8B8_UNORM = 4,
- PIPE_FORMAT_B5G5R5A1_UNORM = 5,
- PIPE_FORMAT_B4G4R4A4_UNORM = 6,
- PIPE_FORMAT_B5G6R5_UNORM = 7,
- PIPE_FORMAT_R10G10B10A2_UNORM = 8,
- PIPE_FORMAT_L8_UNORM = 9, /**< ubyte luminance */
- PIPE_FORMAT_A8_UNORM = 10, /**< ubyte alpha */
- PIPE_FORMAT_I8_UNORM = 11, /**< ubyte intensity */
- PIPE_FORMAT_L8A8_UNORM = 12, /**< ubyte alpha, luminance */
- PIPE_FORMAT_L16_UNORM = 13, /**< ushort luminance */
- PIPE_FORMAT_UYVY = 14, /**< aka Y422, UYNV, HDYC */
- PIPE_FORMAT_YUYV = 15, /**< aka YUY2, YUNV, V422 */
- PIPE_FORMAT_Z16_UNORM = 16,
- PIPE_FORMAT_Z32_UNORM = 17,
- PIPE_FORMAT_Z32_FLOAT = 18,
- PIPE_FORMAT_Z24S8_UNORM = 19,
- PIPE_FORMAT_S8Z24_UNORM = 20,
- PIPE_FORMAT_Z24X8_UNORM = 21,
- PIPE_FORMAT_X8Z24_UNORM = 22,
- PIPE_FORMAT_S8_UNORM = 23, /**< ubyte stencil */
- PIPE_FORMAT_R64_FLOAT = 24,
- PIPE_FORMAT_R64G64_FLOAT = 25,
- PIPE_FORMAT_R64G64B64_FLOAT = 26,
- PIPE_FORMAT_R64G64B64A64_FLOAT = 27,
- PIPE_FORMAT_R32_FLOAT = 28,
- PIPE_FORMAT_R32G32_FLOAT = 29,
- PIPE_FORMAT_R32G32B32_FLOAT = 30,
- PIPE_FORMAT_R32G32B32A32_FLOAT = 31,
- PIPE_FORMAT_R32_UNORM = 32,
- PIPE_FORMAT_R32G32_UNORM = 33,
- PIPE_FORMAT_R32G32B32_UNORM = 34,
- PIPE_FORMAT_R32G32B32A32_UNORM = 35,
- PIPE_FORMAT_R32_USCALED = 36,
- PIPE_FORMAT_R32G32_USCALED = 37,
- PIPE_FORMAT_R32G32B32_USCALED = 38,
- PIPE_FORMAT_R32G32B32A32_USCALED = 39,
- PIPE_FORMAT_R32_SNORM = 40,
- PIPE_FORMAT_R32G32_SNORM = 41,
- PIPE_FORMAT_R32G32B32_SNORM = 42,
- PIPE_FORMAT_R32G32B32A32_SNORM = 43,
- PIPE_FORMAT_R32_SSCALED = 44,
- PIPE_FORMAT_R32G32_SSCALED = 45,
- PIPE_FORMAT_R32G32B32_SSCALED = 46,
- PIPE_FORMAT_R32G32B32A32_SSCALED = 47,
- PIPE_FORMAT_R16_UNORM = 48,
- PIPE_FORMAT_R16G16_UNORM = 49,
- PIPE_FORMAT_R16G16B16_UNORM = 50,
- PIPE_FORMAT_R16G16B16A16_UNORM = 51,
- PIPE_FORMAT_R16_USCALED = 52,
- PIPE_FORMAT_R16G16_USCALED = 53,
- PIPE_FORMAT_R16G16B16_USCALED = 54,
- PIPE_FORMAT_R16G16B16A16_USCALED = 55,
- PIPE_FORMAT_R16_SNORM = 56,
- PIPE_FORMAT_R16G16_SNORM = 57,
- PIPE_FORMAT_R16G16B16_SNORM = 58,
- PIPE_FORMAT_R16G16B16A16_SNORM = 59,
- PIPE_FORMAT_R16_SSCALED = 60,
- PIPE_FORMAT_R16G16_SSCALED = 61,
- PIPE_FORMAT_R16G16B16_SSCALED = 62,
- PIPE_FORMAT_R16G16B16A16_SSCALED = 63,
- PIPE_FORMAT_R8_UNORM = 64,
- PIPE_FORMAT_R8G8_UNORM = 65,
- PIPE_FORMAT_R8G8B8_UNORM = 66,
- PIPE_FORMAT_R8G8B8A8_UNORM = 67,
- PIPE_FORMAT_X8B8G8R8_UNORM = 68,
- PIPE_FORMAT_R8_USCALED = 69,
- PIPE_FORMAT_R8G8_USCALED = 70,
- PIPE_FORMAT_R8G8B8_USCALED = 71,
- PIPE_FORMAT_R8G8B8A8_USCALED = 72,
- PIPE_FORMAT_R8_SNORM = 74,
- PIPE_FORMAT_R8G8_SNORM = 75,
- PIPE_FORMAT_R8G8B8_SNORM = 76,
- PIPE_FORMAT_R8G8B8A8_SNORM = 77,
- PIPE_FORMAT_R8_SSCALED = 82,
- PIPE_FORMAT_R8G8_SSCALED = 83,
- PIPE_FORMAT_R8G8B8_SSCALED = 84,
- PIPE_FORMAT_R8G8B8A8_SSCALED = 85,
- PIPE_FORMAT_R32_FIXED = 87,
- PIPE_FORMAT_R32G32_FIXED = 88,
- PIPE_FORMAT_R32G32B32_FIXED = 89,
- PIPE_FORMAT_R32G32B32A32_FIXED = 90,
- /* sRGB formats */
- PIPE_FORMAT_L8_SRGB = 91,
- PIPE_FORMAT_L8A8_SRGB = 92,
- PIPE_FORMAT_R8G8B8_SRGB = 93,
- PIPE_FORMAT_A8B8G8R8_SRGB = 94,
- PIPE_FORMAT_X8B8G8R8_SRGB = 95,
- PIPE_FORMAT_B8G8R8A8_SRGB = 96,
- PIPE_FORMAT_B8G8R8X8_SRGB = 97,
- PIPE_FORMAT_A8R8G8B8_SRGB = 98,
- PIPE_FORMAT_X8R8G8B8_SRGB = 99,
+ PIPE_FORMAT_NONE = 0,
+ PIPE_FORMAT_B8G8R8A8_UNORM = 1,
+ PIPE_FORMAT_B8G8R8X8_UNORM = 2,
+ PIPE_FORMAT_A8R8G8B8_UNORM = 3,
+ PIPE_FORMAT_X8R8G8B8_UNORM = 4,
+ PIPE_FORMAT_B5G5R5A1_UNORM = 5,
+ PIPE_FORMAT_B4G4R4A4_UNORM = 6,
+ PIPE_FORMAT_B5G6R5_UNORM = 7,
+ PIPE_FORMAT_R10G10B10A2_UNORM = 8,
+ PIPE_FORMAT_L8_UNORM = 9, /**< ubyte luminance */
+ PIPE_FORMAT_A8_UNORM = 10, /**< ubyte alpha */
+ PIPE_FORMAT_I8_UNORM = 11, /**< ubyte intensity */
+ PIPE_FORMAT_L8A8_UNORM = 12, /**< ubyte alpha, luminance */
+ PIPE_FORMAT_L16_UNORM = 13, /**< ushort luminance */
+ PIPE_FORMAT_UYVY = 14,
+ PIPE_FORMAT_YUYV = 15,
+ PIPE_FORMAT_Z16_UNORM = 16,
+ PIPE_FORMAT_Z32_UNORM = 17,
+ PIPE_FORMAT_Z32_FLOAT = 18,
+ PIPE_FORMAT_Z24_UNORM_S8_USCALED = 19,
+ PIPE_FORMAT_S8_USCALED_Z24_UNORM = 20,
+ PIPE_FORMAT_Z24X8_UNORM = 21,
+ PIPE_FORMAT_X8Z24_UNORM = 22,
+ PIPE_FORMAT_S8_USCALED = 23, /**< ubyte stencil */
+ PIPE_FORMAT_R64_FLOAT = 24,
+ PIPE_FORMAT_R64G64_FLOAT = 25,
+ PIPE_FORMAT_R64G64B64_FLOAT = 26,
+ PIPE_FORMAT_R64G64B64A64_FLOAT = 27,
+ PIPE_FORMAT_R32_FLOAT = 28,
+ PIPE_FORMAT_R32G32_FLOAT = 29,
+ PIPE_FORMAT_R32G32B32_FLOAT = 30,
+ PIPE_FORMAT_R32G32B32A32_FLOAT = 31,
+ PIPE_FORMAT_R32_UNORM = 32,
+ PIPE_FORMAT_R32G32_UNORM = 33,
+ PIPE_FORMAT_R32G32B32_UNORM = 34,
+ PIPE_FORMAT_R32G32B32A32_UNORM = 35,
+ PIPE_FORMAT_R32_USCALED = 36,
+ PIPE_FORMAT_R32G32_USCALED = 37,
+ PIPE_FORMAT_R32G32B32_USCALED = 38,
+ PIPE_FORMAT_R32G32B32A32_USCALED = 39,
+ PIPE_FORMAT_R32_SNORM = 40,
+ PIPE_FORMAT_R32G32_SNORM = 41,
+ PIPE_FORMAT_R32G32B32_SNORM = 42,
+ PIPE_FORMAT_R32G32B32A32_SNORM = 43,
+ PIPE_FORMAT_R32_SSCALED = 44,
+ PIPE_FORMAT_R32G32_SSCALED = 45,
+ PIPE_FORMAT_R32G32B32_SSCALED = 46,
+ PIPE_FORMAT_R32G32B32A32_SSCALED = 47,
+ PIPE_FORMAT_R16_UNORM = 48,
+ PIPE_FORMAT_R16G16_UNORM = 49,
+ PIPE_FORMAT_R16G16B16_UNORM = 50,
+ PIPE_FORMAT_R16G16B16A16_UNORM = 51,
+ PIPE_FORMAT_R16_USCALED = 52,
+ PIPE_FORMAT_R16G16_USCALED = 53,
+ PIPE_FORMAT_R16G16B16_USCALED = 54,
+ PIPE_FORMAT_R16G16B16A16_USCALED = 55,
+ PIPE_FORMAT_R16_SNORM = 56,
+ PIPE_FORMAT_R16G16_SNORM = 57,
+ PIPE_FORMAT_R16G16B16_SNORM = 58,
+ PIPE_FORMAT_R16G16B16A16_SNORM = 59,
+ PIPE_FORMAT_R16_SSCALED = 60,
+ PIPE_FORMAT_R16G16_SSCALED = 61,
+ PIPE_FORMAT_R16G16B16_SSCALED = 62,
+ PIPE_FORMAT_R16G16B16A16_SSCALED = 63,
+ PIPE_FORMAT_R8_UNORM = 64,
+ PIPE_FORMAT_R8G8_UNORM = 65,
+ PIPE_FORMAT_R8G8B8_UNORM = 66,
+ PIPE_FORMAT_R8G8B8A8_UNORM = 67,
+ PIPE_FORMAT_X8B8G8R8_UNORM = 68,
+ PIPE_FORMAT_R8_USCALED = 69,
+ PIPE_FORMAT_R8G8_USCALED = 70,
+ PIPE_FORMAT_R8G8B8_USCALED = 71,
+ PIPE_FORMAT_R8G8B8A8_USCALED = 72,
+ PIPE_FORMAT_R8_SNORM = 74,
+ PIPE_FORMAT_R8G8_SNORM = 75,
+ PIPE_FORMAT_R8G8B8_SNORM = 76,
+ PIPE_FORMAT_R8G8B8A8_SNORM = 77,
+ PIPE_FORMAT_R8_SSCALED = 82,
+ PIPE_FORMAT_R8G8_SSCALED = 83,
+ PIPE_FORMAT_R8G8B8_SSCALED = 84,
+ PIPE_FORMAT_R8G8B8A8_SSCALED = 85,
+ PIPE_FORMAT_R32_FIXED = 87,
+ PIPE_FORMAT_R32G32_FIXED = 88,
+ PIPE_FORMAT_R32G32B32_FIXED = 89,
+ PIPE_FORMAT_R32G32B32A32_FIXED = 90,
+ PIPE_FORMAT_R16_FLOAT = 91,
+ PIPE_FORMAT_R16G16_FLOAT = 92,
+ PIPE_FORMAT_R16G16B16_FLOAT = 93,
+ PIPE_FORMAT_R16G16B16A16_FLOAT = 94,
- /* mixed formats */
- PIPE_FORMAT_R8SG8SB8UX8U_NORM = 100,
- PIPE_FORMAT_R5SG5SB6U_NORM = 101,
+ /* sRGB formats */
+ PIPE_FORMAT_L8_SRGB = 95,
+ PIPE_FORMAT_L8A8_SRGB = 96,
+ PIPE_FORMAT_R8G8B8_SRGB = 97,
+ PIPE_FORMAT_A8B8G8R8_SRGB = 98,
+ PIPE_FORMAT_X8B8G8R8_SRGB = 99,
+ PIPE_FORMAT_B8G8R8A8_SRGB = 100,
+ PIPE_FORMAT_B8G8R8X8_SRGB = 101,
+ PIPE_FORMAT_A8R8G8B8_SRGB = 102,
+ PIPE_FORMAT_X8R8G8B8_SRGB = 103,
+ PIPE_FORMAT_R8G8B8A8_SRGB = 104,
/* compressed formats */
- PIPE_FORMAT_DXT1_RGB = 102,
- PIPE_FORMAT_DXT1_RGBA = 103,
- PIPE_FORMAT_DXT3_RGBA = 104,
- PIPE_FORMAT_DXT5_RGBA = 105,
+ PIPE_FORMAT_DXT1_RGB = 105,
+ PIPE_FORMAT_DXT1_RGBA = 106,
+ PIPE_FORMAT_DXT3_RGBA = 107,
+ PIPE_FORMAT_DXT5_RGBA = 108,
/* sRGB, compressed */
- PIPE_FORMAT_DXT1_SRGB = 106,
- PIPE_FORMAT_DXT1_SRGBA = 107,
- PIPE_FORMAT_DXT3_SRGBA = 108,
- PIPE_FORMAT_DXT5_SRGBA = 109,
-
- PIPE_FORMAT_A8B8G8R8_UNORM = 110,
-
- PIPE_FORMAT_YV12 = 111,
- PIPE_FORMAT_YV16 = 112,
- PIPE_FORMAT_IYUV = 113, /**< aka I420 */
- PIPE_FORMAT_NV12 = 114,
- PIPE_FORMAT_NV21 = 115,
+ PIPE_FORMAT_DXT1_SRGB = 109,
+ PIPE_FORMAT_DXT1_SRGBA = 110,
+ PIPE_FORMAT_DXT3_SRGBA = 111,
+ PIPE_FORMAT_DXT5_SRGBA = 112,
+
+ /* rgtc compressed */
+ PIPE_FORMAT_RGTC1_UNORM = 113,
+ PIPE_FORMAT_RGTC1_SNORM = 114,
+ PIPE_FORMAT_RGTC2_UNORM = 115,
+ PIPE_FORMAT_RGTC2_SNORM = 116,
+
+ PIPE_FORMAT_R8G8_B8G8_UNORM = 117,
+ PIPE_FORMAT_G8R8_G8B8_UNORM = 118,
+
+ /* mixed formats */
+ PIPE_FORMAT_R8SG8SB8UX8U_NORM = 119,
+ PIPE_FORMAT_R5SG5SB6U_NORM = 120,
+
+ /* TODO: re-order these */
+ PIPE_FORMAT_A8B8G8R8_UNORM = 121,
+ PIPE_FORMAT_B5G5R5X1_UNORM = 122,
+ PIPE_FORMAT_R10G10B10A2_USCALED = 123,
+ PIPE_FORMAT_R11G11B10_FLOAT = 124,
+ PIPE_FORMAT_R9G9B9E5_FLOAT = 125,
+ PIPE_FORMAT_Z32_FLOAT_S8X24_USCALED = 126,
+ PIPE_FORMAT_R1_UNORM = 127,
+ PIPE_FORMAT_R10G10B10X2_USCALED = 128,
+ PIPE_FORMAT_R10G10B10X2_SNORM = 129,
+ PIPE_FORMAT_L4A4_UNORM = 130,
+ PIPE_FORMAT_B10G10R10A2_UNORM = 131,
+ PIPE_FORMAT_R10SG10SB10SA2U_NORM = 132,
+ PIPE_FORMAT_R8G8Bx_SNORM = 133,
+ PIPE_FORMAT_R8G8B8X8_UNORM = 134,
+ PIPE_FORMAT_B4G4R4X4_UNORM = 135,
+
++ PIPE_FORMAT_YV12 = 136,
++ PIPE_FORMAT_YV16 = 137,
++ PIPE_FORMAT_IYUV = 138, /**< aka I420 */
++ PIPE_FORMAT_NV12 = 139,
++ PIPE_FORMAT_NV21 = 140,
+ PIPE_FORMAT_AYUV = PIPE_FORMAT_A8R8G8B8_UNORM,
+ PIPE_FORMAT_VUYA = PIPE_FORMAT_B8G8R8A8_UNORM,
- PIPE_FORMAT_IA44 = 116,
- PIPE_FORMAT_AI44 = 117,
++ PIPE_FORMAT_IA44 = 141,
++ PIPE_FORMAT_AI44 = 142,
+
PIPE_FORMAT_COUNT
};
--
+enum pipe_video_chroma_format
+{
+ PIPE_VIDEO_CHROMA_FORMAT_420,
+ PIPE_VIDEO_CHROMA_FORMAT_422,
+ PIPE_VIDEO_CHROMA_FORMAT_444
+};
-
#ifdef __cplusplus
}
#endif
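For reference, the chroma formats above imply the standard plane subsampling: 4:2:0 halves chroma in both axes, 4:2:2 halves it horizontally only, and 4:4:4 keeps full resolution. A small illustrative helper (an exposition aid, not an API added here):

    static void
    chroma_plane_size(enum pipe_video_chroma_format format,
                      unsigned luma_w, unsigned luma_h,
                      unsigned *chroma_w, unsigned *chroma_h)
    {
       *chroma_w = format == PIPE_VIDEO_CHROMA_FORMAT_444 ? luma_w : luma_w / 2;
       *chroma_h = format == PIPE_VIDEO_CHROMA_FORMAT_420 ? luma_h / 2 : luma_h;
    }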
struct pipe_winsys;
struct pipe_buffer;
struct pipe_context;
- struct pipe_texture;
+struct pipe_video_context;
+ struct pipe_resource;
enum drm_create_screen_mode {
DRM_CREATE_NORMAL = 0,
--- /dev/null
- struct pipe_texture template;
- struct pipe_texture *tex;
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <assert.h>
+#include <X11/Xlibint.h>
+#include <X11/extensions/XvMClib.h>
+#include <xorg/fourcc.h>
+#include <vl_winsys.h>
+#include <pipe/p_screen.h>
+#include <pipe/p_video_context.h>
+#include <pipe/p_state.h>
+#include <util/u_memory.h>
+#include <util/u_math.h>
+#include "xvmc_private.h"
+
+#define FOURCC_RGB 0x0000003
+
+static enum pipe_format XvIDToPipe(int xvimage_id)
+{
+ switch (xvimage_id) {
+ case FOURCC_RGB:
+ return PIPE_FORMAT_B8G8R8X8_UNORM;
+ default:
+ XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized Xv image ID 0x%08X.\n", xvimage_id);
+ return PIPE_FORMAT_NONE;
+ }
+}
+
+static int PipeToComponentOrder(enum pipe_format format, char *component_order)
+{
+ assert(component_order);
+
+ switch (format) {
+ case PIPE_FORMAT_B8G8R8X8_UNORM:
+ return 0;
+ default:
+ XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized PIPE_FORMAT 0x%08X.\n", format);
+ component_order[0] = 0;
+ component_order[1] = 0;
+ component_order[2] = 0;
+ component_order[3] = 0;
+ }
+
+ return 0;
+}
+
+static Status Validate(Display *dpy, XvPortID port, int surface_type_id, int xvimage_id)
+{
+ XvImageFormatValues *subpictures;
+ int num_subpics;
+ unsigned int i;
+
+ subpictures = XvMCListSubpictureTypes(dpy, port, surface_type_id, &num_subpics);
+ if (num_subpics < 1) {
+ if (subpictures)
+ XFree(subpictures);
+ return BadMatch;
+ }
+ if (!subpictures)
+ return BadAlloc;
+
+ for (i = 0; i < num_subpics; ++i) {
+ if (subpictures[i].id == xvimage_id) {
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Found requested subpicture format.\n" \
+ "[XvMC] port=%u\n" \
+ "[XvMC] surface id=0x%08X\n" \
+ "[XvMC] image id=0x%08X\n" \
+ "[XvMC] type=%08X\n" \
+ "[XvMC] byte order=%08X\n" \
+ "[XvMC] bits per pixel=%u\n" \
+ "[XvMC] format=%08X\n" \
+ "[XvMC] num planes=%d\n",
+ port, surface_type_id, xvimage_id, subpictures[i].type, subpictures[i].byte_order,
+ subpictures[i].bits_per_pixel, subpictures[i].format, subpictures[i].num_planes);
+ if (subpictures[i].type == XvRGB) {
+ XVMC_MSG(XVMC_TRACE, "[XvMC] depth=%d\n" \
+ "[XvMC] red mask=0x%08X\n" \
+ "[XvMC] green mask=0x%08X\n" \
+ "[XvMC] blue mask=0x%08X\n",
+ subpictures[i].depth, subpictures[i].red_mask, subpictures[i].green_mask, subpictures[i].blue_mask);
+ }
+ else if (subpictures[i].type == XvYUV) {
+ XVMC_MSG(XVMC_TRACE, "[XvMC] y sample bits=0x%08X\n" \
+ "[XvMC] u sample bits=0x%08X\n" \
+ "[XvMC] v sample bits=0x%08X\n" \
+ "[XvMC] horz y period=%u\n" \
+ "[XvMC] horz u period=%u\n" \
+ "[XvMC] horz v period=%u\n" \
+ "[XvMC] vert y period=%u\n" \
+ "[XvMC] vert u period=%u\n" \
+ "[XvMC] vert v period=%u\n",
+ subpictures[i].y_sample_bits, subpictures[i].u_sample_bits, subpictures[i].v_sample_bits,
+ subpictures[i].horz_y_period, subpictures[i].horz_u_period, subpictures[i].horz_v_period,
+ subpictures[i].vert_y_period, subpictures[i].vert_u_period, subpictures[i].vert_v_period);
+ }
+ break;
+ }
+ }
+
+ XFree(subpictures);
+
+ return i < num_subpics ? Success : BadMatch;
+}
+
+PUBLIC
+Status XvMCCreateSubpicture(Display *dpy, XvMCContext *context, XvMCSubpicture *subpicture,
+ unsigned short width, unsigned short height, int xvimage_id)
+{
+ XvMCContextPrivate *context_priv;
+ XvMCSubpicturePrivate *subpicture_priv;
+ struct pipe_video_context *vpipe;
- memset(&template, 0, sizeof(struct pipe_texture));
++ struct pipe_resource template;
++ struct pipe_resource *tex;
+ Status ret;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Creating subpicture %p.\n", subpicture);
+
+ assert(dpy);
+
+ if (!context)
+ return XvMCBadContext;
+
+ context_priv = context->privData;
+ vpipe = context_priv->vctx->vpipe;
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ if (width > context_priv->subpicture_max_width ||
+ height > context_priv->subpicture_max_height)
+ return BadValue;
+
+ ret = Validate(dpy, context->port, context->surface_type_id, xvimage_id);
+ if (ret != Success)
+ return ret;
+
+ subpicture_priv = CALLOC(1, sizeof(XvMCSubpicturePrivate));
+ if (!subpicture_priv)
+ return BadAlloc;
+
- template.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER;
++ memset(&template, 0, sizeof(struct pipe_resource));
+ template.target = PIPE_TEXTURE_2D;
+ template.format = XvIDToPipe(xvimage_id);
+ template.last_level = 0;
+ if (vpipe->get_param(vpipe, PIPE_CAP_NPOT_TEXTURES)) {
+ template.width0 = width;
+ template.height0 = height;
+ }
+ else {
+ template.width0 = util_next_power_of_two(width);
+ template.height0 = util_next_power_of_two(height);
+ }
+ template.depth0 = 1;
- tex = vpipe->screen->texture_create(vpipe->screen, &template);
++ template.usage = PIPE_USAGE_DYNAMIC;
++ template.bind = PIPE_BIND_SAMPLER_VIEW;
++ template.flags = 0;
+
+ subpicture_priv->context = context;
- PIPE_BUFFER_USAGE_CPU_WRITE |
- PIPE_BUFFER_USAGE_GPU_READ);
- pipe_texture_reference(&tex, NULL);
++ tex = vpipe->screen->resource_create(vpipe->screen, &template);
+ subpicture_priv->sfc = vpipe->screen->get_tex_surface(vpipe->screen, tex, 0, 0, 0,
++ PIPE_BIND_SAMPLER_VIEW);
++ pipe_resource_reference(&tex, NULL);
+ if (!subpicture_priv->sfc) {
+ FREE(subpicture_priv);
+ return BadAlloc;
+ }
+
+ subpicture->subpicture_id = XAllocID(dpy);
+ subpicture->context_id = context->context_id;
+ subpicture->xvimage_id = xvimage_id;
+ subpicture->width = width;
+ subpicture->height = height;
+ subpicture->num_palette_entries = 0;
+ subpicture->entry_bytes = PipeToComponentOrder(template.format, subpicture->component_order);
+ subpicture->privData = subpicture_priv;
+
+ SyncHandle();
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Subpicture %p created.\n", subpicture);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCClearSubpicture(Display *dpy, XvMCSubpicture *subpicture, short x, short y,
+ unsigned short width, unsigned short height, unsigned int color)
+{
+ XvMCSubpicturePrivate *subpicture_priv;
+ XvMCContextPrivate *context_priv;
+
+ assert(dpy);
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ subpicture_priv = subpicture->privData;
+ context_priv = subpicture_priv->context->privData;
+ /* TODO: Assert clear rect is within bounds? Or clip? */
+ context_priv->vctx->vpipe->surface_fill(context_priv->vctx->vpipe,
+ subpicture_priv->sfc, x, y,
+ width, height, color);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCCompositeSubpicture(Display *dpy, XvMCSubpicture *subpicture, XvImage *image,
+ short srcx, short srcy, unsigned short width, unsigned short height,
+ short dstx, short dsty)
+{
+ XvMCSubpicturePrivate *subpicture_priv;
+ XvMCContextPrivate *context_priv;
+ struct pipe_screen *screen;
+ struct pipe_transfer *xfer;
+ unsigned char *src, *dst;
+ unsigned x, y;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Compositing subpicture %p.\n", subpicture);
+
+ assert(dpy);
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ assert(image);
+
+ if (subpicture->xvimage_id != image->id)
+ return BadMatch;
+
+ /* No planar support for now */
+ if (image->num_planes != 1)
+ return BadMatch;
+
+ subpicture_priv = subpicture->privData;
+ context_priv = subpicture_priv->context->privData;
+ screen = context_priv->vctx->vpipe->screen;
+
+ /* TODO: Assert rects are within bounds? Or clip? */
+
++#if 0
+ xfer = screen->get_tex_transfer(screen, subpicture_priv->sfc->texture, 0, 0, 0,
+ PIPE_TRANSFER_WRITE, dstx, dsty, width, height);
+ if (!xfer)
+ return BadAlloc;
+
+ src = image->data;
+ dst = screen->transfer_map(screen, xfer);
+ if (!dst) {
+ screen->tex_transfer_destroy(xfer);
+ return BadAlloc;
+ }
+
+ switch (image->id) {
+ case FOURCC_RGB:
+ assert(subpicture_priv->sfc->format == XvIDToPipe(image->id));
+ for (y = 0; y < height; ++y) {
+ for (x = 0; x < width; ++x, src += 3, dst += 4) {
+ /* TODO: Confirm or fix */
+ dst[0] = src[0];
+ dst[1] = src[1];
+ dst[2] = src[2];
+ }
+ }
+ break;
+ default:
+ XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized Xv image ID 0x%08X.\n", image->id);
+ }
+
+ screen->transfer_unmap(screen, xfer);
+ screen->tex_transfer_destroy(xfer);
++#endif
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Subpicture %p composited.\n", subpicture);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCDestroySubpicture(Display *dpy, XvMCSubpicture *subpicture)
+{
+ XvMCSubpicturePrivate *subpicture_priv;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Destroying subpicture %p.\n", subpicture);
+
+ assert(dpy);
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ subpicture_priv = subpicture->privData;
+ pipe_surface_reference(&subpicture_priv->sfc, NULL);
+ FREE(subpicture_priv);
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Subpicture %p destroyed.\n", subpicture);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCSetSubpicturePalette(Display *dpy, XvMCSubpicture *subpicture, unsigned char *palette)
+{
+ assert(dpy);
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ assert(palette);
+
+ /* We don't support paletted subpictures */
+ return BadMatch;
+}
+
+PUBLIC
+Status XvMCBlendSubpicture(Display *dpy, XvMCSurface *target_surface, XvMCSubpicture *subpicture,
+ short subx, short suby, unsigned short subw, unsigned short subh,
+ short surfx, short surfy, unsigned short surfw, unsigned short surfh)
+{
+ XvMCSurfacePrivate *surface_priv;
+ XvMCSubpicturePrivate *subpicture_priv;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Associating subpicture %p with surface %p.\n", subpicture, target_surface);
+
+ assert(dpy);
+
+ if (!target_surface)
+ return XvMCBadSurface;
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ if (target_surface->context_id != subpicture->context_id)
+ return BadMatch;
+
+ /* TODO: Verify against subpicture independent scaling */
+
+ surface_priv = target_surface->privData;
+ subpicture_priv = subpicture->privData;
+
+ /* TODO: Assert rects are within bounds? Or clip? */
+
+ surface_priv->subpicture = subpicture;
+ surface_priv->subx = subx;
+ surface_priv->suby = suby;
+ surface_priv->subw = subw;
+ surface_priv->subh = subh;
+ surface_priv->surfx = surfx;
+ surface_priv->surfy = surfy;
+ surface_priv->surfw = surfw;
+ surface_priv->surfh = surfh;
+ subpicture_priv->surface = target_surface;
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCBlendSubpicture2(Display *dpy, XvMCSurface *source_surface, XvMCSurface *target_surface,
+ XvMCSubpicture *subpicture, short subx, short suby, unsigned short subw, unsigned short subh,
+ short surfx, short surfy, unsigned short surfw, unsigned short surfh)
+{
+ assert(dpy);
+
+ if (!source_surface || !target_surface)
+ return XvMCBadSurface;
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ if (source_surface->context_id != subpicture->context_id)
+ return BadMatch;
+
+   if (target_surface->context_id != subpicture->context_id)
+ return BadMatch;
+
+ /* TODO: Assert rects are within bounds? Or clip? */
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCSyncSubpicture(Display *dpy, XvMCSubpicture *subpicture)
+{
+ assert(dpy);
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCFlushSubpicture(Display *dpy, XvMCSubpicture *subpicture)
+{
+ assert(dpy);
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCGetSubpictureStatus(Display *dpy, XvMCSubpicture *subpicture, int *status)
+{
+ assert(dpy);
+
+ if (!subpicture)
+ return XvMCBadSubpicture;
+
+ assert(status);
+
+ /* TODO */
+ *status = 0;
+
+ return Success;
+}
--- /dev/null
- struct pipe_texture template;
- struct pipe_texture *tex;
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <assert.h>
+#include <X11/Xlibint.h>
+#include <vl_winsys.h>
+#include <pipe/p_video_context.h>
+#include <pipe/p_video_state.h>
+#include <pipe/p_state.h>
+#include <util/u_inlines.h>
+#include <util/u_memory.h>
+#include <util/u_math.h>
+#include "xvmc_private.h"
+
+static enum pipe_mpeg12_macroblock_type TypeToPipe(int xvmc_mb_type)
+{
+ if (xvmc_mb_type & XVMC_MB_TYPE_INTRA)
+ return PIPE_MPEG12_MACROBLOCK_TYPE_INTRA;
+ if ((xvmc_mb_type & (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD)) == XVMC_MB_TYPE_MOTION_FORWARD)
+ return PIPE_MPEG12_MACROBLOCK_TYPE_FWD;
+ if ((xvmc_mb_type & (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD)) == XVMC_MB_TYPE_MOTION_BACKWARD)
+ return PIPE_MPEG12_MACROBLOCK_TYPE_BKWD;
+ if ((xvmc_mb_type & (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD)) == (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD))
+ return PIPE_MPEG12_MACROBLOCK_TYPE_BI;
+
+ assert(0);
+
+ XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized mb type 0x%08X.\n", xvmc_mb_type);
+
+ return -1;
+}
+
+static enum pipe_mpeg12_picture_type PictureToPipe(int xvmc_pic)
+{
+ switch (xvmc_pic) {
+ case XVMC_TOP_FIELD:
+ return PIPE_MPEG12_PICTURE_TYPE_FIELD_TOP;
+ case XVMC_BOTTOM_FIELD:
+ return PIPE_MPEG12_PICTURE_TYPE_FIELD_BOTTOM;
+ case XVMC_FRAME_PICTURE:
+ return PIPE_MPEG12_PICTURE_TYPE_FRAME;
+ default:
+ assert(0);
+ }
+
+ XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized picture type 0x%08X.\n", xvmc_pic);
+
+ return -1;
+}
+
+static enum pipe_mpeg12_motion_type MotionToPipe(int xvmc_motion_type, int xvmc_dct_type)
+{
+ switch (xvmc_motion_type) {
+ case XVMC_PREDICTION_FRAME:
+ if (xvmc_dct_type == XVMC_DCT_TYPE_FIELD)
+ return PIPE_MPEG12_MOTION_TYPE_16x8;
+ else if (xvmc_dct_type == XVMC_DCT_TYPE_FRAME)
+ return PIPE_MPEG12_MOTION_TYPE_FRAME;
+ break;
+ case XVMC_PREDICTION_FIELD:
+ return PIPE_MPEG12_MOTION_TYPE_FIELD;
+ case XVMC_PREDICTION_DUAL_PRIME:
+ return PIPE_MPEG12_MOTION_TYPE_DUALPRIME;
+ default:
+ assert(0);
+ }
+
+ XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized motion type 0x%08X (with DCT type 0x%08X).\n", xvmc_motion_type, xvmc_dct_type);
+
+ return -1;
+}
+
+static bool
+CreateOrResizeBackBuffer(struct vl_context *vctx, unsigned int width, unsigned int height,
+ struct pipe_surface **backbuffer)
+{
+ struct pipe_video_context *vpipe;
- memset(&template, 0, sizeof(struct pipe_texture));
++ struct pipe_resource template;
++ struct pipe_resource *tex;
+
+ assert(vctx);
+
+ vpipe = vctx->vpipe;
+
+ if (*backbuffer) {
+ if ((*backbuffer)->width != width || (*backbuffer)->height != height)
+ pipe_surface_reference(backbuffer, NULL);
+ else
+ return true;
+ }
+
- template.tex_usage = PIPE_TEXTURE_USAGE_DISPLAY_TARGET;
++ memset(&template, 0, sizeof(struct pipe_resource));
+ template.target = PIPE_TEXTURE_2D;
+ template.format = vctx->vscreen->format;
+ template.last_level = 0;
+ template.width0 = width;
+ template.height0 = height;
+ template.depth0 = 1;
- tex = vpipe->screen->texture_create(vpipe->screen, &template);
++ template.usage = PIPE_USAGE_DEFAULT;
++ template.bind = PIPE_BIND_RENDER_TARGET;
++ template.flags = 0;
+
- PIPE_BUFFER_USAGE_GPU_READ_WRITE);
- pipe_texture_reference(&tex, NULL);
++ tex = vpipe->screen->resource_create(vpipe->screen, &template);
+ if (!tex)
+ return false;
+
+ *backbuffer = vpipe->screen->get_tex_surface(vpipe->screen, tex, 0, 0, 0,
- struct pipe_texture template;
- struct pipe_texture *vsfc_tex;
++ PIPE_BIND_RENDER_TARGET | PIPE_BIND_BLIT_SOURCE);
++ pipe_resource_reference(&tex, NULL);
+
+ if (!*backbuffer)
+ return false;
+
+ /* Clear the backbuffer in case the video doesn't cover the whole window */
+ /* FIXME: Need to clear every time a frame moves and leaves dirty rects */
+ vpipe->surface_fill(vpipe, *backbuffer, 0, 0, width, height, 0);
+
+ return true;
+}
+
+static void
+MacroBlocksToPipe(struct pipe_screen *screen,
+ const XvMCMacroBlockArray *xvmc_macroblocks,
+ const XvMCBlockArray *xvmc_blocks,
+ unsigned int first_macroblock,
+ unsigned int num_macroblocks,
+ struct pipe_mpeg12_macroblock *pipe_macroblocks)
+{
+ unsigned int i, j, k, l;
+ XvMCMacroBlock *xvmc_mb;
+
+ assert(xvmc_macroblocks);
+ assert(xvmc_blocks);
+ assert(pipe_macroblocks);
+ assert(num_macroblocks);
+
+ xvmc_mb = xvmc_macroblocks->macro_blocks + first_macroblock;
+
+ for (i = 0; i < num_macroblocks; ++i) {
+ pipe_macroblocks->base.codec = PIPE_VIDEO_CODEC_MPEG12;
+ pipe_macroblocks->mbx = xvmc_mb->x;
+ pipe_macroblocks->mby = xvmc_mb->y;
+ pipe_macroblocks->mb_type = TypeToPipe(xvmc_mb->macroblock_type);
+ if (pipe_macroblocks->mb_type != PIPE_MPEG12_MACROBLOCK_TYPE_INTRA)
+ pipe_macroblocks->mo_type = MotionToPipe(xvmc_mb->motion_type, xvmc_mb->dct_type);
+      else
+         /* Get rid of Valgrind 'undefined' warnings */
+         pipe_macroblocks->mo_type = -1;
+ pipe_macroblocks->dct_type = xvmc_mb->dct_type == XVMC_DCT_TYPE_FIELD ?
+ PIPE_MPEG12_DCT_TYPE_FIELD : PIPE_MPEG12_DCT_TYPE_FRAME;
+
+ for (j = 0; j < 2; ++j)
+ for (k = 0; k < 2; ++k)
+ for (l = 0; l < 2; ++l)
+ pipe_macroblocks->pmv[j][k][l] = xvmc_mb->PMV[j][k][l];
+
+ pipe_macroblocks->cbp = xvmc_mb->coded_block_pattern;
+ pipe_macroblocks->blocks = xvmc_blocks->blocks + xvmc_mb->index * BLOCK_SIZE_SAMPLES;
+
+ ++pipe_macroblocks;
+ ++xvmc_mb;
+ }
+}
+
+PUBLIC
+Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surface)
+{
+ XvMCContextPrivate *context_priv;
+ struct pipe_video_context *vpipe;
+ XvMCSurfacePrivate *surface_priv;
- memset(&template, 0, sizeof(struct pipe_texture));
++ struct pipe_resource template;
++ struct pipe_resource *vsfc_tex;
+ struct pipe_surface *vsfc;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Creating surface %p.\n", surface);
+
+ assert(dpy);
+
+ if (!context)
+ return XvMCBadContext;
+ if (!surface)
+ return XvMCBadSurface;
+
+ context_priv = context->privData;
+ vpipe = context_priv->vctx->vpipe;
+
+ surface_priv = CALLOC(1, sizeof(XvMCSurfacePrivate));
+ if (!surface_priv)
+ return BadAlloc;
+
- PIPE_TEXTURE_USAGE_SAMPLER |
- PIPE_TEXTURE_USAGE_RENDER_TARGET,
++ memset(&template, 0, sizeof(struct pipe_resource));
+ template.target = PIPE_TEXTURE_2D;
+ template.format = (enum pipe_format)vpipe->get_param(vpipe, PIPE_CAP_DECODE_TARGET_PREFERRED_FORMAT);
+ template.last_level = 0;
+ if (vpipe->is_format_supported(vpipe, template.format,
- PIPE_TEXTURE_USAGE_SAMPLER |
- PIPE_TEXTURE_USAGE_RENDER_TARGET,
++ PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET,
+ PIPE_TEXTURE_GEOM_NON_POWER_OF_TWO)) {
+ template.width0 = context->width;
+ template.height0 = context->height;
+ }
+ else {
+ assert(vpipe->is_format_supported(vpipe, template.format,
- template.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER | PIPE_TEXTURE_USAGE_RENDER_TARGET;
- vsfc_tex = vpipe->screen->texture_create(vpipe->screen, &template);
++ PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET,
+ PIPE_TEXTURE_GEOM_NON_SQUARE));
+ template.width0 = util_next_power_of_two(context->width);
+ template.height0 = util_next_power_of_two(context->height);
+ }
+ template.depth0 = 1;
- PIPE_BUFFER_USAGE_GPU_READ_WRITE);
- pipe_texture_reference(&vsfc_tex, NULL);
++ template.usage = PIPE_USAGE_DEFAULT;
++ template.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
++ template.flags = 0;
++ vsfc_tex = vpipe->screen->resource_create(vpipe->screen, &template);
+ if (!vsfc_tex) {
+ FREE(surface_priv);
+ return BadAlloc;
+ }
+
+ vsfc = vpipe->screen->get_tex_surface(vpipe->screen, vsfc_tex, 0, 0, 0,
++ PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);
++ pipe_resource_reference(&vsfc_tex, NULL);
+ if (!vsfc) {
+ FREE(surface_priv);
+ return BadAlloc;
+ }
+
+ surface_priv->pipe_vsfc = vsfc;
+ surface_priv->context = context;
+
+ surface->surface_id = XAllocID(dpy);
+ surface->context_id = context->context_id;
+ surface->surface_type_id = context->surface_type_id;
+ surface->width = context->width;
+ surface->height = context->height;
+ surface->privData = surface_priv;
+
+ SyncHandle();
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Surface %p created.\n", surface);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int picture_structure,
+ XvMCSurface *target_surface, XvMCSurface *past_surface, XvMCSurface *future_surface,
+ unsigned int flags, unsigned int num_macroblocks, unsigned int first_macroblock,
+ XvMCMacroBlockArray *macroblocks, XvMCBlockArray *blocks
+)
+{
+ struct pipe_video_context *vpipe;
+ struct pipe_surface *t_vsfc;
+ struct pipe_surface *p_vsfc;
+ struct pipe_surface *f_vsfc;
+ XvMCContextPrivate *context_priv;
+ XvMCSurfacePrivate *target_surface_priv;
+ XvMCSurfacePrivate *past_surface_priv;
+ XvMCSurfacePrivate *future_surface_priv;
+ struct pipe_mpeg12_macroblock pipe_macroblocks[num_macroblocks];
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Rendering to surface %p.\n", target_surface);
+
+ assert(dpy);
+
+ if (!context || !context->privData)
+ return XvMCBadContext;
+ if (!target_surface || !target_surface->privData)
+ return XvMCBadSurface;
+
+ if (picture_structure != XVMC_TOP_FIELD &&
+ picture_structure != XVMC_BOTTOM_FIELD &&
+ picture_structure != XVMC_FRAME_PICTURE)
+ return BadValue;
+   /* Backward-only prediction is expressed as forward prediction (past set,
+    * future NULL), so a future surface without a past surface is invalid. */
+ if (future_surface && !past_surface)
+ return BadMatch;
+
+ assert(context->context_id == target_surface->context_id);
+ assert(!past_surface || context->context_id == past_surface->context_id);
+ assert(!future_surface || context->context_id == future_surface->context_id);
+
+ assert(macroblocks);
+ assert(blocks);
+
+ assert(macroblocks->context_id == context->context_id);
+ assert(blocks->context_id == context->context_id);
+
+ assert(flags == 0 || flags == XVMC_SECOND_FIELD);
+
+ target_surface_priv = target_surface->privData;
+ past_surface_priv = past_surface ? past_surface->privData : NULL;
+ future_surface_priv = future_surface ? future_surface->privData : NULL;
+
+ assert(target_surface_priv->context == context);
+ assert(!past_surface || past_surface_priv->context == context);
+ assert(!future_surface || future_surface_priv->context == context);
+
+ context_priv = context->privData;
+ vpipe = context_priv->vctx->vpipe;
+
+ t_vsfc = target_surface_priv->pipe_vsfc;
+ p_vsfc = past_surface ? past_surface_priv->pipe_vsfc : NULL;
+ f_vsfc = future_surface ? future_surface_priv->pipe_vsfc : NULL;
+
+ MacroBlocksToPipe(vpipe->screen, macroblocks, blocks, first_macroblock,
+ num_macroblocks, pipe_macroblocks);
+
+ vpipe->set_decode_target(vpipe, t_vsfc);
+ vpipe->decode_macroblocks(vpipe, p_vsfc, f_vsfc, num_macroblocks,
+ &pipe_macroblocks->base, target_surface_priv->render_fence);
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for rendering.\n", target_surface);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCFlushSurface(Display *dpy, XvMCSurface *surface)
+{
+ assert(dpy);
+
+ if (!surface)
+ return XvMCBadSurface;
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCSyncSurface(Display *dpy, XvMCSurface *surface)
+{
+ assert(dpy);
+
+ if (!surface)
+ return XvMCBadSurface;
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
+ short srcx, short srcy, unsigned short srcw, unsigned short srch,
+ short destx, short desty, unsigned short destw, unsigned short desth,
+ int flags)
+{
+ Window root;
+ int x, y;
+ unsigned int width, height;
+ unsigned int border_width;
+ unsigned int depth;
+ struct pipe_video_context *vpipe;
+ XvMCSurfacePrivate *surface_priv;
+ XvMCContextPrivate *context_priv;
+ XvMCSubpicturePrivate *subpicture_priv;
+ XvMCContext *context;
+ struct pipe_video_rect src_rect = {srcx, srcy, srcw, srch};
+ struct pipe_video_rect dst_rect = {destx, desty, destw, desth};
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Displaying surface %p.\n", surface);
+
+ assert(dpy);
+
+ if (!surface || !surface->privData)
+ return XvMCBadSurface;
+
+ if (XGetGeometry(dpy, drawable, &root, &x, &y, &width, &height, &border_width, &depth) == BadDrawable)
+ return BadDrawable;
+
+ assert(flags == XVMC_TOP_FIELD || flags == XVMC_BOTTOM_FIELD || flags == XVMC_FRAME_PICTURE);
+ assert(srcx + srcw - 1 < surface->width);
+ assert(srcy + srch - 1 < surface->height);
+ /*
+ * Some apps (mplayer) hit these asserts because they call
+ * this function after the window has been resized by the WM
+ * but before they've handled the corresponding XEvent and
+ * know about the new dimensions. The output should be clipped
+ * until the app updates destw and desth.
+ */
+ /*
+ assert(destx + destw - 1 < width);
+ assert(desty + desth - 1 < height);
+ */
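+
+   /*
+    * A possible remedy (sketch only, not part of this change): clamp the
+    * destination rectangle to the drawable's current size instead of
+    * asserting, e.g. destw = MIN2(destw, width - destx) and likewise for
+    * desth, so stale dimensions are tolerated until the app catches up.
+    */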
+
+ surface_priv = surface->privData;
+ context = surface_priv->context;
+ context_priv = context->privData;
+ subpicture_priv = surface_priv->subpicture ? surface_priv->subpicture->privData : NULL;
+ vpipe = context_priv->vctx->vpipe;
+
+ if (!CreateOrResizeBackBuffer(context_priv->vctx, width, height, &context_priv->backbuffer))
+ return BadAlloc;
+
+ if (subpicture_priv) {
+ struct pipe_video_rect src_rect = {surface_priv->subx, surface_priv->suby, surface_priv->subw, surface_priv->subh};
+ struct pipe_video_rect dst_rect = {surface_priv->surfx, surface_priv->surfy, surface_priv->surfw, surface_priv->surfh};
+ struct pipe_video_rect *src_rects[1] = {&src_rect};
+ struct pipe_video_rect *dst_rects[1] = {&dst_rect};
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Surface %p has subpicture %p.\n", surface, surface_priv->subpicture);
+
+ assert(subpicture_priv->surface == surface);
+ vpipe->set_picture_layers(vpipe, &subpicture_priv->sfc, &src_rects, &dst_rects, 1);
+
+ surface_priv->subpicture = NULL;
+ subpicture_priv->surface = NULL;
+ }
+ else
+ vpipe->set_picture_layers(vpipe, NULL, NULL, NULL, 0);
+
+ vpipe->render_picture(vpipe, surface_priv->pipe_vsfc, PictureToPipe(flags), &src_rect,
+ context_priv->backbuffer, &dst_rect, surface_priv->disp_fence);
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for display. Pushing to front buffer.\n", surface);
+
+ vl_video_bind_drawable(context_priv->vctx, drawable);
+
+ vpipe->screen->flush_frontbuffer
+ (
+ vpipe->screen,
+ context_priv->backbuffer,
+ vpipe->priv
+ );
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Pushed surface %p to front buffer.\n", surface);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCGetSurfaceStatus(Display *dpy, XvMCSurface *surface, int *status)
+{
+ assert(dpy);
+
+ if (!surface)
+ return XvMCBadSurface;
+
+ assert(status);
+
+ *status = 0;
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCDestroySurface(Display *dpy, XvMCSurface *surface)
+{
+ XvMCSurfacePrivate *surface_priv;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Destroying surface %p.\n", surface);
+
+ assert(dpy);
+
+ if (!surface || !surface->privData)
+ return XvMCBadSurface;
+
+ surface_priv = surface->privData;
+ pipe_surface_reference(&surface_priv->pipe_vsfc, NULL);
+ FREE(surface_priv);
+ surface->privData = NULL;
+
+ XVMC_MSG(XVMC_TRACE, "[XvMC] Surface %p destroyed.\n", surface);
+
+ return Success;
+}
+
+PUBLIC
+Status XvMCHideSurface(Display *dpy, XvMCSurface *surface)
+{
+ assert(dpy);
+
+ if (!surface || !surface->privData)
+ return XvMCBadSurface;
+
+ /* No op, only for overlaid rendering */
+
+ return Success;
+}
--- /dev/null
+ TOP = ../../../..
+ include $(TOP)/configs/current
+
+ LIBNAME = nouveau_dri.so
+
+ PIPE_DRIVERS = \
+ $(TOP)/src/gallium/state_trackers/dri/drm/libdridrm.a \
+ $(TOP)/src/gallium/winsys/nouveau/drm/libnouveaudrm.a \
+ $(TOP)/src/gallium/drivers/nvfx/libnvfx.a \
+ $(TOP)/src/gallium/drivers/nv50/libnv50.a \
++ $(TOP)/src/gallium/drivers/softpipe/libsoftpipe.a \
+ $(TOP)/src/gallium/drivers/nouveau/libnouveau.a
+
+ C_SOURCES = \
+ $(COMMON_GALLIUM_SOURCES) \
+ $(DRIVER_SOURCES)
+
+ include ../Makefile.dri
+
+ DRI_LIB_DEPS += $(shell pkg-config libdrm_nouveau --libs)
+
+ symlinks:
--- /dev/null
- struct pipe_texture template, *front_tex;
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <vl_winsys.h>
+#include <driclient.h>
+#include <state_tracker/dri1_api.h>
+#include <pipe/p_video_context.h>
+#include <pipe/p_state.h>
+#include <util/u_memory.h>
+
+struct vl_dri_screen
+{
+ struct vl_screen base;
+ Visual *visual;
+ struct drm_api *api;
+ dri_screen_t *dri_screen;
+ dri_framebuffer_t dri_framebuf;
+ struct dri1_api *api_hooks;
+ boolean dri2;
+};
+
+struct vl_dri_context
+{
+ struct vl_context base;
+ boolean is_locked;
+ boolean lost_lock;
+ drmLock *lock;
+ dri_context_t *dri_context;
+ int fd;
+ struct pipe_video_context *vpipe;
+ dri_drawable_t *drawable;
+ struct pipe_surface *dri2_front;
+};
+
+static void
+vl_dri_lock(void *priv)
+{
+ struct vl_dri_context *vl_dri_ctx = priv;
+ drm_context_t hw_context;
+ char ret = 0;
+
+ assert(priv);
+
+ hw_context = vl_dri_ctx->dri_context->drm_context;
+
+ DRM_CAS(vl_dri_ctx->lock, hw_context, DRM_LOCK_HELD | hw_context, ret);
+ if (ret) {
+ drmGetLock(vl_dri_ctx->fd, hw_context, 0);
+ vl_dri_ctx->lost_lock = TRUE;
+ }
+ vl_dri_ctx->is_locked = TRUE;
+}
+
+static void
+vl_dri_unlock(void *priv)
+{
+ struct vl_dri_context *vl_dri_ctx = priv;
+ drm_context_t hw_context;
+
+ assert(priv);
+
+ hw_context = vl_dri_ctx->dri_context->drm_context;
+
+ vl_dri_ctx->is_locked = FALSE;
+ DRM_UNLOCK(vl_dri_ctx->fd, vl_dri_ctx->lock, hw_context);
+}
+
+static boolean
+vl_dri_is_locked(void *priv)
+{
+ struct vl_dri_context *vl_dri_ctx = priv;
+
+ assert(priv);
+
+ return vl_dri_ctx->is_locked;
+}
+
+static boolean
+vl_dri_lost_lock(void *priv)
+{
+ struct vl_dri_context *vl_dri_ctx = priv;
+
+ assert(priv);
+
+ return vl_dri_ctx->lost_lock;
+}
+
+static void
+vl_dri_clear_lost_lock(void *priv)
+{
+ struct vl_dri_context *vl_dri_ctx = priv;
+
+ assert(priv);
+
+ vl_dri_ctx->lost_lock = FALSE;
+}
+
+struct dri1_api_lock_funcs dri1_lf =
+{
+ .lock = vl_dri_lock,
+ .unlock = vl_dri_unlock,
+ .is_locked = vl_dri_is_locked,
+ .is_lock_lost = vl_dri_lost_lock,
+ .clear_lost_lock = vl_dri_clear_lost_lock
+};
+
+static void
+vl_dri_copy_version(struct dri1_api_version *dst, dri_version_t *src)
+{
+ assert(src);
+ assert(dst);
+ dst->major = src->major;
+ dst->minor = src->minor;
+ dst->patch_level = src->patch;
+}
+
+static boolean
+vl_dri_intersect_src_bbox(struct drm_clip_rect *dst, int dst_x, int dst_y,
+ const struct drm_clip_rect *src, const struct drm_clip_rect *bbox)
+{
+ int xy1;
+ int xy2;
+
+ assert(dst);
+ assert(src);
+ assert(bbox);
+
+ xy1 = ((int)src->x1 > (int)bbox->x1 + dst_x) ? src->x1 :
+ (int)bbox->x1 + dst_x;
+ xy2 = ((int)src->x2 < (int)bbox->x2 + dst_x) ? src->x2 :
+ (int)bbox->x2 + dst_x;
+ if (xy1 >= xy2 || xy1 < 0)
+ return FALSE;
+
+ dst->x1 = xy1;
+ dst->x2 = xy2;
+
+ xy1 = ((int)src->y1 > (int)bbox->y1 + dst_y) ? src->y1 :
+ (int)bbox->y1 + dst_y;
+ xy2 = ((int)src->y2 < (int)bbox->y2 + dst_y) ? src->y2 :
+ (int)bbox->y2 + dst_y;
+ if (xy1 >= xy2 || xy1 < 0)
+ return FALSE;
+
+ dst->y1 = xy1;
+ dst->y2 = xy2;
+ return TRUE;
+}
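+
+/*
+ * Worked example: with src = {10, 10, 100, 100}, bbox = {0, 0, 50, 50}
+ * and (dst_x, dst_y) = (20, 20), the bbox is translated to {20, 20, 70, 70}
+ * and intersected with src, yielding dst = {20, 20, 70, 70}.
+ */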
+
+static void
+vl_clip_copy(struct vl_dri_context *vl_dri_ctx,
+ struct pipe_surface *dst,
+ struct pipe_surface *src,
+ const struct drm_clip_rect *src_bbox)
+{
+ struct pipe_video_context *vpipe;
+ struct drm_clip_rect clip;
+ struct drm_clip_rect *cur;
+ int i;
+
+ assert(vl_dri_ctx);
+ assert(dst);
+ assert(src);
+ assert(src_bbox);
+
+ vpipe = vl_dri_ctx->base.vpipe;
+
+ assert(vl_dri_ctx->drawable->cliprects);
+ assert(vl_dri_ctx->drawable->num_cliprects > 0);
+
+ cur = vl_dri_ctx->drawable->cliprects;
+
+ for (i = 0; i < vl_dri_ctx->drawable->num_cliprects; ++i) {
+ if (vl_dri_intersect_src_bbox(&clip, vl_dri_ctx->drawable->x, vl_dri_ctx->drawable->y, cur++, src_bbox))
+ vpipe->surface_copy
+ (
+ vpipe, dst, clip.x1, clip.y1, src,
+ (int)clip.x1 - vl_dri_ctx->drawable->x,
+ (int)clip.y1 - vl_dri_ctx->drawable->y,
+ clip.x2 - clip.x1, clip.y2 - clip.y1
+ );
+ }
+}
+
+static void
+vl_dri_update_drawables_locked(struct vl_dri_context *vl_dri_ctx)
+{
+ struct vl_dri_screen *vl_dri_scrn;
+
+ assert(vl_dri_ctx);
+
+ vl_dri_scrn = (struct vl_dri_screen*)vl_dri_ctx->base.vscreen;
+
+ if (vl_dri_ctx->lost_lock) {
+ vl_dri_ctx->lost_lock = FALSE;
+ DRI_VALIDATE_DRAWABLE_INFO(vl_dri_scrn->dri_screen, vl_dri_ctx->drawable);
+ }
+}
+
+static void
+vl_dri_flush_frontbuffer(struct pipe_screen *screen,
+ struct pipe_surface *surf, void *context_private)
+{
+ struct vl_dri_context *vl_dri_ctx = (struct vl_dri_context*)context_private;
+ struct vl_dri_screen *vl_dri_scrn;
+ struct drm_clip_rect src_bbox;
+ boolean save_lost_lock = FALSE;
+
+ assert(screen);
+ assert(surf);
+ assert(context_private);
+
+ vl_dri_scrn = (struct vl_dri_screen*)vl_dri_ctx->base.vscreen;
+
+ vl_dri_lock(vl_dri_ctx);
+
+ save_lost_lock = vl_dri_ctx->lost_lock;
+
+ vl_dri_update_drawables_locked(vl_dri_ctx);
+
+ if (vl_dri_ctx->drawable->cliprects) {
+ src_bbox.x1 = 0;
+ src_bbox.x2 = vl_dri_ctx->drawable->w;
+ src_bbox.y1 = 0;
+ src_bbox.y2 = vl_dri_ctx->drawable->h;
+
+#if 0
+      if (vl_dri_scrn->api_hooks->present_locked)
+         vl_dri_scrn->api_hooks->present_locked(screen, surf,
+                                                vl_dri_ctx->drawable->cliprects,
+                                                vl_dri_ctx->drawable->num_cliprects,
+                                                vl_dri_ctx->drawable->x, vl_dri_ctx->drawable->y,
+                                                &src_bbox, NULL /*fence*/);
+ else
+#endif
+ if (vl_dri_scrn->api_hooks->front_srf_locked) {
+ struct pipe_surface *front = vl_dri_scrn->api_hooks->front_srf_locked(screen);
+
+ if (front)
+ vl_clip_copy(vl_dri_ctx, front, surf, &src_bbox);
+
+ //st_flush(ctx->st, PIPE_FLUSH_RENDER_CACHE, fence);
+ }
+ }
+
+ vl_dri_ctx->lost_lock = save_lost_lock;
+
+ vl_dri_unlock(vl_dri_ctx);
+}
+
+static struct pipe_surface*
+vl_dri2_get_front(struct vl_dri_screen *vl_dri_scrn, Drawable drawable)
+{
+ int w, h;
+ unsigned int attachments[1] = {DRI_BUFFER_FRONT_LEFT};
+ int count;
+ DRI2Buffer *dri2_front;
- front_tex = vl_dri_scrn->api->texture_from_shared_handle(vl_dri_scrn->api, vl_dri_scrn->base.pscreen,
- &template, "", dri2_front->pitch, dri2_front->name);
++ struct pipe_resource template, *front_tex;
+ struct pipe_surface *front_surf = NULL;
+
+ assert(vl_dri_scrn);
+
+ dri2_front = DRI2GetBuffers(vl_dri_scrn->dri_screen->display,
+ drawable, &w, &h, attachments, 1, &count);
+ if (dri2_front) {
- PIPE_BUFFER_USAGE_GPU_READ_WRITE);
- pipe_texture_reference(&front_tex, NULL);
++ struct winsys_handle dri2_front_handle =
++ {
++ .type = DRM_API_HANDLE_TYPE_SHARED,
++ .handle = dri2_front->name,
++ .stride = dri2_front->pitch
++ };
++ front_tex = vl_dri_scrn->base.pscreen->resource_from_handle(vl_dri_scrn->base.pscreen, &template, &dri2_front_handle);
+ if (front_tex)
+ front_surf = vl_dri_scrn->base.pscreen->get_tex_surface(vl_dri_scrn->base.pscreen,
+ front_tex, 0, 0, 0,
++ /*PIPE_BIND_RENDER_TARGET*/ PIPE_BIND_BLIT_DESTINATION);
++ pipe_resource_reference(&front_tex, NULL);
+ }
+
+ return front_surf;
+}
+
+static void
+vl_dri2_flush_frontbuffer(struct pipe_screen *screen,
+ struct pipe_surface *surf, void *context_private)
+{
+ struct vl_dri_context *vl_dri_ctx = (struct vl_dri_context*)context_private;
+ struct vl_dri_screen *vl_dri_scrn;
+ struct pipe_video_context *vpipe;
+
+ assert(screen);
+ assert(surf);
+ assert(context_private);
+ assert(vl_dri_ctx->dri2_front);
+
+ vl_dri_scrn = (struct vl_dri_screen*)vl_dri_ctx->base.vscreen;
+ vpipe = vl_dri_ctx->base.vpipe;
+
+ /* XXX: Why not just render to fake front? */
+ vpipe->surface_copy(vpipe, vl_dri_ctx->dri2_front, 0, 0, surf, 0, 0, surf->width, surf->height);
+
+ //st_flush(ctx->st, PIPE_FLUSH_RENDER_CACHE, fence);
+}
+
+
+Drawable
+vl_video_bind_drawable(struct vl_context *vctx, Drawable drawable)
+{
+ struct vl_dri_context *vl_dri_ctx = (struct vl_dri_context*)vctx;
+ struct vl_dri_screen *vl_dri_scrn;
+ dri_drawable_t *dri_drawable;
+ Drawable old_drawable = None;
+
+ assert(vctx);
+
+ if (vl_dri_ctx->drawable)
+ old_drawable = vl_dri_ctx->drawable->x_drawable;
+
+ if (drawable != old_drawable) {
+ vl_dri_scrn = (struct vl_dri_screen*)vl_dri_ctx->base.vscreen;
+ if (vl_dri_scrn->dri2) {
+ /* XXX: Need dri2CreateDrawable()? */
+ vl_dri_ctx->dri2_front = vl_dri2_get_front(vl_dri_scrn, drawable);
+ }
+ else {
+ driCreateDrawable(vl_dri_scrn->dri_screen, drawable, &dri_drawable);
+ vl_dri_ctx->drawable = dri_drawable;
+ }
+ }
+
+ return old_drawable;
+}
+
+struct vl_screen*
+vl_screen_create(Display *display, int screen)
+{
+ struct vl_dri_screen *vl_dri_scrn;
+ struct dri1_create_screen_arg arg;
+
+ assert(display);
+
+ vl_dri_scrn = CALLOC_STRUCT(vl_dri_screen);
+ if (!vl_dri_scrn)
+ return NULL;
+
+ /* Try DRI2 first */
+ if (dri2CreateScreen(display, screen, &vl_dri_scrn->dri_screen)) {
+ /* If not, try DRI */
+ if (driCreateScreen(display, screen, &vl_dri_scrn->dri_screen, &vl_dri_scrn->dri_framebuf)) {
+         /* Neither DRI2 nor DRI1 is available; give up */
+ FREE(vl_dri_scrn);
+ return NULL;
+ }
+ else {
+ /* Got DRI */
+ arg.base.mode = DRM_CREATE_DRI1;
+ arg.lf = &dri1_lf;
+ arg.ddx_info = vl_dri_scrn->dri_framebuf.private;
+ arg.ddx_info_size = vl_dri_scrn->dri_framebuf.private_size;
+ arg.sarea = vl_dri_scrn->dri_screen->sarea;
+ vl_dri_copy_version(&arg.ddx_version, &vl_dri_scrn->dri_screen->ddx);
+ vl_dri_copy_version(&arg.dri_version, &vl_dri_scrn->dri_screen->dri);
+ vl_dri_copy_version(&arg.drm_version, &vl_dri_scrn->dri_screen->drm);
+ arg.api = NULL;
+ vl_dri_scrn->dri2 = FALSE;
+ }
+ }
+ else {
+ /* Got DRI2 */
+ arg.base.mode = DRM_CREATE_NORMAL;
+ vl_dri_scrn->dri2 = TRUE;
+ }
+
+ vl_dri_scrn->api = drm_api_create();
+ if (!vl_dri_scrn->api) {
+ FREE(vl_dri_scrn);
+ return NULL;
+ }
+
+ vl_dri_scrn->base.pscreen = vl_dri_scrn->api->create_screen(vl_dri_scrn->api,
+ vl_dri_scrn->dri_screen->fd,
+ &arg.base);
+
+ if (!vl_dri_scrn->base.pscreen) {
+ FREE(vl_dri_scrn);
+ return NULL;
+ }
+
+ if (!vl_dri_scrn->dri2) {
+ vl_dri_scrn->visual = XDefaultVisual(display, screen);
+ vl_dri_scrn->api_hooks = arg.api;
+ vl_dri_scrn->base.format = vl_dri_scrn->api_hooks->front_srf_locked(vl_dri_scrn->base.pscreen)->format;
+ vl_dri_scrn->base.pscreen->flush_frontbuffer = vl_dri_flush_frontbuffer;
+ }
+ else
+ vl_dri_scrn->base.pscreen->flush_frontbuffer = vl_dri2_flush_frontbuffer;
+
+ return &vl_dri_scrn->base;
+}
+
+void vl_screen_destroy(struct vl_screen *vscreen)
+{
+ struct vl_dri_screen *vl_dri_scrn = (struct vl_dri_screen*)vscreen;
+
+ assert(vscreen);
+
+ vl_dri_scrn->base.pscreen->destroy(vl_dri_scrn->base.pscreen);
+ if (vl_dri_scrn->dri2)
+ dri2DestroyScreen(vl_dri_scrn->dri_screen);
+ else
+ driDestroyScreen(vl_dri_scrn->dri_screen);
+ FREE(vl_dri_scrn);
+}
+
+struct vl_context*
+vl_video_create(struct vl_screen *vscreen,
+ enum pipe_video_profile profile,
+ enum pipe_video_chroma_format chroma_format,
+ unsigned width, unsigned height)
+{
+ struct vl_dri_screen *vl_dri_scrn = (struct vl_dri_screen*)vscreen;
+ struct vl_dri_context *vl_dri_ctx;
+
+ vl_dri_ctx = CALLOC_STRUCT(vl_dri_context);
+ if (!vl_dri_ctx)
+ return NULL;
+
+ /* XXX: Is default visual correct/sufficient here? */
+ if (!vl_dri_scrn->dri2)
+ driCreateContext(vl_dri_scrn->dri_screen, vl_dri_scrn->visual, &vl_dri_ctx->dri_context);
+
+ if (!vscreen->pscreen->video_context_create) {
+ debug_printf("[G3DVL] No video support found on %s/%s.\n",
+ vscreen->pscreen->get_vendor(vscreen->pscreen),
+ vscreen->pscreen->get_name(vscreen->pscreen));
+ /* don't leak the DRI1 context on error */
+ if (!vl_dri_scrn->dri2)
+ driDestroyContext(vl_dri_ctx->dri_context);
+ FREE(vl_dri_ctx);
+ return NULL;
+ }
+
+ vl_dri_ctx->base.vpipe = vscreen->pscreen->video_context_create(vscreen->pscreen,
+ profile, chroma_format,
+ width, height,
+ vl_dri_ctx->dri_context);
+
+ if (!vl_dri_ctx->base.vpipe) {
+ if (!vl_dri_scrn->dri2)
+ driDestroyContext(vl_dri_ctx->dri_context);
+ FREE(vl_dri_ctx);
+ return NULL;
+ }
+
+ vl_dri_ctx->base.vpipe->priv = vl_dri_ctx;
+ vl_dri_ctx->base.vscreen = vscreen;
+ vl_dri_ctx->fd = vl_dri_scrn->dri_screen->fd;
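+ /* DRI1 clients must hold the SAREA hardware lock around hardware
+  * access; stash a pointer to it here. */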
+ if (!vl_dri_scrn->dri2)
+ vl_dri_ctx->lock = (drmLock*)&vl_dri_scrn->dri_screen->sarea->lock;
+
+ return &vl_dri_ctx->base;
+}
+
+void vl_video_destroy(struct vl_context *vctx)
+{
+ struct vl_dri_context *vl_dri_ctx = (struct vl_dri_context*)vctx;
+
+ assert(vctx);
+
+ vl_dri_ctx->base.vpipe->destroy(vl_dri_ctx->base.vpipe);
+ if (!((struct vl_dri_screen *)vctx->vscreen)->dri2)
+ driDestroyContext(vl_dri_ctx->dri_context);
+ FREE(vl_dri_ctx);
+}
--- /dev/null
+ #include "pipe/p_context.h"
+ #include "pipe/p_state.h"
+ #include "util/u_format.h"
+ #include "util/u_memory.h"
+ #include "util/u_inlines.h"
+
+ #include "nouveau_drm_api.h"
+
+ #include "nouveau_drmif.h"
+ #include "nouveau_channel.h"
+ #include "nouveau_bo.h"
+
+ #include "nouveau/nouveau_winsys.h"
+ #include "nouveau/nouveau_screen.h"
+
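+ /* Wrap a DDX-allocated buffer (identified by its handle, format and
+  * pitch) in a pipe_resource and return a surface view of it; used
+  * below to get at the DRI1 front buffer. */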
+ static struct pipe_surface *
+ dri_surface_from_handle(struct drm_api *api, struct pipe_screen *pscreen,
+ unsigned handle, enum pipe_format format,
+ unsigned width, unsigned height, unsigned pitch)
+ {
+ struct pipe_surface *ps = NULL;
+ struct pipe_resource *pt = NULL;
+ struct pipe_resource tmpl;
+ struct winsys_handle whandle;
+ unsigned bind = (PIPE_BIND_SCANOUT |
+ PIPE_BIND_RENDER_TARGET |
+ PIPE_BIND_BLIT_DESTINATION |
+ PIPE_BIND_BLIT_SOURCE);
+
+ memset(&tmpl, 0, sizeof(tmpl));
+ tmpl.bind = bind;
+ tmpl.target = PIPE_TEXTURE_2D;
+ tmpl.last_level = 0;
+ tmpl.depth0 = 1;
+ tmpl.format = format;
+ tmpl.width0 = width;
+ tmpl.height0 = height;
+
+ memset(&whandle, 0, sizeof(whandle));
+ whandle.stride = pitch;
+ whandle.handle = handle;
+
+ pt = pscreen->resource_from_handle(pscreen, &tmpl, &whandle);
+ if (!pt)
+ return NULL;
+
+ ps = pscreen->get_tex_surface(pscreen, pt, 0, 0, 0, bind);
+
+ /* we don't need the texture from this point on */
+ pipe_resource_reference(&pt, NULL);
+ return ps;
+ }
+
+ static struct pipe_surface *
-nouveau_dri1_front_surface(struct pipe_context *pipe)
++nouveau_dri1_front_surface(struct pipe_screen *screen)
+ {
- return nouveau_winsys_screen(pipe->screen)->front;
++ return nouveau_winsys_screen(screen)->front;
+ }
+
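+ /* DRI1 hook through which the state tracker fetches the front surface. */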
+ static struct dri1_api nouveau_dri1_api = {
+ nouveau_dri1_front_surface,
+ };
+
+ static void
+ nouveau_drm_destroy_winsys(struct pipe_winsys *s)
+ {
+ struct nouveau_winsys *nv_winsys = nouveau_winsys(s);
+ struct nouveau_screen *nv_screen = nouveau_screen(nv_winsys->pscreen);
+ nouveau_device_close(&nv_screen->device);
+ FREE(nv_winsys);
+ }
+
+ static struct pipe_screen *
+ nouveau_drm_create_screen(struct drm_api *api, int fd,
+ struct drm_create_screen_arg *arg)
+ {
+ struct dri1_create_screen_arg *dri1 = (void *)arg;
+ struct nouveau_winsys *nvws;
+ struct pipe_winsys *ws;
+ struct nouveau_device *dev = NULL;
+ struct pipe_screen *(*init)(struct pipe_winsys *,
+ struct nouveau_device *);
+ int ret;
+
+ ret = nouveau_device_open_existing(&dev, 0, fd, 0);
+ if (ret)
+ return NULL;
+
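+ /* Pick the screen constructor for this GPU family: NV30/NV40/NV60
+  * chipsets use the unified nvfx driver, NV50 and later use nv50. */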
+ switch (dev->chipset & 0xf0) {
+ case 0x30:
+ case 0x40:
+ case 0x60:
+ init = nvfx_screen_create;
+ break;
+ case 0x50:
+ case 0x80:
+ case 0x90:
+ case 0xa0:
+ init = nv50_screen_create;
+ break;
+ default:
+ debug_printf("%s: unknown chipset nv%02x\n", __func__,
+ dev->chipset);
+ /* don't leak the device we just opened */
+ nouveau_device_close(&dev);
+ return NULL;
+ }
+
+ nvws = CALLOC_STRUCT(nouveau_winsys);
+ if (!nvws) {
+ nouveau_device_close(&dev);
+ return NULL;
+ }
+ ws = &nvws->base;
+ ws->destroy = nouveau_drm_destroy_winsys;
+
+ nvws->pscreen = init(ws, dev);
+ if (!nvws->pscreen) {
+ ws->destroy(ws);
+ return NULL;
+ }
+
+ if (arg && arg->mode == DRM_CREATE_DRI1) {
+ struct nouveau_dri *nvdri = dri1->ddx_info;
+ enum pipe_format format;
+
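+ /* Derive the front buffer format from the depth reported by the DDX. */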
+ if (nvdri->bpp == 16)
+ format = PIPE_FORMAT_B5G6R5_UNORM;
+ else
+ format = PIPE_FORMAT_B8G8R8A8_UNORM;
+
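+ /* Reference the DDX front buffer so DRI1 presents have a target. */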
+ nvws->front = dri_surface_from_handle(api, nvws->pscreen,
+ nvdri->front_offset,
+ format, nvdri->width,
+ nvdri->height,
+ nvdri->front_pitch *
+ (nvdri->bpp / 8));
+ if (!nvws->front) {
+ debug_printf("%s: error referencing front buffer\n",
+ __func__);
+ ws->destroy(ws);
+ return NULL;
+ }
+
+ dri1->api = &nouveau_dri1_api;
+ }
+
+ return nvws->pscreen;
+ }
+
+ static struct drm_api nouveau_drm_api_hooks = {
+ .name = "nouveau",
+ .driver_name = "nouveau",
+ .create_screen = nouveau_drm_create_screen,
+ .destroy = NULL,
+ };
+
+ struct drm_api *
+ drm_api_create(void)
+ {
+ return &nouveau_drm_api_hooks;
+ }
+