Merge remote branch 'origin/master' into pipe-video
author: Christian König <deathsimple@vodafone.de>
Thu, 24 Feb 2011 21:02:42 +0000 (22:02 +0100)
committer: Christian König <deathsimple@vodafone.de>
Thu, 24 Feb 2011 21:02:42 +0000 (22:02 +0100)
Conflicts:
configure.ac
src/gallium/auxiliary/Makefile
src/gallium/auxiliary/SConscript
src/gallium/drivers/r600/r600_asm.c
src/gallium/drivers/r600/r600_asm.h
src/gallium/drivers/r600/r600_shader.c
src/gallium/drivers/r600/r600_state_inlines.h
src/gallium/drivers/r600/r600_texture.c

23 files changed:
1  2 
configs/autoconf.in
configs/linux-dri
configure.ac
src/gallium/auxiliary/Makefile
src/gallium/auxiliary/SConscript
src/gallium/auxiliary/vl/vl_compositor.c
src/gallium/auxiliary/vl/vl_idct.c
src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c
src/gallium/auxiliary/vl/vl_vertex_buffers.c
src/gallium/drivers/r600/Makefile
src/gallium/drivers/r600/r600_asm.c
src/gallium/drivers/r600/r600_asm.h
src/gallium/drivers/r600/r600_pipe.c
src/gallium/drivers/r600/r600_shader.c
src/gallium/drivers/r600/r600_state.c
src/gallium/drivers/r600/r600_state_inlines.h
src/gallium/drivers/r600/r600_texture.c
src/gallium/drivers/softpipe/sp_screen.c
src/gallium/include/pipe/p_defines.h
src/gallium/include/pipe/p_format.h
src/gallium/state_trackers/xorg/xvmc/subpicture.c
src/gallium/state_trackers/xorg/xvmc/surface.c
src/gallium/winsys/r600/drm/r600_bo.c

diff --combined configs/autoconf.in
index dc8f8622416a00f52745299547732aa191045534,a3c69e1d398b97e7ed0ecbd59d409ac3df21d9b5..9defba21afbf16b0c3d02bd26cf8e2a620fb9a6b
@@@ -16,10 -16,12 +16,12 @@@ PIC_FLAGS = @PIC_FLAGS
  DEFINES = @DEFINES@
  API_DEFINES = @API_DEFINES@
  SHARED_GLAPI = @SHARED_GLAPI@
- CFLAGS = @CPPFLAGS@ @CFLAGS@ \
+ CFLAGS_NOVISIBILITY = @CPPFLAGS@ @CFLAGS@ \
        $(OPT_FLAGS) $(PIC_FLAGS) $(ARCH_FLAGS) $(ASM_FLAGS) $(DEFINES)
- CXXFLAGS = @CPPFLAGS@ @CXXFLAGS@ \
+ CXXFLAGS_NOVISIBILITY = @CPPFLAGS@ @CXXFLAGS@ \
        $(OPT_FLAGS) $(PIC_FLAGS) $(ARCH_FLAGS) $(DEFINES)
+ CFLAGS = $(CFLAGS_NOVISIBILITY) @VISIBILITY_CFLAGS@
+ CXXFLAGS = $(CXXFLAGS_NOVISIBILITY) @VISIBILITY_CXXFLAGS@
  LDFLAGS = @LDFLAGS@
  EXTRA_LIB_PATH = @EXTRA_LIB_PATH@
  RADEON_CFLAGS = @RADEON_CFLAGS@
@@@ -34,9 -36,8 +36,8 @@@ LLVM_LIBS = @LLVM_LIBS
  GLW_CFLAGS = @GLW_CFLAGS@
  GLUT_CFLAGS = @GLUT_CFLAGS@
  GLX_TLS = @GLX_TLS@
- TALLOC_LIBS = @TALLOC_LIBS@
- TALLOC_CFLAGS = @TALLOC_CFLAGS@
+ DRI_CFLAGS = @DRI_CFLAGS@
+ DRI_CXXFLAGS = @DRI_CXXFLAGS@
  
  # dlopen
  DLOPEN_LIBS = @DLOPEN_LIBS@
@@@ -67,6 -68,7 +68,7 @@@ GLESv1_CM_LIB = GLESv1_C
  GLESv2_LIB = GLESv2
  VG_LIB = OpenVG
  GLAPI_LIB = glapi
+ WAYLAND_EGL_LIB = wayland-egl
  
  # Library names (actual file names)
  GL_LIB_NAME = @GL_LIB_NAME@
@@@ -79,6 -81,7 +81,7 @@@ GLESv1_CM_LIB_NAME = @GLESv1_CM_LIB_NAM
  GLESv2_LIB_NAME = @GLESv2_LIB_NAME@
  VG_LIB_NAME = @VG_LIB_NAME@
  GLAPI_LIB_NAME = @GLAPI_LIB_NAME@
+ WAYLAND_EGL_LIB_NAME = @WAYLAND_EGL_LIB_NAME@
  
  # Globs used to install the lib and all symlinks
  GL_LIB_GLOB = @GL_LIB_GLOB@
@@@ -91,6 -94,7 +94,7 @@@ GLESv1_CM_LIB_GLOB = @GLESv1_CM_LIB_GLO
  GLESv2_LIB_GLOB = @GLESv2_LIB_GLOB@
  VG_LIB_GLOB = @VG_LIB_GLOB@
  GLAPI_LIB_GLOB = @GLAPI_LIB_GLOB@
+ WAYLAND_EGL_LIB_GLOB = @WAYLAND_EGL_LIB_GLOB@
  
  # Directories to build
  LIB_DIR = @LIB_DIR@
@@@ -107,7 -111,10 +111,10 @@@ GALLIUM_AUXILIARIES = $(TOP)/src/galliu
  GALLIUM_DRIVERS = $(foreach DIR,$(GALLIUM_DRIVERS_DIRS),$(TOP)/src/gallium/drivers/$(DIR)/lib$(DIR).a)
  
  # Driver specific build vars
- DRI_DIRS = @DRI_DIRS@ 
+ DRI_DIRS = @DRI_DIRS@
+ DRICORE_GLSL_LIBS = @DRICORE_GLSL_LIBS@
+ DRICORE_LIBS = @DRICORE_LIBS@
+ DRICORE_LIB_DEPS = @DRICORE_LIB_DEPS@
  EGL_PLATFORMS = @EGL_PLATFORMS@
  EGL_CLIENT_APIS = @EGL_CLIENT_APIS@
  
@@@ -134,8 -141,10 +141,10 @@@ GLESv1_CM_LIB_DEPS = $(EXTRA_LIB_PATH) 
  GLESv2_LIB_DEPS = $(EXTRA_LIB_PATH) @GLESv2_LIB_DEPS@
  VG_LIB_DEPS = $(EXTRA_LIB_PATH) @VG_LIB_DEPS@
  GLAPI_LIB_DEPS = $(EXTRA_LIB_PATH) @GLAPI_LIB_DEPS@
+ WAYLAND_EGL_LIB_DEPS = $(EXTRA_LIBPATH) @WAYLAND_EGL_LIB_DEPS@
  
  # DRI dependencies
+ MESA_MODULES = @MESA_MODULES@
  DRI_LIB_DEPS = $(EXTRA_LIB_PATH) @DRI_LIB_DEPS@
  LIBDRM_CFLAGS = @LIBDRM_CFLAGS@
  LIBDRM_LIB = @LIBDRM_LIBS@
@@@ -162,12 -171,6 +171,12 @@@ DRI_DRIVER_SEARCH_DIR = @DRI_DRIVER_SEA
  # EGL driver install directory
  EGL_DRIVER_INSTALL_DIR = @EGL_DRIVER_INSTALL_DIR@
  
 +# VDPAU library install directory
 +VDPAU_LIB_INSTALL_DIR=@VDPAU_LIB_INSTALL_DIR@
 +
 +# VA library install directory
 +VA_LIB_INSTALL_DIR=@VA_LIB_INSTALL_DIR@
 +
  # Xorg driver install directory (for xorg state-tracker)
  XORG_DRIVER_INSTALL_DIR = @XORG_DRIVER_INSTALL_DIR@
  
@@@ -193,11 -196,16 +202,16 @@@ GLESv2_PC_LIB_PRIV = @GLESv2_PC_LIB_PRI
  EGL_PC_REQ_PRIV = @GL_PC_REQ_PRIV@
  EGL_PC_LIB_PRIV = @GL_PC_LIB_PRIV@
  EGL_PC_CFLAGS = @GL_PC_CFLAGS@
+ WAYLAND_EGL_PC_REQ_PRIV = @WAYLAND_EGL_PC_REQ_PRIV@
+ WAYLAND_EGL_PC_LIB_PRIV = @WAYLAND_EGL_PC_LIB_PRIV@
+ WAYLAND_EGL_PC_CFLAGS = @WAYLAND_EGL_PC_CFLAGS@
  
  XCB_DRI2_CFLAGS = @XCB_DRI2_CFLAGS@
  XCB_DRI2_LIBS = @XCB_DRI2_LIBS@
  LIBUDEV_CFLAGS = @LIBUDEV_CFLAGS@
  LIBUDEV_LIBS = @LIBUDEV_LIBS@
+ WAYLAND_CFLAGS = @WAYLAND_CFLAGS@
+ WAYLAND_LIBS = @WAYLAND_LIBS@
  
  MESA_LLVM = @MESA_LLVM@
  
diff --combined configs/linux-dri
index 9a0253f919b18def99bec183237797b84b0a0883,22190bfc64f537b4db9b8ae2c6ba24b7c49286f5..ce2da8317d754443ad88d6d75b16f4e84b395ddf
@@@ -43,9 -43,11 +43,11 @@@ MESA_ASM_SOURCES 
  # Library/program dependencies
  EXTRA_LIB_PATH=-L/usr/X11R6/lib
  
+ MESA_MODULES  = $(TOP)/src/mesa/libmesa.a
  LIBDRM_CFLAGS = $(shell pkg-config --cflags libdrm)
  LIBDRM_LIB = $(shell pkg-config --libs libdrm)
- DRI_LIB_DEPS  = $(EXTRA_LIB_PATH) -lm -lpthread -lexpat -ldl -ltalloc $(LIBDRM_LIB)
+ DRI_LIB_DEPS  = $(MESA_MODULES) $(EXTRA_LIB_PATH) -lm -lpthread -lexpat -ldl $(LIBDRM_LIB)
  GL_LIB_DEPS   = $(EXTRA_LIB_PATH) -lX11 -lXext -lXxf86vm -lXdamage -lXfixes \
                -lm -lpthread -ldl $(LIBDRM_LIB)
  
@@@ -57,12 -59,12 +59,12 @@@ SRC_DIRS := glx egl $(SRC_DIRS
  EGL_DRIVERS_DIRS = glx
  
  DRIVER_DIRS = dri
 +
  GALLIUM_WINSYS_DIRS = sw sw/xlib drm/vmware drm/intel drm/i965
 -GALLIUM_TARGET_DIRS = 
 -GALLIUM_STATE_TRACKERS_DIRS = egl
 +GALLIUM_TARGET_DIRS = egl-swrast
 +GALLIUM_STATE_TRACKERS_DIRS = egl vdpau
  
 -DRI_DIRS = i810 i915 i965 mach64 mga r128 r200 r300 radeon \
 -      savage sis tdfx unichrome swrast
 +DRI_DIRS = r300 radeon swrast
  
  INTEL_LIBS = `pkg-config --libs libdrm_intel`
  INTEL_CFLAGS = `pkg-config --cflags libdrm_intel`
diff --combined configure.ac
index 2f1e94a791239b556b8d6d3736b0908d2cffb4eb,fbc743650c6c9f2f63e10fddb6508525a057e335..19455eed186bbf3c253e30fbc7686cf63a64a1f2
@@@ -20,6 -20,7 +20,7 @@@ AC_CANONICAL_HOS
  dnl Versions for external dependencies
  LIBDRM_REQUIRED=2.4.23
  LIBDRM_RADEON_REQUIRED=2.4.23
+ LIBDRM_INTEL_REQUIRED=2.4.23
  DRI2PROTO_REQUIRED=2.1
  GLPROTO_REQUIRED=1.4.11
  LIBDRM_XORG_REQUIRED=2.4.23
@@@ -150,9 -151,13 +151,13 @@@ if test "x$GCC" = xyes; the
      # Enable -fvisibility=hidden if using a gcc that supports it
      save_CFLAGS="$CFLAGS"
      AC_MSG_CHECKING([whether $CC supports -fvisibility=hidden])
-     CFLAGS="$CFLAGS -fvisibility=hidden"
+     VISIBILITY_CFLAGS="-fvisibility=hidden"
+     CFLAGS="$CFLAGS $VISIBILITY_CFLAGS"
      AC_LINK_IFELSE([AC_LANG_PROGRAM()], AC_MSG_RESULT([yes]),
-                  [CFLAGS="$save_CFLAGS" ; AC_MSG_RESULT([no])]);
+                  [VISIBILITY_CFLAGS=""; AC_MSG_RESULT([no])]);
+     # Restore CFLAGS; VISIBILITY_CFLAGS are added to it where needed.
+     CFLAGS=$save_CFLAGS
  
      # Work around aliasing bugs - developers should comment this out
      CFLAGS="$CFLAGS -fno-strict-aliasing"
@@@ -163,14 -168,21 +168,21 @@@ if test "x$GXX" = xyes; the
      # Enable -fvisibility=hidden if using a gcc that supports it
      save_CXXFLAGS="$CXXFLAGS"
      AC_MSG_CHECKING([whether $CXX supports -fvisibility=hidden])
-     CXXFLAGS="$CXXFLAGS -fvisibility=hidden"
+     VISIBILITY_CXXFLAGS="-fvisibility=hidden"
+     CXXFLAGS="$CXXFLAGS $VISIBILITY_CXXFLAGS"
      AC_LINK_IFELSE([AC_LANG_PROGRAM()], AC_MSG_RESULT([yes]),
-                  [CXXFLAGS="$save_CXXFLAGS" ; AC_MSG_RESULT([no])]);
+                  [VISIBILITY_CXXFLAGS="" ; AC_MSG_RESULT([no])]);
+     # Restore CXXFLAGS; VISIBILITY_CXXFLAGS are added to it where needed.
+     CXXFLAGS=$save_CXXFLAGS
  
      # Work around aliasing bugs - developers should comment this out
      CXXFLAGS="$CXXFLAGS -fno-strict-aliasing"
  fi
  
+ AC_SUBST([VISIBILITY_CFLAGS])
+ AC_SUBST([VISIBILITY_CXXFLAGS])
  dnl These should be unnecessary, but let the user set them if they want
  AC_ARG_VAR([OPT_FLAGS], [Additional optimization flags for the compiler.
      Default is to use CFLAGS.])
@@@ -317,6 -329,7 +329,7 @@@ GLESv1_CM_LIB_NAME='lib$(GLESv1_CM_LIB)
  GLESv2_LIB_NAME='lib$(GLESv2_LIB).'${LIB_EXTENSION}
  VG_LIB_NAME='lib$(VG_LIB).'${LIB_EXTENSION}
  GLAPI_LIB_NAME='lib$(GLAPI_LIB).'${LIB_EXTENSION}
+ WAYLAND_EGL_LIB_NAME='lib$(WAYLAND_EGL_LIB).'${LIB_EXTENSION}
  
  GL_LIB_GLOB=${LIB_PREFIX_GLOB}'$(GL_LIB)'${LIB_VERSION_SEPARATOR}'*'${LIB_EXTENSION}'*'
  GLU_LIB_GLOB=${LIB_PREFIX_GLOB}'$(GLU_LIB)'${LIB_VERSION_SEPARATOR}'*'${LIB_EXTENSION}'*'
@@@ -329,6 -342,7 +342,7 @@@ GLESv1_CM_LIB_GLOB=${LIB_PREFIX_GLOB}'$
  GLESv2_LIB_GLOB=${LIB_PREFIX_GLOB}'$(GLESv2_LIB)'${LIB_VERSION_SEPARATOR}'*'${LIB_EXTENSION}'*'
  VG_LIB_GLOB=${LIB_PREFIX_GLOB}'$(VG_LIB)'${LIB_VERSION_SEPARATOR}'*'${LIB_EXTENSION}'*'
  GLAPI_LIB_GLOB=${LIB_PREFIX_GLOB}'$(GLAPI_LIB)'${LIB_VERSION_SEPARATOR}'*'${LIB_EXTENSION}'*'
+ WAYLAND_EGL_LIB_GLOB=${LIB_PREFIX_GLOB}'$(WAYLAND_EGL_LIB)'${LIB_VERSION_SEPARATOR}'*'${LIB_EXTENSION}'*'
  
  AC_SUBST([GL_LIB_NAME])
  AC_SUBST([GLU_LIB_NAME])
@@@ -340,6 -354,7 +354,7 @@@ AC_SUBST([GLESv1_CM_LIB_NAME]
  AC_SUBST([GLESv2_LIB_NAME])
  AC_SUBST([VG_LIB_NAME])
  AC_SUBST([GLAPI_LIB_NAME])
+ AC_SUBST([WAYLAND_EGL_LIB_NAME])
  
  AC_SUBST([GL_LIB_GLOB])
  AC_SUBST([GLU_LIB_GLOB])
@@@ -351,6 -366,7 +366,7 @@@ AC_SUBST([GLESv1_CM_LIB_GLOB]
  AC_SUBST([GLESv2_LIB_GLOB])
  AC_SUBST([VG_LIB_GLOB])
  AC_SUBST([GLAPI_LIB_GLOB])
+ AC_SUBST([WAYLAND_EGL_LIB_GLOB])
  
  dnl
  dnl Arch/platform-specific settings
@@@ -582,10 -598,6 +598,6 @@@ xno
      ;;
  esac
  
- PKG_CHECK_MODULES([TALLOC], [talloc])
- AC_SUBST([TALLOC_LIBS])
- AC_SUBST([TALLOC_CFLAGS])
  dnl
  dnl Driver specific build directories
  dnl
@@@ -602,7 -614,7 +614,7 @@@ GALLIUM_DRIVERS_DIRS="softpipe failove
  GALLIUM_STATE_TRACKERS_DIRS=""
  
  # build shared-glapi if enabled for OpenGL or if OpenGL ES is enabled
- case "x$enabled_shared_glapi$enable_gles1$enable_gles2" in
+ case "x$enable_shared_glapi$enable_gles1$enable_gles2" in
  x*yes*)
      CORE_DIRS="$CORE_DIRS mapi/shared-glapi"
      ;;
@@@ -734,8 -746,8 +746,8 @@@ xlib
          GL_PC_LIB_PRIV="$GL_LIB_DEPS"
          GL_PC_CFLAGS="$X11_INCLUDES"
      fi
-     GL_LIB_DEPS="$GL_LIB_DEPS $SELINUX_LIBS -lm -lpthread $TALLOC_LIBS"
-     GL_PC_LIB_PRIV="$GL_PC_LIB_PRIV $SELINUX_LIBS -lm -lpthread $TALLOC_LIBS"
+     GL_LIB_DEPS="$GL_LIB_DEPS $SELINUX_LIBS -lm -lpthread"
+     GL_PC_LIB_PRIV="$GL_PC_LIB_PRIV $SELINUX_LIBS -lm -lpthread"
  
      # if static, move the external libraries to the programs
      # and empty the libraries for libGL
@@@ -818,10 -830,45 +830,45 @@@ AC_SUBST([GLESv2_PC_LIB_PRIV]
  GLAPI_LIB_DEPS="-lpthread"
  AC_SUBST([GLAPI_LIB_DEPS])
  
+ dnl Setup default DRI CFLAGS
+ DRI_CFLAGS='$(CFLAGS)'
+ DRI_CXXFLAGS='$(CXXFLAGS)'
+ DRI_LIB_DEPS='$(TOP)/src/mesa/libmesa.a'
+ MESA_MODULES='$(TOP)/src/mesa/libmesa.a'
+ AC_ARG_ENABLE([shared-dricore],
+     [AS_HELP_STRING([--enable-shared-dricore],
+         [link DRI modules with shared core DRI routines @<:@default=disabled@:>@])],
+     [enable_dricore="$enableval"],
+     [enable_dricore=no])
+ if test "$mesa_driver" = dri ; then
+    if test "$enable_dricore" = yes ; then
+       if test "$GCC$GXX" != yesyes ; then
+                AC_MSG_WARN([Shared dricore requires GCC-compatible rpath handling.  Disabling shared dricore])
+        enable_dricore=no
+       else
+        DRICORE_GLSL_LIBS='$(TOP)/$(LIB_DIR)/libglsl.so'
+        DRICORE_LIBS='$(TOP)/$(LIB_DIR)/libdricore.so'
+        DRICORE_LIB_DEPS='-L$(TOP)/$(LIB_DIR) -Wl,-R$(DRI_DRIVER_INSTALL_DIR) -lglsl'
+                DRI_LIB_DEPS='-L$(TOP)/$(LIB_DIR) -Wl,-R$(DRI_DRIVER_INSTALL_DIR) -ldricore -lglsl'
+                DRI_CFLAGS='$(CFLAGS_NOVISIBILITY) -DUSE_DRICORE'
+                DRI_CXXFLAGS='$(CXXFLAGS_NOVISIBILITY) -DUSE_DRICORE'
+                MESA_MODULES='$(DRICORE_LIBS) $(DRICORE_GLSL_LIBS)'
+       fi
+    fi
+ fi
+ AC_SUBST([DRICORE_LIBS])
+ AC_SUBST([DRICORE_GLSL_LIBS])
+ AC_SUBST([DRICORE_LIB_DEPS])
+ AC_SUBST([DRI_CXXFLAGS])
+ AC_SUBST([DRI_CFLAGS])
+ AC_SUBST([MESA_MODULES])
  AC_SUBST([HAVE_XF86VIDMODE])
  
  PKG_CHECK_MODULES([LIBDRM_RADEON],
-                 [libdrm_radeon libdrm >= $LIBDRM_RADEON_REQUIRED],
+                 [libdrm_radeon >= $LIBDRM_RADEON_REQUIRED],
                  HAVE_LIBDRM_RADEON=yes,
                  HAVE_LIBDRM_RADEON=no)
  
@@@ -843,6 -890,9 +890,9 @@@ AC_ARG_ENABLE([glx-tls]
      [GLX_USE_TLS=no])
  AC_SUBST(GLX_TLS, ${GLX_USE_TLS})
  
+ AS_IF([test "x$GLX_USE_TLS" = xyes],
+       [DEFINES="${DEFINES} -DGLX_USE_TLS -DPTHREADS"])
  dnl
  dnl More DRI setup
  dnl
@@@ -898,11 -948,6 +948,6 @@@ esa
  
  dnl Set DRI_DIRS, DEFINES and LIB_DEPS
  if test "$mesa_driver" = dri -o "$mesa_driver" = no; then
-     # Use TLS in GLX?
-     if test "x$GLX_USE_TLS" = xyes; then
-         DEFINES="$DEFINES -DGLX_USE_TLS -DPTHREADS"
-     fi
      # Platform specific settings and drivers to build
      case "$host_os" in
      linux*)
              [AC_MSG_ERROR([Expat required for DRI.])])
      fi
  
-     # put all the necessary libs together
-     DRI_LIB_DEPS="$SELINUX_LIBS $LIBDRM_LIBS $EXPAT_LIB -lm -lpthread $DLOPEN_LIBS $TALLOC_LIBS"
+     # put all the necessary libs together, including possibly libdricore
+     DRI_LIB_DEPS="$DRI_LIB_DEPS $SELINUX_LIBS $LIBDRM_LIBS $EXPAT_LIB -lm -lpthread $DLOPEN_LIBS"
  fi
  AC_SUBST([DRI_DIRS])
  AC_SUBST([EXPAT_INCLUDES])
@@@ -997,7 -1042,7 +1042,7 @@@ AC_SUBST([DRI_LIB_DEPS]
  
  case $DRI_DIRS in
  *i915*|*i965*)
-     PKG_CHECK_MODULES([INTEL], [libdrm_intel >= 2.4.23])
+     PKG_CHECK_MODULES([INTEL], [libdrm_intel >= $LIBDRM_INTEL_REQUIRED])
      ;;
  esac
  
@@@ -1065,12 -1110,12 +1110,12 @@@ case "$DRIVER_DIRS" i
  *osmesa*)
      # only link libraries with osmesa if shared
      if test "$enable_static" = no; then
-         OSMESA_LIB_DEPS="-lm -lpthread $SELINUX_LIBS $DLOPEN_LIBS $TALLOC_LIBS"
+         OSMESA_LIB_DEPS="-lm -lpthread $SELINUX_LIBS $DLOPEN_LIBS"
      else
          OSMESA_LIB_DEPS=""
      fi
      OSMESA_MESA_DEPS=""
-     OSMESA_PC_LIB_PRIV="-lm -lpthread $SELINUX_LIBS $DLOPEN_LIBS $TALLOC_LIBS"
+     OSMESA_PC_LIB_PRIV="-lm -lpthread $SELINUX_LIBS $DLOPEN_LIBS"
      ;;
  esac
  AC_SUBST([OSMESA_LIB_DEPS])
              fi
              have_st_vega="yes"
              ;;
 +      xorg/xvmc)
 +            # Check for xvmc?
 +            if test "x$enable_gallium_g3dvl" != xyes; then
 +                AC_MSG_ERROR([cannot build XvMC state tracker without --enable-gallium-g3dvl])
 +            fi
 +            HAVE_ST_XVMC="yes"
 +            ;;
 +        vdpau)
 +            # Check for libvdpau?
 +            if test "x$enable_gallium_g3dvl" != xyes; then
 +                AC_MSG_ERROR([cannot build vdpau state tracker without --enable-gallium-g3dvl])
 +            fi
 +            HAVE_ST_VDPAU="yes"
 +            ;;
 +      va)
 +            # Check for libva?
 +            if test "x$enable_gallium_g3dvl" != xyes; then
 +                AC_MSG_ERROR([cannot build va state tracker without --enable-gallium-g3dvl])
 +            fi
 +            HAVE_ST_VA="yes"
 +            ;;
          esac
  
        if test -n "$tracker"; then
@@@ -1539,6 -1563,8 +1584,8 @@@ AC_ARG_WITH([egl-displays]
      [with_egl_platforms="$withval"])
  
  EGL_PLATFORMS=""
+ WAYLAND_EGL_LIB_DEPS=""
  case "$with_egl_platforms" in
  yes)
      if test "x$enable_egl" = xyes && test "x$mesa_driver" != xosmesa; then
      egl_platforms=`IFS=', '; echo $with_egl_platforms`
      for plat in $egl_platforms; do
          test -d "$srcdir/src/gallium/state_trackers/egl/$plat" || \
-             AC_MSG_ERROR([EGL platform '$plat' does't exist])
+             AC_MSG_ERROR([EGL platform '$plat' doesn't exist])
          if test "$plat" = "fbdev"; then
                  GALLIUM_WINSYS_DIRS="$GALLIUM_WINSYS_DIRS sw/fbdev"
          fi
+       if test "$plat" = "wayland"; then
+               PKG_CHECK_MODULES([WAYLAND], [wayland-client],, \
+                                 [AC_MSG_ERROR([cannot find libwayland-client])])
+               WAYLAND_EGL_LIB_DEPS="$WAYLAND_LIBS $LIBDRM_LIBS"
+       fi
      done
      EGL_PLATFORMS="$egl_platforms"
      ;;
  esac
  AC_SUBST([EGL_PLATFORMS])
  
+ AC_SUBST([WAYLAND_EGL_LIB_DEPS])
+ WAYLAND_EGL_PC_REQ_PRIV="wayland-client libdrm"
+ WAYLAND_EGL_PC_LIB_PRIV=
+ WAYLAND_EGL_PC_CFLAGS=
+ AC_SUBST([WAYLAND_EGL_PC_REQ_PRIV])
+ AC_SUBST([WAYLAND_EGL_PC_LIB_PRIV])
+ AC_SUBST([WAYLAND_EGL_PC_CFLAGS])
  AC_ARG_WITH([egl-driver-dir],
      [AS_HELP_STRING([--with-egl-driver-dir=DIR],
                      [directory for EGL drivers [[default=${libdir}/egl]]])],
@@@ -1628,7 -1669,7 +1690,7 @@@ dn
  dnl Gallium helper functions
  dnl
  gallium_check_st() {
 -    if test "x$HAVE_ST_DRI" = xyes || test "x$HAVE_ST_XORG" = xyes; then
 +    if test "x$HAVE_ST_DRI" = xyes || test "x$HAVE_ST_XORG" = xyes || test "x$HAVE_ST_XVMC" = xyes || test "x$HAVE_ST_VDPAU" = xyes || test "x$HAVE_ST_VA" = xyes; then
           GALLIUM_WINSYS_DIRS="$GALLIUM_WINSYS_DIRS $1"
      fi
      if test "x$HAVE_ST_DRI" = xyes && test "x$2" != x; then
      if test "x$HAVE_ST_XORG" = xyes && test "x$3" != x; then
           GALLIUM_TARGET_DIRS="$GALLIUM_TARGET_DIRS $3"
      fi
 +    if test "x$HAVE_ST_XVMC" = xyes && test "x$4" != x; then
 +         GALLIUM_TARGET_DIRS="$GALLIUM_TARGET_DIRS $4"
 +    fi
 +    if test "x$HAVE_ST_VDPAU" = xyes && test "x$5" != x; then
 +         GALLIUM_TARGET_DIRS="$GALLIUM_TARGET_DIRS $5"
 +    fi
 +    if test "x$HAVE_ST_VA" = xyes && test "x$6" != x; then
 +         GALLIUM_TARGET_DIRS="$GALLIUM_TARGET_DIRS $6"
 +    fi
  }
  
  
@@@ -1705,20 -1737,12 +1767,12 @@@ AC_ARG_ENABLE([gallium-radeon]
      [enable_gallium_radeon="$enableval"],
      [enable_gallium_radeon=auto])
  if test "x$enable_gallium_radeon" = xauto; then
-     if test "x$HAVE_LIBDRM_RADEON" = xyes; then
-       GALLIUM_DRIVERS_DIRS="$GALLIUM_DRIVERS_DIRS r300"
-       gallium_check_st "radeon/drm" "dri-r300"
-     else
-       AC_MSG_WARN([libdrm_radeon is missing, not building gallium-radeon (r300)])
-     fi
+     GALLIUM_DRIVERS_DIRS="$GALLIUM_DRIVERS_DIRS r300"
+     gallium_check_st "radeon/drm" "dri-r300"
  fi
  if test "x$enable_gallium_radeon" = xyes; then
-     if test "x$HAVE_LIBDRM_RADEON" = xyes; then
-       GALLIUM_DRIVERS_DIRS="$GALLIUM_DRIVERS_DIRS r300"
-       gallium_check_st "radeon/drm" "dri-r300" "xorg-radeon"
-     else
-       AC_MSG_ERROR([libdrm_radeon is missing, cannot build gallium-radeon (r300)])
-     fi
+     GALLIUM_DRIVERS_DIRS="$GALLIUM_DRIVERS_DIRS r300"
+     gallium_check_st "radeon/drm" "dri-r300" "xorg-radeon"
  fi
  
  dnl
@@@ -1730,12 -1754,8 +1784,8 @@@ AC_ARG_ENABLE([gallium-r600]
      [enable_gallium_r600="$enableval"],
      [enable_gallium_r600=auto])
  if test "x$enable_gallium_r600" = xyes; then
-     if test "x$HAVE_LIBDRM_RADEON" = xyes; then
-       GALLIUM_DRIVERS_DIRS="$GALLIUM_DRIVERS_DIRS r600"
-       gallium_check_st "r600/drm" "dri-r600" "xvmc-r600" "va-r600"
-     else
-       AC_MSG_ERROR([libdrm_radeon is missing, cannot build gallium-r600])
-     fi
+     GALLIUM_DRIVERS_DIRS="$GALLIUM_DRIVERS_DIRS r600"
 -    gallium_check_st "r600/drm" "dri-r600"
++    gallium_check_st "r600/drm" "dri-r600" "xvmc-r600"
  fi
  
  dnl
@@@ -1748,50 -1768,8 +1798,50 @@@ AC_ARG_ENABLE([gallium-nouveau]
      [enable_gallium_nouveau=no])
  if test "x$enable_gallium_nouveau" = xyes; then
      GALLIUM_DRIVERS_DIRS="$GALLIUM_DRIVERS_DIRS nouveau nvfx nv50 nvc0"
 -    gallium_check_st "nouveau/drm" "dri-nouveau" "xorg-nouveau"
 +    gallium_check_st "nouveau/drm" "dri-nouveau" "xorg-nouveau" "xvmc-nouveau"
 +fi
 +
 +dnl
 +dnl Gallium G3DVL configuration
 +dnl
 +AC_ARG_ENABLE([gallium-g3dvl],
 +    [AS_HELP_STRING([--enable-gallium-g3dvl],
 +        [build gallium g3dvl @<:@default=disabled@:>@])],
 +    [enable_gallium_g3dvl="$enableval"],
 +    [enable_gallium_g3dvl=no])
 +if test "x$enable_gallium_g3dvl" = xyes; then
 +    case "$mesa_driver" in
 +    xlib)
 +      if test "x$HAVE_ST_VDPAU" = xyes; then
 +        GALLIUM_TARGET_DIRS="$GALLIUM_TARGET_DIRS vdpau-softpipe"
 +      fi
 +      if test "x$HAVE_ST_XVMC" = xyes; then
 +        GALLIUM_TARGET_DIRS="$GALLIUM_TARGET_DIRS xvmc-softpipe"
 +      fi
 +      if test "x$HAVE_ST_VA" = xyes; then
 +      GALLIUM_TARGET_DIRS="$GALLIUM_TARGET_DIRS va-softpipe"
 +      fi
 +      ;;
 +    dri)
 +        GALLIUM_WINSYS_DIRS="$GALLIUM_WINSYS_DIRS g3dvl/dri"
 +        ;;
 +    esac
  fi
 +dnl Directory for VDPAU libs
 +AC_ARG_WITH([vdpau-libdir],
 +    [AS_HELP_STRING([--with-vdpau-libdir=DIR],
 +        [directory for the VDPAU libraries @<:@default=${libdir}/vdpau@:>@])],
 +    [VDPAU_LIB_INSTALL_DIR="$withval"],
 +    [VDPAU_LIB_INSTALL_DIR='${libdir}/vdpau'])
 +AC_SUBST([VDPAU_LIB_INSTALL_DIR])
 +
 +dnl Directory for VA libs
 +AC_ARG_WITH([va-libdir],
 +    [AS_HELP_STRING([--with-va-libdir=DIR],
 +        [directory for the VA libraries @<:@default=${libdir}/va@:>@])],
 +    [VA_LIB_INSTALL_DIR="$withval"],
 +    [VA_LIB_INSTALL_DIR='${libdir}/va'])
 +AC_SUBST([VA_LIB_INSTALL_DIR])
  
  dnl
  dnl Gallium swrast configuration
@@@ -1860,6 -1838,7 +1910,7 @@@ if test "$mesa_driver" != no; the
          fi
          echo "        DRI driver dir:  $DRI_DRIVER_INSTALL_DIR"
          echo "        Use XCB:         $enable_xcb"
+         echo "        Shared dricore:  $enable_dricore"
      fi
  fi
  echo ""
index 6cf1ddd43fe7aaa9d7039892ad101fc8a0e85087,7d7d700eacd2602f4e95ed53e4436bca608c763c..e40f546929df94c6124bab09c0abc7fdc855acf1
@@@ -143,12 -143,14 +143,13 @@@ C_SOURCES = 
        util/u_transfer.c \
        util/u_resource.c \
        util/u_upload_mgr.c \
 -      util/u_vbuf_mgr.c
 -
 -      # Disabling until pipe-video branch gets merged in
 -      #vl/vl_bitstream_parser.c \
 -      #vl/vl_mpeg12_mc_renderer.c \
 -      #vl/vl_compositor.c \
 -      #vl/vl_csc.c \
 -      #vl/vl_shader_build.c \
++      util/u_vbuf_mgr.c \
 +      vl/vl_bitstream_parser.c \
 +      vl/vl_mpeg12_mc_renderer.c \
 +      vl/vl_compositor.c \
 +      vl/vl_csc.c \
 +        vl/vl_idct.c \
 +        vl/vl_vertex_buffers.c
  
  GALLIVM_SOURCES = \
          gallivm/lp_bld_arit.c \
@@@ -219,4 -221,3 +220,4 @@@ util/u_format_table.c: util/u_format_ta
  
  util/u_half.c: util/u_half.py
        $(PYTHON2) util/u_half.py > $@
 +# DO NOT DELETE
index e6806d9a723160138b81c9b0f58baacd1e0aeb5a,0ec6307161555614b562321e23f041349b9380c4..11024d4192356bbe260628a3c0d9b2fd64bc0877
@@@ -190,11 -190,13 +190,11 @@@ source = 
      'util/u_tile.c',
      'util/u_transfer.c',
      'util/u_upload_mgr.c',
 -    # Disabling until pipe-video branch gets merged in
 -    #'vl/vl_bitstream_parser.c',
 -    #'vl/vl_mpeg12_mc_renderer.c',
 -    #'vl/vl_compositor.c',
 -    #'vl/vl_csc.c',
 -    #'vl/vl_shader_build.c',
+     'util/u_vbuf_mgr.c',
-     'target-helpers/wrap_screen.c',
 +    'vl/vl_bitstream_parser.c',
 +    'vl/vl_mpeg12_mc_renderer.c',
 +    'vl/vl_compositor.c',
 +    'vl/vl_csc.c',
  ]
  
  if env['llvm']:
index d7b29497ace9a56dcfe626f05f3a8bd54435996e,0000000000000000000000000000000000000000..d1ba5faf7881afcb895b0f46056624ad4be5379c
mode 100644,000000..100644
--- /dev/null
@@@ -1,639 -1,0 +1,640 @@@
-    c->vertex_buf.max_index = (VL_COMPOSITOR_MAX_LAYERS + 2) * 6 - 1;
 +/**************************************************************************
 + *
 + * Copyright 2009 Younes Manton.
 + * All Rights Reserved.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the
 + * "Software"), to deal in the Software without restriction, including
 + * without limitation the rights to use, copy, modify, merge, publish,
 + * distribute, sub license, and/or sell copies of the Software, and to
 + * permit persons to whom the Software is furnished to do so, subject to
 + * the following conditions:
 + *
 + * The above copyright notice and this permission notice (including the
 + * next paragraph) shall be included in all copies or substantial portions
 + * of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 + *
 + **************************************************************************/
 +
 +#include "vl_compositor.h"
 +#include "util/u_draw.h"
 +#include <assert.h>
 +#include <pipe/p_context.h>
 +#include <util/u_inlines.h>
 +#include <util/u_memory.h>
 +#include <util/u_keymap.h>
 +#include <util/u_draw.h>
 +#include <util/u_sampler.h>
 +#include <tgsi/tgsi_ureg.h>
 +#include "vl_csc.h"
 +
 +struct vertex_shader_consts
 +{
 +   struct vertex4f dst_scale;
 +   struct vertex4f dst_trans;
 +   struct vertex4f src_scale;
 +   struct vertex4f src_trans;
 +};
 +
 +struct fragment_shader_consts
 +{
 +   float matrix[16];
 +};
 +
 +static bool
 +u_video_rects_equal(struct pipe_video_rect *a, struct pipe_video_rect *b)
 +{
 +   assert(a && b);
 +
 +   if (a->x != b->x)
 +      return false;
 +   if (a->y != b->y)
 +      return false;
 +   if (a->w != b->w)
 +      return false;
 +   if (a->h != b->h)
 +      return false;
 +
 +   return true;
 +}
 +
 +static bool
 +create_vert_shader(struct vl_compositor *c)
 +{
 +   struct ureg_program *shader;
 +   struct ureg_src vpos, vtex;
 +   struct ureg_dst o_vpos, o_vtex;
 +
 +   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
 +   if (!shader)
 +      return false;
 +
 +   vpos = ureg_DECL_vs_input(shader, 0);
 +   vtex = ureg_DECL_vs_input(shader, 1);
 +   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, 0);
 +   o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, 1);
 +
 +   /*
 +    * o_vpos = vpos
 +    * o_vtex = vtex
 +    */
 +   ureg_MOV(shader, o_vpos, vpos);
 +   ureg_MOV(shader, o_vtex, vtex);
 +
 +   ureg_END(shader);
 +
 +   c->vertex_shader = ureg_create_shader_and_destroy(shader, c->pipe);
 +   if (!c->vertex_shader)
 +      return false;
 +
 +   return true;
 +}
 +
 +static bool
 +create_frag_shader_ycbcr_2_rgb(struct vl_compositor *c)
 +{
 +   struct ureg_program *shader;
 +   struct ureg_src tc;
 +   struct ureg_src csc[4];
 +   struct ureg_src sampler;
 +   struct ureg_dst texel;
 +   struct ureg_dst fragment;
 +   unsigned i;
 +
 +   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
 +   if (!shader)
 +      return false;
 +
 +   tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, 1, TGSI_INTERPOLATE_LINEAR);
 +   for (i = 0; i < 4; ++i)
 +      csc[i] = ureg_DECL_constant(shader, i);
 +   sampler = ureg_DECL_sampler(shader, 0);
 +   texel = ureg_DECL_temporary(shader);
 +   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
 +
 +   /*
 +    * texel = tex(tc, sampler)
 +    * fragment = csc * texel
 +    */
 +   ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
 +   for (i = 0; i < 4; ++i)
 +      ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));
 +
 +   ureg_release_temporary(shader, texel);
 +   ureg_END(shader);
 +
 +   c->fragment_shader.ycbcr_2_rgb = ureg_create_shader_and_destroy(shader, c->pipe);
 +   if (!c->fragment_shader.ycbcr_2_rgb)
 +      return false;
 +
 +   return true;
 +}
 +
 +static bool
 +create_frag_shader_rgb_2_rgb(struct vl_compositor *c)
 +{
 +   struct ureg_program *shader;
 +   struct ureg_src tc;
 +   struct ureg_src sampler;
 +   struct ureg_dst fragment;
 +
 +   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
 +   if (!shader)
 +      return false;
 +
 +   tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, 1, TGSI_INTERPOLATE_LINEAR);
 +   sampler = ureg_DECL_sampler(shader, 0);
 +   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
 +
 +   /*
 +    * fragment = tex(tc, sampler)
 +    */
 +   ureg_TEX(shader, fragment, TGSI_TEXTURE_2D, tc, sampler);
 +   ureg_END(shader);
 +
 +   c->fragment_shader.rgb_2_rgb = ureg_create_shader_and_destroy(shader, c->pipe);
 +   if (!c->fragment_shader.rgb_2_rgb)
 +      return false;
 +
 +   return true;
 +}
 +
 +static bool
 +init_pipe_state(struct vl_compositor *c)
 +{
 +   struct pipe_sampler_state sampler;
 +
 +   assert(c);
 +
 +   c->fb_state.nr_cbufs = 1;
 +   c->fb_state.zsbuf = NULL;
 +
 +   memset(&sampler, 0, sizeof(sampler));
 +   sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
 +   sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
 +   sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
 +   sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
 +   sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
 +   sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
 +   sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
 +   sampler.compare_func = PIPE_FUNC_ALWAYS;
 +   sampler.normalized_coords = 1;
 +   /*sampler.lod_bias = ;*/
 +   /*sampler.min_lod = ;*/
 +   /*sampler.max_lod = ;*/
 +   /*sampler.border_color[i] = ;*/
 +   /*sampler.max_anisotropy = ;*/
 +   c->sampler = c->pipe->create_sampler_state(c->pipe, &sampler);
 +
 +   return true;
 +}
 +
 +static void cleanup_pipe_state(struct vl_compositor *c)
 +{
 +   assert(c);
 +
 +   c->pipe->delete_sampler_state(c->pipe, c->sampler);
 +}
 +
 +static bool
 +init_shaders(struct vl_compositor *c)
 +{
 +   assert(c);
 +
 +   if (!create_vert_shader(c)) {
 +      debug_printf("Unable to create vertex shader.\n");
 +      return false;
 +   }
 +   if (!create_frag_shader_ycbcr_2_rgb(c)) {
 +      debug_printf("Unable to create YCbCr-to-RGB fragment shader.\n");
 +      return false;
 +   }
 +   if (!create_frag_shader_rgb_2_rgb(c)) {
 +      debug_printf("Unable to create RGB-to-RGB fragment shader.\n");
 +      return false;
 +   }
 +
 +   return true;
 +}
 +
 +static void cleanup_shaders(struct vl_compositor *c)
 +{
 +   assert(c);
 +
 +   c->pipe->delete_vs_state(c->pipe, c->vertex_shader);
 +   c->pipe->delete_fs_state(c->pipe, c->fragment_shader.ycbcr_2_rgb);
 +   c->pipe->delete_fs_state(c->pipe, c->fragment_shader.rgb_2_rgb);
 +}
 +
 +static bool
 +init_buffers(struct vl_compositor *c)
 +{
 +   struct fragment_shader_consts fsc;
 +   struct pipe_vertex_element vertex_elems[2];
 +
 +   assert(c);
 +
 +   /*
 +    * Create our vertex buffer and vertex buffer elements
 +    */
 +   c->vertex_buf.stride = sizeof(struct vertex4f);
 +   c->vertex_buf.buffer_offset = 0;
 +   /* XXX: Create with DYNAMIC or STREAM */
 +   c->vertex_buf.buffer = pipe_buffer_create
 +   (
 +      c->pipe->screen,
 +      PIPE_BIND_VERTEX_BUFFER,
++      PIPE_USAGE_STATIC,
 +      sizeof(struct vertex4f) * (VL_COMPOSITOR_MAX_LAYERS + 2) * 6
 +   );
 +
 +   vertex_elems[0].src_offset = 0;
 +   vertex_elems[0].instance_divisor = 0;
 +   vertex_elems[0].vertex_buffer_index = 0;
 +   vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
 +   vertex_elems[1].src_offset = sizeof(struct vertex2f);
 +   vertex_elems[1].instance_divisor = 0;
 +   vertex_elems[1].vertex_buffer_index = 0;
 +   vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
 +   c->vertex_elems_state = c->pipe->create_vertex_elements_state(c->pipe, 2, vertex_elems);
 +
 +   /*
 +    * Create our fragment shader's constant buffer
 +    * Const buffer contains the color conversion matrix and bias vectors
 +    */
 +   /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
 +   c->fs_const_buf = pipe_buffer_create
 +   (
 +      c->pipe->screen,
 +      PIPE_BIND_CONSTANT_BUFFER,
++      PIPE_USAGE_STATIC,
 +      sizeof(struct fragment_shader_consts)
 +   );
 +
 +   vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY, NULL, true, fsc.matrix);
 +
 +   vl_compositor_set_csc_matrix(c, fsc.matrix);
 +
 +   return true;
 +}
 +
 +static void
 +cleanup_buffers(struct vl_compositor *c)
 +{
 +   assert(c);
 +
 +   c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
 +   pipe_resource_reference(&c->vertex_buf.buffer, NULL);
 +   pipe_resource_reference(&c->fs_const_buf, NULL);
 +}
 +
 +static void
 +texview_map_delete(const struct keymap *map,
 +                   const void *key, void *data,
 +                   void *user)
 +{
 +   struct pipe_sampler_view *sv = (struct pipe_sampler_view*)data;
 +
 +   assert(map);
 +   assert(key);
 +   assert(data);
 +   assert(user);
 +
 +   pipe_sampler_view_reference(&sv, NULL);
 +}
 +
 +bool vl_compositor_init(struct vl_compositor *compositor, struct pipe_context *pipe)
 +{
 +   unsigned i;
 +
 +   assert(compositor);
 +
 +   memset(compositor, 0, sizeof(struct vl_compositor));
 +
 +   compositor->pipe = pipe;
 +
 +   compositor->texview_map = util_new_keymap(sizeof(struct pipe_surface*), -1,
 +                                             texview_map_delete);
 +   if (!compositor->texview_map)
 +      return false;
 +
 +   if (!init_pipe_state(compositor)) {
 +      util_delete_keymap(compositor->texview_map, compositor->pipe);
 +      return false;
 +   }
 +   if (!init_shaders(compositor)) {
 +      util_delete_keymap(compositor->texview_map, compositor->pipe);
 +      cleanup_pipe_state(compositor);
 +      return false;
 +   }
 +   if (!init_buffers(compositor)) {
 +      util_delete_keymap(compositor->texview_map, compositor->pipe);
 +      cleanup_shaders(compositor);
 +      cleanup_pipe_state(compositor);
 +      return false;
 +   }
 +
 +   compositor->fb_state.width = 0;
 +   compositor->fb_state.height = 0;
 +   compositor->bg = NULL;
 +   compositor->dirty_bg = false;
 +   for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i)
 +      compositor->layers[i] = NULL;
 +   compositor->dirty_layers = 0;
 +
 +   return true;
 +}
 +
 +void vl_compositor_cleanup(struct vl_compositor *compositor)
 +{
 +   assert(compositor);
 +
 +   util_delete_keymap(compositor->texview_map, compositor->pipe);
 +   cleanup_buffers(compositor);
 +   cleanup_shaders(compositor);
 +   cleanup_pipe_state(compositor);
 +}
 +
 +void vl_compositor_set_background(struct vl_compositor *compositor,
 +                                 struct pipe_surface *bg, struct pipe_video_rect *bg_src_rect)
 +{
 +   assert(compositor);
 +   assert((bg && bg_src_rect) || (!bg && !bg_src_rect));
 +
 +   if (compositor->bg != bg ||
 +       !u_video_rects_equal(&compositor->bg_src_rect, bg_src_rect)) {
 +      pipe_surface_reference(&compositor->bg, bg);
 +      /*if (!u_video_rects_equal(&compositor->bg_src_rect, bg_src_rect))*/
 +         compositor->bg_src_rect = *bg_src_rect;
 +      compositor->dirty_bg = true;
 +   }
 +}
 +
 +void vl_compositor_set_layers(struct vl_compositor *compositor,
 +                              struct pipe_surface *layers[],
 +                              struct pipe_video_rect *src_rects[],
 +                              struct pipe_video_rect *dst_rects[],
 +                              unsigned num_layers)
 +{
 +   unsigned i;
 +
 +   assert(compositor);
 +   assert(num_layers <= VL_COMPOSITOR_MAX_LAYERS);
 +
 +   for (i = 0; i < num_layers; ++i)
 +   {
 +      assert((layers[i] && src_rects[i] && dst_rects[i]) ||
 +             (!layers[i] && !src_rects[i] && !dst_rects[i]));
 +
 +      if (compositor->layers[i] != layers[i] ||
 +          !u_video_rects_equal(&compositor->layer_src_rects[i], src_rects[i]) ||
 +          !u_video_rects_equal(&compositor->layer_dst_rects[i], dst_rects[i]))
 +      {
 +         pipe_surface_reference(&compositor->layers[i], layers[i]);
 +         /*if (!u_video_rects_equal(&compositor->layer_src_rects[i], src_rects[i]))*/
 +            compositor->layer_src_rects[i] = *src_rects[i];
 +         /*if (!u_video_rects_equal(&compositor->layer_dst_rects[i], dst_rects[i]))*/
 +            compositor->layer_dst_rects[i] = *dst_rects[i];
 +         compositor->dirty_layers |= 1 << i;
 +      }
 +
 +      if (layers[i])
 +         compositor->dirty_layers |= 1 << i;
 +   }
 +
 +   for (; i < VL_COMPOSITOR_MAX_LAYERS; ++i)
 +      pipe_surface_reference(&compositor->layers[i], NULL);
 +}
 +
 +static void gen_rect_verts(unsigned pos,
 +                           struct pipe_video_rect *src_rect,
 +                           struct vertex2f *src_inv_size,
 +                           struct pipe_video_rect *dst_rect,
 +                           struct vertex2f *dst_inv_size,
 +                           struct vertex4f *vb)
 +{
 +   assert(pos < VL_COMPOSITOR_MAX_LAYERS + 2);
 +   assert(src_rect);
 +   assert(src_inv_size);
 +   assert((dst_rect && dst_inv_size) /*|| (!dst_rect && !dst_inv_size)*/);
 +   assert(vb);
 +
 +   vb[pos * 6 + 0].x = dst_rect->x * dst_inv_size->x;
 +   vb[pos * 6 + 0].y = dst_rect->y * dst_inv_size->y;
 +   vb[pos * 6 + 0].z = src_rect->x * src_inv_size->x;
 +   vb[pos * 6 + 0].w = src_rect->y * src_inv_size->y;
 +
 +   vb[pos * 6 + 1].x = dst_rect->x * dst_inv_size->x;
 +   vb[pos * 6 + 1].y = (dst_rect->y + dst_rect->h) * dst_inv_size->y;
 +   vb[pos * 6 + 1].z = src_rect->x * src_inv_size->x;
 +   vb[pos * 6 + 1].w = (src_rect->y + src_rect->h) * src_inv_size->y;
 +
 +   vb[pos * 6 + 2].x = (dst_rect->x + dst_rect->w) * dst_inv_size->x;
 +   vb[pos * 6 + 2].y = dst_rect->y * dst_inv_size->y;
 +   vb[pos * 6 + 2].z = (src_rect->x + src_rect->w) * src_inv_size->x;
 +   vb[pos * 6 + 2].w = src_rect->y * src_inv_size->y;
 +
 +   vb[pos * 6 + 3].x = (dst_rect->x + dst_rect->w) * dst_inv_size->x;
 +   vb[pos * 6 + 3].y = dst_rect->y * dst_inv_size->y;
 +   vb[pos * 6 + 3].z = (src_rect->x + src_rect->w) * src_inv_size->x;
 +   vb[pos * 6 + 3].w = src_rect->y * src_inv_size->y;
 +
 +   vb[pos * 6 + 4].x = dst_rect->x * dst_inv_size->x;
 +   vb[pos * 6 + 4].y = (dst_rect->y + dst_rect->h) * dst_inv_size->y;
 +   vb[pos * 6 + 4].z = src_rect->x * src_inv_size->x;
 +   vb[pos * 6 + 4].w = (src_rect->y + src_rect->h) * src_inv_size->y;
 +
 +   vb[pos * 6 + 5].x = (dst_rect->x + dst_rect->w) * dst_inv_size->x;
 +   vb[pos * 6 + 5].y = (dst_rect->y + dst_rect->h) * dst_inv_size->y;
 +   vb[pos * 6 + 5].z = (src_rect->x + src_rect->w) * src_inv_size->x;
 +   vb[pos * 6 + 5].w = (src_rect->y + src_rect->h) * src_inv_size->y;
 +}
 +
 +static unsigned gen_data(struct vl_compositor *c,
 +                         struct pipe_surface *src_surface,
 +                         struct pipe_video_rect *src_rect,
 +                         struct pipe_video_rect *dst_rect,
 +                         struct pipe_surface **textures,
 +                         void **frag_shaders)
 +{
 +   void *vb;
 +   struct pipe_transfer *buf_transfer;
 +   unsigned num_rects = 0;
 +   unsigned i;
 +
 +   assert(c);
 +   assert(src_surface);
 +   assert(src_rect);
 +   assert(dst_rect);
 +   assert(textures);
 +
 +   vb = pipe_buffer_map(c->pipe, c->vertex_buf.buffer,
 +                        PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
 +                        &buf_transfer);
 +
 +   if (!vb)
 +      return 0;
 +
 +   if (c->dirty_bg) {
 +      struct vertex2f bg_inv_size = {1.0f / c->bg->width, 1.0f / c->bg->height};
 +      gen_rect_verts(num_rects, &c->bg_src_rect, &bg_inv_size, NULL, NULL, vb);
 +      textures[num_rects] = c->bg;
 +      /* XXX: Hack */
 +      frag_shaders[num_rects] = c->fragment_shader.rgb_2_rgb;
 +      ++num_rects;
 +      c->dirty_bg = false;
 +   }
 +
 +   {
 +      struct vertex2f src_inv_size = { 1.0f / src_surface->width, 1.0f / src_surface->height};
 +      gen_rect_verts(num_rects, src_rect, &src_inv_size, dst_rect, &c->fb_inv_size, vb);
 +      textures[num_rects] = src_surface;
 +      /* XXX: Hack, sort of */
 +      frag_shaders[num_rects] = c->fragment_shader.ycbcr_2_rgb;
 +      ++num_rects;
 +   }
 +
 +   for (i = 0; c->dirty_layers > 0; i++) {
 +      assert(i < VL_COMPOSITOR_MAX_LAYERS);
 +
 +      if (c->dirty_layers & (1 << i)) {
 +         struct vertex2f layer_inv_size = {1.0f / c->layers[i]->width, 1.0f / c->layers[i]->height};
 +         gen_rect_verts(num_rects, &c->layer_src_rects[i], &layer_inv_size,
 +                        &c->layer_dst_rects[i], &c->fb_inv_size, vb);
 +         textures[num_rects] = c->layers[i];
 +         /* XXX: Hack */
 +         frag_shaders[num_rects] = c->fragment_shader.rgb_2_rgb;
 +         ++num_rects;
 +         c->dirty_layers &= ~(1 << i);
 +      }
 +   }
 +
 +   pipe_buffer_unmap(c->pipe, buf_transfer);
 +
 +   return num_rects;
 +}
 +
 +static void draw_layers(struct vl_compositor *c,
 +                        struct pipe_surface *src_surface,
 +                        struct pipe_video_rect *src_rect,
 +                        struct pipe_video_rect *dst_rect)
 +{
 +   unsigned num_rects;
 +   struct pipe_surface *src_surfaces[VL_COMPOSITOR_MAX_LAYERS + 2];
 +   void *frag_shaders[VL_COMPOSITOR_MAX_LAYERS + 2];
 +   unsigned i;
 +
 +   assert(c);
 +   assert(src_surface);
 +   assert(src_rect);
 +   assert(dst_rect);
 +
 +   num_rects = gen_data(c, src_surface, src_rect, dst_rect, src_surfaces, frag_shaders);
 +
 +   for (i = 0; i < num_rects; ++i) {
 +      boolean delete_view = FALSE;
 +      struct pipe_sampler_view *surface_view = (struct pipe_sampler_view*)util_keymap_lookup(c->texview_map,
 +                                                                                             &src_surfaces[i]);
 +      if (!surface_view) {
 +         struct pipe_sampler_view templat;
 +         u_sampler_view_default_template(&templat, src_surfaces[i]->texture,
 +                                         src_surfaces[i]->texture->format);
 +         surface_view = c->pipe->create_sampler_view(c->pipe, src_surfaces[i]->texture,
 +                                                     &templat);
 +         if (!surface_view)
 +            return;
 +
 +         delete_view = !util_keymap_insert(c->texview_map, &src_surfaces[i],
 +                                           surface_view, c->pipe);
 +      }
 +
 +      c->pipe->bind_fs_state(c->pipe, frag_shaders[i]);
 +      c->pipe->set_fragment_sampler_views(c->pipe, 1, &surface_view);
 +
 +      util_draw_arrays(c->pipe, PIPE_PRIM_TRIANGLES, i * 6, 6);
 +
 +      if (delete_view) {
 +         pipe_sampler_view_reference(&surface_view, NULL);
 +      }
 +   }
 +}
 +
 +void vl_compositor_render(struct vl_compositor          *compositor,
 +                          struct pipe_surface           *src_surface,
 +                          enum pipe_mpeg12_picture_type picture_type,
 +                          /*unsigned                    num_past_surfaces,
 +                          struct pipe_surface           *past_surfaces,
 +                          unsigned                      num_future_surfaces,
 +                          struct pipe_surface           *future_surfaces,*/
 +                          struct pipe_video_rect        *src_area,
 +                          struct pipe_surface           *dst_surface,
 +                          struct pipe_video_rect        *dst_area,
 +                          struct pipe_fence_handle      **fence)
 +{
 +   assert(compositor);
 +   assert(src_surface);
 +   assert(src_area);
 +   assert(dst_surface);
 +   assert(dst_area);
 +   assert(picture_type == PIPE_MPEG12_PICTURE_TYPE_FRAME);
 +
 +   if (compositor->fb_state.width != dst_surface->width) {
 +      compositor->fb_inv_size.x = 1.0f / dst_surface->width;
 +      compositor->fb_state.width = dst_surface->width;
 +   }
 +   if (compositor->fb_state.height != dst_surface->height) {
 +      compositor->fb_inv_size.y = 1.0f / dst_surface->height;
 +      compositor->fb_state.height = dst_surface->height;
 +   }
 +
 +   compositor->fb_state.cbufs[0] = dst_surface;
 +
 +   compositor->viewport.scale[0] = compositor->fb_state.width;
 +   compositor->viewport.scale[1] = compositor->fb_state.height;
 +   compositor->viewport.scale[2] = 1;
 +   compositor->viewport.scale[3] = 1;
 +   compositor->viewport.translate[0] = 0;
 +   compositor->viewport.translate[1] = 0;
 +   compositor->viewport.translate[2] = 0;
 +   compositor->viewport.translate[3] = 0;
 +
 +   compositor->pipe->set_framebuffer_state(compositor->pipe, &compositor->fb_state);
 +   compositor->pipe->set_viewport_state(compositor->pipe, &compositor->viewport);
 +   compositor->pipe->bind_fragment_sampler_states(compositor->pipe, 1, &compositor->sampler);
 +   compositor->pipe->bind_vs_state(compositor->pipe, compositor->vertex_shader);
 +   compositor->pipe->set_vertex_buffers(compositor->pipe, 1, &compositor->vertex_buf);
 +   compositor->pipe->bind_vertex_elements_state(compositor->pipe, compositor->vertex_elems_state);
 +   compositor->pipe->set_constant_buffer(compositor->pipe, PIPE_SHADER_FRAGMENT, 0, compositor->fs_const_buf);
 +
 +   draw_layers(compositor, src_surface, src_area, dst_area);
 +
 +   assert(!compositor->dirty_bg && !compositor->dirty_layers);
 +   compositor->pipe->flush(compositor->pipe, PIPE_FLUSH_RENDER_CACHE, fence);
 +}
 +
 +void vl_compositor_set_csc_matrix(struct vl_compositor *compositor, const float *mat)
 +{
 +   struct pipe_transfer *buf_transfer;
 +
 +   assert(compositor);
 +
 +   memcpy
 +   (
 +      pipe_buffer_map(compositor->pipe, compositor->fs_const_buf,
 +                      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
 +                      &buf_transfer),
 +              mat,
 +              sizeof(struct fragment_shader_consts)
 +   );
 +
 +   pipe_buffer_unmap(compositor->pipe, buf_transfer);
 +}
index 5d472f93481822fe32ecea0236a80b39718393b8,0000000000000000000000000000000000000000..89463a5c75c448a04c3abdc83e663e34ede8cea8
mode 100644,000000..100644
--- /dev/null
@@@ -1,766 -1,0 +1,766 @@@
-    template.depth0 = 1;
 +/**************************************************************************
 + *
 + * Copyright 2010 Christian König
 + * All Rights Reserved.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the
 + * "Software"), to deal in the Software without restriction, including
 + * without limitation the rights to use, copy, modify, merge, publish,
 + * distribute, sub license, and/or sell copies of the Software, and to
 + * permit persons to whom the Software is furnished to do so, subject to
 + * the following conditions:
 + *
 + * The above copyright notice and this permission notice (including the
 + * next paragraph) shall be included in all copies or substantial portions
 + * of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 + *
 + **************************************************************************/
 +
 +#include "vl_idct.h"
 +#include "vl_vertex_buffers.h"
 +#include "util/u_draw.h"
 +#include <assert.h>
 +#include <pipe/p_context.h>
 +#include <pipe/p_screen.h>
 +#include <util/u_inlines.h>
 +#include <util/u_sampler.h>
 +#include <util/u_format.h>
 +#include <tgsi/tgsi_ureg.h>
 +#include "vl_types.h"
 +
 +#define BLOCK_WIDTH 8
 +#define BLOCK_HEIGHT 8
 +
 +#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)
 +
 +#define NR_RENDER_TARGETS 4
 +
 +enum VS_INPUT
 +{
 +   VS_I_RECT,
 +   VS_I_VPOS,
 +
 +   NUM_VS_INPUTS
 +};
 +
 +enum VS_OUTPUT
 +{
 +   VS_O_VPOS,
 +   VS_O_L_ADDR0,
 +   VS_O_L_ADDR1,
 +   VS_O_R_ADDR0,
 +   VS_O_R_ADDR1
 +};
 +
 +static const float const_matrix[8][8] = {
 +   {  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.353553f,  0.3535530f },
 +   {  0.4903930f,  0.4157350f,  0.2777850f,  0.0975451f, -0.0975452f, -0.2777850f, -0.415735f, -0.4903930f },
 +   {  0.4619400f,  0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f,  0.191342f,  0.4619400f },
 +   {  0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f,  0.2777850f,  0.4903930f,  0.097545f, -0.4157350f },
 +   {  0.3535530f, -0.3535530f, -0.3535530f,  0.3535540f,  0.3535530f, -0.3535540f, -0.353553f,  0.3535530f },
 +   {  0.2777850f, -0.4903930f,  0.0975452f,  0.4157350f, -0.4157350f, -0.0975451f,  0.490393f, -0.2777850f },
 +   {  0.1913420f, -0.4619400f,  0.4619400f, -0.1913420f, -0.1913410f,  0.4619400f, -0.461940f,  0.1913420f },
 +   {  0.0975451f, -0.2777850f,  0.4157350f, -0.4903930f,  0.4903930f, -0.4157350f,  0.277786f, -0.0975458f }
 +};
 +
 +static void
 +calc_addr(struct ureg_program *shader, struct ureg_dst addr[2],
 +          struct ureg_src tc, struct ureg_src start, bool right_side,
 +          bool transposed, float size)
 +{
 +   unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
 +   unsigned sw_start = right_side ? TGSI_SWIZZLE_Y : TGSI_SWIZZLE_X;
 +
 +   unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;
 +   unsigned sw_tc = right_side ? TGSI_SWIZZLE_X : TGSI_SWIZZLE_Y;
 +
 +   /*
 +    * addr[0..1].(start) = right_side ? start.x : tc.x
 +    * addr[0..1].(tc) = right_side ? tc.y : start.y
 +    * addr[0..1].z = tc.z
 +    * addr[1].(start) += 1.0f / scale
 +    */
 +   ureg_MOV(shader, ureg_writemask(addr[0], wm_start), ureg_scalar(start, sw_start));
 +   ureg_MOV(shader, ureg_writemask(addr[0], wm_tc), ureg_scalar(tc, sw_tc));
 +   ureg_MOV(shader, ureg_writemask(addr[0], TGSI_WRITEMASK_Z), tc);
 +
 +   ureg_ADD(shader, ureg_writemask(addr[1], wm_start), ureg_scalar(start, sw_start), ureg_imm1f(shader, 1.0f / size));
 +   ureg_MOV(shader, ureg_writemask(addr[1], wm_tc), ureg_scalar(tc, sw_tc));
 +   ureg_MOV(shader, ureg_writemask(addr[1], TGSI_WRITEMASK_Z), tc);
 +}
 +
 +static void *
 +create_vert_shader(struct vl_idct *idct, bool matrix_stage)
 +{
 +   struct ureg_program *shader;
 +   struct ureg_src scale;
 +   struct ureg_src vrect, vpos;
 +   struct ureg_dst t_tex, t_start;
 +   struct ureg_dst o_vpos, o_l_addr[2], o_r_addr[2];
 +
 +   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
 +   if (!shader)
 +      return NULL;
 +
 +   t_tex = ureg_DECL_temporary(shader);
 +   t_start = ureg_DECL_temporary(shader);
 +
 +   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
 +   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);
 +
 +   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
 +
 +   o_l_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0);
 +   o_l_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1);
 +
 +   o_r_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0);
 +   o_r_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1);
 +
 +   /*
 +    * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
 +    *
 +    * t_vpos = vpos + vrect
 +    * o_vpos.xy = t_vpos * scale
 +    * o_vpos.zw = vpos
 +    *
 +    * o_l_addr = calc_addr(...)
 +    * o_r_addr = calc_addr(...)
 +    *
 +    */
 +   scale = ureg_imm2f(shader,
 +      (float)BLOCK_WIDTH / idct->buffer_width,
 +      (float)BLOCK_HEIGHT / idct->buffer_height);
 +
 +   ureg_ADD(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), vpos, vrect);
 +   ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), ureg_src(t_tex), scale);
 +   ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_Z),
 +      ureg_scalar(vrect, TGSI_SWIZZLE_X),
 +      ureg_imm1f(shader, BLOCK_WIDTH / NR_RENDER_TARGETS));
 +
 +   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_tex));
 +   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);
 +
 +   ureg_MUL(shader, ureg_writemask(t_start, TGSI_WRITEMASK_XY), vpos, scale);
 +
 +   if(matrix_stage) {
 +      calc_addr(shader, o_l_addr, ureg_src(t_tex), ureg_src(t_start), false, false, idct->buffer_width / 4);
 +      calc_addr(shader, o_r_addr, vrect, ureg_imm1f(shader, 0.0f), true, true, BLOCK_WIDTH / 4);
 +   } else {
 +      calc_addr(shader, o_l_addr, vrect, ureg_imm1f(shader, 0.0f), false, false, BLOCK_WIDTH / 4);
 +      calc_addr(shader, o_r_addr, ureg_src(t_tex), ureg_src(t_start), true, false, idct->buffer_height / 4);
 +   }
 +
 +   ureg_release_temporary(shader, t_tex);
 +   ureg_release_temporary(shader, t_start);
 +
 +   ureg_END(shader);
 +
 +   return ureg_create_shader_and_destroy(shader, idct->pipe);
 +}
 +
 +static void
 +increment_addr(struct ureg_program *shader, struct ureg_dst daddr[2],
 +               struct ureg_src saddr[2], bool right_side, bool transposed,
 +               int pos, float size)
 +{
 +   unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
 +   unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;
 +
 +   /*
 +    * daddr[0..1].(start) = saddr[0..1].(start)
 +    * daddr[0..1].(tc) = saddr[0..1].(tc)
 +    */
 +
 +   ureg_MOV(shader, ureg_writemask(daddr[0], wm_start), saddr[0]);
 +   ureg_ADD(shader, ureg_writemask(daddr[0], wm_tc), saddr[0], ureg_imm1f(shader, pos / size));
 +   ureg_MOV(shader, ureg_writemask(daddr[1], wm_start), saddr[1]);
 +   ureg_ADD(shader, ureg_writemask(daddr[1], wm_tc), saddr[1], ureg_imm1f(shader, pos / size));
 +}
 +
 +static void
 +fetch_four(struct ureg_program *shader, struct ureg_dst m[2], struct ureg_src addr[2], struct ureg_src sampler)
 +{
 +   ureg_TEX(shader, m[0], TGSI_TEXTURE_3D, addr[0], sampler);
 +   ureg_TEX(shader, m[1], TGSI_TEXTURE_3D, addr[1], sampler);
 +}
 +
 +static void
 +matrix_mul(struct ureg_program *shader, struct ureg_dst dst, struct ureg_dst l[2], struct ureg_dst r[2])
 +{
 +   struct ureg_dst tmp;
 +
 +   tmp = ureg_DECL_temporary(shader);
 +
 +   /*
 +    * tmp.xy = dot4(m[0][0..1], m[1][0..1])
 +    * dst = tmp.x + tmp.y
 +    */
 +   ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), ureg_src(l[0]), ureg_src(r[0]));
 +   ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(l[1]), ureg_src(r[1]));
 +   ureg_ADD(shader, dst,
 +      ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X),
 +      ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y));
 +
 +   ureg_release_temporary(shader, tmp);
 +}
 +
 +static void *
 +create_matrix_frag_shader(struct vl_idct *idct)
 +{
 +   struct ureg_program *shader;
 +
 +   struct ureg_src l_addr[2], r_addr[2];
 +
 +   struct ureg_dst l[4][2], r[2];
 +   struct ureg_dst fragment[NR_RENDER_TARGETS];
 +
 +   unsigned i, j;
 +
 +   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
 +   if (!shader)
 +      return NULL;
 +
 +   l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
 +   l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);
 +
 +   r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
 +   r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);
 +
 +   for (i = 0; i < NR_RENDER_TARGETS; ++i)
 +       fragment[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, i);
 +
 +   for (i = 0; i < 4; ++i) {
 +      l[i][0] = ureg_DECL_temporary(shader);
 +      l[i][1] = ureg_DECL_temporary(shader);
 +   }
 +
 +   r[0] = ureg_DECL_temporary(shader);
 +   r[1] = ureg_DECL_temporary(shader);
 +
 +   for (i = 1; i < 4; ++i) {
 +      increment_addr(shader, l[i], l_addr, false, false, i, idct->buffer_height);
 +   }
 +
 +   for (i = 0; i < 4; ++i) {
 +      struct ureg_src s_addr[2];
 +      s_addr[0] = i == 0 ? l_addr[0] : ureg_src(l[i][0]);
 +      s_addr[1] = i == 0 ? l_addr[1] : ureg_src(l[i][1]);
 +      fetch_four(shader, l[i], s_addr, ureg_DECL_sampler(shader, 1));
 +   }
 +
 +   for (i = 0; i < NR_RENDER_TARGETS; ++i) {
 +      if(i > 0)
 +         increment_addr(shader, r, r_addr, true, true, i, BLOCK_HEIGHT);
 +
 +      struct ureg_src s_addr[2] = { ureg_src(r[0]), ureg_src(r[1]) };
 +      s_addr[0] = i == 0 ? r_addr[0] : ureg_src(r[0]);
 +      s_addr[1] = i == 0 ? r_addr[1] : ureg_src(r[1]);
 +      fetch_four(shader, r, s_addr, ureg_DECL_sampler(shader, 0));
 +
 +      for (j = 0; j < 4; ++j) {
 +         matrix_mul(shader, ureg_writemask(fragment[i], TGSI_WRITEMASK_X << j), l[j], r);
 +      }
 +   }
 +
 +   for (i = 0; i < 4; ++i) {
 +      ureg_release_temporary(shader, l[i][0]);
 +      ureg_release_temporary(shader, l[i][1]);
 +   }
 +   ureg_release_temporary(shader, r[0]);
 +   ureg_release_temporary(shader, r[1]);
 +
 +   ureg_END(shader);
 +
 +   return ureg_create_shader_and_destroy(shader, idct->pipe);
 +}
 +
 +static void *
 +create_transpose_frag_shader(struct vl_idct *idct)
 +{
 +   struct ureg_program *shader;
 +
 +   struct ureg_src l_addr[2], r_addr[2];
 +
 +   struct ureg_dst l[2], r[2];
 +   struct ureg_dst fragment;
 +
 +   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
 +   if (!shader)
 +      return NULL;
 +
 +   l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
 +   l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);
 +
 +   r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
 +   r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);
 +
 +   l[0] = ureg_DECL_temporary(shader);
 +   l[1] = ureg_DECL_temporary(shader);
 +   r[0] = ureg_DECL_temporary(shader);
 +   r[1] = ureg_DECL_temporary(shader);
 +
 +   fetch_four(shader, l, l_addr, ureg_DECL_sampler(shader, 0));
 +   fetch_four(shader, r, r_addr, ureg_DECL_sampler(shader, 1));
 +
 +   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
 +
 +   matrix_mul(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X), l, r);
 +
 +   ureg_release_temporary(shader, l[0]);
 +   ureg_release_temporary(shader, l[1]);
 +   ureg_release_temporary(shader, r[0]);
 +   ureg_release_temporary(shader, r[1]);
 +
 +   ureg_END(shader);
 +
 +   return ureg_create_shader_and_destroy(shader, idct->pipe);
 +}
 +
 +static bool
 +init_shaders(struct vl_idct *idct)
 +{
 +   idct->matrix_vs = create_vert_shader(idct, true);
 +   idct->matrix_fs = create_matrix_frag_shader(idct);
 +
 +   idct->transpose_vs = create_vert_shader(idct, false);
 +   idct->transpose_fs = create_transpose_frag_shader(idct);
 +
 +   return
 +      idct->matrix_vs != NULL &&
 +      idct->matrix_fs != NULL &&
 +      idct->transpose_vs != NULL &&
 +      idct->transpose_fs != NULL;
 +}
 +
 +static void
 +cleanup_shaders(struct vl_idct *idct)
 +{
 +   idct->pipe->delete_vs_state(idct->pipe, idct->matrix_vs);
 +   idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);
 +   idct->pipe->delete_vs_state(idct->pipe, idct->transpose_vs);
 +   idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs);
 +}
 +
 +static bool
 +init_state(struct vl_idct *idct)
 +{
 +   struct pipe_vertex_element vertex_elems[NUM_VS_INPUTS];
 +   struct pipe_sampler_state sampler;
 +   struct pipe_rasterizer_state rs_state;
 +   unsigned i;
 +
 +   assert(idct);
 +
 +   idct->quad = vl_vb_upload_quads(idct->pipe, idct->max_blocks);
 +
 +   if(idct->quad.buffer == NULL)
 +      return false;
 +
 +   for (i = 0; i < 4; ++i) {
 +      memset(&sampler, 0, sizeof(sampler));
 +      sampler.wrap_s = PIPE_TEX_WRAP_REPEAT;
 +      sampler.wrap_t = PIPE_TEX_WRAP_REPEAT;
 +      sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
 +      sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
 +      sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
 +      sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
 +      sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
 +      sampler.compare_func = PIPE_FUNC_ALWAYS;
 +      sampler.normalized_coords = 1;
 +      /*sampler.shadow_ambient = ; */
 +      /*sampler.lod_bias = ; */
 +      sampler.min_lod = 0;
 +      /*sampler.max_lod = ; */
 +      /*sampler.border_color[0] = ; */
 +      /*sampler.max_anisotropy = ; */
 +      idct->samplers.all[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
 +   }
 +
 +   memset(&rs_state, 0, sizeof(rs_state));
 +   /*rs_state.sprite_coord_enable */
 +   rs_state.sprite_coord_mode = PIPE_SPRITE_COORD_UPPER_LEFT;
 +   rs_state.point_quad_rasterization = true;
 +   rs_state.point_size = BLOCK_WIDTH;
 +   rs_state.gl_rasterization_rules = false;
 +   idct->rs_state = idct->pipe->create_rasterizer_state(idct->pipe, &rs_state);
 +
 +   vertex_elems[VS_I_RECT] = vl_vb_get_quad_vertex_element();
 +
 +   /* Pos element */
 +   vertex_elems[VS_I_VPOS].src_format = PIPE_FORMAT_R16G16_SSCALED;
 +
 +   idct->vertex_buffer_stride = vl_vb_element_helper(&vertex_elems[VS_I_VPOS], 1, 1);
 +   idct->vertex_elems_state = idct->pipe->create_vertex_elements_state(idct->pipe, 2, vertex_elems);
 +
 +   return true;
 +}
 +
 +static void
 +cleanup_state(struct vl_idct *idct)
 +{
 +   unsigned i;
 +
 +   for (i = 0; i < 4; ++i)
 +      idct->pipe->delete_sampler_state(idct->pipe, idct->samplers.all[i]);
 +
 +   idct->pipe->delete_rasterizer_state(idct->pipe, idct->rs_state);
 +   idct->pipe->delete_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
 +}
 +
 +static bool
 +init_textures(struct vl_idct *idct, struct vl_idct_buffer *buffer)
 +{
 +   struct pipe_resource template;
 +   struct pipe_sampler_view sampler_view;
 +   unsigned i;
 +
 +   assert(idct && buffer);
 +
 +   /* create textures */
 +   memset(&template, 0, sizeof(struct pipe_resource));
 +   template.last_level = 0;
-    buffer->vertex_bufs.individual.quad.max_index = idct->quad.max_index;
 +   template.bind = PIPE_BIND_SAMPLER_VIEW;
 +   template.flags = 0;
 +
 +   template.target = PIPE_TEXTURE_2D;
 +   template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
 +   template.width0 = idct->buffer_width / 4;
 +   template.height0 = idct->buffer_height;
 +   template.depth0 = 1;
++   template.array_size = 1;
 +   template.usage = PIPE_USAGE_STREAM;
 +   buffer->textures.individual.source = idct->pipe->screen->resource_create(idct->pipe->screen, &template);
 +
 +   template.target = PIPE_TEXTURE_3D;
 +   template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
 +   template.width0 = idct->buffer_width / NR_RENDER_TARGETS;
 +   template.height0 = idct->buffer_height / 4;
 +   template.depth0 = NR_RENDER_TARGETS;
 +   template.usage = PIPE_USAGE_STATIC;
 +   buffer->textures.individual.intermediate = idct->pipe->screen->resource_create(idct->pipe->screen, &template);
 +
 +   for (i = 0; i < 4; ++i) {
 +      if(buffer->textures.all[i] == NULL)
 +         return false; /* a texture failed to allocate */
 +
 +      u_sampler_view_default_template(&sampler_view, buffer->textures.all[i], buffer->textures.all[i]->format);
 +      buffer->sampler_views.all[i] = idct->pipe->create_sampler_view(idct->pipe, buffer->textures.all[i], &sampler_view);
 +   }
 +
 +   return true;
 +}
 +
 +static void
 +cleanup_textures(struct vl_idct *idct, struct vl_idct_buffer *buffer)
 +{
 +   unsigned i;
 +
 +   assert(idct && buffer);
 +
 +   for (i = 0; i < 4; ++i) {
 +      pipe_sampler_view_reference(&buffer->sampler_views.all[i], NULL);
 +      pipe_resource_reference(&buffer->textures.all[i], NULL);
 +   }
 +}
 +
 +static bool
 +init_vertex_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
 +{
 +   assert(idct && buffer);
 +
 +   buffer->vertex_bufs.individual.quad.stride = idct->quad.stride;
 +   buffer->vertex_bufs.individual.quad.buffer_offset = idct->quad.buffer_offset;
 +   pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, idct->quad.buffer);
 +
 +   buffer->vertex_bufs.individual.pos = vl_vb_init(
 +      &buffer->blocks, idct->pipe, idct->max_blocks,
 +      idct->vertex_buffer_stride);
 +
 +   if(buffer->vertex_bufs.individual.pos.buffer == NULL)
 +      return false;
 +
 +   return true;
 +}
 +
 +static void
 +cleanup_vertex_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
 +{
 +   assert(idct && buffer);
 +
 +   pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, NULL);
 +   pipe_resource_reference(&buffer->vertex_bufs.individual.pos.buffer, NULL);
 +
 +   vl_vb_cleanup(&buffer->blocks);
 +}
 +
 +struct pipe_resource *
 +vl_idct_upload_matrix(struct pipe_context *pipe)
 +{
 +   struct pipe_resource template, *matrix;
 +   struct pipe_transfer *buf_transfer;
 +   unsigned i, j, pitch;
 +   float *f;
 +
 +   struct pipe_box rect =
 +   {
 +      0, 0, 0,
 +      BLOCK_WIDTH / 4,
 +      BLOCK_HEIGHT,
 +      1
 +   };
 +
 +   memset(&template, 0, sizeof(struct pipe_resource));
 +   template.target = PIPE_TEXTURE_2D;
 +   template.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
 +   template.last_level = 0;
 +   template.width0 = 2;
 +   template.height0 = 8;
 +   template.depth0 = 1;
++   template.array_size = 1;
 +   template.usage = PIPE_USAGE_IMMUTABLE;
 +   template.bind = PIPE_BIND_SAMPLER_VIEW;
 +   template.flags = 0;
 +
 +   matrix = pipe->screen->resource_create(pipe->screen, &template);
 +
 +   /* matrix */
 +   buf_transfer = pipe->get_transfer
 +   (
 +      pipe, matrix,
 +      0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
 +      &rect
 +   );
 +   pitch = buf_transfer->stride / sizeof(float);
 +
 +   f = pipe->transfer_map(pipe, buf_transfer);
 +   for(i = 0; i < BLOCK_HEIGHT; ++i)
 +      for(j = 0; j < BLOCK_WIDTH; ++j)
 +         // transpose and scale
 +         f[i * pitch + j] = const_matrix[j][i] * sqrtf(SCALE_FACTOR_16_TO_9);
 +
 +   pipe->transfer_unmap(pipe, buf_transfer);
 +   pipe->transfer_destroy(pipe, buf_transfer);
 +
 +   return matrix;
 +}
 +
 +bool vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe,
 +                  unsigned buffer_width, unsigned buffer_height,
 +                  struct pipe_resource *matrix)
 +{
 +   assert(idct && pipe && matrix);
 +
 +   idct->pipe = pipe;
 +   idct->buffer_width = buffer_width;
 +   idct->buffer_height = buffer_height;
 +   pipe_resource_reference(&idct->matrix, matrix);
 +
 +   idct->max_blocks =
 +      align(buffer_width, BLOCK_WIDTH) / BLOCK_WIDTH *
 +      align(buffer_height, BLOCK_HEIGHT) / BLOCK_HEIGHT;
 +
 +   if(!init_shaders(idct))
 +      return false;
 +
 +   if(!init_state(idct)) {
 +      cleanup_shaders(idct);
 +      return false;
 +   }
 +
 +   return true;
 +}
 +
 +void
 +vl_idct_cleanup(struct vl_idct *idct)
 +{
 +   cleanup_shaders(idct);
 +   cleanup_state(idct);
 +
 +   pipe_resource_reference(&idct->matrix, NULL);
 +}
 +
 +bool
 +vl_idct_init_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer, struct pipe_resource *dst)
 +{
 +   struct pipe_surface template;
 +
 +   unsigned i;
 +
 +   assert(buffer);
 +   assert(idct);
 +   assert(dst);
 +
 +   pipe_resource_reference(&buffer->textures.individual.matrix, idct->matrix);
 +   pipe_resource_reference(&buffer->textures.individual.transpose, idct->matrix);
 +   pipe_resource_reference(&buffer->destination, dst);
 +
 +   if (!init_textures(idct, buffer))
 +      return false;
 +
 +   if (!init_vertex_buffers(idct, buffer))
 +      return false;
 +
 +   /* init state */
 +   buffer->viewport[0].scale[0] = buffer->textures.individual.intermediate->width0;
 +   buffer->viewport[0].scale[1] = buffer->textures.individual.intermediate->height0;
 +
 +   buffer->viewport[1].scale[0] = buffer->destination->width0;
 +   buffer->viewport[1].scale[1] = buffer->destination->height0;
 +
 +   buffer->fb_state[0].width = buffer->textures.individual.intermediate->width0;
 +   buffer->fb_state[0].height = buffer->textures.individual.intermediate->height0;
 +
 +   buffer->fb_state[0].nr_cbufs = NR_RENDER_TARGETS;
 +   for(i = 0; i < NR_RENDER_TARGETS; ++i) {
 +      memset(&template, 0, sizeof(template));
 +      template.format = buffer->textures.individual.intermediate->format;
 +      template.u.tex.first_layer = i;
 +      template.u.tex.last_layer = i;
 +      template.usage = PIPE_BIND_RENDER_TARGET;
 +      buffer->fb_state[0].cbufs[i] = idct->pipe->create_surface(
 +         idct->pipe, buffer->textures.individual.intermediate,
 +         &template);
 +   }
 +
 +   buffer->fb_state[1].width = buffer->destination->width0;
 +   buffer->fb_state[1].height = buffer->destination->height0;
 +
 +   buffer->fb_state[1].nr_cbufs = 1;
 +
 +   memset(&template, 0, sizeof(template));
 +   template.format = buffer->destination->format;
 +   template.usage = PIPE_BIND_RENDER_TARGET;
 +   buffer->fb_state[1].cbufs[0] = idct->pipe->create_surface(
 +      idct->pipe, buffer->destination, &template);
 +
 +   for(i = 0; i < 2; ++i) {
 +      buffer->viewport[i].scale[2] = 1;
 +      buffer->viewport[i].scale[3] = 1;
 +      buffer->viewport[i].translate[0] = 0;
 +      buffer->viewport[i].translate[1] = 0;
 +      buffer->viewport[i].translate[2] = 0;
 +      buffer->viewport[i].translate[3] = 0;
 +
 +      buffer->fb_state[i].zsbuf = NULL;
 +   }
 +
 +   return true;
 +}
 +
 +void
 +vl_idct_cleanup_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer)
 +{
 +   unsigned i;
 +
 +   assert(buffer);
 +
 +   for(i = 0; i < NR_RENDER_TARGETS; ++i) {
 +      idct->pipe->surface_destroy(idct->pipe, buffer->fb_state[0].cbufs[i]);
 +   }
 +
 +   idct->pipe->surface_destroy(idct->pipe, buffer->fb_state[1].cbufs[0]);
 +
 +   cleanup_textures(idct, buffer);
 +   cleanup_vertex_buffers(idct, buffer);
 +}
 +
 +void
 +vl_idct_map_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
 +{
 +   assert(idct);
 +
 +   struct pipe_box rect =
 +   {
 +      0, 0, 0,
 +      buffer->textures.individual.source->width0,
 +      buffer->textures.individual.source->height0,
 +      1
 +   };
 +
 +   buffer->tex_transfer = idct->pipe->get_transfer
 +   (
 +      idct->pipe, buffer->textures.individual.source,
 +      0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
 +      &rect
 +   );
 +
 +   buffer->texels = idct->pipe->transfer_map(idct->pipe, buffer->tex_transfer);
 +
 +   vl_vb_map(&buffer->blocks, idct->pipe);
 +}
 +
 +void
 +vl_idct_add_block(struct vl_idct_buffer *buffer, unsigned x, unsigned y, short *block)
 +{
 +   struct vertex2s v;
 +   unsigned tex_pitch;
 +   short *texels;
 +
 +   unsigned i;
 +
 +   assert(buffer);
 +
 +   tex_pitch = buffer->tex_transfer->stride / sizeof(short);
 +   texels = buffer->texels + y * tex_pitch * BLOCK_HEIGHT + x * BLOCK_WIDTH;
 +
 +   for (i = 0; i < BLOCK_HEIGHT; ++i)
 +      memcpy(texels + i * tex_pitch, block + i * BLOCK_WIDTH, BLOCK_WIDTH * sizeof(short));
 +
 +   v.x = x;
 +   v.y = y;
 +   vl_vb_add_block(&buffer->blocks, &v);
 +}
 +
 +void
 +vl_idct_unmap_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
 +{
 +   assert(idct && buffer);
 +
 +   idct->pipe->transfer_unmap(idct->pipe, buffer->tex_transfer);
 +   idct->pipe->transfer_destroy(idct->pipe, buffer->tex_transfer);
 +   vl_vb_unmap(&buffer->blocks, idct->pipe);
 +}
 +
 +void
 +vl_idct_flush(struct vl_idct *idct, struct vl_idct_buffer *buffer)
 +{
 +   unsigned num_verts;
 +
 +   assert(idct);
 +
 +   num_verts = vl_vb_restart(&buffer->blocks);
 +
 +   if(num_verts > 0) {
 +
 +      idct->pipe->bind_rasterizer_state(idct->pipe, idct->rs_state);
 +      idct->pipe->set_vertex_buffers(idct->pipe, 2, buffer->vertex_bufs.all);
 +      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
 +
 +      /* first stage */
 +      idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state[0]);
 +      idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport[0]);
 +      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[0]);
 +      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[0]);
 +      idct->pipe->bind_vs_state(idct->pipe, idct->matrix_vs);
 +      idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);
 +      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, num_verts);
 +
 +      /* second stage */
 +      idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state[1]);
 +      idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport[1]);
 +      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[1]);
 +      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[1]);
 +      idct->pipe->bind_vs_state(idct->pipe, idct->transpose_vs);
 +      idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);
 +      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, num_verts);
 +   }
 +}
index de83b6a5338b0b363991b5e9a31b2cae5d3cc9dd,0000000000000000000000000000000000000000..484e781f0cb89d3976c300d917a45becaced0efb
mode 100644,000000..100644
--- /dev/null
@@@ -1,1093 -1,0 +1,1093 @@@
-    buffer->vertex_bufs.individual.quad.max_index = renderer->quad.max_index;
 +/**************************************************************************
 + *
 + * Copyright 2009 Younes Manton.
 + * All Rights Reserved.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the
 + * "Software"), to deal in the Software without restriction, including
 + * without limitation the rights to use, copy, modify, merge, publish,
 + * distribute, sub license, and/or sell copies of the Software, and to
 + * permit persons to whom the Software is furnished to do so, subject to
 + * the following conditions:
 + *
 + * The above copyright notice and this permission notice (including the
 + * next paragraph) shall be included in all copies or substantial portions
 + * of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 + *
 + **************************************************************************/
 +
 +#include "vl_mpeg12_mc_renderer.h"
 +#include "util/u_draw.h"
 +#include <assert.h>
 +#include <pipe/p_context.h>
 +#include <util/u_inlines.h>
 +#include <util/u_format.h>
 +#include <util/u_math.h>
 +#include <util/u_memory.h>
 +#include <util/u_keymap.h>
 +#include <util/u_sampler.h>
 +#include <util/u_draw.h>
 +#include <tgsi/tgsi_ureg.h>
 +
 +#define DEFAULT_BUF_ALIGNMENT 1
 +#define MACROBLOCK_WIDTH 16
 +#define MACROBLOCK_HEIGHT 16
 +#define BLOCK_WIDTH 8
 +#define BLOCK_HEIGHT 8
 +
 +struct vertex_stream
 +{
 +   struct vertex2s pos;
 +   struct vertex2s mv[4];
 +   struct {
 +      int8_t y;
 +      int8_t cr;
 +      int8_t cb;
 +      int8_t flag;
 +   } eb[2][2];
 +};
 +
 +enum VS_INPUT
 +{
 +   VS_I_RECT,
 +   VS_I_VPOS,
 +   VS_I_MV0,
 +   VS_I_MV1,
 +   VS_I_MV2,
 +   VS_I_MV3,
 +   VS_I_EB_0_0,
 +   VS_I_EB_0_1,
 +   VS_I_EB_1_0,
 +   VS_I_EB_1_1,
 +
 +   NUM_VS_INPUTS
 +};
 +
 +enum VS_OUTPUT
 +{
 +   VS_O_VPOS,
 +   VS_O_LINE,
 +   VS_O_TEX0,
 +   VS_O_TEX1,
 +   VS_O_TEX2,
 +   VS_O_EB_0,
 +   VS_O_EB_1,
 +   VS_O_INFO,
 +   VS_O_MV0,
 +   VS_O_MV1,
 +   VS_O_MV2,
 +   VS_O_MV3
 +};
 +
 +static const unsigned const_empty_block_mask_420[3][2][2] = {
 +        { { 0x20, 0x10 },  { 0x08, 0x04 } },
 +        { { 0x02, 0x02 },  { 0x02, 0x02 } },
 +        { { 0x01, 0x01 },  { 0x01, 0x01 } }
 +};
 +
 +static void *
 +create_vert_shader(struct vl_mpeg12_mc_renderer *r)
 +{
 +   struct ureg_program *shader;
 +   struct ureg_src block_scale, mv_scale;
 +   struct ureg_src vrect, vpos, eb[2][2], vmv[4];
 +   struct ureg_dst t_vpos, t_vtex, t_vmv;
 +   struct ureg_dst o_vpos, o_line, o_vtex[3], o_eb[2], o_vmv[4], o_info;
 +   unsigned i, label;
 +
 +   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
 +   if (!shader)
 +      return NULL;
 +
 +   t_vpos = ureg_DECL_temporary(shader);
 +   t_vtex = ureg_DECL_temporary(shader);
 +   t_vmv = ureg_DECL_temporary(shader);
 +
 +   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
 +   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);
 +   eb[0][0] = ureg_DECL_vs_input(shader, VS_I_EB_0_0);
 +   eb[1][0] = ureg_DECL_vs_input(shader, VS_I_EB_1_0);
 +   eb[0][1] = ureg_DECL_vs_input(shader, VS_I_EB_0_1);
 +   eb[1][1] = ureg_DECL_vs_input(shader, VS_I_EB_1_1);
 +
 +   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
 +   o_line = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_LINE);
 +   o_vtex[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX0);
 +   o_vtex[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX1);
 +   o_vtex[2] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX2);
 +   o_eb[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_0);
 +   o_eb[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_1);
 +   o_info = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_INFO);
 +
 +   for (i = 0; i < 4; ++i) {
 +     vmv[i] = ureg_DECL_vs_input(shader, VS_I_MV0 + i);
 +     o_vmv[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_MV0 + i);
 +   }
 +
 +   /*
 +    * block_scale = (MACROBLOCK_WIDTH, MACROBLOCK_HEIGHT) / (dst.width, dst.height)
 +    * mv_scale = 0.5 / (dst.width, dst.height);
 +    *
 +    * t_vpos = (vpos + vrect) * block_scale
 +    * o_vpos.xy = t_vpos
 +    * o_vpos.zw = vpos
 +    *
 +    * o_eb[0..1] = vrect.x ? eb[0..1][1] : eb[0..1][0]
 +    *
 +    * o_frame_pred = frame_pred
 +    * o_info.x = ref_frames
 +    * o_info.y = ref_frames > 0
 +    * o_info.z = bkwd_pred
 +    *
 +    * // Apply motion vectors
 +    * o_vmv[0..count] = t_vpos + vmv[0..count] * mv_scale
 +    *
 +    * o_line.xy = vrect * 8
 +    * o_line.z = interlaced
 +    *
 +    * if(eb[0][0].w) { //interlaced
 +    *    t_vtex.x = vrect.x
 +    *    t_vtex.y = vrect.y * 0.5
 +    *    t_vtex += vpos
 +    *
 +    *    o_vtex[0].xy = t_vtex * block_scale
 +    *
 +    *    t_vtex.y += 0.5
 +    *    o_vtex[1].xy = t_vtex * block_scale
 +    * } else {
 +    *    o_vtex[0..1].xy = t_vpos
 +    * }
 +    * o_vtex[2].xy = t_vpos
 +    *
 +    */
 +   block_scale = ureg_imm2f(shader,
 +      (float)MACROBLOCK_WIDTH / r->buffer_width,
 +      (float)MACROBLOCK_HEIGHT / r->buffer_height);
 +
 +   mv_scale = ureg_imm2f(shader,
 +      0.5f / r->buffer_width,
 +      0.5f / r->buffer_height);
 +
 +   ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
 +   ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), block_scale);
 +   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
 +   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);
 +
 +   ureg_CMP(shader, ureg_writemask(o_eb[0], TGSI_WRITEMASK_XYZ),
 +            ureg_negate(ureg_scalar(vrect, TGSI_SWIZZLE_X)),
 +            eb[0][1], eb[0][0]);
 +   ureg_CMP(shader, ureg_writemask(o_eb[1], TGSI_WRITEMASK_XYZ),
 +            ureg_negate(ureg_scalar(vrect, TGSI_SWIZZLE_X)),
 +            eb[1][1], eb[1][0]);
 +
 +   ureg_MOV(shader, ureg_writemask(o_info, TGSI_WRITEMASK_X),
 +            ureg_scalar(eb[1][1], TGSI_SWIZZLE_W));
 +   ureg_SGE(shader, ureg_writemask(o_info, TGSI_WRITEMASK_Y),
 +      ureg_scalar(eb[1][1], TGSI_SWIZZLE_W), ureg_imm1f(shader, 0.0f));
 +   ureg_MOV(shader, ureg_writemask(o_info, TGSI_WRITEMASK_Z),
 +            ureg_scalar(eb[1][0], TGSI_SWIZZLE_W));
 +
 +   ureg_MAD(shader, ureg_writemask(o_vmv[0], TGSI_WRITEMASK_XY), mv_scale, vmv[0], ureg_src(t_vpos));
 +   ureg_MAD(shader, ureg_writemask(o_vmv[2], TGSI_WRITEMASK_XY), mv_scale, vmv[2], ureg_src(t_vpos));
 +
 +   ureg_CMP(shader, ureg_writemask(t_vmv, TGSI_WRITEMASK_XY),
 +            ureg_negate(ureg_scalar(eb[0][1], TGSI_SWIZZLE_W)),
 +            vmv[0], vmv[1]);
 +   ureg_MAD(shader, ureg_writemask(o_vmv[1], TGSI_WRITEMASK_XY), mv_scale, ureg_src(t_vmv), ureg_src(t_vpos));
 +
 +   ureg_CMP(shader, ureg_writemask(t_vmv, TGSI_WRITEMASK_XY),
 +            ureg_negate(ureg_scalar(eb[0][1], TGSI_SWIZZLE_W)),
 +            vmv[2], vmv[3]);
 +   ureg_MAD(shader, ureg_writemask(o_vmv[3], TGSI_WRITEMASK_XY), mv_scale, ureg_src(t_vmv), ureg_src(t_vpos));
 +
 +   ureg_MOV(shader, ureg_writemask(o_vtex[0], TGSI_WRITEMASK_XY), ureg_src(t_vpos));
 +   ureg_MOV(shader, ureg_writemask(o_vtex[1], TGSI_WRITEMASK_XY), ureg_src(t_vpos));
 +   ureg_MOV(shader, ureg_writemask(o_vtex[2], TGSI_WRITEMASK_XY), ureg_src(t_vpos));
 +
 +   ureg_MOV(shader, ureg_writemask(o_line, TGSI_WRITEMASK_X), ureg_scalar(vrect, TGSI_SWIZZLE_Y));
 +   ureg_MUL(shader, ureg_writemask(o_line, TGSI_WRITEMASK_Y),
 +      vrect, ureg_imm1f(shader, MACROBLOCK_HEIGHT / 2));
 +
 +   ureg_IF(shader, ureg_scalar(eb[0][0], TGSI_SWIZZLE_W), &label);
 +
 +      ureg_MOV(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_X), vrect);
 +      ureg_MUL(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Y), vrect, ureg_imm1f(shader, 0.5f));
 +      ureg_ADD(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_XY), vpos, ureg_src(t_vtex));
 +      ureg_MUL(shader, ureg_writemask(o_vtex[0], TGSI_WRITEMASK_XY), ureg_src(t_vtex), block_scale);
 +      ureg_ADD(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Y), ureg_src(t_vtex), ureg_imm1f(shader, 0.5f));
 +      ureg_MUL(shader, ureg_writemask(o_vtex[1], TGSI_WRITEMASK_XY), ureg_src(t_vtex), block_scale);
 +
 +      ureg_MUL(shader, ureg_writemask(o_line, TGSI_WRITEMASK_X),
 +         ureg_scalar(vrect, TGSI_SWIZZLE_Y),
 +         ureg_imm1f(shader, MACROBLOCK_HEIGHT / 2));
 +
 +   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
 +   ureg_ENDIF(shader);
 +
 +   ureg_release_temporary(shader, t_vtex);
 +   ureg_release_temporary(shader, t_vpos);
 +   ureg_release_temporary(shader, t_vmv);
 +
 +   ureg_END(shader);
 +
 +   return ureg_create_shader_and_destroy(shader, r->pipe);
 +}
 +
 +static struct ureg_dst
 +calc_field(struct ureg_program *shader)
 +{
 +   struct ureg_dst tmp;
 +   struct ureg_src line;
 +
 +   tmp = ureg_DECL_temporary(shader);
 +
 +   line = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_LINE, TGSI_INTERPOLATE_LINEAR);
 +
 +   /*
 +    * line.x going from 0 to 1 if not interlaced
 +    * line.x going from 0 to 8 in steps of 0.5 if interlaced
 +    * line.y going from 0 to 8 in steps of 0.5
 +    *
 +    * tmp.xy = fraction(line)
 +    * tmp.xy = tmp.xy >= 0.5 ? 1 : 0
 +    */
 +   ureg_FRC(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XY), line);
 +   ureg_SGE(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XY), ureg_src(tmp), ureg_imm1f(shader, 0.5f));
 +
 +   return tmp;
 +}
 +
 +static struct ureg_dst
 +fetch_ycbcr(struct vl_mpeg12_mc_renderer *r, struct ureg_program *shader, struct ureg_dst field)
 +{
 +   struct ureg_src tc[3], sampler[3], eb[2];
 +   struct ureg_dst texel, t_tc, t_eb_info;
 +   unsigned i, label;
 +
 +   texel = ureg_DECL_temporary(shader);
 +   t_tc = ureg_DECL_temporary(shader);
 +   t_eb_info = ureg_DECL_temporary(shader);
 +
 +   tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX0, TGSI_INTERPOLATE_LINEAR);
 +   tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX1, TGSI_INTERPOLATE_LINEAR);
 +   tc[2] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX2, TGSI_INTERPOLATE_LINEAR);
 +
 +   eb[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_0, TGSI_INTERPOLATE_CONSTANT);
 +   eb[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_1, TGSI_INTERPOLATE_CONSTANT);
 +
 +   for (i = 0; i < 3; ++i)  {
 +      sampler[i] = ureg_DECL_sampler(shader, i);
 +   }
 +
 +   /*
 +    * texel.y  = tex(field.y ? tc[1] : tc[0], sampler[0])
 +    * texel.cb = tex(tc[2], sampler[1])
 +    * texel.cr = tex(tc[2], sampler[2])
 +    */
 +
 +   ureg_CMP(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_XY),
 +            ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_X)),
 +            tc[1], tc[0]);
 +
 +   ureg_CMP(shader, ureg_writemask(t_eb_info, TGSI_WRITEMASK_XYZ),
 +            ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_X)),
 +            eb[1], eb[0]);
 +
 +   /* r600g is ignoring TGSI_INTERPOLATE_CONSTANT, just workaround this */
 +   ureg_SLT(shader, ureg_writemask(t_eb_info, TGSI_WRITEMASK_XYZ), ureg_src(t_eb_info), ureg_imm1f(shader, 0.5f));
 +
 +   ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_XYZ), ureg_imm1f(shader, 0.0f));
 +   for (i = 0; i < 3; ++i) {
 +      ureg_IF(shader, ureg_scalar(ureg_src(t_eb_info), TGSI_SWIZZLE_X + i), &label);
 +
 +         /* Nouveau can't writemask tex dst regs (yet?), so this won't work anymore on nvidia hardware */
 +         if(i==0 || r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_444) {
 +            ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_3D, ureg_src(t_tc), sampler[i]);
 +         } else {
 +            ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_3D, tc[2], sampler[i]);
 +         }
 +
 +      ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
 +      ureg_ENDIF(shader);
 +   }
 +
 +   ureg_release_temporary(shader, t_tc);
 +   ureg_release_temporary(shader, t_eb_info);
 +
 +   return texel;
 +}
 +
 +static struct ureg_dst
 +fetch_ref(struct ureg_program *shader, struct ureg_dst field)
 +{
 +   struct ureg_src info;
 +   struct ureg_src tc[4], sampler[2];
 +   struct ureg_dst ref[2], result;
 +   unsigned i, intra_label, bi_label, label;
 +
 +   info = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_INFO, TGSI_INTERPOLATE_CONSTANT);
 +
 +   for (i = 0; i < 4; ++i)
 +      tc[i] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_MV0 + i, TGSI_INTERPOLATE_LINEAR);
 +
 +   for (i = 0; i < 2; ++i) {
 +      sampler[i] = ureg_DECL_sampler(shader, i + 3);
 +      ref[i] = ureg_DECL_temporary(shader);
 +   }
 +
 +   result = ureg_DECL_temporary(shader);
 +
 +   ureg_MOV(shader, ureg_writemask(result, TGSI_WRITEMASK_XYZ), ureg_imm1f(shader, 0.5f));
 +
 +   ureg_IF(shader, ureg_scalar(info, TGSI_SWIZZLE_Y), &intra_label);
 +      ureg_CMP(shader, ureg_writemask(ref[0], TGSI_WRITEMASK_XY),
 +               ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
 +               tc[1], tc[0]);
 +
 +      ureg_IF(shader, ureg_scalar(info, TGSI_SWIZZLE_X), &bi_label);
 +
 +         /*
 +          * result = tex(field.z ? tc[1] : tc[0], sampler[bkwd_pred ? 1 : 0])
 +          */
 +         ureg_IF(shader, ureg_scalar(info, TGSI_SWIZZLE_Z), &label);
 +            ureg_TEX(shader, result, TGSI_TEXTURE_2D, ureg_src(ref[0]), sampler[1]);
 +         ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
 +         ureg_ELSE(shader, &label);
 +            ureg_TEX(shader, result, TGSI_TEXTURE_2D, ureg_src(ref[0]), sampler[0]);
 +         ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
 +         ureg_ENDIF(shader);
 +
 +      ureg_fixup_label(shader, bi_label, ureg_get_instruction_number(shader));
 +      ureg_ELSE(shader, &bi_label);
 +
 +         /*
 +          * if (field.z)
 +          *    ref[0..1] = tex(tc[0..1], sampler[0..1])
 +          * else
 +          *    ref[0..1] = tex(tc[2..3], sampler[0..1])
 +          */
 +         ureg_CMP(shader, ureg_writemask(ref[1], TGSI_WRITEMASK_XY),
 +            ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
 +            tc[3], tc[2]);
 +         ureg_TEX(shader, ref[0], TGSI_TEXTURE_2D, ureg_src(ref[0]), sampler[0]);
 +         ureg_TEX(shader, ref[1], TGSI_TEXTURE_2D, ureg_src(ref[1]), sampler[1]);
 +
 +         ureg_LRP(shader, ureg_writemask(result, TGSI_WRITEMASK_XYZ), ureg_imm1f(shader, 0.5f),
 +            ureg_src(ref[0]), ureg_src(ref[1]));
 +
 +      ureg_fixup_label(shader, bi_label, ureg_get_instruction_number(shader));
 +      ureg_ENDIF(shader);
 +   ureg_fixup_label(shader, intra_label, ureg_get_instruction_number(shader));
 +   ureg_ENDIF(shader);
 +
 +   for (i = 0; i < 2; ++i)
 +      ureg_release_temporary(shader, ref[i]);
 +
 +   return result;
 +}
 +
 +static void *
 +create_frag_shader(struct vl_mpeg12_mc_renderer *r)
 +{
 +   struct ureg_program *shader;
 +   struct ureg_dst result;
 +   struct ureg_dst field, texel;
 +   struct ureg_dst fragment;
 +
 +   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
 +   if (!shader)
 +      return NULL;
 +
 +   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
 +
 +   field = calc_field(shader);
 +   texel = fetch_ycbcr(r, shader, field);
 +
 +   result = fetch_ref(shader, field);
 +
 +   ureg_ADD(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ), ureg_src(texel), ureg_src(result));
 +
 +   ureg_release_temporary(shader, field);
 +   ureg_release_temporary(shader, texel);
 +   ureg_release_temporary(shader, result);
 +   ureg_END(shader);
 +
 +   return ureg_create_shader_and_destroy(shader, r->pipe);
 +}
 +
 +static bool
 +init_pipe_state(struct vl_mpeg12_mc_renderer *r)
 +{
 +   struct pipe_sampler_state sampler;
 +   struct pipe_rasterizer_state rs_state;
 +   unsigned filters[5];
 +   unsigned i;
 +
 +   assert(r);
 +
 +   r->viewport.scale[0] = r->buffer_width;
 +   r->viewport.scale[1] = r->buffer_height;
 +   r->viewport.scale[2] = 1;
 +   r->viewport.scale[3] = 1;
 +   r->viewport.translate[0] = 0;
 +   r->viewport.translate[1] = 0;
 +   r->viewport.translate[2] = 0;
 +   r->viewport.translate[3] = 0;
 +
 +   r->fb_state.width = r->buffer_width;
 +   r->fb_state.height = r->buffer_height;
 +   r->fb_state.nr_cbufs = 1;
 +   r->fb_state.zsbuf = NULL;
 +
 +   /* Luma filter */
 +   filters[0] = PIPE_TEX_FILTER_NEAREST;
 +   /* Chroma filters */
 +   if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_444 || true) { //TODO
 +      filters[1] = PIPE_TEX_FILTER_NEAREST;
 +      filters[2] = PIPE_TEX_FILTER_NEAREST;
 +   }
 +   else {
 +      filters[1] = PIPE_TEX_FILTER_LINEAR;
 +      filters[2] = PIPE_TEX_FILTER_LINEAR;
 +   }
 +   /* Fwd, bkwd ref filters */
 +   filters[3] = PIPE_TEX_FILTER_LINEAR;
 +   filters[4] = PIPE_TEX_FILTER_LINEAR;
 +
 +   for (i = 0; i < 5; ++i) {
 +      memset(&sampler, 0, sizeof(sampler));
 +      sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
 +      sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
 +      sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
 +      sampler.min_img_filter = filters[i];
 +      sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
 +      sampler.mag_img_filter = filters[i];
 +      sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
 +      sampler.compare_func = PIPE_FUNC_ALWAYS;
 +      sampler.normalized_coords = 1;
 +      /*sampler.shadow_ambient = ; */
 +      /*sampler.lod_bias = ; */
 +      sampler.min_lod = 0;
 +      /*sampler.max_lod = ; */
 +      sampler.border_color[0] = 0.0f;
 +      sampler.border_color[1] = 0.0f;
 +      sampler.border_color[2] = 0.0f;
 +      sampler.border_color[3] = 0.0f;
 +      /*sampler.max_anisotropy = ; */
 +      r->samplers.all[i] = r->pipe->create_sampler_state(r->pipe, &sampler);
 +   }
 +
 +   memset(&rs_state, 0, sizeof(rs_state));
 +   /*rs_state.sprite_coord_enable */
 +   rs_state.sprite_coord_mode = PIPE_SPRITE_COORD_UPPER_LEFT;
 +   rs_state.point_quad_rasterization = true;
 +   rs_state.point_size = BLOCK_WIDTH;
 +   rs_state.gl_rasterization_rules = true;
 +   r->rs_state = r->pipe->create_rasterizer_state(r->pipe, &rs_state);
 +
 +   return true;
 +}
 +
 +static void
 +cleanup_pipe_state(struct vl_mpeg12_mc_renderer *r)
 +{
 +   unsigned i;
 +
 +   assert(r);
 +
 +   for (i = 0; i < 5; ++i)
 +      r->pipe->delete_sampler_state(r->pipe, r->samplers.all[i]);
 +
 +   r->pipe->delete_rasterizer_state(r->pipe, r->rs_state);
 +}
 +
 +static bool
 +init_buffers(struct vl_mpeg12_mc_renderer *r)
 +{
 +   struct pipe_resource *idct_matrix;
 +   struct pipe_vertex_element vertex_elems[NUM_VS_INPUTS];
 +
 +   const unsigned mbw =
 +      align(r->buffer_width, MACROBLOCK_WIDTH) / MACROBLOCK_WIDTH;
 +   const unsigned mbh =
 +      align(r->buffer_height, MACROBLOCK_HEIGHT) / MACROBLOCK_HEIGHT;
 +
 +   unsigned i, chroma_width, chroma_height;
 +
 +   assert(r);
 +
 +   r->macroblocks_per_batch =
 +      mbw * (r->bufmode == VL_MPEG12_MC_RENDERER_BUFFER_PICTURE ? mbh : 1);
 +
 +   if (!(idct_matrix = vl_idct_upload_matrix(r->pipe)))
 +      return false;
 +
 +   if (!vl_idct_init(&r->idct_luma, r->pipe, r->buffer_width, r->buffer_height, idct_matrix))
 +      return false;
 +
 +   if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
 +      chroma_width = r->buffer_width / 2;
 +      chroma_height = r->buffer_height / 2;
 +   } else if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) {
 +      chroma_width = r->buffer_width;
 +      chroma_height = r->buffer_height / 2;
 +   } else {
 +      chroma_width = r->buffer_width;
 +      chroma_height = r->buffer_height;
 +   }
 +
 +   if(!vl_idct_init(&r->idct_chroma, r->pipe, chroma_width, chroma_height, idct_matrix))
 +      return false;
 +
 +   memset(&vertex_elems, 0, sizeof(vertex_elems));
 +
 +   vertex_elems[VS_I_RECT] = vl_vb_get_quad_vertex_element();
 +   r->quad = vl_vb_upload_quads(r->pipe, r->macroblocks_per_batch);
 +
 +   /* Position element */
 +   vertex_elems[VS_I_VPOS].src_format = PIPE_FORMAT_R16G16_SSCALED;
 +
 +   for (i = 0; i < 4; ++i)
 +      /* motion vector 0..4 element */
 +      vertex_elems[VS_I_MV0 + i].src_format = PIPE_FORMAT_R16G16_SSCALED;
 +
 +   /* y, cr, cb empty block element top left block */
 +   vertex_elems[VS_I_EB_0_0].src_format = PIPE_FORMAT_R8G8B8A8_SSCALED;
 +
 +   /* y, cr, cb empty block element top right block */
 +   vertex_elems[VS_I_EB_0_1].src_format = PIPE_FORMAT_R8G8B8A8_SSCALED;
 +
 +   /* y, cr, cb empty block element bottom left block */
 +   vertex_elems[VS_I_EB_1_0].src_format = PIPE_FORMAT_R8G8B8A8_SSCALED;
 +
 +   /* y, cr, cb empty block element bottom right block */
 +   vertex_elems[VS_I_EB_1_1].src_format = PIPE_FORMAT_R8G8B8A8_SSCALED;
 +
 +   r->vertex_stream_stride = vl_vb_element_helper(&vertex_elems[VS_I_VPOS], 9, 1);
 +
 +   r->vertex_elems_state = r->pipe->create_vertex_elements_state(
 +      r->pipe, NUM_VS_INPUTS, vertex_elems);
 +
 +   if (r->vertex_elems_state == NULL)
 +      return false;
 +
 +   r->vs = create_vert_shader(r);
 +   r->fs = create_frag_shader(r);
 +
 +   if (r->vs == NULL || r->fs == NULL)
 +      return false;
 +
 +   return true;
 +}
 +
 +static void
 +cleanup_buffers(struct vl_mpeg12_mc_renderer *r)
 +{
 +   assert(r);
 +
 +   r->pipe->delete_vs_state(r->pipe, r->vs);
 +   r->pipe->delete_fs_state(r->pipe, r->fs);
 +
 +   vl_idct_cleanup(&r->idct_luma);
 +   vl_idct_cleanup(&r->idct_chroma);
 +
 +   r->pipe->delete_vertex_elements_state(r->pipe, r->vertex_elems_state);
 +}
 +
 +static struct pipe_sampler_view
 +*find_or_create_sampler_view(struct vl_mpeg12_mc_renderer *r, struct pipe_surface *surface)
 +{
 +   struct pipe_sampler_view *sampler_view;
 +   assert(r);
 +   assert(surface);
 +
 +   sampler_view = (struct pipe_sampler_view*)util_keymap_lookup(r->texview_map, &surface);
 +   if (!sampler_view) {
 +      struct pipe_sampler_view templat;
 +      boolean added_to_map;
 +
 +      u_sampler_view_default_template(&templat, surface->texture,
 +                                      surface->texture->format);
 +      sampler_view = r->pipe->create_sampler_view(r->pipe, surface->texture,
 +                                                  &templat);
 +      if (!sampler_view)
 +         return NULL;
 +
 +      added_to_map = util_keymap_insert(r->texview_map, &surface,
 +                                        sampler_view, r->pipe);
 +      assert(added_to_map);
 +   }
 +
 +   return sampler_view;
 +}
 +
 +static void
 +get_motion_vectors(struct pipe_mpeg12_macroblock *mb, struct vertex2s mv[4])
 +{
 +   switch (mb->mb_type) {
 +      case PIPE_MPEG12_MACROBLOCK_TYPE_BI:
 +      {
 +         if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
 +            mv[2].x = mb->pmv[0][1][0];
 +            mv[2].y = mb->pmv[0][1][1];
 +
 +         } else {
 +            mv[2].x = mb->pmv[0][1][0];
 +            mv[2].y = mb->pmv[0][1][1] - (mb->pmv[0][1][1] % 4);
 +
 +            mv[3].x = mb->pmv[1][1][0];
 +            mv[3].y = mb->pmv[1][1][1] - (mb->pmv[1][1][1] % 4);
 +
 +            if(mb->mvfs[0][1]) mv[2].y += 2;
 +            if(!mb->mvfs[1][1]) mv[3].y -= 2;
 +         }
 +
 +         /* fall-through */
 +      }
 +      case PIPE_MPEG12_MACROBLOCK_TYPE_FWD:
 +      case PIPE_MPEG12_MACROBLOCK_TYPE_BKWD:
 +      {
 +         if (mb->mb_type == PIPE_MPEG12_MACROBLOCK_TYPE_BKWD) {
 +
 +            if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
 +               mv[0].x = mb->pmv[0][1][0];
 +               mv[0].y = mb->pmv[0][1][1];
 +
 +            } else {
 +               mv[0].x = mb->pmv[0][1][0];
 +               mv[0].y = mb->pmv[0][1][1] - (mb->pmv[0][1][1] % 4);
 +
 +               mv[1].x = mb->pmv[1][1][0];
 +               mv[1].y = mb->pmv[1][1][1] - (mb->pmv[1][1][1] % 4);
 +
 +               if(mb->mvfs[0][1]) mv[0].y += 2;
 +               if(!mb->mvfs[1][1]) mv[1].y -= 2;
 +            }
 +
 +         } else {
 +
 +            if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
 +               mv[0].x = mb->pmv[0][0][0];
 +               mv[0].y = mb->pmv[0][0][1];
 +
 +            } else {
 +               mv[0].x = mb->pmv[0][0][0];
 +               mv[0].y = mb->pmv[0][0][1] - (mb->pmv[0][0][1] % 4);
 +
 +               mv[1].x = mb->pmv[1][0][0];
 +               mv[1].y = mb->pmv[1][0][1] - (mb->pmv[1][0][1] % 4);
 +
 +               if(mb->mvfs[0][0]) mv[0].y += 2;
 +               if(!mb->mvfs[1][0]) mv[1].y -= 2;
 +            }
 +         }
 +      }
 +      default:
 +         break;
 +   }
 +}
 +
 +static void
 +grab_vectors(struct vl_mpeg12_mc_renderer *r,
 +             struct vl_mpeg12_mc_buffer *buffer,
 +             struct pipe_mpeg12_macroblock *mb)
 +{
 +   struct vertex_stream stream;
 +
 +   unsigned i, j;
 +
 +   assert(r);
 +   assert(mb);
 +
 +   stream.pos.x = mb->mbx;
 +   stream.pos.y = mb->mby;
 +   for ( i = 0; i < 2; ++i) {
 +      for ( j = 0; j < 2; ++j) {
 +         stream.eb[i][j].y = !(mb->cbp & (*r->empty_block_mask)[0][i][j]);
 +         stream.eb[i][j].cr = !(mb->cbp & (*r->empty_block_mask)[1][i][j]);
 +         stream.eb[i][j].cb = !(mb->cbp & (*r->empty_block_mask)[2][i][j]);
 +      }
 +   }
 +   stream.eb[0][0].flag = mb->dct_type == PIPE_MPEG12_DCT_TYPE_FIELD;
 +   stream.eb[0][1].flag = mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME;
 +   stream.eb[1][0].flag = mb->mb_type == PIPE_MPEG12_MACROBLOCK_TYPE_BKWD;
 +   switch (mb->mb_type) {
 +      case PIPE_MPEG12_MACROBLOCK_TYPE_INTRA:
 +         stream.eb[1][1].flag = -1;
 +         break;
 +
 +      case PIPE_MPEG12_MACROBLOCK_TYPE_FWD:
 +      case PIPE_MPEG12_MACROBLOCK_TYPE_BKWD:
 +         stream.eb[1][1].flag = 1;
 +         break;
 +
 +      case PIPE_MPEG12_MACROBLOCK_TYPE_BI:
 +         stream.eb[1][1].flag = 0;
 +         break;
 +
 +      default:
 +         assert(0);
 +   }
 +
 +   get_motion_vectors(mb, stream.mv);
 +   vl_vb_add_block(&buffer->vertex_stream, &stream);
 +}
 +
 +static void
 +grab_blocks(struct vl_mpeg12_mc_renderer *r,
 +            struct vl_mpeg12_mc_buffer *buffer,
 +            unsigned mbx, unsigned mby,
 +            unsigned cbp, short *blocks)
 +{
 +   unsigned tb = 0;
 +   unsigned x, y;
 +
 +   assert(r);
 +   assert(blocks);
 +
 +   for (y = 0; y < 2; ++y) {
 +      for (x = 0; x < 2; ++x, ++tb) {
 +         if (cbp & (*r->empty_block_mask)[0][y][x]) {
 +            vl_idct_add_block(&buffer->idct_y, mbx * 2 + x, mby * 2 + y, blocks);
 +            blocks += BLOCK_WIDTH * BLOCK_HEIGHT;
 +         }
 +      }
 +   }
 +
 +   /* TODO: Implement 422, 444 */
 +   assert(r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
 +
 +   for (tb = 1; tb < 3; ++tb) {
 +      if (cbp & (*r->empty_block_mask)[tb][0][0]) {
 +         if(tb == 1)
 +            vl_idct_add_block(&buffer->idct_cb, mbx, mby, blocks);
 +         else
 +            vl_idct_add_block(&buffer->idct_cr, mbx, mby, blocks);
 +         blocks += BLOCK_WIDTH * BLOCK_HEIGHT;
 +      }
 +   }
 +}
 +
 +static void
 +grab_macroblock(struct vl_mpeg12_mc_renderer *r,
 +                struct vl_mpeg12_mc_buffer *buffer,
 +                struct pipe_mpeg12_macroblock *mb)
 +{
 +   assert(r);
 +   assert(mb);
 +   assert(mb->blocks);
 +   assert(buffer->num_macroblocks < r->macroblocks_per_batch);
 +
 +   grab_vectors(r, buffer, mb);
 +   grab_blocks(r, buffer, mb->mbx, mb->mby, mb->cbp, mb->blocks);
 +
 +   ++buffer->num_macroblocks;
 +}
 +
 +static void
 +texview_map_delete(const struct keymap *map,
 +                   const void *key, void *data,
 +                   void *user)
 +{
 +   struct pipe_sampler_view *sv = (struct pipe_sampler_view*)data;
 +
 +   assert(map);
 +   assert(key);
 +   assert(data);
 +   assert(user);
 +
 +   pipe_sampler_view_reference(&sv, NULL);
 +}
 +
 +bool
 +vl_mpeg12_mc_renderer_init(struct vl_mpeg12_mc_renderer *renderer,
 +                           struct pipe_context *pipe,
 +                           unsigned buffer_width,
 +                           unsigned buffer_height,
 +                           enum pipe_video_chroma_format chroma_format,
 +                           enum VL_MPEG12_MC_RENDERER_BUFFER_MODE bufmode)
 +{
 +   assert(renderer);
 +   assert(pipe);
 +
 +   /* TODO: Implement other policies */
 +   assert(bufmode == VL_MPEG12_MC_RENDERER_BUFFER_PICTURE);
 +
 +   memset(renderer, 0, sizeof(struct vl_mpeg12_mc_renderer));
 +
 +   renderer->pipe = pipe;
 +   renderer->buffer_width = buffer_width;
 +   renderer->buffer_height = buffer_height;
 +   renderer->chroma_format = chroma_format;
 +   renderer->bufmode = bufmode;
 +
 +   /* TODO: Implement 422, 444 */
 +   assert(chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
 +   renderer->empty_block_mask = &const_empty_block_mask_420;
 +
 +   renderer->texview_map = util_new_keymap(sizeof(struct pipe_surface*), -1,
 +                                           texview_map_delete);
 +   if (!renderer->texview_map)
 +      return false;
 +
 +   if (!init_pipe_state(renderer))
 +      goto error_pipe_state;
 +
 +   if (!init_buffers(renderer))
 +      goto error_buffers;
 +
 +   return true;
 +
 +error_buffers:
 +   cleanup_pipe_state(renderer);
 +
 +error_pipe_state:
 +   util_delete_keymap(renderer->texview_map, renderer->pipe);
 +   return false;
 +}
 +
 +void
 +vl_mpeg12_mc_renderer_cleanup(struct vl_mpeg12_mc_renderer *renderer)
 +{
 +   assert(renderer);
 +
 +   util_delete_keymap(renderer->texview_map, renderer->pipe);
 +   cleanup_pipe_state(renderer);
 +   cleanup_buffers(renderer);
 +}
 +
 +bool
 +vl_mpeg12_mc_init_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer)
 +{
 +   struct pipe_resource template;
 +   struct pipe_sampler_view sampler_view;
 +
 +   unsigned i;
 +
 +   assert(renderer && buffer);
 +
 +   buffer->surface = NULL;
 +   buffer->past = NULL;
 +   buffer->future = NULL;
 +   buffer->num_macroblocks = 0;
 +
 +   memset(&template, 0, sizeof(struct pipe_resource));
 +   template.target = PIPE_TEXTURE_2D;
 +   /* TODO: Accomodate HW that can't do this and also for cases when this isn't precise enough */
 +   template.format = PIPE_FORMAT_R16_SNORM;
 +   template.last_level = 0;
 +   template.width0 = renderer->buffer_width;
 +   template.height0 = renderer->buffer_height;
 +   template.depth0 = 1;
++   template.array_size = 1;
 +   template.usage = PIPE_USAGE_STATIC;
 +   template.bind = PIPE_BIND_SAMPLER_VIEW;
 +   template.flags = 0;
 +
 +   buffer->textures.individual.y = renderer->pipe->screen->resource_create(renderer->pipe->screen, &template);
 +
 +   if (!vl_idct_init_buffer(&renderer->idct_luma, &buffer->idct_y, buffer->textures.individual.y))
 +      return false;
 +
 +   if (renderer->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
 +      template.width0 = renderer->buffer_width / 2;
 +      template.height0 = renderer->buffer_height / 2;
 +   }
 +   else if (renderer->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422)
 +      template.height0 = renderer->buffer_height / 2;
 +
 +   buffer->textures.individual.cb =
 +      renderer->pipe->screen->resource_create(renderer->pipe->screen, &template);
 +   buffer->textures.individual.cr =
 +      renderer->pipe->screen->resource_create(renderer->pipe->screen, &template);
 +
 +   if (!vl_idct_init_buffer(&renderer->idct_chroma, &buffer->idct_cb, buffer->textures.individual.cb))
 +      return false;
 +
 +   if (!vl_idct_init_buffer(&renderer->idct_chroma, &buffer->idct_cr, buffer->textures.individual.cr))
 +      return false;
 +
 +   for (i = 0; i < 3; ++i) {
 +      u_sampler_view_default_template(&sampler_view,
 +                                      buffer->textures.all[i],
 +                                      buffer->textures.all[i]->format);
 +      sampler_view.swizzle_r = i == 0 ? PIPE_SWIZZLE_RED : PIPE_SWIZZLE_ZERO;
 +      sampler_view.swizzle_g = i == 1 ? PIPE_SWIZZLE_RED : PIPE_SWIZZLE_ZERO;
 +      sampler_view.swizzle_b = i == 2 ? PIPE_SWIZZLE_RED : PIPE_SWIZZLE_ZERO;
 +      sampler_view.swizzle_a = PIPE_SWIZZLE_ONE;
 +      buffer->sampler_views.all[i] = renderer->pipe->create_sampler_view(
 +         renderer->pipe, buffer->textures.all[i], &sampler_view);
 +   }
 +
 +   buffer->vertex_bufs.individual.quad.stride = renderer->quad.stride;
 +   buffer->vertex_bufs.individual.quad.buffer_offset = renderer->quad.buffer_offset;
 +   pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, renderer->quad.buffer);
 +
 +   buffer->vertex_bufs.individual.stream = vl_vb_init(
 +      &buffer->vertex_stream, renderer->pipe, renderer->macroblocks_per_batch,
 +      renderer->vertex_stream_stride);
 +
 +   return true;
 +}
 +
 +void
 +vl_mpeg12_mc_cleanup_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer)
 +{
 +   unsigned i;
 +
 +   assert(renderer && buffer);
 +
 +   for (i = 0; i < 3; ++i) {
 +      pipe_sampler_view_reference(&buffer->sampler_views.all[i], NULL);
 +      pipe_resource_reference(&buffer->vertex_bufs.all[i].buffer, NULL);
 +      pipe_resource_reference(&buffer->textures.all[i], NULL);
 +   }
 +
 +   pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, NULL);
 +   vl_vb_cleanup(&buffer->vertex_stream);
 +
 +   vl_idct_cleanup_buffer(&renderer->idct_luma, &buffer->idct_y);
 +   vl_idct_cleanup_buffer(&renderer->idct_chroma, &buffer->idct_cb);
 +   vl_idct_cleanup_buffer(&renderer->idct_chroma, &buffer->idct_cr);
 +
 +   pipe_surface_reference(&buffer->surface, NULL);
 +   pipe_surface_reference(&buffer->past, NULL);
 +   pipe_surface_reference(&buffer->future, NULL);
 +}
 +
 +void
 +vl_mpeg12_mc_map_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer)
 +{
 +   assert(renderer && buffer);
 +
 +   vl_idct_map_buffers(&renderer->idct_luma, &buffer->idct_y);
 +   vl_idct_map_buffers(&renderer->idct_chroma, &buffer->idct_cr);
 +   vl_idct_map_buffers(&renderer->idct_chroma, &buffer->idct_cb);
 +
 +   vl_vb_map(&buffer->vertex_stream, renderer->pipe);
 +}
 +
 +void
 +vl_mpeg12_mc_renderer_render_macroblocks(struct vl_mpeg12_mc_renderer *renderer,
 +                                         struct vl_mpeg12_mc_buffer *buffer,
 +                                         struct pipe_surface *surface,
 +                                         struct pipe_surface *past,
 +                                         struct pipe_surface *future,
 +                                         unsigned num_macroblocks,
 +                                         struct pipe_mpeg12_macroblock *mpeg12_macroblocks,
 +                                         struct pipe_fence_handle **fence)
 +{
 +   assert(renderer && buffer);
 +   assert(surface);
 +   assert(num_macroblocks);
 +   assert(mpeg12_macroblocks);
 +
 +   if (surface != buffer->surface) {
 +      pipe_surface_reference(&buffer->surface, surface);
 +      pipe_surface_reference(&buffer->past, past);
 +      pipe_surface_reference(&buffer->future, future);
 +      buffer->fence = fence;
 +   } else {
 +      /* If the surface we're rendering hasn't changed the ref frames shouldn't change. */
 +      assert(buffer->past == past);
 +      assert(buffer->future == future);
 +   }
 +
 +   while (num_macroblocks) {
 +      unsigned left_in_batch = renderer->macroblocks_per_batch - buffer->num_macroblocks;
 +      unsigned num_to_submit = MIN2(num_macroblocks, left_in_batch);
 +      unsigned i;
 +
 +      for (i = 0; i < num_to_submit; ++i) {
 +         assert(mpeg12_macroblocks[i].base.codec == PIPE_VIDEO_CODEC_MPEG12);
 +         grab_macroblock(renderer, buffer, &mpeg12_macroblocks[i]);
 +      }
 +
 +      num_macroblocks -= num_to_submit;
 +
 +      if (buffer->num_macroblocks == renderer->macroblocks_per_batch) {
 +         vl_mpeg12_mc_unmap_buffer(renderer, buffer);
 +         vl_mpeg12_mc_renderer_flush(renderer, buffer);
 +         pipe_surface_reference(&buffer->surface, surface);
 +         pipe_surface_reference(&buffer->past, past);
 +         pipe_surface_reference(&buffer->future, future);
 +         vl_mpeg12_mc_map_buffer(renderer, buffer);
 +      }
 +   }
 +}
 +
 +void
 +vl_mpeg12_mc_unmap_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer)
 +{
 +   assert(renderer && buffer);
 +
 +   vl_idct_unmap_buffers(&renderer->idct_luma, &buffer->idct_y);
 +   vl_idct_unmap_buffers(&renderer->idct_chroma, &buffer->idct_cr);
 +   vl_idct_unmap_buffers(&renderer->idct_chroma, &buffer->idct_cb);
 +
 +   vl_vb_unmap(&buffer->vertex_stream, renderer->pipe);
 +}
 +
 +void
 +vl_mpeg12_mc_renderer_flush(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer)
 +{
 +   assert(renderer && buffer);
 +   assert(buffer->num_macroblocks <= renderer->macroblocks_per_batch);
 +
 +   if (buffer->num_macroblocks == 0)
 +      return;
 +
 +   vl_idct_flush(&renderer->idct_luma, &buffer->idct_y);
 +   vl_idct_flush(&renderer->idct_chroma, &buffer->idct_cr);
 +   vl_idct_flush(&renderer->idct_chroma, &buffer->idct_cb);
 +
 +   vl_vb_restart(&buffer->vertex_stream);
 +
 +   renderer->fb_state.cbufs[0] = buffer->surface;
 +   renderer->pipe->bind_rasterizer_state(renderer->pipe, renderer->rs_state);
 +   renderer->pipe->set_framebuffer_state(renderer->pipe, &renderer->fb_state);
 +   renderer->pipe->set_viewport_state(renderer->pipe, &renderer->viewport);
 +   renderer->pipe->set_vertex_buffers(renderer->pipe, 2, buffer->vertex_bufs.all);
 +   renderer->pipe->bind_vertex_elements_state(renderer->pipe, renderer->vertex_elems_state);
 +
 +   if (buffer->past) {
 +      buffer->textures.individual.ref[0] = buffer->past->texture;
 +      buffer->sampler_views.individual.ref[0] = find_or_create_sampler_view(renderer, buffer->past);
 +   } else {
 +      buffer->textures.individual.ref[0] = buffer->surface->texture;
 +      buffer->sampler_views.individual.ref[0] = find_or_create_sampler_view(renderer, buffer->surface);
 +   }
 +
 +   if (buffer->future) {
 +      buffer->textures.individual.ref[1] = buffer->future->texture;
 +      buffer->sampler_views.individual.ref[1] = find_or_create_sampler_view(renderer, buffer->future);
 +   } else {
 +      buffer->textures.individual.ref[1] = buffer->surface->texture;
 +      buffer->sampler_views.individual.ref[1] = find_or_create_sampler_view(renderer, buffer->surface);
 +   }
 +
 +   renderer->pipe->set_fragment_sampler_views(renderer->pipe, 5, buffer->sampler_views.all);
 +   renderer->pipe->bind_fragment_sampler_states(renderer->pipe, 5, renderer->samplers.all);
 +
 +   renderer->pipe->bind_vs_state(renderer->pipe, renderer->vs);
 +   renderer->pipe->bind_fs_state(renderer->pipe, renderer->fs);
 +   util_draw_arrays(renderer->pipe, PIPE_PRIM_QUADS, 0, buffer->num_macroblocks * 4);
 +
 +   renderer->pipe->flush(renderer->pipe, PIPE_FLUSH_RENDER_CACHE, buffer->fence);
 +
 +   /* Next time we get this surface it may have new ref frames */
 +   pipe_surface_reference(&buffer->surface, NULL);
 +   pipe_surface_reference(&buffer->past, NULL);
 +   pipe_surface_reference(&buffer->future, NULL);
 +
 +   buffer->num_macroblocks = 0;
 +}
index 8599ed3533dc64fb1cc7895487c8cbca848d2117,0000000000000000000000000000000000000000..552a0451fef084e3f0e6e330765937b3776efdc4
mode 100644,000000..100644
--- /dev/null
@@@ -1,183 -1,0 +1,183 @@@
-    quad.max_index = 4 * max_blocks - 1;
 +/**************************************************************************
 + *
 + * Copyright 2010 Christian König
 + * All Rights Reserved.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the
 + * "Software"), to deal in the Software without restriction, including
 + * without limitation the rights to use, copy, modify, merge, publish,
 + * distribute, sub license, and/or sell copies of the Software, and to
 + * permit persons to whom the Software is furnished to do so, subject to
 + * the following conditions:
 + *
 + * The above copyright notice and this permission notice (including the
 + * next paragraph) shall be included in all copies or substantial portions
 + * of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 + *
 + **************************************************************************/
 +
 +#include <assert.h>
 +#include <pipe/p_context.h>
 +#include <pipe/p_screen.h>
 +#include <util/u_memory.h>
 +#include <util/u_inlines.h>
 +#include <util/u_format.h>
 +#include "vl_vertex_buffers.h"
 +#include "vl_types.h"
 +
 +/* vertices for a quad covering a block */
 +static const struct quadf const_quad = {
 +   {0.0f, 1.0f}, {0.0f, 0.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}
 +};
 +
 +struct pipe_vertex_buffer
 +vl_vb_upload_quads(struct pipe_context *pipe, unsigned max_blocks)
 +{
 +   struct pipe_vertex_buffer quad;
 +   struct pipe_transfer *buf_transfer;
 +   struct quadf *v;
 +
 +   unsigned i;
 +
 +   assert(pipe);
 +   assert(max_blocks);
 +
 +   /* create buffer */
 +   quad.stride = sizeof(struct vertex2f);
-    unsigned i, size, offset = 0;
 +   quad.buffer_offset = 0;
 +   quad.buffer = pipe_buffer_create
 +   (
 +      pipe->screen,
 +      PIPE_BIND_VERTEX_BUFFER,
++      PIPE_USAGE_STATIC,
 +      sizeof(struct vertex2f) * 4 * max_blocks
 +   );
 +
 +   if(!quad.buffer)
 +      return quad;
 +
 +   /* and fill it */
 +   v = pipe_buffer_map
 +   (
 +      pipe,
 +      quad.buffer,
 +      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
 +      &buf_transfer
 +   );
 +
 +   for ( i = 0; i < max_blocks; ++i)
 +     memcpy(v + i, &const_quad, sizeof(const_quad));
 +
 +   pipe_buffer_unmap(pipe, buf_transfer);
 +
 +   return quad;
 +}
 +
 +struct pipe_vertex_element
 +vl_vb_get_quad_vertex_element(void)
 +{
 +   struct pipe_vertex_element element;
 +
 +   /* setup rectangle element */
 +   element.src_offset = 0;
 +   element.instance_divisor = 0;
 +   element.vertex_buffer_index = 0;
 +   element.src_format = PIPE_FORMAT_R32G32_FLOAT;
 +
 +   return element;
 +}
 +
 +unsigned
 +vl_vb_element_helper(struct pipe_vertex_element* elements, unsigned num_elements,
 +                              unsigned vertex_buffer_index)
 +{
-    buf.max_index = 4 * max_blocks - 1;
++   unsigned i, offset = 0;
 +
 +   assert(elements && num_elements);
 +
 +   for ( i = 0; i < num_elements; ++i ) {
 +      elements[i].src_offset = offset;
 +      elements[i].instance_divisor = 0;
 +      elements[i].vertex_buffer_index = vertex_buffer_index;
 +      offset += util_format_get_blocksize(elements[i].src_format);
 +   }
 +
 +   return offset;
 +}
 +
 +struct pipe_vertex_buffer
 +vl_vb_init(struct vl_vertex_buffer *buffer, struct pipe_context *pipe,
 +           unsigned max_blocks, unsigned stride)
 +{
 +   struct pipe_vertex_buffer buf;
 +
 +   assert(buffer);
 +
 +   buffer->num_verts = 0;
 +   buffer->stride = stride;
 +
 +   buf.stride = stride;
 +   buf.buffer_offset = 0;
 +   buf.buffer = pipe_buffer_create
 +   (
 +      pipe->screen,
 +      PIPE_BIND_VERTEX_BUFFER,
++      PIPE_USAGE_STREAM,
 +      stride * 4 * max_blocks
 +   );
 +
 +   pipe_resource_reference(&buffer->resource, buf.buffer);
 +
 +   vl_vb_map(buffer, pipe);
 +
 +   return buf;
 +}
 +
 +void
 +vl_vb_map(struct vl_vertex_buffer *buffer, struct pipe_context *pipe)
 +{
 +   assert(buffer && pipe);
 +
 +   buffer->vectors = pipe_buffer_map
 +   (
 +      pipe,
 +      buffer->resource,
 +      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
 +      &buffer->transfer
 +   );
 +}
 +
 +void
 +vl_vb_unmap(struct vl_vertex_buffer *buffer, struct pipe_context *pipe)
 +{
 +   assert(buffer && pipe);
 +
 +   pipe_buffer_unmap(pipe, buffer->transfer);
 +}
 +
 +unsigned
 +vl_vb_restart(struct vl_vertex_buffer *buffer)
 +{
 +   assert(buffer);
 +
 +   unsigned todo = buffer->num_verts;
 +   buffer->num_verts = 0;
 +   return todo;
 +}
 +
 +void
 +vl_vb_cleanup(struct vl_vertex_buffer *buffer)
 +{
 +   assert(buffer);
 +
 +   pipe_resource_reference(&buffer->resource, NULL);
 +}
index a690b671e4965b243ec63c11f00a1f1b6c30de14,a484f38e9f12fcbf795e35cf8332d612bb699e4d..436de9c4dbde0d46318a59d81402e5f8f804e44d
@@@ -17,12 -17,10 +17,11 @@@ C_SOURCES = 
        r600_shader.c \
        r600_state.c \
        r600_texture.c \
 +      r600_video_context.c \
        r700_asm.c \
        evergreen_state.c \
        eg_asm.c \
        r600_translate.c \
-       r600_state_common.c \
-       r600_upload.c
+       r600_state_common.c
  
  include ../../Makefile.template
index f4ff2fc3d43dc04fa4439c75bb32b969c1155bf3,de796188fdec8f71f371d2463de3ac7bee2e65de..1393df8875730e162231ae5adec6c6ffd42009de
@@@ -35,9 -35,6 +35,9 @@@
  #define NUM_OF_CYCLES 3
  #define NUM_OF_COMPONENTS 4
  
 +#define PREV_ALU(alu) LIST_ENTRY(struct r600_bc_alu, alu->list.prev, list)
 +#define NEXT_ALU(alu) LIST_ENTRY(struct r600_bc_alu, alu->list.next, list)
 +
  static inline unsigned int r600_bc_get_num_operands(struct r600_bc *bc, struct r600_bc_alu *alu)
  {
        if(alu->is_op3)
@@@ -50,6 -47,7 +50,7 @@@
                case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP:
                        return 0;
                case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD:
+               case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT:
                case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE:
                case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT:
                case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE:
@@@ -97,6 -95,7 +98,7 @@@
                case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP:
                        return 0;
                case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD:
+               case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT:
                case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE:
                case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT:
                case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE:
@@@ -157,7 -156,6 +159,7 @@@ static struct r600_bc_cf *r600_bc_cf(vo
        LIST_INITHEAD(&cf->alu);
        LIST_INITHEAD(&cf->vtx);
        LIST_INITHEAD(&cf->tex);
 +      cf->barrier = 1;
        return cf;
  }
  
@@@ -246,64 -244,66 +248,89 @@@ static int r600_bc_add_cf(struct r600_b
        return 0;
  }
  
 +static void r600_bc_remove_cf(struct r600_bc *bc, struct r600_bc_cf *cf)
 +{
 +      struct r600_bc_cf *other;
 +      LIST_FOR_EACH_ENTRY(other, &bc->cf, list) {
 +              if (other->id > cf->id)
 +                      other->id -= 2;
 +              if (other->cf_addr > cf->id)
 +                      other->cf_addr -= 2;
 +      }
 +      LIST_DEL(&cf->list);
 +      free(cf);
 +}
 +
 +static void r600_bc_move_cf(struct r600_bc *bc, struct r600_bc_cf *cf, struct r600_bc_cf *next)
 +{
 +      struct r600_bc_cf *prev = LIST_ENTRY(struct r600_bc_cf, next->list.prev, list);
 +      unsigned old_id = cf->id;
 +      unsigned new_id = next->list.prev == &bc->cf ? 0 : prev->id + 2;
 +      struct r600_bc_cf *other;
 +
 +      if (prev == cf || next == cf)
 +              return; /* position hasn't changed */
 +
 +      LIST_DEL(&cf->list);
 +      LIST_FOR_EACH_ENTRY(other, &bc->cf, list) {
 +              if (other->id > old_id)
 +                      other->id -= 2;
 +              if (other->id >= new_id)
 +                      other->id += 2;
 +              if (other->cf_addr > old_id)
 +                      other->cf_addr -= 2;
 +              if (other->cf_addr > new_id)
 +                      other->cf_addr += 2;
 +      }
 +      cf->id = new_id;
 +      LIST_ADD(&cf->list, &prev->list);
 +}
 +
  int r600_bc_add_output(struct r600_bc *bc, const struct r600_bc_output *output)
  {
        int r;
  
 -      if (bc->cf_last && (bc->cf_last->inst == output->inst ||
 -              (bc->cf_last->inst == BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT) &&
 -              output->inst == BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE))) &&
++      if (bc->cf_last && bc->cf_last->inst == BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT) &&
+               output->type == bc->cf_last->output.type &&
+               output->elem_size == bc->cf_last->output.elem_size &&
+               output->swizzle_x == bc->cf_last->output.swizzle_x &&
+               output->swizzle_y == bc->cf_last->output.swizzle_y &&
+               output->swizzle_z == bc->cf_last->output.swizzle_z &&
+               output->swizzle_w == bc->cf_last->output.swizzle_w &&
+               (output->burst_count + bc->cf_last->output.burst_count) <= 16) {
+               if ((output->gpr + output->burst_count) == bc->cf_last->output.gpr &&
+                       (output->array_base + output->burst_count) == bc->cf_last->output.array_base) {
 -                      bc->cf_last->output.end_of_program |= output->end_of_program;
 -                      bc->cf_last->output.inst = output->inst;
+                       bc->cf_last->output.gpr = output->gpr;
+                       bc->cf_last->output.array_base = output->array_base;
+                       bc->cf_last->output.burst_count += output->burst_count;
+                       return 0;
+               } else if (output->gpr == (bc->cf_last->output.gpr + bc->cf_last->output.burst_count) &&
+                       output->array_base == (bc->cf_last->output.array_base + bc->cf_last->output.burst_count)) {
 -                      bc->cf_last->output.end_of_program |= output->end_of_program;
 -                      bc->cf_last->output.inst = output->inst;
+                       bc->cf_last->output.burst_count += output->burst_count;
+                       return 0;
+               }
+       }
        r = r600_bc_add_cf(bc);
        if (r)
                return r;
 -      bc->cf_last->inst = output->inst;
 +      bc->cf_last->inst = BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT);
        memcpy(&bc->cf_last->output, output, sizeof(struct r600_bc_output));
 +      bc->cf_last->output.burst_count = 1;
        return 0;
  }
  
 -/* alu instructions that can ony exits once per group */
 -static int is_alu_once_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
 +/* alu predicate instructions */
 +static int is_alu_pred_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
  {
        switch (bc->chiprev) {
        case CHIPREV_R600:
        case CHIPREV_R700:
                return !alu->is_op3 && (
 -                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE ||
 -                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT ||
 -                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE ||
 -                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE ||
 -                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT ||
 -                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT ||
 -                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT ||
 -                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT ||
 -                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT ||
 -                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT ||
                        alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT ||
                        alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT ||
                        alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE ||
        case CHIPREV_EVERGREEN:
        default:
                return !alu->is_op3 && (
 -                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE ||
 -                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT ||
 -                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE ||
 -                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE ||
 -                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT ||
 -                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT ||
 -                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT ||
 -                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT ||
 -                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT ||
 -                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT ||
                        alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT_UINT ||
                        alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE_UINT ||
                        alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE ||
        }
  }
  
 +/* alu kill instructions */
 +static int is_alu_kill_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
 +{
 +      switch (bc->chiprev) {
 +      case CHIPREV_R600:
 +      case CHIPREV_R700:
 +              return !alu->is_op3 && (
 +                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE ||
 +                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT ||
 +                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE ||
 +                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE ||
 +                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT ||
 +                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT ||
 +                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT ||
 +                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT ||
 +                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT ||
 +                      alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT);
 +      case CHIPREV_EVERGREEN:
 +      default:
 +              return !alu->is_op3 && (
 +                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE ||
 +                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT ||
 +                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE ||
 +                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE ||
 +                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_UINT ||
 +                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_UINT ||
 +                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE_INT ||
 +                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT_INT ||
 +                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE_INT ||
 +                      alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE_INT);
 +      }
 +}
 +
 +/* alu instructions that can ony exits once per group */
 +static int is_alu_once_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
 +{
 +      return is_alu_kill_inst(bc, alu) ||
 +              is_alu_pred_inst(bc, alu);
 +}
 +
  static int is_alu_reduction_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
  {
        switch (bc->chiprev) {
        }
  }
  
+ static int is_alu_cube_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
+ {
+       switch (bc->chiprev) {
+       case CHIPREV_R600:
+       case CHIPREV_R700:
+               return !alu->is_op3 &&
+                       alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE;
+       case CHIPREV_EVERGREEN:
+       default:
+               return !alu->is_op3 &&
+                       alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE;
+       }
+ }
  static int is_alu_mova_inst(struct r600_bc *bc, struct r600_bc_alu *alu)
  {
        switch (bc->chiprev) {
@@@ -480,9 -464,9 +521,9 @@@ static int is_alu_trans_unit_inst(struc
        case CHIPREV_EVERGREEN:
        default:
                if (!alu->is_op3)
+                       /* Note that FLT_TO_INT* instructions are vector instructions
+                        * on Evergreen, despite what the documentation says. */
                        return alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT ||
-                               alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT ||
-                               alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT_FLOOR ||
                                alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT ||
                                alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT ||
                                alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT ||
@@@ -563,7 -547,7 +604,7 @@@ struct alu_bank_swizzle 
        int     hw_cfile_elem[4];
  };
  
- const unsigned cycle_for_bank_swizzle_vec[][3] = {
static const unsigned cycle_for_bank_swizzle_vec[][3] = {
        [SQ_ALU_VEC_012] = { 0, 1, 2 },
        [SQ_ALU_VEC_021] = { 0, 2, 1 },
        [SQ_ALU_VEC_120] = { 1, 2, 0 },
        [SQ_ALU_VEC_210] = { 2, 1, 0 }
  };
  
- const unsigned cycle_for_bank_swizzle_scl[][3] = {
static const unsigned cycle_for_bank_swizzle_scl[][3] = {
        [SQ_ALU_SCL_210] = { 2, 1, 0 },
        [SQ_ALU_SCL_122] = { 1, 2, 2 },
        [SQ_ALU_SCL_212] = { 2, 1, 2 },
@@@ -785,7 -769,8 +826,8 @@@ static int replace_gpr_with_pv_ps(struc
        for (i = 0; i < 5; ++i) {
                if(prev[i] && prev[i]->dst.write && !prev[i]->dst.rel) {
                        gpr[i] = prev[i]->dst.sel;
-                       if (is_alu_reduction_inst(bc, prev[i]))
+                       /* cube writes more than PV.X */
+                       if (!is_alu_cube_inst(bc, prev[i]) && is_alu_reduction_inst(bc, prev[i]))
                                chan[i] = 0;
                        else
                                chan[i] = prev[i]->dst.chan;
@@@ -865,7 -850,7 +907,7 @@@ static int r600_bc_alu_nliterals(struc
  
        for (i = 0; i < num_src; ++i) {
                if (alu->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
-                       uint32_t value = alu->src[i].value[alu->src[i].chan];
+                       uint32_t value = alu->src[i].value;
                        unsigned found = 0;
                        for (j = 0; j < *nliteral; ++j) {
                                if (literal[j] == value) {
@@@ -892,7 -877,7 +934,7 @@@ static void r600_bc_alu_adjust_literals
  
        for (i = 0; i < num_src; ++i) {
                if (alu->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
-                       uint32_t value = alu->src[i].value[alu->src[i].chan];
+                       uint32_t value = alu->src[i].value;
                        for (j = 0; j < nliteral; ++j) {
                                if (literal[j] == value) {
                                        alu->src[i].chan = j;
@@@ -1195,8 -1180,7 +1237,7 @@@ int r600_bc_add_alu_type(struct r600_b
                        bc->ngpr = nalu->src[i].sel + 1;
                }
                if (nalu->src[i].sel == V_SQ_ALU_SRC_LITERAL)
-                       r600_bc_special_constants(
-                               nalu->src[i].value[nalu->src[i].chan],
+                       r600_bc_special_constants(nalu->src[i].value,
                                &nalu->src[i].sel, &nalu->src[i].neg);
        }
        if (nalu->dst.sel >= bc->ngpr) {
@@@ -1259,16 -1243,6 +1300,16 @@@ int r600_bc_add_alu(struct r600_bc *bc
        return r600_bc_add_alu_type(bc, alu, BC_INST(bc, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU));
  }
  
 +static void r600_bc_remove_alu(struct r600_bc_cf *cf, struct r600_bc_alu *alu)
 +{
 +      if (alu->last && alu->list.prev != &cf->alu) {
 +              PREV_ALU(alu)->last = 1;
 +      }
 +      LIST_DEL(&alu->list);
 +      free(alu);
 +      cf->ndw -= 2;
 +}
 +
  int r600_bc_add_vtx(struct r600_bc *bc, const struct r600_bc_vtx *vtx)
  {
        struct r600_bc_vtx *nvtx = r600_bc_vtx();
@@@ -1308,6 -1282,18 +1349,18 @@@ int r600_bc_add_tex(struct r600_bc *bc
                return -ENOMEM;
        memcpy(ntex, tex, sizeof(struct r600_bc_tex));
  
+ 	/* we can't fetch data and use it as texture lookup address in the same TEX clause */
+       if (bc->cf_last != NULL &&
+               bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_TEX) {
+               struct r600_bc_tex *ttex;
+               LIST_FOR_EACH_ENTRY(ttex, &bc->cf_last->tex, list) {
+                       if (ttex->dst_gpr == ntex->src_gpr) {
+                               bc->force_add_cf = 1;
+                               break;
+                       }
+               }
+       }
        /* cf can contains only alu or only vtx or only tex */
        if (bc->cf_last == NULL ||
                bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_TEX ||
@@@ -1374,6 -1360,7 +1427,7 @@@ static int r600_bc_vtx_build(struct r60
                }
        }
        bc->bytecode[id++] = S_SQ_VTX_WORD0_BUFFER_ID(vtx->buffer_id + fetch_resource_start) |
+                       S_SQ_VTX_WORD0_FETCH_TYPE(vtx->fetch_type) |
                        S_SQ_VTX_WORD0_SRC_GPR(vtx->src_gpr) |
                        S_SQ_VTX_WORD0_SRC_SEL_X(vtx->src_sel_x) |
                        S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(vtx->mega_fetch_count);
@@@ -1464,63 -1451,16 +1518,63 @@@ static int r600_bc_alu_build(struct r60
        return 0;
  }
  
 -/* common for r600/r700 - eg in eg_asm.c */
 -static int r600_bc_cf_build(struct r600_bc *bc, struct r600_bc_cf *cf)
 +enum cf_class
 +{
 +      CF_CLASS_ALU,
 +      CF_CLASS_TEXTURE,
 +      CF_CLASS_VERTEX,
 +      CF_CLASS_EXPORT,
 +      CF_CLASS_OTHER
 +};
 + 
 +static enum cf_class r600_bc_cf_class(struct r600_bc_cf *cf)
  {
 -      unsigned id = cf->id;
 -
        switch (cf->inst) {
        case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
 -      case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
        case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
        case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
 +      case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
 +              return CF_CLASS_ALU;
 +
 +      case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
 +              return CF_CLASS_TEXTURE;
 +
 +      case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
 +      case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
 +              return CF_CLASS_VERTEX;
 +
 +      case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
 +      case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
 +      case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
 +      case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
 +              return CF_CLASS_EXPORT;
 +
 +      case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
 +      case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
 +      case V_SQ_CF_WORD1_SQ_CF_INST_POP:
 +      case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
 +      case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
 +      case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
 +      case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
 +      case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
 +      case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
 +              return CF_CLASS_OTHER;
 +
 +      default:
 +              R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
 +              return -EINVAL;
 +      }
 +}
 +
 +/* common for r600/r700 - eg in eg_asm.c */
 +static int r600_bc_cf_build(struct r600_bc *bc, struct r600_bc_cf *cf)
 +{
 +      unsigned id = cf->id;
 +      unsigned end_of_program = bc->cf.prev == &cf->list;
 +
 +      switch (r600_bc_cf_class(cf)) {
 +      case CF_CLASS_ALU:
 +              assert(!end_of_program);
                bc->bytecode[id++] = S_SQ_CF_ALU_WORD0_ADDR(cf->addr >> 1) |
                        S_SQ_CF_ALU_WORD0_KCACHE_MODE0(cf->kcache[0].mode) |
                        S_SQ_CF_ALU_WORD0_KCACHE_BANK0(cf->kcache[0].bank) |
                        S_SQ_CF_ALU_WORD1_KCACHE_MODE1(cf->kcache[1].mode) |
                        S_SQ_CF_ALU_WORD1_KCACHE_ADDR0(cf->kcache[0].addr) |
                        S_SQ_CF_ALU_WORD1_KCACHE_ADDR1(cf->kcache[1].addr) |
 -                                      S_SQ_CF_ALU_WORD1_BARRIER(1) |
 -                                      S_SQ_CF_ALU_WORD1_USES_WATERFALL(bc->chiprev == CHIPREV_R600 ? cf->r6xx_uses_waterfall : 0) |
 -                                      S_SQ_CF_ALU_WORD1_COUNT((cf->ndw / 2) - 1);
 +                      S_SQ_CF_ALU_WORD1_BARRIER(cf->barrier) |
 +                      S_SQ_CF_ALU_WORD1_USES_WATERFALL(bc->chiprev == CHIPREV_R600 ? cf->r6xx_uses_waterfall : 0) |
 +                      S_SQ_CF_ALU_WORD1_COUNT((cf->ndw / 2) - 1);
                break;
 -      case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
 -      case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
 -      case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
 +      case CF_CLASS_TEXTURE:
 +      case CF_CLASS_VERTEX:
                bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->addr >> 1);
                bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
 -                                      S_SQ_CF_WORD1_BARRIER(1) |
 -                                      S_SQ_CF_WORD1_COUNT((cf->ndw / 4) - 1);
 +                      S_SQ_CF_WORD1_BARRIER(cf->barrier) |
 +                      S_SQ_CF_WORD1_COUNT((cf->ndw / 4) - 1) |
 +                      S_SQ_CF_WORD1_END_OF_PROGRAM(end_of_program);
                break;
 -      case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
 -      case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
 +      case CF_CLASS_EXPORT:
                bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD0_RW_GPR(cf->output.gpr) |
                        S_SQ_CF_ALLOC_EXPORT_WORD0_ELEM_SIZE(cf->output.elem_size) |
                        S_SQ_CF_ALLOC_EXPORT_WORD0_ARRAY_BASE(cf->output.array_base) |
                        S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Y(cf->output.swizzle_y) |
                        S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Z(cf->output.swizzle_z) |
                        S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_W(cf->output.swizzle_w) |
 -                      S_SQ_CF_ALLOC_EXPORT_WORD1_BARRIER(cf->output.barrier) |
 -                      S_SQ_CF_ALLOC_EXPORT_WORD1_CF_INST(cf->output.inst) |
 -                      S_SQ_CF_ALLOC_EXPORT_WORD1_END_OF_PROGRAM(cf->output.end_of_program);
 +                      S_SQ_CF_ALLOC_EXPORT_WORD1_BARRIER(cf->barrier) |
 +                      S_SQ_CF_ALLOC_EXPORT_WORD1_CF_INST(cf->inst) |
 +                      S_SQ_CF_ALLOC_EXPORT_WORD1_END_OF_PROGRAM(end_of_program);
                break;
 -      case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
 -      case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
 -      case V_SQ_CF_WORD1_SQ_CF_INST_POP:
 -      case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
 -      case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
 -      case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
 -      case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
 -      case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
 -      case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
 +      case CF_CLASS_OTHER:
                bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->cf_addr >> 1);
                bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
 -                                      S_SQ_CF_WORD1_BARRIER(1) |
 -                                      S_SQ_CF_WORD1_COND(cf->cond) |
 -                                      S_SQ_CF_WORD1_POP_COUNT(cf->pop_count);
 +                      S_SQ_CF_WORD1_BARRIER(cf->barrier) |
 +                      S_SQ_CF_WORD1_COND(cf->cond) |
 +                      S_SQ_CF_WORD1_POP_COUNT(cf->pop_count) |
 +                      S_SQ_CF_WORD1_END_OF_PROGRAM(end_of_program);
  
                break;
        default:
        return 0;
  }
  
 +struct gpr_usage_range {
 +      int     replacement;
 +      int     rel_block;
 +      int     start;
 +      int     end;
 +};
 +
 +struct gpr_usage {
 +      unsigned                channels:4;
 +      int                     first_write;
 +      int                     last_write[4];
 +      unsigned                nranges;
 +      struct gpr_usage_range  *ranges;
 +};
 +
 +static struct gpr_usage_range* last_gpr_usage_range(struct gpr_usage *usage)
 +{
 +      if (usage->nranges)
 +              return usage->ranges + usage->nranges - 1;
 +      else
 +              return NULL;
 +}
 +
 +static struct gpr_usage_range* add_gpr_usage_range(struct gpr_usage *usage)
 +{
 +      struct gpr_usage_range *range;
 +
 +      usage->nranges++;
 +      usage->ranges = realloc(usage->ranges, usage->nranges * sizeof(struct gpr_usage_range));
 +      if (!usage->ranges)
 +              return NULL;
 +
 +      range = last_gpr_usage_range(usage);
 +	range->replacement = -1; /* no preferred replacement */
 +      range->rel_block = -1;
 +      range->start = -1;
 +      range->end = -1;
 +
 +      return range;
 +}
 +
 +static void notice_gpr_read(struct gpr_usage *usage, int id, unsigned chan)
 +{
 +      struct gpr_usage_range* range;
 +
 +        usage->channels |= 1 << chan;
 +        usage->first_write = -1;
 +        if (!usage->nranges) {
 +              range = add_gpr_usage_range(usage);
 +        } else
 +              range = last_gpr_usage_range(usage);
 +
 +        if (range && range->end < id)
 +              range->end = id;
 +}
 +
 +static void notice_gpr_rel_read(struct r600_bc *bc, struct gpr_usage usage[128],
 +                              int id, unsigned gpr, unsigned chan)
 +{
 +      unsigned i;
 +      for (i = gpr; i < bc->ngpr; ++i)
 +              notice_gpr_read(&usage[i], id, chan);
 +
 +      last_gpr_usage_range(&usage[gpr])->rel_block = bc->ngpr - gpr;
 +}
 +
 +static void notice_gpr_last_write(struct gpr_usage *usage, int id, unsigned chan)
 +{
 +        usage->last_write[chan] = id;
 +}
 +
 +static void notice_gpr_write(struct gpr_usage *usage, int id, unsigned chan,
 +                              int predicate, int prefered_replacement)
 +{
 +      struct gpr_usage_range* last_range = last_gpr_usage_range(usage);
 +      int start = usage->first_write != -1 ? usage->first_write : id;
 +      usage->channels &= ~(1 << chan);
 +      if (usage->channels) {
 +              if (usage->first_write == -1)
 +                      usage->first_write = id;
 +      } else if (!last_range || (last_range->start != start && !predicate)) {
 +              usage->first_write = start;
 +              struct gpr_usage_range* range = add_gpr_usage_range(usage);
 +              range->replacement = prefered_replacement;
 +                range->start = start;
 +        } else if (last_range->start == start && prefered_replacement != -1) {
 +              last_range->replacement = prefered_replacement;
 +        }
 +        notice_gpr_last_write(usage, id, chan);
 +}
 +
 +static void notice_gpr_rel_last_write(struct gpr_usage usage[128], int id, unsigned chan)
 +{
 +      unsigned i;
 +      for (i = 0; i < 128; ++i)
 +              notice_gpr_last_write(&usage[i], id, chan);
 +}
 +
 +static void notice_gpr_rel_write(struct gpr_usage usage[128], int id, unsigned chan)
 +{
 +      unsigned i;
 +      for (i = 0; i < 128; ++i)
 +              notice_gpr_write(&usage[i], id, chan, 1, -1);
 +}
 +
 +static void notice_alu_src_gprs(struct r600_bc *bc, struct r600_bc_alu *alu,
 +                                struct gpr_usage usage[128], int id)
 +{
 +      unsigned src, num_src;
 +
 +      num_src = r600_bc_get_num_operands(bc, alu);
 +      for (src = 0; src < num_src; ++src) {
 +		// constants don't matter
 +              if (!is_gpr(alu->src[src].sel))
 +                      continue;
 +
 +              if (alu->src[src].rel)
 +                      notice_gpr_rel_read(bc, usage, id, alu->src[src].sel, alu->src[src].chan);
 +              else
 +                      notice_gpr_read(&usage[alu->src[src].sel], id, alu->src[src].chan);
 +      }
 +}
 +
 +static void notice_alu_dst_gprs(struct r600_bc_alu *alu_first, struct gpr_usage usage[128],
 +                              int id, int predicate)
 +{
 +      struct r600_bc_alu *alu;
 +      for (alu = alu_first; alu; alu = LIST_ENTRY(struct r600_bc_alu, alu->list.next, list)) {
 +              if (alu->dst.write) {
 +                      if (alu->dst.rel)
 +                              notice_gpr_rel_write(usage, id, alu->dst.chan);
 +                      else if (alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV && is_gpr(alu->src[0].sel))
 +                              notice_gpr_write(&usage[alu->dst.sel], id, alu->dst.chan,
 +                                              predicate, alu->src[0].sel);
 +                      else
 +                              notice_gpr_write(&usage[alu->dst.sel], id, alu->dst.chan, predicate, -1);
 +              }
 +
 +              if (alu->last)
 +                      break;
 +      }
 +}
 +
 +static void notice_tex_gprs(struct r600_bc *bc, struct r600_bc_tex *tex,
 +                              struct gpr_usage usage[128],
 +                              int id, int predicate)
 +{
 +      if (tex->src_rel) {
 +                if (tex->src_sel_x < 4)
 +                      notice_gpr_rel_read(bc, usage, id, tex->src_gpr, tex->src_sel_x);
 +              if (tex->src_sel_y < 4)
 +                      notice_gpr_rel_read(bc, usage, id, tex->src_gpr, tex->src_sel_y);
 +              if (tex->src_sel_z < 4)
 +                      notice_gpr_rel_read(bc, usage, id, tex->src_gpr, tex->src_sel_z);
 +              if (tex->src_sel_w < 4)
 +                      notice_gpr_rel_read(bc, usage, id, tex->src_gpr, tex->src_sel_w);
 +        } else {
 +              if (tex->src_sel_x < 4)
 +                      notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_x);
 +              if (tex->src_sel_y < 4)
 +                      notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_y);
 +              if (tex->src_sel_z < 4)
 +                      notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_z);
 +              if (tex->src_sel_w < 4)
 +                      notice_gpr_read(&usage[tex->src_gpr], id, tex->src_sel_w);
 +      }
 +      if (tex->dst_rel) {
 +              if (tex->dst_sel_x != 7)
 +                      notice_gpr_rel_write(usage, id, 0);
 +              if (tex->dst_sel_y != 7)
 +                      notice_gpr_rel_write(usage, id, 1);
 +              if (tex->dst_sel_z != 7)
 +                      notice_gpr_rel_write(usage, id, 2);
 +              if (tex->dst_sel_w != 7)
 +                      notice_gpr_rel_write(usage, id, 3);
 +      } else {
 +              if (tex->dst_sel_x != 7)
 +                      notice_gpr_write(&usage[tex->dst_gpr], id, 0, predicate, -1);
 +              if (tex->dst_sel_y != 7)
 +                      notice_gpr_write(&usage[tex->dst_gpr], id, 1, predicate, -1);
 +              if (tex->dst_sel_z != 7)
 +                      notice_gpr_write(&usage[tex->dst_gpr], id, 2, predicate, -1);
 +              if (tex->dst_sel_w != 7)
 +                      notice_gpr_write(&usage[tex->dst_gpr], id, 3, predicate, -1);
 +      }
 +}
 +
 +static void notice_vtx_gprs(struct r600_bc_vtx *vtx, struct gpr_usage usage[128],
 +                              int id, int predicate)
 +{
 +      notice_gpr_read(&usage[vtx->src_gpr], id, vtx->src_sel_x);
 +
 +      if (vtx->dst_sel_x != 7)
 +              notice_gpr_write(&usage[vtx->dst_gpr], id, 0, predicate, -1);
 +      if (vtx->dst_sel_y != 7)
 +              notice_gpr_write(&usage[vtx->dst_gpr], id, 1, predicate, -1);
 +      if (vtx->dst_sel_z != 7)
 +              notice_gpr_write(&usage[vtx->dst_gpr], id, 2, predicate, -1);
 +      if (vtx->dst_sel_w != 7)
 +              notice_gpr_write(&usage[vtx->dst_gpr], id, 3, predicate, -1);
 +}
 +
 +static void notice_export_gprs(struct r600_bc_cf *cf, struct gpr_usage usage[128],
 +                              struct r600_bc_cf *export_cf[128], int export_remap[128])
 +{
 +      //TODO handle other memory operations
 +      struct gpr_usage *output = &usage[cf->output.gpr];
 +      int id = MAX4(output->last_write[0], output->last_write[1],
 +              output->last_write[2], output->last_write[3]);
 +      id += 0x100;
 +      id &= ~0xFF;
 +
 +      export_cf[cf->output.gpr] = cf;
 +      export_remap[cf->output.gpr] = id;
 +      if (cf->output.swizzle_x < 4)
 +              notice_gpr_read(output, id, cf->output.swizzle_x);
 +      if (cf->output.swizzle_y < 4)
 +              notice_gpr_read(output, id, cf->output.swizzle_y);
 +      if (cf->output.swizzle_z < 4)
 +              notice_gpr_read(output, id, cf->output.swizzle_z);
 +      if (cf->output.swizzle_w < 4)
 +              notice_gpr_read(output, id, cf->output.swizzle_w);
 +}
 +
 +static struct gpr_usage_range *find_src_range(struct gpr_usage *usage, int id)
 +{
 +      unsigned i;
 +      for (i = 0; i < usage->nranges; ++i) {
 +              struct gpr_usage_range* range = &usage->ranges[i];
 +
 +              if (range->start < id && id <= range->end)
 +                      return range;
 +      }
 +      return NULL;
 +}
 +
 +static struct gpr_usage_range *find_dst_range(struct gpr_usage *usage, int id)
 +{
 +      unsigned i;
 +      for (i = 0; i < usage->nranges; ++i) {
 +              struct gpr_usage_range* range = &usage->ranges[i];
 +              int end = range->end;
 +
 +              if (range->start <= id && (id < end || end == -1))
 +                      return range;
 +      }
 +      return NULL;
 +}
 +
 +static int is_barrier_needed(struct gpr_usage *usage, int id, unsigned chan, int last_barrier)
 +{
 +      if (usage->last_write[chan] != (id & ~0xFF))
 +              return usage->last_write[chan] >= last_barrier;
 +      else
 +              return 0;
 +}
 +
 +static int is_intersection(struct gpr_usage_range* a, struct gpr_usage_range* b)
 +{
 +      return a->start <= b->end && b->start < a->end;
 +}
 +
 +static int rate_replacement(struct gpr_usage usage[128], unsigned current, unsigned gpr,
 +                              struct gpr_usage_range* range)
 +{
 +      int max_gpr = gpr + MAX2(range->rel_block, 1);
 +      int best_start = 0x3FFFFFFF, best_end = 0x3FFFFFFF;
 +      unsigned i;
 +
 +      for (; gpr < max_gpr; ++gpr) {
 +
 +              if (gpr >= 128) /* relative gpr block won't fit into clause temporaries */
 +                      return -1; /* forget it */
 +
 +              if (gpr == current) /* ignore ranges of to be replaced register */
 +                      continue;
 +
 +              for (i = 0; i < usage[gpr].nranges; ++i) {
 +                      if (usage[gpr].ranges[i].replacement < gpr)
 +                              continue; /* ignore already remapped ranges */
 +
 +                      if (is_intersection(&usage[gpr].ranges[i], range))
 +                              return -1; /* forget it if usages overlap */
 +
 +                      if (range->start >= usage[gpr].ranges[i].end)
 +                              best_start = MIN2(best_start, range->start - usage[gpr].ranges[i].end);
 +
 +                      if (range->end != -1 && range->end <= usage[gpr].ranges[i].start)
 +                              best_end = MIN2(best_end, usage[gpr].ranges[i].start - range->end);
 +              }
 +      }
 +      return best_start + best_end;
 +}
 +
 +static void find_replacement(struct gpr_usage usage[128], unsigned current,
 +                              struct gpr_usage_range *range)
 +{
 +      unsigned i, j;
 +      int best_gpr = -1, best_rate = 0x7FFFFFFF;
 +
 +      if (range->replacement == current) 
 +		return; /* register prefers not to be remapped */
 +
 +      if (range->replacement != -1 && range->replacement <= current) {
 +              struct gpr_usage_range *other = find_src_range(&usage[range->replacement], range->start);
 +              if (other && other->replacement != -1)
 +                      range->replacement = other->replacement;
 +      }
 +
 +      if (range->replacement != -1 && range->replacement < current) {
 +              int rate = rate_replacement(usage, current, range->replacement, range);
 +
 +		/* check if preferred replacement can be used */
 +              if (rate != -1) {
 +                      best_rate = rate;
 +                      best_gpr = range->replacement;
 +              }
 +      }
 +
 +      if (best_gpr == -1 && (range->start & ~0xFF) == (range->end & ~0xFF)) {
 +              /* register is just used inside one ALU clause */
 +              /* try to use clause temporaries for it */
 +              for (i = 127; i > 123; --i) {
 +                      int rate = rate_replacement(usage, current, i, range);
 +
 +                      if (rate == -1) /* can't be used because ranges overlap */
 +                              continue;
 +
 +                      if (rate < best_rate) {
 +                              best_rate = rate;
 +                              best_gpr = i;
 +
 +                              /* can't get better than this */
 +                              if (rate == 0)
 +                                      break;
 +                      }
 +              }
 +      }
 +
 +      if (best_gpr == -1) {
 +              for (i = 0; i < current; ++i) {
 +                      int rate = rate_replacement(usage, current, i, range);
 +
 +                      if (rate == -1) /* can't be used because ranges overlap */
 +                              continue;
 +
 +                      if (rate < best_rate) {
 +                              best_rate = rate;
 +                              best_gpr = i;
 +
 +                              /* can't get better than this */
 +                              if (rate == 0)
 +                                      break;
 +                      }
 +              }
 +      }
 +
 +      if (best_gpr != -1) {
 +              struct gpr_usage_range *reservation = add_gpr_usage_range(&usage[best_gpr]);
 +              reservation->replacement = best_gpr;
 +              reservation->rel_block = -1;
 +              reservation->start = range->start;
 +              reservation->end = range->end;
 +      } else
 +              best_gpr = current;
 +
 +      range->replacement = best_gpr;
 +      if (range->rel_block == -1)
 +              return; /* no relative block to handle we are done here */
 +
 +	/* set preferred register for the whole relative register block */
 +      for (i = current + 1, ++best_gpr; i < current + range->rel_block; ++i, ++best_gpr) {
 +              for (j = 0; j < usage[i].nranges; ++j) {
 +                      if (is_intersection(&usage[i].ranges[j], range))
 +                              usage[i].ranges[j].replacement = best_gpr;
 +              }
 +      }
 +}
 +
 +static void replace_alu_gprs(struct r600_bc *bc, struct r600_bc_alu *alu, struct gpr_usage usage[128],
 +                              int id, int last_barrier, unsigned *barrier)
 +{
 +      struct gpr_usage *cur_usage;
 +      struct gpr_usage_range *range;
 +      unsigned src, num_src;
 +
 +      num_src = r600_bc_get_num_operands(bc, alu);
 +      for (src = 0; src < num_src; ++src) {
 +		// constants don't matter
 +              if (!is_gpr(alu->src[src].sel))
 +                      continue;
 +
 +              cur_usage = &usage[alu->src[src].sel];
 +              range = find_src_range(cur_usage, id);
 +              alu->src[src].sel = range->replacement;
 +
 +              *barrier |= is_barrier_needed(cur_usage, id, alu->src[src].chan, last_barrier);
 +      }
 +
 +      if (alu->dst.write) {
 +              cur_usage = &usage[alu->dst.sel];
 +              range = find_dst_range(cur_usage, id);
 +              if (!range || range->replacement == -1) {
 +                      if (!alu->is_op3)
 +                              alu->dst.write = 0;
 +                      else
 +			/*TODO: really check that register 123 is usable */
 +                              alu->dst.sel = 123;
 +              } else {
 +                      alu->dst.sel = range->replacement;
 +                      *barrier |= is_barrier_needed(cur_usage, id, alu->dst.chan, last_barrier);
 +              }
 +      }
 +      if (alu->dst.write) {
 +              if (alu->dst.rel)
 +                      notice_gpr_rel_last_write(usage, id, alu->dst.chan);
 +              else
 +                      notice_gpr_last_write(cur_usage, id, alu->dst.chan);
 +      }
 +}
 +
 +static void replace_tex_gprs(struct r600_bc_tex *tex, struct gpr_usage usage[128],
 +                              int id, int last_barrier, unsigned *barrier)
 +{
 +      struct gpr_usage *cur_usage = &usage[tex->src_gpr];
 +      struct gpr_usage_range *range = find_src_range(cur_usage, id);
 +
 +      if (tex->src_rel) {
 +              *barrier = 1;
 +        } else {
 +              if (tex->src_sel_x < 4)
 +                      *barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_x, last_barrier);
 +              if (tex->src_sel_y < 4)
 +                      *barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_y, last_barrier);
 +              if (tex->src_sel_z < 4)
 +                      *barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_z, last_barrier);
 +              if (tex->src_sel_w < 4)
 +                      *barrier |= is_barrier_needed(cur_usage, id, tex->src_sel_w, last_barrier);
 +      }
 +      tex->src_gpr = range->replacement;
 +
 +      cur_usage = &usage[tex->dst_gpr];
 +
 +      range = find_dst_range(cur_usage, id);
 +      if (range) {
 +              tex->dst_gpr = range->replacement;
 +
 +              if (tex->dst_rel) {
 +                      if (tex->dst_sel_x != 7)
 +                              notice_gpr_rel_last_write(usage, id, tex->dst_sel_x);
 +                      if (tex->dst_sel_y != 7)
 +                              notice_gpr_rel_last_write(usage, id, tex->dst_sel_y);
 +                      if (tex->dst_sel_z != 7)
 +                              notice_gpr_rel_last_write(usage, id, tex->dst_sel_z);
 +                      if (tex->dst_sel_w != 7)
 +                              notice_gpr_rel_last_write(usage, id, tex->dst_sel_w);
 +              } else {
 +                      if (tex->dst_sel_x != 7)
 +                              notice_gpr_last_write(cur_usage, id, tex->dst_sel_x);
 +                      if (tex->dst_sel_y != 7)
 +                              notice_gpr_last_write(cur_usage, id, tex->dst_sel_y);
 +                      if (tex->dst_sel_z != 7)
 +                              notice_gpr_last_write(cur_usage, id, tex->dst_sel_z);
 +                      if (tex->dst_sel_w != 7)
 +                              notice_gpr_last_write(cur_usage, id, tex->dst_sel_w);
 +              }
 +      } else {
 +              tex->dst_gpr = 123;
 +      }
 +}
 +
 +static void replace_vtx_gprs(struct r600_bc_vtx *vtx, struct gpr_usage usage[128],
 +                              int id, int last_barrier, unsigned *barrier)
 +{
 +      struct gpr_usage *cur_usage = &usage[vtx->src_gpr];
 +      struct gpr_usage_range *range = find_src_range(cur_usage, id);
 +
 +      *barrier |= is_barrier_needed(cur_usage, id, vtx->src_sel_x, last_barrier);
 +
 +      vtx->src_gpr = range->replacement;
 +
 +      cur_usage = &usage[vtx->dst_gpr];
 +      range = find_dst_range(cur_usage, id);
 +      if (range) {
 +              vtx->dst_gpr = range->replacement;
 +
 +              if (vtx->dst_sel_x != 7)
 +                      notice_gpr_last_write(cur_usage, id, vtx->dst_sel_x);
 +              if (vtx->dst_sel_y != 7)
 +                      notice_gpr_last_write(cur_usage, id, vtx->dst_sel_y);
 +              if (vtx->dst_sel_z != 7)
 +                      notice_gpr_last_write(cur_usage, id, vtx->dst_sel_z);
 +              if (vtx->dst_sel_w != 7)
 +                      notice_gpr_last_write(cur_usage, id, vtx->dst_sel_w);
 +      } else {
 +              vtx->dst_gpr = 123;
 +      }
 +}
 +
 +static void replace_export_gprs(struct r600_bc_cf *cf, struct gpr_usage usage[128],
 +                              int id, int last_barrier)
 +{
 +      //TODO handle other memory operations
 +      struct gpr_usage *cur_usage = &usage[cf->output.gpr];
 +      struct gpr_usage_range *range = find_src_range(cur_usage, id);
 +
 +      cf->barrier = 0;
 +      if (cf->output.swizzle_x < 4)
 +              cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_x, last_barrier);
 +      if (cf->output.swizzle_y < 4)
 +              cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_y, last_barrier);
 +      if (cf->output.swizzle_z < 4)
 +              cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_z, last_barrier);
 +      if (cf->output.swizzle_w < 4)
 +              cf->barrier |= is_barrier_needed(cur_usage, -1, cf->output.swizzle_w, last_barrier);
 +
 +      cf->output.gpr = range->replacement;
 +}
 +
 +static void optimize_alu_inst(struct r600_bc *bc, struct r600_bc_cf *cf, struct r600_bc_alu *alu)
 +{
 +      struct r600_bc_alu *alu_next;
 +      unsigned chan;
 +      unsigned src, num_src;
 +
 +      /* check if a MOV could be optimized away */
 +      if (alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV) {
 +
 +              /* destination equals source? */
 +              if (alu->dst.sel != alu->src[0].sel ||
 +                      alu->dst.chan != alu->src[0].chan)
 +                      return;
 +
 +              /* any special handling for the source? */
 +              if (alu->src[0].rel || alu->src[0].neg || alu->src[0].abs)
 +                      return;
 +
 +              /* any special handling for destination? */
 +              if (alu->dst.rel || alu->dst.clamp)
 +                      return;
 +
 +              /* ok find next instruction group and check if ps/pv is used */
 +              for (alu_next = alu; !alu_next->last; alu_next = NEXT_ALU(alu_next));
 +
 +              if (alu_next->list.next != &cf->alu) {
 +                      chan = is_alu_reduction_inst(bc, alu) ? 0 : alu->dst.chan;
 +                      for (alu_next = NEXT_ALU(alu_next); alu_next; alu_next = NEXT_ALU(alu_next)) {
 +                              num_src = r600_bc_get_num_operands(bc, alu_next);
 +                              for (src = 0; src < num_src; ++src) {
 +                                      if (alu_next->src[src].sel == V_SQ_ALU_SRC_PV &&
 +                                              alu_next->src[src].chan == chan)
 +                                              return;
 +
 +                                      if (alu_next->src[src].sel == V_SQ_ALU_SRC_PS)
 +                                              return;
 +                              }
 +
 +                              if (alu_next->last)
 +                                      break;
 +                      }
 +              }
 +
 +              r600_bc_remove_alu(cf, alu);
 +      }
 +}
 +
 +static void optimize_export_inst(struct r600_bc *bc, struct r600_bc_cf *cf)
 +{
 +      struct r600_bc_cf *prev = LIST_ENTRY(struct r600_bc_cf, cf->list.prev, list);
 +      if (&prev->list == &bc->cf ||
 +              prev->inst != cf->inst ||
 +              prev->output.type != cf->output.type ||
 +              prev->output.elem_size != cf->output.elem_size ||
 +              prev->output.swizzle_x != cf->output.swizzle_x ||
 +              prev->output.swizzle_y != cf->output.swizzle_y ||
 +              prev->output.swizzle_z != cf->output.swizzle_z ||
 +              prev->output.swizzle_w != cf->output.swizzle_w)
 +              return;
 +
 +      if ((prev->output.burst_count + cf->output.burst_count) > 16)
 +              return;
 +
 +      if ((prev->output.gpr + prev->output.burst_count) == cf->output.gpr &&
 +              (prev->output.array_base + prev->output.burst_count) == cf->output.array_base) {
 +
 +              prev->output.burst_count += cf->output.burst_count;
 +              r600_bc_remove_cf(bc, cf);
 +
 +      } else if (prev->output.gpr == (cf->output.gpr + cf->output.burst_count) &&
 +              prev->output.array_base == (cf->output.array_base + cf->output.burst_count)) {
 +
 +              cf->output.burst_count += prev->output.burst_count;
 +              r600_bc_remove_cf(bc, prev);
 +      }
 +}
 +
 +static void r600_bc_optimize(struct r600_bc *bc)
 +{
 +      struct r600_bc_cf *cf, *next_cf;
 +      struct r600_bc_alu *first, *next_alu;
 +      struct r600_bc_alu *alu;
 +      struct r600_bc_vtx *vtx;
 +      struct r600_bc_tex *tex;
 +      struct gpr_usage usage[128];
 +
 +      /* assume that each gpr is exported only once */
 +      struct r600_bc_cf *export_cf[128] = { NULL };
 +      int export_remap[128];
 +
 +      int id, cond_start, barrier[bc->nstack];
 +      unsigned i, j, stack, predicate, old_stack;
 +
 +      memset(&usage, 0, sizeof(usage));
 +      for (i = 0; i < 128; ++i) {
 +              usage[i].first_write = -1;
 +              usage[i].last_write[0] = -1;
 +              usage[i].last_write[1] = -1;
 +              usage[i].last_write[2] = -1;
 +              usage[i].last_write[3] = -1;
 +      }
 +
 +      /* first gather some informations about the gpr usage */
 +      id = 0; stack = 0;
 +      LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
 +              old_stack = stack;
 +              if (stack == 0)
 +                      cond_start = stack;
 +
 +              switch (r600_bc_cf_class(cf)) {
 +              case CF_CLASS_ALU:
 +                      predicate = 0;
 +                      first = NULL;
 +                      LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
 +                              if (!first)
 +                                      first = alu;
 +                              notice_alu_src_gprs(bc, alu, usage, id);
 +                              if (alu->last) {
 +                                      notice_alu_dst_gprs(first, usage, id, predicate || stack > 0);
 +                                      first = NULL;
 +                                      ++id;
 +                              }
 +                              if (is_alu_pred_inst(bc, alu))
 +                                      predicate++;
 +                      }
 +                      if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3)
 +                              stack += predicate;
 +                      else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3)
 +                              stack -= 1;
 +                      else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3)
 +                              stack -= 2;
 +                      break;
 +              case CF_CLASS_TEXTURE:
 +                      LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
 +                              notice_tex_gprs(bc, tex, usage, id++, stack > 0);
 +                      }
 +                      break;
 +              case CF_CLASS_VERTEX:
 +                      LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
 +                              notice_vtx_gprs(vtx, usage, id++, stack > 0);
 +                      }
 +                      break;
 +              case CF_CLASS_EXPORT:
 +                      notice_export_gprs(cf, usage, export_cf, export_remap);
 +                      continue; // don't increment id
 +              case CF_CLASS_OTHER:
 +                      switch (cf->inst) {
 +                      case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
 +                      case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
 +                      case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
 +                              break;
 +
 +                      case V_SQ_CF_WORD1_SQ_CF_INST_POP:
 +                              stack -= cf->pop_count;
 +                              break;
 +
 +                      default:
 +                              // TODO implement loop handling
 +                              goto out;
 +                      }
 +              }
 +
 +              /* extend last_write after conditional block */
 +              if (stack == 0 && old_stack != 0)
 +                      for (i = 0; i < 128; ++i)
 +                              for (j = 0; j < 4; ++j)
 +                                      if (usage[i].last_write[j] >= cond_start)
 +                                              usage[i].last_write[j] = id;
 +
 +              id += 0x100;
 +              id &= ~0xFF;
 +      }
 +      assert(stack == 0);
 +
 +      /* try to optimize gpr usage */
 +      for (i = 0; i < 124; ++i) {
 +              for (j = 0; j < usage[i].nranges; ++j) {
 +                      struct gpr_usage_range *range = &usage[i].ranges[j];
 +                      if (range->start == -1)
 +                              /* can't rearange shader inputs */
 +                              range->replacement = i;
 +                      else if (range->end == -1)
 +                              /* gpr isn't used any more after this instruction */
 +                              range->replacement = -1;
 +                      else
 +                              find_replacement(usage, i, range);
 +
 +                      if (range->replacement == i)
 +                              bc->ngpr = i;
 +                      else if (range->replacement < i && range->replacement > bc->ngpr)
 +                              bc->ngpr = range->replacement;
 +              }
 +      }
 +      bc->ngpr++;
 +
 +      /* apply the changes */
 +      for (i = 0; i < 128; ++i) {
 +              usage[i].last_write[0] = -1;
 +              usage[i].last_write[1] = -1;
 +              usage[i].last_write[2] = -1;
 +              usage[i].last_write[3] = -1;
 +      }
 +      barrier[0] = 0;
 +      id = 0; stack = 0;
 +      LIST_FOR_EACH_ENTRY_SAFE(cf, next_cf, &bc->cf, list) {
 +              old_stack = stack;
 +              switch (r600_bc_cf_class(cf)) {
 +              case CF_CLASS_ALU:
 +                      predicate = 0;
 +                      first = NULL;
 +                      cf->barrier = 0;
 +                      LIST_FOR_EACH_ENTRY_SAFE(alu, next_alu, &cf->alu, list) {
 +                              replace_alu_gprs(bc, alu, usage, id, barrier[stack], &cf->barrier);
 +                              if (alu->last)
 +                                      ++id;
 +
 +                              if (is_alu_pred_inst(bc, alu))
 +                                      predicate++;
 +
 +                              if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3)
 +                                      optimize_alu_inst(bc, cf, alu);
 +                      }
 +                      if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3)
 +                              stack += predicate;
 +                      else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3)
 +                              stack -= 1;
 +                      else if (cf->inst == V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3)
 +                              stack -= 2;
 +                      if (LIST_IS_EMPTY(&cf->alu)) {
 +                              r600_bc_remove_cf(bc, cf);
 +                              cf = NULL;
 +                      }
 +                      break;
 +              case CF_CLASS_TEXTURE:
 +                      cf->barrier = 0;
 +                      LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
 +                              replace_tex_gprs(tex, usage, id++, barrier[stack], &cf->barrier);
 +                      }
 +                      break;
 +              case CF_CLASS_VERTEX:
 +                      cf->barrier = 0;
 +                      LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
 +                              replace_vtx_gprs(vtx, usage, id++, barrier[stack], &cf->barrier);
 +                      }
 +                      break;
 +              case CF_CLASS_EXPORT:
 +                      continue; // don't increment id
 +              case CF_CLASS_OTHER:
 +                      if (cf->inst == V_SQ_CF_WORD1_SQ_CF_INST_POP) {
 +                              cf->barrier = 0;
 +                              stack -= cf->pop_count;
 +                      }
 +                      break;
 +              }
 +
 +              id &= ~0xFF;
 +              if (cf && cf->barrier)
 +                      barrier[old_stack] = id;
 +
 +              for (i = old_stack + 1; i <= stack; ++i)
 +                      barrier[i] = barrier[old_stack];
 +
 +              id += 0x100;
 +              if (stack != 0) /* ensure exports are placed outside of conditional blocks */
 +                      continue;
 +
 +              for (i = 0; i < 128; ++i) {
 +                      if (!export_cf[i] || id < export_remap[i])
 +                              continue;
 +
 +                      r600_bc_move_cf(bc, export_cf[i], next_cf);
 +                      replace_export_gprs(export_cf[i], usage, export_remap[i], barrier[stack]);
 +                      if (export_cf[i]->barrier)
 +                              barrier[stack] = id - 1;
 +                      next_cf = LIST_ENTRY(struct r600_bc_cf, export_cf[i]->list.next, list);
 +                      optimize_export_inst(bc, export_cf[i]);
 +                      export_cf[i] = NULL;
 +              }
 +      }
 +      assert(stack == 0);
 +
 +out:
 +      for (i = 0; i < 128; ++i) {
 +              free(usage[i].ranges);
 +      }
 +}
 +
  int r600_bc_build(struct r600_bc *bc)
  {
        struct r600_bc_cf *cf;
        struct r600_bc_alu *alu;
        struct r600_bc_vtx *vtx;
        struct r600_bc_tex *tex;
 +      struct r600_bc_cf *exports[4] = { NULL };
        uint32_t literal[4];
        unsigned nliteral;
        unsigned addr;
                bc->nstack = 1;
        }
  
 +      //r600_bc_optimize(bc);
 +
        /* first path compute addr of each CF block */
        /* addr start after all the CF instructions */
 -      addr = bc->cf_last->id + 2;
 +      addr = LIST_ENTRY(struct r600_bc_cf, bc->cf.prev, list)->id + 2;
        LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
 -              switch (cf->inst) {
 -              case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
 -              case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
 -              case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
 -              case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
 +              switch (r600_bc_cf_class(cf)) {
 +              case CF_CLASS_ALU:
                        break;
 -              case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
 +              case CF_CLASS_TEXTURE:
 +              case CF_CLASS_VERTEX:
                        /* fetch node need to be 16 bytes aligned*/
                        addr += 3;
                        addr &= 0xFFFFFFFCUL;
                        break;
 -              case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
 -              case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
 -              case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
 -              case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
 +              case CF_CLASS_EXPORT:
 +                      if (cf->inst == BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT))
 +                              exports[cf->output.type] = cf;
                        break;
 -              case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_POP:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
 +              case CF_CLASS_OTHER:
                        break;
                default:
                        R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
                addr += cf->ndw;
                bc->ndw = cf->addr + cf->ndw;
        }
 +
 +      /* set export done on last export of each type */
 +      for (i = 0; i < 4; ++i) {
 +              if (exports[i]) {
 +                      exports[i]->inst = BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE);
 +              }
 +      }
 +
        free(bc->bytecode);
        bc->bytecode = calloc(1, bc->ndw * 4);
        if (bc->bytecode == NULL)
                        r = r600_bc_cf_build(bc, cf);
                if (r)
                        return r;
 -              switch (cf->inst) {
 -              case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
 -              case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
 -              case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
 -              case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
 +              switch (r600_bc_cf_class(cf)) {
 +              case CF_CLASS_ALU:
                        nliteral = 0;
                        memset(literal, 0, sizeof(literal));
                        LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
                                }
                        }
                        break;
 -              case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
 +              case CF_CLASS_VERTEX:
                        LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
                                r = r600_bc_vtx_build(bc, vtx, addr);
                                if (r)
                                addr += 4;
                        }
                        break;
 -              case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
 +              case CF_CLASS_TEXTURE:
                        LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
                                r = r600_bc_tex_build(bc, tex, addr);
                                if (r)
                                addr += 4;
                        }
                        break;
 -              case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
 -              case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
 -              case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
 -              case EG_V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_POP:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
 +              case CF_CLASS_EXPORT:
 +              case CF_CLASS_OTHER:
                        break;
                default:
                        R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
@@@ -2571,10 -1730,13 +2625,10 @@@ void r600_bc_dump(struct r600_bc *bc
        LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
                id = cf->id;
  
 -              switch (cf->inst) {
 -              case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
 -              case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP_AFTER << 3):
 -              case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_POP2_AFTER << 3):
 -              case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
 +              switch (r600_bc_cf_class(cf)) {
 +              case CF_CLASS_ALU:
                        fprintf(stderr, "%04d %08X ALU ", id, bc->bytecode[id]);
 -                      fprintf(stderr, "ADDR:%d ", cf->addr);
 +                      fprintf(stderr, "ADDR:%04d ", cf->addr);
                        fprintf(stderr, "KCACHE_MODE0:%X ", cf->kcache[0].mode);
                        fprintf(stderr, "KCACHE_BANK0:%X ", cf->kcache[0].bank);
                        fprintf(stderr, "KCACHE_BANK1:%X\n", cf->kcache[1].bank);
                        fprintf(stderr, "KCACHE_MODE1:%X ", cf->kcache[1].mode);
                        fprintf(stderr, "KCACHE_ADDR0:%X ", cf->kcache[0].addr);
                        fprintf(stderr, "KCACHE_ADDR1:%X ", cf->kcache[1].addr);
 +                      fprintf(stderr, "BARRIER:%d ", cf->barrier);
                        fprintf(stderr, "COUNT:%d\n", cf->ndw / 2);
                        break;
 -              case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
 +              case CF_CLASS_TEXTURE:
 +              case CF_CLASS_VERTEX:
                        fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
 -                      fprintf(stderr, "ADDR:%d\n", cf->addr);
 +                      fprintf(stderr, "ADDR:%04d\n", cf->addr);
                        id++;
                        fprintf(stderr, "%04d %08X TEX/VTX ", id, bc->bytecode[id]);
                        fprintf(stderr, "INST:%d ", cf->inst);
 +                      fprintf(stderr, "BARRIER:%d ", cf->barrier);
                        fprintf(stderr, "COUNT:%d\n", cf->ndw / 4);
                        break;
 -              case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
 -              case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
 +              case CF_CLASS_EXPORT:
                        fprintf(stderr, "%04d %08X EXPORT ", id, bc->bytecode[id]);
 -                      fprintf(stderr, "GPR:%X ", cf->output.gpr);
 +                      fprintf(stderr, "GPR:%d ", cf->output.gpr);
                        fprintf(stderr, "ELEM_SIZE:%X ", cf->output.elem_size);
                        fprintf(stderr, "ARRAY_BASE:%X ", cf->output.array_base);
                        fprintf(stderr, "TYPE:%X\n", cf->output.type);
                        fprintf(stderr, "SWIZ_Y:%X ", cf->output.swizzle_y);
                        fprintf(stderr, "SWIZ_Z:%X ", cf->output.swizzle_z);
                        fprintf(stderr, "SWIZ_W:%X ", cf->output.swizzle_w);
 -                      fprintf(stderr, "BARRIER:%X ", cf->output.barrier);
 -                      fprintf(stderr, "INST:%d ", cf->output.inst);
 -                      fprintf(stderr, "BURST_COUNT:%d ", cf->output.burst_count);
 -                      fprintf(stderr, "EOP:%X\n", cf->output.end_of_program);
 +                      fprintf(stderr, "BARRIER:%d ", cf->barrier);
 +                      fprintf(stderr, "INST:%d ", cf->inst);
 +                      fprintf(stderr, "BURST_COUNT:%d\n", cf->output.burst_count);
                        break;
 -              case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_POP:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
 -              case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
 +              case CF_CLASS_OTHER:
                        fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
 -                      fprintf(stderr, "ADDR:%d\n", cf->cf_addr);
 +                      fprintf(stderr, "ADDR:%04d\n", cf->cf_addr);
                        id++;
                        fprintf(stderr, "%04d %08X CF ", id, bc->bytecode[id]);
                        fprintf(stderr, "INST:%d ", cf->inst);
                        fprintf(stderr, "COND:%X ", cf->cond);
 +                      fprintf(stderr, "BARRIER:%d ", cf->barrier);
                        fprintf(stderr, "POP_COUNT:%X\n", cf->pop_count);
                        break;
                }
                }
  
                LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
-                       //TODO
+                       fprintf(stderr, "%04d %08X   ", id, bc->bytecode[id]);
+                       fprintf(stderr, "INST:%d ", tex->inst);
+                       fprintf(stderr, "RESOURCE_ID:%d ", tex->resource_id);
+                       fprintf(stderr, "SRC(GPR:%d ", tex->src_gpr);
+                       fprintf(stderr, "REL:%d)\n", tex->src_rel);
+                       id++;
+                       fprintf(stderr, "%04d %08X   ", id, bc->bytecode[id]);
+                       fprintf(stderr, "DST(GPR:%d ", tex->dst_gpr);
+                       fprintf(stderr, "REL:%d ", tex->dst_rel);
+                       fprintf(stderr, "SEL_X:%d ", tex->dst_sel_x);
+                       fprintf(stderr, "SEL_Y:%d ", tex->dst_sel_y);
+                       fprintf(stderr, "SEL_Z:%d ", tex->dst_sel_z);
+                       fprintf(stderr, "SEL_W:%d) ", tex->dst_sel_w);
+                       fprintf(stderr, "LOD_BIAS:%d ", tex->lod_bias);
+                       fprintf(stderr, "COORD_TYPE_X:%d ", tex->coord_type_x);
+                       fprintf(stderr, "COORD_TYPE_Y:%d ", tex->coord_type_y);
+                       fprintf(stderr, "COORD_TYPE_Z:%d ", tex->coord_type_z);
+                       fprintf(stderr, "COORD_TYPE_W:%d\n", tex->coord_type_w);
+                       id++;
+                       fprintf(stderr, "%04d %08X   ", id, bc->bytecode[id]);
+                       fprintf(stderr, "OFFSET_X:%d ", tex->offset_x);
+                       fprintf(stderr, "OFFSET_Y:%d ", tex->offset_y);
+                       fprintf(stderr, "OFFSET_Z:%d ", tex->offset_z);
+                       fprintf(stderr, "SAMPLER_ID:%d ", tex->sampler_id);
+                       fprintf(stderr, "SRC(SEL_X:%d ", tex->src_sel_x);
+                       fprintf(stderr, "SEL_Y:%d ", tex->src_sel_y);
+                       fprintf(stderr, "SEL_Z:%d ", tex->src_sel_z);
+                       fprintf(stderr, "SEL_W:%d)\n", tex->src_sel_w);
+                       id++;
+                       fprintf(stderr, "%04d %08X   \n", id, bc->bytecode[id]);
+                       id++;
                }
  
                LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
+                       fprintf(stderr, "%04d %08X   ", id, bc->bytecode[id]);
+                       fprintf(stderr, "INST:%d ", vtx->inst);
+                       fprintf(stderr, "FETCH_TYPE:%d ", vtx->fetch_type);
+                       fprintf(stderr, "BUFFER_ID:%d\n", vtx->buffer_id);
+                       id++;
+                       /* This assumes that no semantic fetches exist */
+                       fprintf(stderr, "%04d %08X   ", id, bc->bytecode[id]);
+                       fprintf(stderr, "SRC(GPR:%d ", vtx->src_gpr);
+                       fprintf(stderr, "SEL_X:%d) ", vtx->src_sel_x);
+                       fprintf(stderr, "MEGA_FETCH_COUNT:%d ", vtx->mega_fetch_count);
+                       fprintf(stderr, "DST(GPR:%d ", vtx->dst_gpr);
+                       fprintf(stderr, "SEL_X:%d ", vtx->dst_sel_x);
+                       fprintf(stderr, "SEL_Y:%d ", vtx->dst_sel_y);
+                       fprintf(stderr, "SEL_Z:%d ", vtx->dst_sel_z);
+                       fprintf(stderr, "SEL_W:%d) ", vtx->dst_sel_w);
+                       fprintf(stderr, "USE_CONST_FIELDS:%d ", vtx->use_const_fields);
+                       fprintf(stderr, "DATA_FORMAT:%d ", vtx->data_format);
+                       fprintf(stderr, "NUM_FORMAT_ALL:%d ", vtx->num_format_all);
+                       fprintf(stderr, "FORMAT_COMP_ALL:%d ", vtx->format_comp_all);
+                       fprintf(stderr, "SRF_MODE_ALL:%d\n", vtx->srf_mode_all);
+                       id++;
+                       fprintf(stderr, "%04d %08X   \n", id, bc->bytecode[id]);
                        //TODO
+                       id++;
+                       fprintf(stderr, "%04d %08X   \n", id, bc->bytecode[id]);
+                       id++;
                }
        }
  
        fprintf(stderr, "--------------------------------------\n");
  }
  
- void r600_cf_vtx(struct r600_vertex_element *ve, u32 *bytecode, unsigned count)
static void r600_cf_vtx(struct r600_vertex_element *ve, u32 *bytecode, unsigned count)
  {
        struct r600_pipe_state *rstate;
        unsigned i = 0;
                                0xFFFFFFFF, ve->fetch_shader);
  }
  
- void r600_cf_vtx_tc(struct r600_vertex_element *ve, u32 *bytecode, unsigned count)
- {
-       struct r600_pipe_state *rstate;
-       unsigned i = 0;
-       if (count > 8) {
-               bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
-               bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
-                                               S_SQ_CF_WORD1_BARRIER(1) |
-                                               S_SQ_CF_WORD1_COUNT(8 - 1);
-               bytecode[i++] = S_SQ_CF_WORD0_ADDR(40 >> 1);
-               bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
-                                               S_SQ_CF_WORD1_BARRIER(1) |
-                                               S_SQ_CF_WORD1_COUNT((count - 8) - 1);
-       } else {
-               bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1);
-               bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) |
-                                               S_SQ_CF_WORD1_BARRIER(1) |
-                                               S_SQ_CF_WORD1_COUNT(count - 1);
-       }
-       bytecode[i++] = S_SQ_CF_WORD0_ADDR(0);
-       bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN) |
-                       S_SQ_CF_WORD1_BARRIER(1);
-       rstate = &ve->rstate;
-       rstate->id = R600_PIPE_STATE_FETCH_SHADER;
-       rstate->nregs = 0;
-       r600_pipe_state_add_reg(rstate, R_0288A4_SQ_PGM_RESOURCES_FS,
-                               0x00000000, 0xFFFFFFFF, NULL);
-       r600_pipe_state_add_reg(rstate, R_0288DC_SQ_PGM_CF_OFFSET_FS,
-                               0x00000000, 0xFFFFFFFF, NULL);
-       r600_pipe_state_add_reg(rstate, R_028894_SQ_PGM_START_FS,
-                               r600_bo_offset(ve->fetch_shader) >> 8,
-                               0xFFFFFFFF, ve->fetch_shader);
- }
  static void r600_vertex_data_type(enum pipe_format pformat, unsigned *format,
                                unsigned *num_format, unsigned *format_comp)
  {
        }
  
        switch (desc->channel[i].type) {
-               /* Half-floats, floats, doubles */
+       /* Half-floats, floats, ints */
        case UTIL_FORMAT_TYPE_FLOAT:
                switch (desc->channel[i].size) {
                case 16:
                                *format = FMT_16_16_FLOAT;
                                break;
                        case 3:
-                               *format = FMT_16_16_16_FLOAT;
-                               break;
                        case 4:
                                *format = FMT_16_16_16_16_FLOAT;
                                break;
                                *format = FMT_8_8;
                                break;
                        case 3:
-                       //      *format = FMT_8_8_8; /* fails piglit draw-vertices test */
-                       //      break;
                        case 4:
                                *format = FMT_8_8_8_8;
                                break;
                                *format = FMT_16_16;
                                break;
                        case 3:
-                       //      *format = FMT_16_16_16; /* fails piglit draw-vertices test */
-                       //      break;
                        case 4:
                                *format = FMT_16_16_16_16;
                                break;
@@@ -2938,10 -2121,10 +3005,10 @@@ int r600_vertex_elements_build_fetch_sh
  
        for (i = 0; i < ve->count; i++) {
                unsigned vbuffer_index;
-               r600_vertex_data_type(ve->hw_format[i], &format, &num_format, &format_comp);
-               desc = util_format_description(ve->hw_format[i]);
+               r600_vertex_data_type(ve->elements[i].src_format, &format, &num_format, &format_comp);
+               desc = util_format_description(ve->elements[i].src_format);
                if (desc == NULL) {
-                       R600_ERR("unknown format %d\n", ve->hw_format[i]);
+                       R600_ERR("unknown format %d\n", ve->elements[i].src_format);
                        r600_bo_reference(rctx->radeon, &ve->fetch_shader, NULL);
                        return -EINVAL;
                }
index 519245f3af258fd947c8e7b552de4ee0a7f7aebd,921d0d984546e73a7f0c80c0d9cdae6738608306..453c29790c158b68309f7b73b64a983ddef60aa7
@@@ -34,7 -34,7 +34,7 @@@ struct r600_bc_alu_src 
        unsigned                        neg;
        unsigned                        abs;
        unsigned                        rel;
-       u32                             *value;
+       uint32_t                        value;
  };
  
  struct r600_bc_alu_dst {
@@@ -108,6 -108,8 +108,6 @@@ struct r600_bc_vtx 
  struct r600_bc_output {
        unsigned                        array_base;
        unsigned                        type;
 -      unsigned                        end_of_program;
 -      unsigned                        inst;
        unsigned                        elem_size;
        unsigned                        gpr;
        unsigned                        swizzle_x;
        unsigned                        swizzle_z;
        unsigned                        swizzle_w;
        unsigned                        burst_count;
 -      unsigned                        barrier;
  };
  
  struct r600_bc_kcache {
@@@ -132,7 -135,6 +132,7 @@@ struct r600_bc_cf 
        unsigned                        cond;
        unsigned                        pop_count;
        unsigned                        cf_addr; /* control flow addr */
 +      unsigned                        barrier;
        struct r600_bc_kcache           kcache[2];
        unsigned                        r6xx_uses_waterfall;
        struct list_head                alu;
@@@ -201,8 -203,6 +201,6 @@@ int r600_bc_add_cfinst(struct r600_bc *
  int r600_bc_add_alu_type(struct r600_bc *bc, const struct r600_bc_alu *alu, int type);
  void r600_bc_special_constants(u32 value, unsigned *sel, unsigned *neg);
  void r600_bc_dump(struct r600_bc *bc);
- void r600_cf_vtx(struct r600_vertex_element *ve, u32 *bytecode, unsigned count);
- void r600_cf_vtx_tc(struct r600_vertex_element *ve, u32 *bytecode, unsigned count);
  
  int r600_vertex_elements_build_fetch_shader(struct r600_pipe_context *rctx, struct r600_vertex_element *ve);
  
index 68b625cc3b4ec59997045dc508fadc736df5765b,62d108f3518f7467caa4825d699d0c5c06a540a6..34094001b755f77162bfba7a37e66d267dc8ff5e
  #include <tgsi/tgsi_util.h>
  #include <util/u_blitter.h>
  #include <util/u_double_list.h>
+ #include <util/u_format_s3tc.h>
  #include <util/u_transfer.h>
  #include <util/u_surface.h>
  #include <util/u_pack_color.h>
  #include <util/u_memory.h>
  #include <util/u_inlines.h>
+ #include "util/u_upload_mgr.h"
  #include <pipebuffer/pb_buffer.h>
  #include "r600.h"
  #include "r600d.h"
@@@ -42,7 -44,6 +44,7 @@@
  #include "r600_shader.h"
  #include "r600_pipe.h"
  #include "r600_state_inlines.h"
 +#include "r600_video_context.h"
  
  /*
   * pipe_context
@@@ -69,8 -70,30 +71,30 @@@ static void r600_flush(struct pipe_cont
  #endif
        r600_context_flush(&rctx->ctx);
  
-       r600_upload_flush(rctx->rupload_vb);
-       r600_upload_flush(rctx->rupload_const);
+       /* XXX This shouldn't be really necessary, but removing it breaks some tests.
+        * Needless buffer reallocations may significantly increase memory consumption,
+        * so getting rid of this call is important. */
+       u_upload_flush(rctx->vbuf_mgr->uploader);
+ }
+ static void r600_update_num_contexts(struct r600_screen *rscreen,
+                                      int diff)
+ {
+       pipe_mutex_lock(rscreen->mutex_num_contexts);
+       if (diff > 0) {
+               rscreen->num_contexts++;
+               if (rscreen->num_contexts > 1)
+                       util_slab_set_thread_safety(&rscreen->pool_buffers,
+                                                   UTIL_SLAB_MULTITHREADED);
+       } else {
+               rscreen->num_contexts--;
+               if (rscreen->num_contexts <= 1)
+                       util_slab_set_thread_safety(&rscreen->pool_buffers,
+                                                   UTIL_SLAB_SINGLETHREADED);
+       }
+       pipe_mutex_unlock(rscreen->mutex_num_contexts);
  }
  
  static void r600_destroy_context(struct pipe_context *context)
  
        rctx->context.delete_depth_stencil_alpha_state(&rctx->context, rctx->custom_dsa_flush);
  
-       r600_end_vertex_translate(rctx);
        r600_context_fini(&rctx->ctx);
  
        util_blitter_destroy(rctx->blitter);
                free(rctx->states[i]);
        }
  
-       r600_upload_destroy(rctx->rupload_vb);
-       r600_upload_destroy(rctx->rupload_const);
+       u_vbuf_mgr_destroy(rctx->vbuf_mgr);
+       util_slab_destroy(&rctx->pool_transfers);
  
-       if (rctx->tran.translate_cache)
-               translate_cache_destroy(rctx->tran.translate_cache);
+       r600_update_num_contexts(rctx->screen, -1);
  
-       FREE(rctx->ps_resource);
-       FREE(rctx->vs_resource);
        FREE(rctx);
  }
  
@@@ -108,6 -126,9 +127,9 @@@ static struct pipe_context *r600_create
  
        if (rctx == NULL)
                return NULL;
+       r600_update_num_contexts(rscreen, 1);
        rctx->context.winsys = rscreen->screen.winsys;
        rctx->context.screen = screen;
        rctx->context.priv = priv;
        r600_init_query_functions(rctx);
        r600_init_context_resource_functions(rctx);
        r600_init_surface_functions(rctx);
+       rctx->context.draw_vbo = r600_draw_vbo;
  
        switch (r600_get_family(rctx->radeon)) {
        case CHIP_R600:
        case CHIP_RV730:
        case CHIP_RV710:
        case CHIP_RV740:
-               rctx->context.draw_vbo = r600_draw_vbo;
                r600_init_state_functions(rctx);
                if (r600_context_init(&rctx->ctx, rctx->radeon)) {
                        r600_destroy_context(&rctx->context);
        case CHIP_BARTS:
        case CHIP_TURKS:
        case CHIP_CAICOS:
-               rctx->context.draw_vbo = evergreen_draw;
                evergreen_init_state_functions(rctx);
                if (evergreen_context_init(&rctx->ctx, rctx->radeon)) {
                        r600_destroy_context(&rctx->context);
                return NULL;
        }
  
-       rctx->rupload_vb = r600_upload_create(rctx, 128 * 1024, 16);
-       if (rctx->rupload_vb == NULL) {
-               r600_destroy_context(&rctx->context);
-               return NULL;
-       }
+       util_slab_create(&rctx->pool_transfers,
+                        sizeof(struct pipe_transfer), 64,
+                        UTIL_SLAB_SINGLETHREADED);
  
-       rctx->rupload_const = r600_upload_create(rctx, 128 * 1024, 256);
-       if (rctx->rupload_const == NULL) {
+       rctx->vbuf_mgr = u_vbuf_mgr_create(&rctx->context, 1024 * 1024, 256,
+                                          PIPE_BIND_VERTEX_BUFFER |
+                                          PIPE_BIND_INDEX_BUFFER |
+                                          PIPE_BIND_CONSTANT_BUFFER,
+                                          U_VERTEX_FETCH_DWORD_ALIGNED);
+       if (!rctx->vbuf_mgr) {
                r600_destroy_context(&rctx->context);
                return NULL;
        }
  
        rctx->blitter = util_blitter_create(&rctx->context);
        if (rctx->blitter == NULL) {
-               FREE(rctx);
-               return NULL;
-       }
-       rctx->tran.translate_cache = translate_cache_create();
-       if (rctx->tran.translate_cache == NULL) {
-               FREE(rctx);
-               return NULL;
-       }
-       rctx->vs_resource = CALLOC(R600_RESOURCE_ARRAY_SIZE, sizeof(struct r600_pipe_state));
-       if (!rctx->vs_resource) {
-               FREE(rctx);
-               return NULL;
-       }
-       rctx->ps_resource = CALLOC(R600_RESOURCE_ARRAY_SIZE, sizeof(struct r600_pipe_state));
-       if (!rctx->ps_resource) {
-               FREE(rctx);
+               r600_destroy_context(&rctx->context);
                return NULL;
        }
  
@@@ -284,13 -288,16 +289,16 @@@ static int r600_get_param(struct pipe_s
                return 1;
  
        /* Unsupported features (boolean caps). */
-       case PIPE_CAP_TIMER_QUERY:
        case PIPE_CAP_STREAM_OUTPUT:
        case PIPE_CAP_PRIMITIVE_RESTART:
        case PIPE_CAP_INDEP_BLEND_FUNC: /* FIXME allow this */
        case PIPE_CAP_INSTANCED_DRAWING:
                return 0;
  
+       case PIPE_CAP_ARRAY_TEXTURES:
+               /* fix once the CS checker upstream is fixed */
+               return debug_get_bool_option("R600_ARRAY_TEXTURE", FALSE);
        /* Texturing. */
        case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
        case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
        case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
                return 0;
  
+       /* Timer queries, present when the clock frequency is non zero. */
+       case PIPE_CAP_TIMER_QUERY:
+               return r600_get_clock_crystal_freq(rscreen->radeon) != 0;
        default:
                R600_ERR("r600: unknown param %d\n", param);
                return 0;
@@@ -385,7 -396,7 +397,7 @@@ static int r600_get_shader_param(struc
        case PIPE_SHADER_CAP_MAX_CONSTS:
                return 256; //max native parameters
        case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
-               return 1;
+               return R600_MAX_CONST_BUFFERS;
        case PIPE_SHADER_CAP_MAX_PREDS:
                return 0; /* FIXME */
        case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
@@@ -441,9 -452,14 +453,14 @@@ static boolean r600_is_format_supported
                retval |= PIPE_BIND_DEPTH_STENCIL;
        }
  
-       if ((usage & PIPE_BIND_VERTEX_BUFFER) &&
-           r600_is_vertex_format_supported(format))
-               retval |= PIPE_BIND_VERTEX_BUFFER;
+       if (usage & PIPE_BIND_VERTEX_BUFFER) {
+               struct r600_screen *rscreen = (struct r600_screen *)screen;
+               enum radeon_family family = r600_get_family(rscreen->radeon);
+               if (r600_is_vertex_format_supported(format, family)) {
+                       retval |= PIPE_BIND_VERTEX_BUFFER;
+               }
+       }
  
        if (usage & PIPE_BIND_TRANSFER_READ)
                retval |= PIPE_BIND_TRANSFER_READ;
@@@ -462,6 -478,8 +479,8 @@@ static void r600_destroy_screen(struct 
  
        radeon_decref(rscreen->radeon);
  
+       util_slab_destroy(&rscreen->pool_buffers);
+       pipe_mutex_destroy(rscreen->mutex_num_contexts);
        FREE(rscreen);
  }
  
@@@ -485,10 -503,16 +504,17 @@@ struct pipe_screen *r600_screen_create(
        rscreen->screen.get_paramf = r600_get_paramf;
        rscreen->screen.is_format_supported = r600_is_format_supported;
        rscreen->screen.context_create = r600_create_context;
 +      rscreen->screen.video_context_create = r600_video_create;
        r600_init_screen_resource_functions(&rscreen->screen);
  
        rscreen->tiling_info = r600_get_tiling_info(radeon);
+       util_format_s3tc_init();
+       util_slab_create(&rscreen->pool_buffers,
+                        sizeof(struct r600_resource_buffer), 64,
+                        UTIL_SLAB_SINGLETHREADED);
+       pipe_mutex_init(rscreen->mutex_num_contexts);
  
        return &rscreen->screen;
  }
index c982471a04f7f8fa08fe63402c3fb4dcc1b68aa0,13ccc3fdc1fbabcba518295f3f793124666987c1..240c8f1ffd086c8d31143913d892086ea01b5860
@@@ -28,6 -28,7 +28,7 @@@
  #include "r600_pipe.h"
  #include "r600_asm.h"
  #include "r600_sq.h"
+ #include "r600_formats.h"
  #include "r600_opcodes.h"
  #include "r600d.h"
  #include <stdio.h>
@@@ -175,6 -176,13 +176,13 @@@ static void r600_pipe_shader_ps(struct 
                                R_0288CC_SQ_PGM_CF_OFFSET_PS,
                                0x00000000, 0xFFFFFFFF, NULL);
  
+       if (rshader->fs_write_all) {
+               r600_pipe_state_add_reg(rstate, R_028808_CB_COLOR_CONTROL,
+                                       S_028808_MULTIWRITE_ENABLE(1),
+                                       S_028808_MULTIWRITE_ENABLE(1),
+                                       NULL);
+       }
        if (rshader->uses_kill) {
                /* only set some bits here, the other bits are set in the dsa state */
                r600_pipe_state_add_reg(rstate,
                                0xFFFFFFFF, NULL);
  }
  
- int r600_pipe_shader(struct pipe_context *ctx, struct r600_pipe_shader *shader)
static int r600_pipe_shader(struct pipe_context *ctx, struct r600_pipe_shader *shader)
  {
        struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
        struct r600_shader *rshader = &shader->shader;
        return 0;
  }
  
- int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader, u32 **literals);
+ static int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader);
  int r600_pipe_shader_create(struct pipe_context *ctx, struct r600_pipe_shader *shader, const struct tgsi_token *tokens)
  {
        static int dump_shaders = -1;
        struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
-       u32 *literals;
        int r;
  
          /* Would like some magic "get_bool_option_once" routine.
                tgsi_dump(tokens, 0);
        }
        shader->shader.family = r600_get_family(rctx->radeon);
-       r = r600_shader_from_tgsi(tokens, &shader->shader, &literals);
+       r = r600_shader_from_tgsi(tokens, &shader->shader);
        if (r) {
                R600_ERR("translation from TGSI failed !\n");
                return r;
        }
        r = r600_bc_build(&shader->shader.bc);
-       free(literals);
        if (r) {
                R600_ERR("building bytecode failed !\n");
                return r;
@@@ -274,6 -281,15 +281,15 @@@ void r600_pipe_shader_destroy(struct pi
   */
  struct r600_shader_tgsi_instruction;
  
+ struct r600_shader_src {
+       unsigned                                sel;
+       unsigned                                swizzle[4];
+       unsigned                                neg;
+       unsigned                                abs;
+       unsigned                                rel;
+       uint32_t                                value[4];
+ };
  struct r600_shader_ctx {
        struct tgsi_shader_info                 info;
        struct tgsi_parse_context               parse;
        unsigned                                type;
        unsigned                                file_offset[TGSI_FILE_COUNT];
        unsigned                                temp_reg;
+       unsigned                                ar_reg;
        struct r600_shader_tgsi_instruction     *inst_info;
        struct r600_bc                          *bc;
        struct r600_shader                      *shader;
+       struct r600_shader_src                  src[3];
        u32                                     *literals;
        u32                                     nliterals;
        u32                                     max_driver_temp_used;
@@@ -492,12 -510,182 +510,182 @@@ static int evergreen_gpr_count(struct r
        return ctx->num_interp_gpr;
  }
  
- int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader, u32 **literals)
+ static void tgsi_src(struct r600_shader_ctx *ctx,
+                    const struct tgsi_full_src_register *tgsi_src,
+                    struct r600_shader_src *r600_src)
+ {
+       memset(r600_src, 0, sizeof(*r600_src));
+       r600_src->swizzle[0] = tgsi_src->Register.SwizzleX;
+       r600_src->swizzle[1] = tgsi_src->Register.SwizzleY;
+       r600_src->swizzle[2] = tgsi_src->Register.SwizzleZ;
+       r600_src->swizzle[3] = tgsi_src->Register.SwizzleW;
+       r600_src->neg = tgsi_src->Register.Negate;
+       r600_src->abs = tgsi_src->Register.Absolute;
+       if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
+               int index;
+               if ((tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleY) &&
+                       (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleZ) &&
+                       (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleW)) {
+                       index = tgsi_src->Register.Index * 4 + tgsi_src->Register.SwizzleX;
+                       r600_bc_special_constants(ctx->literals[index], &r600_src->sel, &r600_src->neg);
+                       if (r600_src->sel != V_SQ_ALU_SRC_LITERAL)
+                               return;
+               }
+               index = tgsi_src->Register.Index;
+               r600_src->sel = V_SQ_ALU_SRC_LITERAL;
+               memcpy(r600_src->value, ctx->literals + index * 4, sizeof(r600_src->value));
+       } else {
+               if (tgsi_src->Register.Indirect)
+                       r600_src->rel = V_SQ_REL_RELATIVE;
+               r600_src->sel = tgsi_src->Register.Index;
+               r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
+       }
+ }
+ static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx, unsigned int offset, unsigned int dst_reg)
+ {
+       struct r600_bc_vtx vtx;
+       unsigned int ar_reg;
+       int r;
+       if (offset) {
+               struct r600_bc_alu alu;
+               memset(&alu, 0, sizeof(alu));
+               alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT);
+               alu.src[0].sel = ctx->ar_reg;
+               alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
+               alu.src[1].value = offset;
+               alu.dst.sel = dst_reg;
+               alu.dst.write = 1;
+               alu.last = 1;
+               if ((r = r600_bc_add_alu(ctx->bc, &alu)))
+                       return r;
+               ar_reg = dst_reg;
+       } else {
+               ar_reg = ctx->ar_reg;
+       }
+       memset(&vtx, 0, sizeof(vtx));
+       vtx.fetch_type = 2;             /* VTX_FETCH_NO_INDEX_OFFSET */
+       vtx.src_gpr = ar_reg;
+       vtx.mega_fetch_count = 16;
+       vtx.dst_gpr = dst_reg;
+       vtx.dst_sel_x = 0;              /* SEL_X */
+       vtx.dst_sel_y = 1;              /* SEL_Y */
+       vtx.dst_sel_z = 2;              /* SEL_Z */
+       vtx.dst_sel_w = 3;              /* SEL_W */
+       vtx.data_format = FMT_32_32_32_32_FLOAT;
+       vtx.num_format_all = 2;         /* NUM_FORMAT_SCALED */
+       vtx.format_comp_all = 1;        /* FORMAT_COMP_SIGNED */
+       vtx.srf_mode_all = 1;           /* SRF_MODE_NO_ZERO */
+       if ((r = r600_bc_add_vtx(ctx->bc, &vtx)))
+               return r;
+       return 0;
+ }
+ static int tgsi_split_constant(struct r600_shader_ctx *ctx)
+ {
+       struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+       struct r600_bc_alu alu;
+       int i, j, k, nconst, r;
+       for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
+               if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
+                       nconst++;
+               }
+               tgsi_src(ctx, &inst->Src[i], &ctx->src[i]);
+       }
+       for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
+               if (inst->Src[i].Register.File != TGSI_FILE_CONSTANT) {
+                       continue;
+               }
+               if (ctx->src[i].rel) {
+                       int treg = r600_get_temp(ctx);
+                       if ((r = tgsi_fetch_rel_const(ctx, ctx->src[i].sel - 512, treg)))
+                               return r;
+                       ctx->src[i].sel = treg;
+                       ctx->src[i].rel = 0;
+                       j--;
+               } else if (j > 0) {
+                       int treg = r600_get_temp(ctx);
+                       for (k = 0; k < 4; k++) {
+                               memset(&alu, 0, sizeof(struct r600_bc_alu));
+                               alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV);
+                               alu.src[0].sel = ctx->src[i].sel;
+                               alu.src[0].chan = k;
+                               alu.src[0].rel = ctx->src[i].rel;
+                               alu.dst.sel = treg;
+                               alu.dst.chan = k;
+                               alu.dst.write = 1;
+                               if (k == 3)
+                                       alu.last = 1;
+                               r = r600_bc_add_alu(ctx->bc, &alu);
+                               if (r)
+                                       return r;
+                       }
+                       ctx->src[i].sel = treg;
+                       ctx->src[i].rel =0;
+                       j--;
+               }
+       }
+       return 0;
+ }
+ /* need to move any immediate into a temp - for trig functions which use literal for PI stuff */
+ static int tgsi_split_literal_constant(struct r600_shader_ctx *ctx)
+ {
+       struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+       struct r600_bc_alu alu;
+       int i, j, k, nliteral, r;
+       for (i = 0, nliteral = 0; i < inst->Instruction.NumSrcRegs; i++) {
+               if (ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
+                       nliteral++;
+               }
+       }
+       for (i = 0, j = nliteral - 1; i < inst->Instruction.NumSrcRegs; i++) {
+               if (j > 0 && ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) {
+                       int treg = r600_get_temp(ctx);
+                       for (k = 0; k < 4; k++) {
+                               memset(&alu, 0, sizeof(struct r600_bc_alu));
+                               alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV);
+                               alu.src[0].sel = ctx->src[i].sel;
+                               alu.src[0].chan = k;
+                               alu.src[0].value = ctx->src[i].value[k];
+                               alu.dst.sel = treg;
+                               alu.dst.chan = k;
+                               alu.dst.write = 1;
+                               if (k == 3)
+                                       alu.last = 1;
+                               r = r600_bc_add_alu(ctx->bc, &alu);
+                               if (r)
+                                       return r;
+                       }
+                       ctx->src[i].sel = treg;
+                       j--;
+               }
+       }
+       return 0;
+ }
+ static int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader)
  {
        struct tgsi_full_immediate *immediate;
+       struct tgsi_full_property *property;
        struct r600_shader_ctx ctx;
        struct r600_bc_output output[32];
 -      unsigned output_done, noutput;
 +      unsigned noutput;
        unsigned opcode;
        int i, r = 0, pos0;
  
        ctx.file_offset[TGSI_FILE_CONSTANT] = 512;
  
        ctx.file_offset[TGSI_FILE_IMMEDIATE] = V_SQ_ALU_SRC_LITERAL;
-       ctx.temp_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] +
+       ctx.ar_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] +
                        ctx.info.file_count[TGSI_FILE_TEMPORARY];
+       ctx.temp_reg = ctx.ar_reg + 1;
  
        ctx.nliterals = 0;
        ctx.literals = NULL;
+       shader->fs_write_all = FALSE;
        while (!tgsi_parse_end_of_tokens(&ctx.parse)) {
                tgsi_parse_token(&ctx.parse);
                switch (ctx.parse.FullToken.Token.Type) {
                        ctx.max_driver_temp_used = 0;
                        /* reserve first tmp for everyone */
                        r600_get_temp(&ctx);
                        opcode = ctx.parse.FullToken.FullInstruction.Instruction.Opcode;
+                       if ((r = tgsi_split_constant(&ctx)))
+                               goto out_err;
+                       if ((r = tgsi_split_literal_constant(&ctx)))
+                               goto out_err;
                        if (ctx.bc->chiprev == CHIPREV_EVERGREEN)
                                ctx.inst_info = &eg_shader_tgsi_instruction[opcode];
                        else
                                goto out_err;
                        break;
                case TGSI_TOKEN_TYPE_PROPERTY:
+                       property = &ctx.parse.FullToken.FullProperty;
+                       if (property->Property.PropertyName == TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS) {
+                               if (property->u[0].Data == 1)
+                                       shader->fs_write_all = TRUE;
+                       }
                        break;
                default:
                        R600_ERR("unsupported token type %d\n", ctx.parse.FullToken.Token.Type);
                output[i].swizzle_y = 1;
                output[i].swizzle_z = 2;
                output[i].swizzle_w = 3;
 -              output[i].barrier = 1;
+               output[i].burst_count = 1;
                output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
                output[i].array_base = i - pos0;
 -              output[i].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT);
                switch (ctx.type) {
                case TGSI_PROCESSOR_VERTEX:
                        if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
                        output[i].swizzle_y = 1;
                        output[i].swizzle_z = 2;
                        output[i].swizzle_w = 3;
 -                      output[i].barrier = 1;
+                       output[i].burst_count = 1;
                        output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
                        output[i].array_base = 0;
 -                      output[i].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT);
                        noutput++;
                }
        }
                output[0].swizzle_y = 7;
                output[0].swizzle_z = 7;
                output[0].swizzle_w = 7;
 -              output[0].barrier = 1;
+               output[0].burst_count = 1;
                output[0].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
                output[0].array_base = 0;
 -              output[0].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT);
                noutput++;
        }
 -      /* set export done on last export of each type */
 -      for (i = noutput - 1, output_done = 0; i >= 0; i--) {
 -              if (i == (noutput - 1)) {
 -                      output[i].end_of_program = 1;
 -              }
 -              if (!(output_done & (1 << output[i].type))) {
 -                      output_done |= (1 << output[i].type);
 -                      output[i].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE);
 -              }
 -      }
        /* add output to bytecode */
        for (i = 0; i < noutput; i++) {
                r = r600_bc_add_output(ctx.bc, &output[i]);
                if (r)
                        goto out_err;
        }
-       *literals = ctx.literals;
+       free(ctx.literals);
        tgsi_parse_free(&ctx.parse);
        return 0;
  out_err:
@@@ -724,40 -942,22 +926,22 @@@ static int tgsi_end(struct r600_shader_
        return 0;
  }
  
- static int tgsi_src(struct r600_shader_ctx *ctx,
-                       const struct tgsi_full_src_register *tgsi_src,
-                       struct r600_bc_alu_src *r600_src)
+ static void r600_bc_src(struct r600_bc_alu_src *bc_src,
+                       const struct r600_shader_src *shader_src,
+                       unsigned chan)
  {
-       memset(r600_src, 0, sizeof(struct r600_bc_alu_src));
-       r600_src->neg = tgsi_src->Register.Negate;
-       r600_src->abs = tgsi_src->Register.Absolute;
-       if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
-               int index;
-               if((tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleY) &&
-                       (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleZ) &&
-                       (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleW)) {
-                       index = tgsi_src->Register.Index * 4 + tgsi_src->Register.SwizzleX;
-                       r600_bc_special_constants(ctx->literals[index], &r600_src->sel, &r600_src->neg);
-                       if (r600_src->sel != V_SQ_ALU_SRC_LITERAL)
-                               return 0;
-               }
-               index = tgsi_src->Register.Index;
-               r600_src->sel = V_SQ_ALU_SRC_LITERAL;
-               r600_src->value = ctx->literals + index * 4;
-       } else {
-               if (tgsi_src->Register.Indirect)
-                       r600_src->rel = V_SQ_REL_RELATIVE;
-               r600_src->sel = tgsi_src->Register.Index;
-               r600_src->sel += ctx->file_offset[tgsi_src->Register.File];
-       }
-       return 0;
+       bc_src->sel = shader_src->sel;
+       bc_src->chan = shader_src->swizzle[chan];
+       bc_src->neg = shader_src->neg;
+       bc_src->abs = shader_src->abs;
+       bc_src->rel = shader_src->rel;
+       bc_src->value = shader_src->value[bc_src->chan];
  }
  
- static int tgsi_dst(struct r600_shader_ctx *ctx,
-                       const struct tgsi_full_dst_register *tgsi_dst,
-                       unsigned swizzle,
-                       struct r600_bc_alu_dst *r600_dst)
+ static void tgsi_dst(struct r600_shader_ctx *ctx,
+                    const struct tgsi_full_dst_register *tgsi_dst,
+                    unsigned swizzle,
+                    struct r600_bc_alu_dst *r600_dst)
  {
        struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
  
        if (inst->Instruction.Saturate) {
                r600_dst->clamp = 1;
        }
-       return 0;
- }
- static unsigned tgsi_chan(const struct tgsi_full_src_register *tgsi_src, unsigned swizzle)
- {
-       switch (swizzle) {
-       case 0:
-               return tgsi_src->Register.SwizzleX;
-       case 1:
-               return tgsi_src->Register.SwizzleY;
-       case 2:
-               return tgsi_src->Register.SwizzleZ;
-       case 3:
-               return tgsi_src->Register.SwizzleW;
-       default:
-               return 0;
-       }
- }
- static int tgsi_split_constant(struct r600_shader_ctx *ctx, struct r600_bc_alu_src r600_src[3])
- {
-       struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
-       struct r600_bc_alu alu;
-       int i, j, k, nconst, r;
-       for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) {
-               if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
-                       nconst++;
-               }
-               r = tgsi_src(ctx, &inst->Src[i], &r600_src[i]);
-               if (r) {
-                       return r;
-               }
-       }
-       for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) {
-               if (j > 0 && inst->Src[i].Register.File == TGSI_FILE_CONSTANT) {
-                       int treg = r600_get_temp(ctx);
-                       for (k = 0; k < 4; k++) {
-                               memset(&alu, 0, sizeof(struct r600_bc_alu));
-                               alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV);
-                               alu.src[0].sel = r600_src[i].sel;
-                               alu.src[0].chan = k;
-                               alu.src[0].rel = r600_src[i].rel;
-                               alu.dst.sel = treg;
-                               alu.dst.chan = k;
-                               alu.dst.write = 1;
-                               if (k == 3)
-                                       alu.last = 1;
-                               r = r600_bc_add_alu(ctx->bc, &alu);
-                               if (r)
-                                       return r;
-                       }
-                       r600_src[i].sel = treg;
-                       r600_src[i].rel =0;
-                       j--;
-               }
-       }
-       return 0;
- }
- /* need to move any immediate into a temp - for trig functions which use literal for PI stuff */
- static int tgsi_split_literal_constant(struct r600_shader_ctx *ctx, struct r600_bc_alu_src r600_src[3])
- {
-       struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
-       struct r600_bc_alu alu;
-       int i, j, k, nliteral, r;
-       for (i = 0, nliteral = 0; i < inst->Instruction.NumSrcRegs; i++) {
-               if (r600_src[i].sel == V_SQ_ALU_SRC_LITERAL) {
-                       nliteral++;
-               }
-       }
-       for (i = 0, j = nliteral - 1; i < inst->Instruction.NumSrcRegs; i++) {
-               if (j > 0 && r600_src[i].sel == V_SQ_ALU_SRC_LITERAL) {
-                       int treg = r600_get_temp(ctx);
-                       for (k = 0; k < 4; k++) {
-                               memset(&alu, 0, sizeof(struct r600_bc_alu));
-                               alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV);
-                               alu.src[0].sel = r600_src[i].sel;
-                               alu.src[0].chan = k;
-                               alu.src[0].value = r600_src[i].value;
-                               alu.dst.sel = treg;
-                               alu.dst.chan = k;
-                               alu.dst.write = 1;
-                               if (k == 3)
-                                       alu.last = 1;
-                               r = r600_bc_add_alu(ctx->bc, &alu);
-                               if (r)
-                                       return r;
-                       }
-                       r600_src[i].sel = treg;
-                       j--;
-               }
-       }
-       return 0;
  }
  
  static int tgsi_last_instruction(unsigned writemask)
  static int tgsi_op2_s(struct r600_shader_ctx *ctx, int swap)
  {
        struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
-       struct r600_bc_alu_src r600_src[3];
        struct r600_bc_alu alu;
        int i, j, r;
        int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
  
-       r = tgsi_split_constant(ctx, r600_src);
-       if (r)
-               return r;
-       r = tgsi_split_literal_constant(ctx, r600_src);
-       if (r)
-               return r;
        for (i = 0; i < lasti + 1; i++) {
                if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
                        continue;
  
                memset(&alu, 0, sizeof(struct r600_bc_alu));
-               r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
-               if (r)
-                       return r;
+               tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
  
                alu.inst = ctx->inst_info->r600_opcode;
                if (!swap) {
                        for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
-                               alu.src[j] = r600_src[j];
-                               alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
+                               r600_bc_src(&alu.src[j], &ctx->src[j], i);
                        }
                } else {
-                       alu.src[0] = r600_src[1];
-                       alu.src[0].chan = tgsi_chan(&inst->Src[1], i);
-                       alu.src[1] = r600_src[0];
-                       alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
+                       r600_bc_src(&alu.src[0], &ctx->src[1], i);
+                       r600_bc_src(&alu.src[1], &ctx->src[0], i);
                }
                /* handle some special cases */
                switch (ctx->inst_info->tgsi_opcode) {
@@@ -951,24 -1043,15 +1027,15 @@@ static int tgsi_op2_swap(struct r600_sh
   * r700 - normalize by dividing by 2PI
   * see fdo bug 27901
   */
- static int tgsi_setup_trig(struct r600_shader_ctx *ctx,
-                          struct r600_bc_alu_src r600_src[3])
+ static int tgsi_setup_trig(struct r600_shader_ctx *ctx)
  {
        static float half_inv_pi = 1.0 /(3.1415926535 * 2);
        static float double_pi = 3.1415926535 * 2;
        static float neg_pi = -3.1415926535;
  
-       struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
        int r;
        struct r600_bc_alu alu;
  
-       r = tgsi_split_constant(ctx, r600_src);
-       if (r)
-               return r;
-       r = tgsi_split_literal_constant(ctx, r600_src);
-       if (r)
-               return r;
        memset(&alu, 0, sizeof(struct r600_bc_alu));
        alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD);
        alu.is_op3 = 1;
        alu.dst.sel = ctx->temp_reg;
        alu.dst.write = 1;
  
-       alu.src[0] = r600_src[0];
-       alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
+       r600_bc_src(&alu.src[0], &ctx->src[0], 0);
  
        alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
        alu.src[1].chan = 0;
-       alu.src[1].value = (uint32_t *)&half_inv_pi;
+       alu.src[1].value = *(uint32_t *)&half_inv_pi;
        alu.src[2].sel = V_SQ_ALU_SRC_0_5;
        alu.src[2].chan = 0;
        alu.last = 1;
        alu.src[2].chan = 0;
  
        if (ctx->bc->chiprev == CHIPREV_R600) {
-               alu.src[1].value = (uint32_t *)&double_pi;
-               alu.src[2].value = (uint32_t *)&neg_pi;
+               alu.src[1].value = *(uint32_t *)&double_pi;
+               alu.src[2].value = *(uint32_t *)&neg_pi;
        } else {
                alu.src[1].sel = V_SQ_ALU_SRC_1;
                alu.src[2].sel = V_SQ_ALU_SRC_0_5;
  static int tgsi_trig(struct r600_shader_ctx *ctx)
  {
        struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
-       struct r600_bc_alu_src r600_src[3];
        struct r600_bc_alu alu;
        int i, r;
        int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
  
-       r = tgsi_setup_trig(ctx, r600_src);
+       r = tgsi_setup_trig(ctx);
        if (r)
                return r;
  
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV);
  
                alu.src[0].sel = ctx->temp_reg;
-               r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
-               if (r)
-                       return r;
+               tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
                if (i == lasti)
                        alu.last = 1;
                r = r600_bc_add_alu(ctx->bc, &alu);
  static int tgsi_scs(struct r600_shader_ctx *ctx)
  {
        struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
-       struct r600_bc_alu_src r600_src[3];
        struct r600_bc_alu alu;
        int r;
  
         * X or Y components of the destination vector.
         */
        if (likely(inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_XY)) {
-               r = tgsi_setup_trig(ctx, r600_src);
+               r = tgsi_setup_trig(ctx);
                if (r)
                        return r;
        }
        if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
                memset(&alu, 0, sizeof(struct r600_bc_alu));
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS);
-               r = tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
-               if (r)
-                       return r;
+               tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
  
                alu.src[0].sel = ctx->temp_reg;
                alu.src[0].chan = 0;
        if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
                memset(&alu, 0, sizeof(struct r600_bc_alu));
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN);
-               r = tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
-               if (r)
-                       return r;
+               tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
  
                alu.src[0].sel = ctx->temp_reg;
                alu.src[0].chan = 0;
  
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV);
  
-               r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
-               if (r)
-                       return r;
+               tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
  
                alu.src[0].sel = V_SQ_ALU_SRC_0;
                alu.src[0].chan = 0;
  
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV);
  
-               r = tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
-               if (r)
-                       return r;
+               tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
  
                alu.src[0].sel = V_SQ_ALU_SRC_1;
                alu.src[0].chan = 0;
  
  static int tgsi_kill(struct r600_shader_ctx *ctx)
  {
-       struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
        struct r600_bc_alu alu;
        int i, r;
  
                        alu.src[1].sel = V_SQ_ALU_SRC_1;
                        alu.src[1].neg = 1;
                } else {
-                       r = tgsi_src(ctx, &inst->Src[0], &alu.src[1]);
-                       if (r)
-                               return r;
-                       alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
+                       r600_bc_src(&alu.src[1], &ctx->src[0], i);
                }
                if (i == 3) {
                        alu.last = 1;
@@@ -1214,24 -1280,14 +1264,14 @@@ static int tgsi_lit(struct r600_shader_
  {
        struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
        struct r600_bc_alu alu;
-       struct r600_bc_alu_src r600_src[3];
        int r;
  
-       r = tgsi_split_constant(ctx, r600_src);
-       if (r)
-               return r;
-       r = tgsi_split_literal_constant(ctx, r600_src);
-       if (r)
-               return r;
        /* dst.x, <- 1.0  */
        memset(&alu, 0, sizeof(struct r600_bc_alu));
        alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV);
        alu.src[0].sel  = V_SQ_ALU_SRC_1; /*1.0*/
        alu.src[0].chan = 0;
-       r = tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
-       if (r)
-               return r;
+       tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
        alu.dst.write = (inst->Dst[0].Register.WriteMask >> 0) & 1;
        r = r600_bc_add_alu(ctx->bc, &alu);
        if (r)
        /* dst.y = max(src.x, 0.0) */
        memset(&alu, 0, sizeof(struct r600_bc_alu));
        alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX);
-       alu.src[0] = r600_src[0];
+       r600_bc_src(&alu.src[0], &ctx->src[0], 0);
        alu.src[1].sel  = V_SQ_ALU_SRC_0; /*0.0*/
        alu.src[1].chan = 0;
-       r = tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
-       if (r)
-               return r;
+       tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
        alu.dst.write = (inst->Dst[0].Register.WriteMask >> 1) & 1;
        r = r600_bc_add_alu(ctx->bc, &alu);
        if (r)
        alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV);
        alu.src[0].sel  = V_SQ_ALU_SRC_1;
        alu.src[0].chan = 0;
-       r = tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
-       if (r)
-               return r;
+       tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
        alu.dst.write = (inst->Dst[0].Register.WriteMask >> 3) & 1;
        alu.last = 1;
        r = r600_bc_add_alu(ctx->bc, &alu);
                /* dst.z = log(src.y) */
                memset(&alu, 0, sizeof(struct r600_bc_alu));
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED);
-               alu.src[0] = r600_src[0];
-               alu.src[0].chan = tgsi_chan(&inst->Src[0], 1);
-               r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
-               if (r)
-                       return r;
+               r600_bc_src(&alu.src[0], &ctx->src[0], 1);
+               tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
                alu.last = 1;
                r = r600_bc_add_alu(ctx->bc, &alu);
                if (r)
                /* tmp.x = amd MUL_LIT(src.w, dst.z, src.x ) */
                memset(&alu, 0, sizeof(struct r600_bc_alu));
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT);
-               alu.src[0] = r600_src[0];
-               alu.src[0].chan = tgsi_chan(&inst->Src[0], 3);
+               r600_bc_src(&alu.src[0], &ctx->src[0], 3);
                alu.src[1].sel  = sel;
                alu.src[1].chan = chan;
  
-               alu.src[2] = r600_src[0];
-               alu.src[2].chan = tgsi_chan(&inst->Src[0], 0);
+               r600_bc_src(&alu.src[2], &ctx->src[0], 0);
                alu.dst.sel = ctx->temp_reg;
                alu.dst.chan = 0;
                alu.dst.write = 1;
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE);
                alu.src[0].sel = ctx->temp_reg;
                alu.src[0].chan = 0;
-               r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
-               if (r)
-                       return r;
+               tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
                alu.last = 1;
                r = r600_bc_add_alu(ctx->bc, &alu);
                if (r)
@@@ -1336,10 -1381,7 +1365,7 @@@ static int tgsi_rsq(struct r600_shader_
        alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED);
  
        for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
-               r = tgsi_src(ctx, &inst->Src[i], &alu.src[i]);
-               if (r)
-                       return r;
-               alu.src[i].chan = tgsi_chan(&inst->Src[i], 0);
+               r600_bc_src(&alu.src[i], &ctx->src[i], 0);
                alu.src[i].abs = 1;
        }
        alu.dst.sel = ctx->temp_reg;
@@@ -1363,9 -1405,7 +1389,7 @@@ static int tgsi_helper_tempx_replicate(
                alu.src[0].sel = ctx->temp_reg;
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV);
                alu.dst.chan = i;
-               r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
-               if (r)
-                       return r;
+               tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
                alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
                if (i == 3)
                        alu.last = 1;
@@@ -1385,10 -1425,7 +1409,7 @@@ static int tgsi_trans_srcx_replicate(st
        memset(&alu, 0, sizeof(struct r600_bc_alu));
        alu.inst = ctx->inst_info->r600_opcode;
        for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
-               r = tgsi_src(ctx, &inst->Src[i], &alu.src[i]);
-               if (r)
-                       return r;
-               alu.src[i].chan = tgsi_chan(&inst->Src[i], 0);
+               r600_bc_src(&alu.src[i], &ctx->src[i], 0);
        }
        alu.dst.sel = ctx->temp_reg;
        alu.dst.write = 1;
  
  static int tgsi_pow(struct r600_shader_ctx *ctx)
  {
-       struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
        struct r600_bc_alu alu;
        int r;
  
        /* LOG2(a) */
        memset(&alu, 0, sizeof(struct r600_bc_alu));
        alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE);
-       r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
-       if (r)
-               return r;
-       alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
+       r600_bc_src(&alu.src[0], &ctx->src[0], 0);
        alu.dst.sel = ctx->temp_reg;
        alu.dst.write = 1;
        alu.last = 1;
        /* b * LOG2(a) */
        memset(&alu, 0, sizeof(struct r600_bc_alu));
        alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL);
-       r = tgsi_src(ctx, &inst->Src[1], &alu.src[0]);
-       if (r)
-               return r;
-       alu.src[0].chan = tgsi_chan(&inst->Src[1], 0);
+       r600_bc_src(&alu.src[0], &ctx->src[1], 0);
        alu.src[1].sel = ctx->temp_reg;
        alu.dst.sel = ctx->temp_reg;
        alu.dst.write = 1;
@@@ -1450,16 -1480,8 +1464,8 @@@ static int tgsi_ssg(struct r600_shader_
  {
        struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
        struct r600_bc_alu alu;
-       struct r600_bc_alu_src r600_src[3];
        int i, r;
  
-       r = tgsi_split_constant(ctx, r600_src);
-       if (r)
-               return r;
-       r = tgsi_split_literal_constant(ctx, r600_src);
-       if (r)
-               return r;
        /* tmp = (src > 0 ? 1 : src) */
        for (i = 0; i < 4; i++) {
                memset(&alu, 0, sizeof(struct r600_bc_alu));
                alu.dst.sel = ctx->temp_reg;
                alu.dst.chan = i;
  
-               alu.src[0] = r600_src[0];
-               alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
+               r600_bc_src(&alu.src[0], &ctx->src[0], i);
                alu.src[1].sel = V_SQ_ALU_SRC_1;
+               r600_bc_src(&alu.src[2], &ctx->src[0], i);
  
-               alu.src[2] = r600_src[0];
-               alu.src[2].chan = tgsi_chan(&inst->Src[0], i);
                if (i == 3)
                        alu.last = 1;
                r = r600_bc_add_alu(ctx->bc, &alu);
                memset(&alu, 0, sizeof(struct r600_bc_alu));
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT);
                alu.is_op3 = 1;
-               r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
-               if (r)
-                       return r;
+               tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
  
                alu.src[0].sel = ctx->temp_reg;
                alu.src[0].chan = i;
@@@ -1523,9 -1540,7 +1524,7 @@@ static int tgsi_helper_copy(struct r600
                        alu.dst.chan = i;
                } else {
                        alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV);
-                       r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
-                       if (r)
-                               return r;
+                       tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
                        alu.src[0].sel = ctx->temp_reg;
                        alu.src[0].chan = i;
                }
  static int tgsi_op3(struct r600_shader_ctx *ctx)
  {
        struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
-       struct r600_bc_alu_src r600_src[3];
        struct r600_bc_alu alu;
        int i, j, r;
        int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
  
-       r = tgsi_split_constant(ctx, r600_src);
-       if (r)
-               return r;
-       r = tgsi_split_literal_constant(ctx, r600_src);
-       if (r)
-               return r;
        for (i = 0; i < lasti + 1; i++) {
                if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
                        continue;
                memset(&alu, 0, sizeof(struct r600_bc_alu));
                alu.inst = ctx->inst_info->r600_opcode;
                for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
-                       alu.src[j] = r600_src[j];
-                       alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
+                       r600_bc_src(&alu.src[j], &ctx->src[j], i);
                }
  
-               r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
-               if (r)
-                       return r;
+               tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
                alu.dst.chan = i;
                alu.dst.write = 1;
                alu.is_op3 = 1;
  static int tgsi_dp(struct r600_shader_ctx *ctx)
  {
        struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
-       struct r600_bc_alu_src r600_src[3];
        struct r600_bc_alu alu;
        int i, j, r;
  
-       r = tgsi_split_constant(ctx, r600_src);
-       if (r)
-               return r;
-       r = tgsi_split_literal_constant(ctx, r600_src);
-       if (r)
-               return r;
        for (i = 0; i < 4; i++) {
                memset(&alu, 0, sizeof(struct r600_bc_alu));
                alu.inst = ctx->inst_info->r600_opcode;
                for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
-                       alu.src[j] = r600_src[j];
-                       alu.src[j].chan = tgsi_chan(&inst->Src[j], i);
+                       r600_bc_src(&alu.src[j], &ctx->src[j], i);
                }
  
-               r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
-               if (r)
-                       return r;
+               tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
                alu.dst.chan = i;
                alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
                /* handle some special cases */
@@@ -1661,11 -1654,8 +1638,8 @@@ static int tgsi_tex(struct r600_shader_
                /* Add perspective divide */
                memset(&alu, 0, sizeof(struct r600_bc_alu));
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE);
-               r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
-               if (r)
-                       return r;
+               r600_bc_src(&alu.src[0], &ctx->src[0], 3);
  
-               alu.src[0].chan = tgsi_chan(&inst->Src[0], 3);
                alu.dst.sel = ctx->temp_reg;
                alu.dst.chan = 3;
                alu.last = 1;
                        alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL);
                        alu.src[0].sel = ctx->temp_reg;
                        alu.src[0].chan = 3;
-                       r = tgsi_src(ctx, &inst->Src[0], &alu.src[1]);
-                       if (r)
-                               return r;
-                       alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
+                       r600_bc_src(&alu.src[1], &ctx->src[0], i);
                        alu.dst.sel = ctx->temp_reg;
                        alu.dst.chan = i;
                        alu.dst.write = 1;
                                src2_chan = 0;
                                break;
                        }
-                       r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
-                       if (r)
-                               return r;
-                       alu.src[0].chan = tgsi_chan(&inst->Src[0], src_chan);
-                       r = tgsi_src(ctx, &inst->Src[0], &alu.src[1]);
-                       if (r)
-                               return r;
-                       alu.src[1].chan = tgsi_chan(&inst->Src[0], src2_chan);
+                       r600_bc_src(&alu.src[0], &ctx->src[0], src_chan);
+                       r600_bc_src(&alu.src[1], &ctx->src[0], src2_chan);
                        alu.dst.sel = ctx->temp_reg;
                        alu.dst.chan = i;
                        if (i == 3)
  
                alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
                alu.src[2].chan = 0;
-               alu.src[2].value = (u32*)&one_point_five;
+               alu.src[2].value = *(uint32_t *)&one_point_five;
  
                alu.dst.sel = ctx->temp_reg;
                alu.dst.chan = 0;
  
                alu.src[2].sel = V_SQ_ALU_SRC_LITERAL;
                alu.src[2].chan = 0;
-               alu.src[2].value = (u32*)&one_point_five;
+               alu.src[2].value = *(uint32_t *)&one_point_five;
  
                alu.dst.sel = ctx->temp_reg;
                alu.dst.chan = 1;
                for (i = 0; i < 4; i++) {
                        memset(&alu, 0, sizeof(struct r600_bc_alu));
                        alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV);
-                       r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
-                       if (r)
-                               return r;
-                       alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
+                       r600_bc_src(&alu.src[0], &ctx->src[0], i);
                        alu.dst.sel = ctx->temp_reg;
                        alu.dst.chan = i;
                        if (i == 3)
        memset(&tex, 0, sizeof(struct r600_bc_tex));
        tex.inst = opcode;
        tex.sampler_id = ctx->file_offset[inst->Src[1].Register.File] + inst->Src[1].Register.Index;
-       tex.resource_id = tex.sampler_id;
+       tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
        tex.src_gpr = src_gpr;
        tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
        tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
                tex.coord_type_w = 1;
        }
  
+       if (inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY) {
+               tex.coord_type_z = 0;
+               tex.src_sel_z = 1;
+       } else if (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY)
+               tex.coord_type_z = 0;
        if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D || inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D)
                tex.src_sel_w = 2;
  
  static int tgsi_lrp(struct r600_shader_ctx *ctx)
  {
        struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
-       struct r600_bc_alu_src r600_src[3];
        struct r600_bc_alu alu;
        int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
        unsigned i;
        int r;
  
-       r = tgsi_split_constant(ctx, r600_src);
-       if (r)
-               return r;
-       r = tgsi_split_literal_constant(ctx, r600_src);
-       if (r)
-               return r;
        /* optimize if it's just an equal balance */
-       if(r600_src[0].sel == V_SQ_ALU_SRC_0_5) {
+       if (ctx->src[0].sel == V_SQ_ALU_SRC_0_5) {
                for (i = 0; i < lasti + 1; i++) {
                        if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
                                continue;
  
                        memset(&alu, 0, sizeof(struct r600_bc_alu));
                        alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD);
-                       alu.src[0] = r600_src[1];
-                       alu.src[0].chan = tgsi_chan(&inst->Src[1], i);
-                       alu.src[1] = r600_src[2];
-                       alu.src[1].chan = tgsi_chan(&inst->Src[2], i);
+                       r600_bc_src(&alu.src[0], &ctx->src[1], i);
+                       r600_bc_src(&alu.src[1], &ctx->src[2], i);
                        alu.omod = 3;
-                       r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
-                       if (r)
-                               return r;
+                       tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
                        alu.dst.chan = i;
                        if (i == lasti) {
                                alu.last = 1;
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD);
                alu.src[0].sel = V_SQ_ALU_SRC_1;
                alu.src[0].chan = 0;
-               alu.src[1] = r600_src[0];
-               alu.src[1].chan = tgsi_chan(&inst->Src[0], i);
+               r600_bc_src(&alu.src[1], &ctx->src[0], i);
                alu.src[1].neg = 1;
                alu.dst.sel = ctx->temp_reg;
                alu.dst.chan = i;
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL);
                alu.src[0].sel = ctx->temp_reg;
                alu.src[0].chan = i;
-               alu.src[1] = r600_src[2];
-               alu.src[1].chan = tgsi_chan(&inst->Src[2], i);
+               r600_bc_src(&alu.src[1], &ctx->src[2], i);
                alu.dst.sel = ctx->temp_reg;
                alu.dst.chan = i;
                if (i == lasti) {
                memset(&alu, 0, sizeof(struct r600_bc_alu));
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD);
                alu.is_op3 = 1;
-               alu.src[0] = r600_src[0];
-               alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
-               alu.src[1] = r600_src[1];
-               alu.src[1].chan = tgsi_chan(&inst->Src[1], i);
+               r600_bc_src(&alu.src[0], &ctx->src[0], i);
+               r600_bc_src(&alu.src[1], &ctx->src[1], i);
                alu.src[2].sel = ctx->temp_reg;
                alu.src[2].chan = i;
  
-               r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
-               if (r)
-                       return r;
+               tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
                alu.dst.chan = i;
                if (i == lasti) {
                        alu.last = 1;
  static int tgsi_cmp(struct r600_shader_ctx *ctx)
  {
        struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
-       struct r600_bc_alu_src r600_src[3];
        struct r600_bc_alu alu;
        int i, r;
        int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
  
-       r = tgsi_split_constant(ctx, r600_src);
-       if (r)
-               return r;
-       r = tgsi_split_literal_constant(ctx, r600_src);
-       if (r)
-               return r;
        for (i = 0; i < lasti + 1; i++) {
                if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
                        continue;
  
                memset(&alu, 0, sizeof(struct r600_bc_alu));
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE);
-               alu.src[0] = r600_src[0];
-               alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
-               alu.src[1] = r600_src[2];
-               alu.src[1].chan = tgsi_chan(&inst->Src[2], i);
-               alu.src[2] = r600_src[1];
-               alu.src[2].chan = tgsi_chan(&inst->Src[1], i);
-               r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
-               if (r)
-                       return r;
+               r600_bc_src(&alu.src[0], &ctx->src[0], i);
+               r600_bc_src(&alu.src[1], &ctx->src[2], i);
+               r600_bc_src(&alu.src[2], &ctx->src[1], i);
+               tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
                alu.dst.chan = i;
                alu.dst.write = 1;
                alu.is_op3 = 1;
  static int tgsi_xpd(struct r600_shader_ctx *ctx)
  {
        struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
-       struct r600_bc_alu_src r600_src[3];
        struct r600_bc_alu alu;
        uint32_t use_temp = 0;
        int i, r;
        if (inst->Dst[0].Register.WriteMask != 0xf)
                use_temp = 1;
  
-       r = tgsi_split_constant(ctx, r600_src);
-       if (r)
-               return r;
-       r = tgsi_split_literal_constant(ctx, r600_src);
-       if (r)
-               return r;
        for (i = 0; i < 4; i++) {
                memset(&alu, 0, sizeof(struct r600_bc_alu));
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL);
  
-               alu.src[0] = r600_src[0];
                switch (i) {
                case 0:
-                       alu.src[0].chan = tgsi_chan(&inst->Src[0], 2);
+                       r600_bc_src(&alu.src[0], &ctx->src[0], 2);
                        break;
                case 1:
-                       alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
+                       r600_bc_src(&alu.src[0], &ctx->src[0], 0);
                        break;
                case 2:
-                       alu.src[0].chan = tgsi_chan(&inst->Src[0], 1);
+                       r600_bc_src(&alu.src[0], &ctx->src[0], 1);
                        break;
                case 3:
                        alu.src[0].sel = V_SQ_ALU_SRC_0;
                        alu.src[0].chan = i;
                }
  
-               alu.src[1] = r600_src[1];
                switch (i) {
                case 0:
-                       alu.src[1].chan = tgsi_chan(&inst->Src[1], 1);
+                       r600_bc_src(&alu.src[1], &ctx->src[1], 1);
                        break;
                case 1:
-                       alu.src[1].chan = tgsi_chan(&inst->Src[1], 2);
+                       r600_bc_src(&alu.src[1], &ctx->src[1], 2);
                        break;
                case 2:
-                       alu.src[1].chan = tgsi_chan(&inst->Src[1], 0);
+                       r600_bc_src(&alu.src[1], &ctx->src[1], 0);
                        break;
                case 3:
                        alu.src[1].sel = V_SQ_ALU_SRC_0;
                memset(&alu, 0, sizeof(struct r600_bc_alu));
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD);
  
-               alu.src[0] = r600_src[0];
                switch (i) {
                case 0:
-                       alu.src[0].chan = tgsi_chan(&inst->Src[0], 1);
+                       r600_bc_src(&alu.src[0], &ctx->src[0], 1);
                        break;
                case 1:
-                       alu.src[0].chan = tgsi_chan(&inst->Src[0], 2);
+                       r600_bc_src(&alu.src[0], &ctx->src[0], 2);
                        break;
                case 2:
-                       alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
+                       r600_bc_src(&alu.src[0], &ctx->src[0], 0);
                        break;
                case 3:
                        alu.src[0].sel = V_SQ_ALU_SRC_0;
                        alu.src[0].chan = i;
                }
  
-               alu.src[1] = r600_src[1];
                switch (i) {
                case 0:
-                       alu.src[1].chan = tgsi_chan(&inst->Src[1], 2);
+                       r600_bc_src(&alu.src[1], &ctx->src[1], 2);
                        break;
                case 1:
-                       alu.src[1].chan = tgsi_chan(&inst->Src[1], 0);
+                       r600_bc_src(&alu.src[1], &ctx->src[1], 0);
                        break;
                case 2:
-                       alu.src[1].chan = tgsi_chan(&inst->Src[1], 1);
+                       r600_bc_src(&alu.src[1], &ctx->src[1], 1);
                        break;
                case 3:
                        alu.src[1].sel = V_SQ_ALU_SRC_0;
  
                if (use_temp)
                        alu.dst.sel = ctx->temp_reg;
-               else {
-                       r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
-                       if (r)
-                               return r;
-               }
+               else
+                       tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
                alu.dst.chan = i;
                alu.dst.write = 1;
                alu.is_op3 = 1;
  static int tgsi_exp(struct r600_shader_ctx *ctx)
  {
        struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
-       struct r600_bc_alu_src r600_src[3] = { { 0 } };
        struct r600_bc_alu alu;
        int r;
  
                memset(&alu, 0, sizeof(struct r600_bc_alu));
  
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR);
-               r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
-               if (r)
-                       return r;
-               alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
+               r600_bc_src(&alu.src[0], &ctx->src[0], 0);
  
                alu.dst.sel = ctx->temp_reg;
                alu.dst.chan = 0;
                memset(&alu, 0, sizeof(struct r600_bc_alu));
  
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT);
-               alu.src[0] = r600_src[0];
-               r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
-               if (r)
-                       return r;
-               alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
+               r600_bc_src(&alu.src[0], &ctx->src[0], 0);
  
                alu.dst.sel = ctx->temp_reg;
  //            r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
        if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) {
                memset(&alu, 0, sizeof(struct r600_bc_alu));
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE);
-               r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
-               if (r)
-                       return r;
-               alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
+               r600_bc_src(&alu.src[0], &ctx->src[0], 0);
  
                alu.dst.sel = ctx->temp_reg;
                alu.dst.write = 1;
@@@ -2288,11 -2208,7 +2192,7 @@@ static int tgsi_log(struct r600_shader_
                memset(&alu, 0, sizeof(struct r600_bc_alu));
  
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE);
-               r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
-               if (r)
-                       return r;
-               alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
+               r600_bc_src(&alu.src[0], &ctx->src[0], 0);
  
                alu.dst.sel = ctx->temp_reg;
                alu.dst.chan = 0;
                memset(&alu, 0, sizeof(struct r600_bc_alu));
  
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE);
-               r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
-               if (r)
-                       return r;
-               alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
+               r600_bc_src(&alu.src[0], &ctx->src[0], 0);
  
                alu.dst.sel = ctx->temp_reg;
                alu.dst.chan = 1;
  
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL);
  
-               r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
-               if (r)
-                       return r;
-               alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
+               r600_bc_src(&alu.src[0], &ctx->src[0], 0);
  
                alu.src[1].sel = ctx->temp_reg;
                alu.src[1].chan = 1;
                memset(&alu, 0, sizeof(struct r600_bc_alu));
  
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE);
-               r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
-               if (r)
-                       return r;
-               alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
+               r600_bc_src(&alu.src[0], &ctx->src[0], 0);
  
                alu.dst.sel = ctx->temp_reg;
                alu.dst.write = 1;
@@@ -2451,6 -2355,7 +2339,7 @@@ static int tgsi_eg_arl(struct r600_shad
        struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
        struct r600_bc_alu alu;
        int r;
        memset(&alu, 0, sizeof(struct r600_bc_alu));
  
        switch (inst->Instruction.Opcode) {
                return -1;
        }
  
-       r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
-       if (r)
-               return r;
-       alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
+       r600_bc_src(&alu.src[0], &ctx->src[0], 0);
        alu.last = 1;
-       alu.dst.chan = 0;
-       alu.dst.sel = ctx->temp_reg;
+       alu.dst.sel = ctx->ar_reg;
        alu.dst.write = 1;
        r = r600_bc_add_alu(ctx->bc, &alu);
        if (r)
                return r;
+       /* TODO: Note that the MOVA can be avoided if we never use AR for
+        * indexing non-CB registers in the current ALU clause. Similarly, we
+        * need to load AR from ar_reg again if we started a new clause
+        * between ARL and AR usage. The easy way to do that is to remove
+        * the MOVA here, and load it for the first AR access after ar_reg
+        * has been modified in each clause. */
        memset(&alu, 0, sizeof(struct r600_bc_alu));
        alu.inst = EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT;
-       r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
-       if (r)
-               return r;
-       alu.src[0].sel = ctx->temp_reg;
+       alu.src[0].sel = ctx->ar_reg;
        alu.src[0].chan = 0;
        alu.last = 1;
        r = r600_bc_add_alu(ctx->bc, &alu);
@@@ -2495,26 -2400,48 +2384,48 @@@ static int tgsi_r600_arl(struct r600_sh
        struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
        struct r600_bc_alu alu;
        int r;
-       memset(&alu, 0, sizeof(struct r600_bc_alu));
  
        switch (inst->Instruction.Opcode) {
        case TGSI_OPCODE_ARL:
-               alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR;
+               memset(&alu, 0, sizeof(alu));
+               alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR;
+               r600_bc_src(&alu.src[0], &ctx->src[0], 0);
+               alu.dst.sel = ctx->ar_reg;
+               alu.dst.write = 1;
+               alu.last = 1;
+               if ((r = r600_bc_add_alu(ctx->bc, &alu)))
+                       return r;
+               memset(&alu, 0, sizeof(alu));
+               alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT;
+               alu.src[0].sel = ctx->ar_reg;
+               alu.dst.sel = ctx->ar_reg;
+               alu.dst.write = 1;
+               alu.last = 1;
+               if ((r = r600_bc_add_alu(ctx->bc, &alu)))
+                       return r;
                break;
        case TGSI_OPCODE_ARR:
-               alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA;
+               memset(&alu, 0, sizeof(alu));
+               alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT;
+               r600_bc_src(&alu.src[0], &ctx->src[0], 0);
+               alu.dst.sel = ctx->ar_reg;
+               alu.dst.write = 1;
+               alu.last = 1;
+               if ((r = r600_bc_add_alu(ctx->bc, &alu)))
+                       return r;
                break;
        default:
                assert(0);
                return -1;
        }
  
-       r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
-       if (r)
-               return r;
-       alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
+       memset(&alu, 0, sizeof(alu));
+       alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT;
+       alu.src[0].sel = ctx->ar_reg;
        alu.last = 1;
  
        r = r600_bc_add_alu(ctx->bc, &alu);
@@@ -2534,26 -2461,18 +2445,18 @@@ static int tgsi_opdst(struct r600_shade
                memset(&alu, 0, sizeof(struct r600_bc_alu));
  
                alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL);
-               r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
-               if (r)
-                       return r;
+               tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
  
                if (i == 0 || i == 3) {
                        alu.src[0].sel = V_SQ_ALU_SRC_1;
                } else {
-                       r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
-                       if (r)
-                               return r;
-                       alu.src[0].chan = tgsi_chan(&inst->Src[0], i);
+                       r600_bc_src(&alu.src[0], &ctx->src[0], i);
                }
  
-               if (i == 0 || i == 2) {
+               if (i == 0 || i == 2) {
                        alu.src[1].sel = V_SQ_ALU_SRC_1;
                } else {
-                       r = tgsi_src(ctx, &inst->Src[1], &alu.src[1]);
-                       if (r)
-                               return r;
-                       alu.src[1].chan = tgsi_chan(&inst->Src[1], i);
+                       r600_bc_src(&alu.src[1], &ctx->src[1], i);
                }
                if (i == 3)
                        alu.last = 1;
  
  static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode)
  {
-       struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
        struct r600_bc_alu alu;
        int r;
  
        alu.dst.write = 1;
        alu.dst.chan = 0;
  
-       r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]);
-       if (r)
-               return r;
-       alu.src[0].chan = tgsi_chan(&inst->Src[0], 0);
+       r600_bc_src(&alu.src[0], &ctx->src[0], 0);
        alu.src[1].sel = V_SQ_ALU_SRC_0;
        alu.src[1].chan = 0;
  
index de2668cee163f38d07ec1cb9c689e632f75e8d5d,c365979e43979f2f7ae4e9d248df22938e7239ff..576067ae81e215861d30927b73c14e001c1665e7
@@@ -37,6 -37,7 +37,7 @@@
  #include <util/u_memory.h>
  #include <util/u_inlines.h>
  #include <util/u_framebuffer.h>
+ #include "util/u_transfer.h"
  #include <pipebuffer/pb_buffer.h>
  #include "r600.h"
  #include "r600d.h"
@@@ -94,221 -95,6 +95,6 @@@ void r600_polygon_offset_update(struct 
        }
  }
  
- /* FIXME optimize away spi update when it's not needed */
- static void r600_spi_update(struct r600_pipe_context *rctx)
- {
-       struct r600_pipe_shader *shader = rctx->ps_shader;
-       struct r600_pipe_state rstate;
-       struct r600_shader *rshader = &shader->shader;
-       unsigned i, tmp;
-       rstate.nregs = 0;
-       for (i = 0; i < rshader->ninput; i++) {
-               tmp = S_028644_SEMANTIC(r600_find_vs_semantic_index(&rctx->vs_shader->shader, rshader, i));
-               if (rshader->input[i].centroid)
-                       tmp |= S_028644_SEL_CENTROID(1);
-               if (rshader->input[i].interpolate == TGSI_INTERPOLATE_LINEAR)
-                       tmp |= S_028644_SEL_LINEAR(1);
-               if (rshader->input[i].name == TGSI_SEMANTIC_COLOR ||
-                   rshader->input[i].name == TGSI_SEMANTIC_BCOLOR ||
-                   rshader->input[i].name == TGSI_SEMANTIC_POSITION) {
-                       tmp |= S_028644_FLAT_SHADE(rctx->flatshade);
-               }
-               if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC &&
-                       rctx->sprite_coord_enable & (1 << rshader->input[i].sid)) {
-                       tmp |= S_028644_PT_SPRITE_TEX(1);
-               }
-               r600_pipe_state_add_reg(&rstate, R_028644_SPI_PS_INPUT_CNTL_0 + i * 4, tmp, 0xFFFFFFFF, NULL);
-       }
-       r600_context_pipe_state_set(&rctx->ctx, &rstate);
- }
- void r600_vertex_buffer_update(struct r600_pipe_context *rctx)
- {
-       struct r600_pipe_state *rstate;
-       struct r600_resource *rbuffer;
-       struct pipe_vertex_buffer *vertex_buffer;
-       unsigned i, offset;
-       /* we don't update until we know vertex elements */
-       if (rctx->vertex_elements == NULL || !rctx->nvertex_buffer)
-               return;
-       if (rctx->vertex_elements->incompatible_layout) {
-               /* translate rebind new vertex elements so
-                * return once translated
-                */
-               r600_begin_vertex_translate(rctx);
-               return;
-       }
-       if (rctx->any_user_vbs) {
-               r600_upload_user_buffers(rctx);
-               rctx->any_user_vbs = FALSE;
-       }
-       if (rctx->vertex_elements->vbuffer_need_offset) {
-               /* one resource per vertex elements */
-               rctx->nvs_resource = rctx->vertex_elements->count;
-       } else {
-               /* bind vertex buffer once */
-               rctx->nvs_resource = rctx->nvertex_buffer;
-       }
-       for (i = 0 ; i < rctx->nvs_resource; i++) {
-               rstate = &rctx->vs_resource[i];
-               rstate->id = R600_PIPE_STATE_RESOURCE;
-               rstate->nregs = 0;
-               if (rctx->vertex_elements->vbuffer_need_offset) {
-                       /* one resource per vertex elements */
-                       unsigned vbuffer_index;
-                       vbuffer_index = rctx->vertex_elements->elements[i].vertex_buffer_index;
-                       vertex_buffer = &rctx->vertex_buffer[vbuffer_index];
-                       rbuffer = (struct r600_resource*)vertex_buffer->buffer;
-                       offset = rctx->vertex_elements->vbuffer_offset[i];
-               } else {
-                       /* bind vertex buffer once */
-                       vertex_buffer = &rctx->vertex_buffer[i];
-                       rbuffer = (struct r600_resource*)vertex_buffer->buffer;
-                       offset = 0;
-               }
-               if (vertex_buffer == NULL || rbuffer == NULL)
-                       continue;
-               offset += vertex_buffer->buffer_offset + r600_bo_offset(rbuffer->bo);
-               r600_pipe_state_add_reg(rstate, R_038000_RESOURCE0_WORD0,
-                                       offset, 0xFFFFFFFF, rbuffer->bo);
-               r600_pipe_state_add_reg(rstate, R_038004_RESOURCE0_WORD1,
-                                       rbuffer->bo_size - offset - 1, 0xFFFFFFFF, NULL);
-               r600_pipe_state_add_reg(rstate, R_038008_RESOURCE0_WORD2,
-                                       S_038008_STRIDE(vertex_buffer->stride),
-                                       0xFFFFFFFF, NULL);
-               r600_pipe_state_add_reg(rstate, R_03800C_RESOURCE0_WORD3,
-                                       0x00000000, 0xFFFFFFFF, NULL);
-               r600_pipe_state_add_reg(rstate, R_038010_RESOURCE0_WORD4,
-                                       0x00000000, 0xFFFFFFFF, NULL);
-               r600_pipe_state_add_reg(rstate, R_038014_RESOURCE0_WORD5,
-                                       0x00000000, 0xFFFFFFFF, NULL);
-               r600_pipe_state_add_reg(rstate, R_038018_RESOURCE0_WORD6,
-                                       0xC0000000, 0xFFFFFFFF, NULL);
-               r600_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i);
-       }
- }
- static void r600_draw_common(struct r600_drawl *draw)
- {
-       struct r600_pipe_context *rctx = (struct r600_pipe_context *)draw->ctx;
-       struct r600_resource *rbuffer;
-       unsigned prim;
-       u32 vgt_dma_index_type, vgt_draw_initiator, mask;
-       struct r600_draw rdraw;
-       struct r600_pipe_state vgt;
-       switch (draw->index_size) {
-       case 2:
-               vgt_draw_initiator = 0;
-               vgt_dma_index_type = 0;
-               break;
-       case 4:
-               vgt_draw_initiator = 0;
-               vgt_dma_index_type = 1;
-               break;
-       case 0:
-               vgt_draw_initiator = 2;
-               vgt_dma_index_type = 0;
-               break;
-       default:
-               R600_ERR("unsupported index size %d\n", draw->index_size);
-               return;
-       }
-       if (r600_conv_pipe_prim(draw->mode, &prim))
-               return;
-       if (unlikely(rctx->ps_shader == NULL)) {
-               R600_ERR("missing vertex shader\n");
-               return;
-       }
-       if (unlikely(rctx->vs_shader == NULL)) {
-               R600_ERR("missing vertex shader\n");
-               return;
-       }
-       /* there should be enough input */
-       if (rctx->vertex_elements->count < rctx->vs_shader->shader.bc.nresource) {
-               R600_ERR("%d resources provided, expecting %d\n",
-                       rctx->vertex_elements->count, rctx->vs_shader->shader.bc.nresource);
-               return;
-       }
-       r600_spi_update(rctx);
-       mask = 0;
-       for (int i = 0; i < rctx->framebuffer.nr_cbufs; i++) {
-               mask |= (0xF << (i * 4));
-       }
-       vgt.id = R600_PIPE_STATE_VGT;
-       vgt.nregs = 0;
-       r600_pipe_state_add_reg(&vgt, R_008958_VGT_PRIMITIVE_TYPE, prim, 0xFFFFFFFF, NULL);
-       r600_pipe_state_add_reg(&vgt, R_028408_VGT_INDX_OFFSET, draw->index_bias, 0xFFFFFFFF, NULL);
-       r600_pipe_state_add_reg(&vgt, R_028400_VGT_MAX_VTX_INDX, draw->max_index, 0xFFFFFFFF, NULL);
-       r600_pipe_state_add_reg(&vgt, R_028404_VGT_MIN_VTX_INDX, draw->min_index, 0xFFFFFFFF, NULL);
-       r600_pipe_state_add_reg(&vgt, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask, 0xFFFFFFFF, NULL);
-       r600_pipe_state_add_reg(&vgt, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, 0xFFFFFFFF, NULL);
-       r600_pipe_state_add_reg(&vgt, R_03CFF4_SQ_VTX_START_INST_LOC, 0, 0xFFFFFFFF, NULL);
-       r600_context_pipe_state_set(&rctx->ctx, &vgt);
-       rdraw.vgt_num_indices = draw->count;
-       rdraw.vgt_num_instances = 1;
-       rdraw.vgt_index_type = vgt_dma_index_type;
-       rdraw.vgt_draw_initiator = vgt_draw_initiator;
-       rdraw.indices = NULL;
-       if (draw->index_buffer) {
-               rbuffer = (struct r600_resource*)draw->index_buffer;
-               rdraw.indices = rbuffer->bo;
-               rdraw.indices_bo_offset = draw->index_buffer_offset;
-       }
-       r600_context_draw(&rctx->ctx, &rdraw);
- }
- void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
- {
-       struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
-       struct r600_drawl draw;
-       memset(&draw, 0, sizeof(struct r600_drawl));
-       draw.ctx = ctx;
-       draw.mode = info->mode;
-       draw.start = info->start;
-       draw.count = info->count;
-       if (info->indexed && rctx->index_buffer.buffer) {
-               draw.start += rctx->index_buffer.offset / rctx->index_buffer.index_size;
-               draw.min_index = info->min_index;
-               draw.max_index = info->max_index;
-               draw.index_bias = info->index_bias;
-               r600_translate_index_buffer(rctx, &rctx->index_buffer.buffer,
-                                           &rctx->index_buffer.index_size,
-                                           &draw.start,
-                                           info->count);
-               draw.index_size = rctx->index_buffer.index_size;
-               pipe_resource_reference(&draw.index_buffer, rctx->index_buffer.buffer);
-               draw.index_buffer_offset = draw.start * draw.index_size;
-               draw.start = 0;
-               r600_upload_index_buffer(rctx, &draw);
-       } else {
-               draw.index_size = 0;
-               draw.index_buffer = NULL;
-               draw.min_index = info->min_index;
-               draw.max_index = info->max_index;
-               draw.index_bias = info->start;
-       }
-       r600_draw_common(&draw);
-       pipe_resource_reference(&draw.index_buffer, NULL);
- }
  static void r600_set_blend_color(struct pipe_context *ctx,
                                        const struct pipe_blend_color *state)
  {
@@@ -616,6 -402,7 +402,7 @@@ static struct pipe_sampler_view *r600_c
        uint32_t word4 = 0, yuv_format = 0, pitch = 0;
        unsigned char swizzle[4], array_mode = 0, tile_type = 0;
        struct r600_bo *bo[2];
+       unsigned height, depth;
  
        if (resource == NULL)
                return NULL;
        if (desc == NULL) {
                R600_ERR("unknow format %d\n", state->format);
        }
-       tmp = (struct r600_resource_texture*)texture;
+       tmp = (struct r600_resource_texture *)texture;
+       if (tmp->depth && !tmp->is_flushing_texture) {
+               r600_texture_depth_flush(ctx, texture, TRUE);
+               tmp = tmp->flushed_depth_texture;
+       }
+       if (tmp->force_int_type) {
+               word4 &= C_038010_NUM_FORMAT_ALL;
+               word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
+       }
        rbuffer = &tmp->resource;
        bo[0] = rbuffer->bo;
        bo[1] = rbuffer->bo;
-       /* FIXME depth texture decompression */
-       if (tmp->depth) {
-               r600_texture_depth_flush(ctx, texture);
-               tmp = (struct r600_resource_texture*)texture;
-               rbuffer = &tmp->flushed_depth_texture->resource;
-               bo[0] = rbuffer->bo;
-               bo[1] = rbuffer->bo;
-       }
-       pitch = align(tmp->pitch_in_pixels[0], 8);
-       if (tmp->tiled) {
-               array_mode = tmp->array_mode[0];
-               tile_type = tmp->tile_type;
+       pitch = align(tmp->pitch_in_blocks[0] * util_format_get_blockwidth(state->format), 8);
+       array_mode = tmp->array_mode[0];
+       tile_type = tmp->tile_type;
+       height = texture->height0;
+       depth = texture->depth0;
+       if (texture->target == PIPE_TEXTURE_1D_ARRAY) {
+               height = 1;
+               depth = texture->array_size;
+       } else if (texture->target == PIPE_TEXTURE_2D_ARRAY) {
+               depth = texture->array_size;
        }
  
        /* FIXME properly handle first level != 0 */
                                S_038000_PITCH((pitch / 8) - 1) |
                                S_038000_TEX_WIDTH(texture->width0 - 1), 0xFFFFFFFF, NULL);
        r600_pipe_state_add_reg(rstate, R_038004_RESOURCE0_WORD1,
-                               S_038004_TEX_HEIGHT(texture->height0 - 1) |
-                               S_038004_TEX_DEPTH(texture->depth0 - 1) |
+                               S_038004_TEX_HEIGHT(height - 1) |
+                               S_038004_TEX_DEPTH(depth - 1) |
                                S_038004_DATA_FORMAT(format), 0xFFFFFFFF, NULL);
        r600_pipe_state_add_reg(rstate, R_038008_RESOURCE0_WORD2,
                                (tmp->offset[0] + r600_bo_offset(bo[0])) >> 8, 0xFFFFFFFF, bo[0]);
        r600_pipe_state_add_reg(rstate, R_03800C_RESOURCE0_WORD3,
                                (tmp->offset[1] + r600_bo_offset(bo[1])) >> 8, 0xFFFFFFFF, bo[1]);
        r600_pipe_state_add_reg(rstate, R_038010_RESOURCE0_WORD4,
-                               word4 | S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_NORM) |
+                               word4 |
                                S_038010_SRF_MODE_ALL(V_038010_SRF_MODE_NO_ZERO) |
                                S_038010_REQUEST_SIZE(1) |
                                S_038010_BASE_LEVEL(state->u.tex.first_level), 0xFFFFFFFF, NULL);
        r600_pipe_state_add_reg(rstate, R_038014_RESOURCE0_WORD5,
                                S_038014_LAST_LEVEL(state->u.tex.last_level) |
-                               S_038014_BASE_ARRAY(0) |
-                               S_038014_LAST_ARRAY(0), 0xFFFFFFFF, NULL);
+                               S_038014_BASE_ARRAY(state->u.tex.first_layer) |
+                               S_038014_LAST_ARRAY(state->u.tex.last_layer), 0xFFFFFFFF, NULL);
        r600_pipe_state_add_reg(rstate, R_038018_RESOURCE0_WORD6,
                                S_038018_TYPE(V_038010_SQ_TEX_VTX_VALID_TEXTURE), 0xFFFFFFFF, NULL);
  
@@@ -714,9 -509,11 +509,11 @@@ static void r600_set_ps_sampler_view(st
        for (i = 0; i < count; i++) {
                if (&rctx->ps_samplers.views[i]->base != views[i]) {
                        if (resource[i])
-                               r600_context_pipe_state_set_ps_resource(&rctx->ctx, &resource[i]->state, i);
+                               r600_context_pipe_state_set_ps_resource(&rctx->ctx, &resource[i]->state,
+                                                                       i + R600_MAX_CONST_BUFFERS);
                        else
-                               r600_context_pipe_state_set_ps_resource(&rctx->ctx, NULL, i);
+                               r600_context_pipe_state_set_ps_resource(&rctx->ctx, NULL,
+                                                                       i + R600_MAX_CONST_BUFFERS);
  
                        pipe_sampler_view_reference(
                                (struct pipe_sampler_view **)&rctx->ps_samplers.views[i],
        }
        for (i = count; i < NUM_TEX_UNITS; i++) {
                if (rctx->ps_samplers.views[i]) {
-                       r600_context_pipe_state_set_ps_resource(&rctx->ctx, NULL, i);
+                       r600_context_pipe_state_set_ps_resource(&rctx->ctx, NULL,
+                                                               i + R600_MAX_CONST_BUFFERS);
                        pipe_sampler_view_reference((struct pipe_sampler_view **)&rctx->ps_samplers.views[i], NULL);
                }
        }
@@@ -908,44 -706,55 +706,66 @@@ static void r600_cb(struct r600_pipe_co
        unsigned offset;
        const struct util_format_description *desc;
        struct r600_bo *bo[3];
+       int i;
  
        surf = (struct r600_surface *)state->cbufs[cb];
        rtex = (struct r600_resource_texture*)state->cbufs[cb]->texture;
+       if (rtex->depth && !rtex->is_flushing_texture) {
+               r600_texture_depth_flush(&rctx->context, state->cbufs[cb]->texture, TRUE);
+               rtex = rtex->flushed_depth_texture;
+       }
        rbuffer = &rtex->resource;
        bo[0] = rbuffer->bo;
        bo[1] = rbuffer->bo;
        bo[2] = rbuffer->bo;
  
        /* XXX quite sure for dx10+ hw don't need any offset hacks */
-       offset = r600_texture_get_offset((struct r600_resource_texture *)state->cbufs[cb]->texture,
+       offset = r600_texture_get_offset(rtex,
                                         level, state->cbufs[cb]->u.tex.first_layer);
-       pitch = rtex->pitch_in_pixels[level] / 8 - 1;
-       slice = rtex->pitch_in_pixels[level] * surf->aligned_height / 64 - 1;
+       pitch = rtex->pitch_in_blocks[level] / 8 - 1;
+       slice = rtex->pitch_in_blocks[level] * surf->aligned_height / 64 - 1;
        ntype = 0;
-       desc = util_format_description(rtex->resource.base.b.format);
+       desc = util_format_description(surf->base.format);
        if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
                ntype = V_0280A0_NUMBER_SRGB;
 +        else if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
 +              switch(desc->channel[0].type) {
 +              case UTIL_FORMAT_TYPE_UNSIGNED:
 +                      ntype = V_0280A0_NUMBER_UNORM;
 +                      break;
 +
 +              case UTIL_FORMAT_TYPE_SIGNED:
 +                      ntype = V_0280A0_NUMBER_SNORM;
 +                      break;
 +              }
 +      }
  
-       format = r600_translate_colorformat(rtex->resource.base.b.format);
-       swap = r600_translate_colorswap(rtex->resource.base.b.format);
+       for (i = 0; i < 4; i++) {
+               if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
+                       break;
+               }
+       }
+       format = r600_translate_colorformat(surf->base.format);
+       swap = r600_translate_colorswap(surf->base.format);
+       /* disable when gallium grows int textures */
+       if ((format == FMT_32_32_32_32 || format == FMT_16_16_16_16) && rtex->force_int_type)
+               ntype = 4;
        color_info = S_0280A0_FORMAT(format) |
                S_0280A0_COMP_SWAP(swap) |
                S_0280A0_ARRAY_MODE(rtex->array_mode[level]) |
                S_0280A0_BLEND_CLAMP(1) |
                S_0280A0_NUMBER_TYPE(ntype);
-       if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS)
-               color_info |= S_0280A0_SOURCE_FORMAT(1);
+       /* on R600 this can't be set if BLEND_CLAMP isn't set,
+          if BLEND_FLOAT32 is set of > 11 bits in a UNORM or SNORM */
+       if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS &&
+           desc->channel[i].size < 12)
+               color_info |= S_0280A0_SOURCE_FORMAT(V_0280A0_EXPORT_NORM);
  
        r600_pipe_state_add_reg(rstate,
                                R_028040_CB_COLOR0_BASE + cb * 4,
@@@ -989,17 -798,14 +809,14 @@@ static void r600_db(struct r600_pipe_co
  
        surf = (struct r600_surface *)state->zsbuf;
        rtex = (struct r600_resource_texture*)state->zsbuf->texture;
-       rtex->tiled = 1;
-       rtex->array_mode[level] = 2;
-       rtex->tile_type = 1;
-       rtex->depth = 1;
        rbuffer = &rtex->resource;
  
        /* XXX quite sure for dx10+ hw don't need any offset hacks */
        offset = r600_texture_get_offset((struct r600_resource_texture *)state->zsbuf->texture,
                                         level, state->zsbuf->u.tex.first_layer);
-       pitch = rtex->pitch_in_pixels[level] / 8 - 1;
-       slice = rtex->pitch_in_pixels[level] * surf->aligned_height / 64 - 1;
+       pitch = rtex->pitch_in_blocks[level] / 8 - 1;
+       slice = rtex->pitch_in_blocks[level] * surf->aligned_height / 64 - 1;
        format = r600_translate_dbformat(state->zsbuf->texture->format);
  
        r600_pipe_state_add_reg(rstate, R_02800C_DB_DEPTH_BASE,
@@@ -1115,51 -921,6 +932,6 @@@ static void r600_set_framebuffer_state(
        }
  }
  
- static void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
-                                       struct pipe_resource *buffer)
- {
-       struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
-       struct r600_resource *rbuffer = (struct r600_resource*)buffer;
-       uint32_t offset;
-       /* Note that the state tracker can unbind constant buffers by
-        * passing NULL here.
-        */
-       if (buffer == NULL) {
-               return;
-       }
-       r600_upload_const_buffer(rctx, buffer, &offset);
-       switch (shader) {
-       case PIPE_SHADER_VERTEX:
-               rctx->vs_const_buffer.nregs = 0;
-               r600_pipe_state_add_reg(&rctx->vs_const_buffer,
-                                       R_028180_ALU_CONST_BUFFER_SIZE_VS_0,
-                                       ALIGN_DIVUP(buffer->width0 >> 4, 16),
-                                       0xFFFFFFFF, NULL);
-               r600_pipe_state_add_reg(&rctx->vs_const_buffer,
-                                       R_028980_ALU_CONST_CACHE_VS_0,
-                                       (r600_bo_offset(rbuffer->bo) + offset) >> 8, 0xFFFFFFFF, rbuffer->bo);
-               r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_const_buffer);
-               break;
-       case PIPE_SHADER_FRAGMENT:
-               rctx->ps_const_buffer.nregs = 0;
-               r600_pipe_state_add_reg(&rctx->ps_const_buffer,
-                                       R_028140_ALU_CONST_BUFFER_SIZE_PS_0,
-                                       ALIGN_DIVUP(buffer->width0 >> 4, 16),
-                                       0xFFFFFFFF, NULL);
-               r600_pipe_state_add_reg(&rctx->ps_const_buffer,
-                                       R_028940_ALU_CONST_CACHE_PS_0,
-                                       (r600_bo_offset(rbuffer->bo) + offset) >> 8, 0xFFFFFFFF, rbuffer->bo);
-               r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_const_buffer);
-               break;
-       default:
-               R600_ERR("unsupported %d\n", shader);
-               return;
-       }
- }
  void r600_init_state_functions(struct r600_pipe_context *rctx)
  {
        rctx->context.create_blend_state = r600_create_blend_state;
        rctx->context.set_vertex_sampler_views = r600_set_vs_sampler_view;
        rctx->context.set_viewport_state = r600_set_viewport_state;
        rctx->context.sampler_view_destroy = r600_sampler_view_destroy;
+       rctx->context.redefine_user_buffer = u_default_redefine_user_buffer;
  }
  
  void r600_init_config(struct r600_pipe_context *rctx)
@@@ -1489,3 -1251,25 +1262,25 @@@ void *r600_create_db_flush_dsa(struct r
                                S_028D0C_COPY_CENTROID(1), NULL);
        return rstate;
  }
+ void r600_pipe_set_buffer_resource(struct r600_pipe_context *rctx,
+                                  struct r600_pipe_state *rstate,
+                                  struct r600_resource *rbuffer,
+                                  unsigned offset, unsigned stride)
+ {
+       r600_pipe_state_add_reg(rstate, R_038000_RESOURCE0_WORD0,
+                               offset, 0xFFFFFFFF, rbuffer->bo);
+       r600_pipe_state_add_reg(rstate, R_038004_RESOURCE0_WORD1,
+                               rbuffer->bo_size - offset - 1, 0xFFFFFFFF, NULL);
+       r600_pipe_state_add_reg(rstate, R_038008_RESOURCE0_WORD2,
+                               S_038008_STRIDE(stride),
+                               0xFFFFFFFF, NULL);
+       r600_pipe_state_add_reg(rstate, R_03800C_RESOURCE0_WORD3,
+                               0x00000000, 0xFFFFFFFF, NULL);
+       r600_pipe_state_add_reg(rstate, R_038010_RESOURCE0_WORD4,
+                               0x00000000, 0xFFFFFFFF, NULL);
+       r600_pipe_state_add_reg(rstate, R_038014_RESOURCE0_WORD5,
+                               0x00000000, 0xFFFFFFFF, NULL);
+       r600_pipe_state_add_reg(rstate, R_038018_RESOURCE0_WORD6,
+                               0xC0000000, 0xFFFFFFFF, NULL);
+ }
index a0ec493fc8582736ea58664a216c891954f39829,29e12f1d468e68a24b798a63bcbb86f078a73e3f..3dd54f45202c50253566dbbb56a76aa83178af8f
@@@ -253,9 -253,13 +253,13 @@@ static inline unsigned r600_tex_dim(uns
        default:
        case PIPE_TEXTURE_1D:
                return V_038000_SQ_TEX_DIM_1D;
+       case PIPE_TEXTURE_1D_ARRAY:
+               return V_038000_SQ_TEX_DIM_1D_ARRAY;
        case PIPE_TEXTURE_2D:
        case PIPE_TEXTURE_RECT:
                return V_038000_SQ_TEX_DIM_2D;
+       case PIPE_TEXTURE_2D_ARRAY:
+               return V_038000_SQ_TEX_DIM_2D_ARRAY;
        case PIPE_TEXTURE_3D:
                return V_038000_SQ_TEX_DIM_3D;
        case PIPE_TEXTURE_CUBE:
@@@ -285,10 -289,14 +289,14 @@@ static inline uint32_t r600_translate_c
                return V_0280A0_SWAP_ALT_REV;
        case PIPE_FORMAT_I8_UNORM:
        case PIPE_FORMAT_L8_UNORM:
+       case PIPE_FORMAT_L8_SRGB:
        case PIPE_FORMAT_R8_UNORM:
        case PIPE_FORMAT_R8_SNORM:
                return V_0280A0_SWAP_STD;
  
+       case PIPE_FORMAT_L4A4_UNORM:
+               return V_0280A0_SWAP_ALT;
                /* 16-bit buffers. */
        case PIPE_FORMAT_B5G6R5_UNORM:
                return V_0280A0_SWAP_STD_REV;
                return V_0280A0_SWAP_STD;
  
        case PIPE_FORMAT_L8A8_UNORM:
+       case PIPE_FORMAT_L8A8_SRGB:
                return V_0280A0_SWAP_ALT;
        case PIPE_FORMAT_R8G8_UNORM:
                return V_0280A0_SWAP_STD;
  
        case PIPE_FORMAT_R16_UNORM:
 +      case PIPE_FORMAT_R16_SNORM:
                return V_0280A0_SWAP_STD;
  
                /* 32-bit buffers. */
        case PIPE_FORMAT_X8R8G8B8_UNORM:
                return V_0280A0_SWAP_ALT_REV;
        case PIPE_FORMAT_R8G8B8A8_SNORM:
+       case PIPE_FORMAT_R8G8B8A8_UNORM:
        case PIPE_FORMAT_R8G8B8X8_UNORM:
                return V_0280A0_SWAP_STD;
  
  
        case PIPE_FORMAT_R10G10B10A2_UNORM:
        case PIPE_FORMAT_R10G10B10X2_SNORM:
-       case PIPE_FORMAT_B10G10R10A2_UNORM:
        case PIPE_FORMAT_R10SG10SB10SA2U_NORM:
-               return V_0280A0_SWAP_STD_REV;
+               return V_0280A0_SWAP_STD;
+       case PIPE_FORMAT_B10G10R10A2_UNORM:
+               return V_0280A0_SWAP_ALT;
  
        case PIPE_FORMAT_R16G16_UNORM:
                return V_0280A0_SWAP_STD;
                /* 64-bit buffers. */
        case PIPE_FORMAT_R16G16B16A16_UNORM:
        case PIPE_FORMAT_R16G16B16A16_SNORM:
-               //              return FMT_16_16_16_16;
        case PIPE_FORMAT_R16G16B16A16_FLOAT:
-               //              return FMT_16_16_16_16_FLOAT;
  
                /* 128-bit buffers. */
-       //case PIPE_FORMAT_R32G32B32A32_FLOAT:
-               //              return FMT_32_32_32_32_FLOAT;
-               return 0;
+       case PIPE_FORMAT_R32G32B32A32_FLOAT:
+       case PIPE_FORMAT_R32G32B32A32_SNORM:
+       case PIPE_FORMAT_R32G32B32A32_UNORM:
+               return V_0280A0_SWAP_STD;
        default:
                R600_ERR("unsupported colorswap format %d\n", format);
                return ~0;
  static INLINE uint32_t r600_translate_colorformat(enum pipe_format format)
  {
        switch (format) {
+       case PIPE_FORMAT_L4A4_UNORM:
+               return V_0280A0_COLOR_4_4;
                /* 8-bit buffers. */
        case PIPE_FORMAT_A8_UNORM:
        case PIPE_FORMAT_I8_UNORM:
        case PIPE_FORMAT_L8_UNORM:
+       case PIPE_FORMAT_L8_SRGB:
        case PIPE_FORMAT_R8_UNORM:
        case PIPE_FORMAT_R8_SNORM:
                return V_0280A0_COLOR_8;
                return V_0280A0_COLOR_16;
  
        case PIPE_FORMAT_L8A8_UNORM:
+       case PIPE_FORMAT_L8A8_SRGB:
        case PIPE_FORMAT_R8G8_UNORM:
                return V_0280A0_COLOR_8_8;
  
        case PIPE_FORMAT_R16_UNORM:
 +      case PIPE_FORMAT_R16_SNORM:
                return V_0280A0_COLOR_16;
  
                /* 32-bit buffers. */
        case PIPE_FORMAT_R10G10B10X2_SNORM:
        case PIPE_FORMAT_B10G10R10A2_UNORM:
        case PIPE_FORMAT_R10SG10SB10SA2U_NORM:
-               return V_0280A0_COLOR_10_10_10_2;
+               return V_0280A0_COLOR_2_10_10_10;
  
        case PIPE_FORMAT_Z24X8_UNORM:
        case PIPE_FORMAT_Z24_UNORM_S8_USCALED:
        case PIPE_FORMAT_S8_USCALED_Z24_UNORM:
                return V_0280A0_COLOR_24_8;
  
 -      case PIPE_FORMAT_R32_FLOAT:
 -              return V_0280A0_COLOR_32_FLOAT;
 +      //case PIPE_FORMAT_R32_FLOAT:
 +      //      return V_0280A0_COLOR_32_FLOAT;
  
        case PIPE_FORMAT_R16G16_FLOAT:
                return V_0280A0_COLOR_16_16_FLOAT;
                return V_0280A0_COLOR_32_32;
  
                /* 128-bit buffers. */
-       //case PIPE_FORMAT_R32G32B32_FLOAT:
-       //      return V_0280A0_COLOR_32_32_32_FLOAT;
-       //case PIPE_FORMAT_R32G32B32A32_FLOAT:
-       //      return V_0280A0_COLOR_32_32_32_32_FLOAT;
+       case PIPE_FORMAT_R32G32B32_FLOAT:
+               return V_0280A0_COLOR_32_32_32_FLOAT;
+       case PIPE_FORMAT_R32G32B32A32_FLOAT:
+               return V_0280A0_COLOR_32_32_32_32_FLOAT;
+       case PIPE_FORMAT_R32G32B32A32_SNORM:
+       case PIPE_FORMAT_R32G32B32A32_UNORM:
+               return V_0280A0_COLOR_32_32_32_32;
  
                /* YUV buffers. */
        case PIPE_FORMAT_UYVY:
@@@ -497,9 -514,37 +516,37 @@@ static INLINE boolean r600_is_zs_format
        return r600_translate_dbformat(format) != ~0;
  }
  
- static INLINE boolean r600_is_vertex_format_supported(enum pipe_format format)
+ static INLINE boolean r600_is_vertex_format_supported(enum pipe_format format,
+                                                     enum radeon_family family)
  {
-       return r600_translate_colorformat(format) != ~0;
+       unsigned i;
+       const struct util_format_description *desc = util_format_description(format);
+       if (!desc)
+               return FALSE;
+       /* Find the first non-VOID channel. */
+       for (i = 0; i < 4; i++) {
+               if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
+                       break;
+               }
+       }
+       if (i == 4)
+               return FALSE;
+       /* No fixed, no double. */
+       if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN ||
+           desc->channel[i].type == UTIL_FORMAT_TYPE_FIXED ||
+           (desc->channel[i].size == 64 &&
+            desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT))
+               return FALSE;
+       /* No scaled/norm formats with 32 bits per channel. */
+       if (desc->channel[i].size == 32 &&
+           (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED ||
+            desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED))
+               return FALSE;
+       return TRUE;
  }
  
  #endif
index 1f4f453c091b81fd7b547bd57d9765b18d8d5731,095558d0337bacd9aa5b69ffb58b8bee88033e9b..03af367401d9b8c411072f42bc3b94bfa67d4e36
@@@ -27,6 -27,7 +27,7 @@@
  #include <errno.h>
  #include <pipe/p_screen.h>
  #include <util/u_format.h>
+ #include <util/u_format_s3tc.h>
  #include <util/u_math.h>
  #include <util/u_inlines.h>
  #include <util/u_memory.h>
@@@ -38,8 -39,6 +39,6 @@@
  #include "r600d.h"
  #include "r600_formats.h"
  
- extern struct u_resource_vtbl r600_texture_vtbl;
  /* Copy from a full GPU texture to a transfer's staging one. */
  static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
  {
@@@ -77,17 -76,15 +76,15 @@@ unsigned r600_texture_get_offset(struc
  {
        unsigned offset = rtex->offset[level];
  
-       switch (rtex->resource.base.b.target) {
+       switch (rtex->resource.b.b.b.target) {
        case PIPE_TEXTURE_3D:
        case PIPE_TEXTURE_CUBE:
-               return offset + layer * rtex->layer_size[level];
        default:
-               assert(layer == 0);
-               return offset;
+               return offset + layer * rtex->layer_size[level];
        }
  }
  
- static unsigned r600_get_pixel_alignment(struct pipe_screen *screen,
+ static unsigned r600_get_block_alignment(struct pipe_screen *screen,
                                         enum pipe_format format,
                                         unsigned array_mode)
  {
                               (((rscreen->tiling_info->group_bytes / 8 / pixsize)) *
                                rscreen->tiling_info->num_banks)) * 8;
                break;
+       case V_038000_ARRAY_LINEAR_ALIGNED:
+               p_align = MAX2(64, rscreen->tiling_info->group_bytes / pixsize);
+               break;
        case V_038000_ARRAY_LINEAR_GENERAL:
        default:
                p_align = rscreen->tiling_info->group_bytes / pixsize;
@@@ -124,8 -124,10 +124,10 @@@ static unsigned r600_get_height_alignme
                h_align = rscreen->tiling_info->num_channels * 8;
                break;
        case V_038000_ARRAY_1D_TILED_THIN1:
+       case V_038000_ARRAY_LINEAR_ALIGNED:
                h_align = 8;
                break;
+       case V_038000_ARRAY_LINEAR_GENERAL:
        default:
                h_align = 1;
                break;
@@@ -139,7 -141,7 +141,7 @@@ static unsigned r600_get_base_alignment
  {
        struct r600_screen* rscreen = (struct r600_screen *)screen;
        unsigned pixsize = util_format_get_blocksize(format);
-       int p_align = r600_get_pixel_alignment(screen, format, array_mode);
+       int p_align = r600_get_block_alignment(screen, format, array_mode);
        int h_align = r600_get_height_alignment(screen, array_mode);
        int b_align;
  
                               p_align * pixsize * h_align);
                break;
        case V_038000_ARRAY_1D_TILED_THIN1:
+       case V_038000_ARRAY_LINEAR_ALIGNED:
+       case V_038000_ARRAY_LINEAR_GENERAL:
        default:
                b_align = rscreen->tiling_info->group_bytes;
                break;
@@@ -165,55 -169,46 +169,46 @@@ static unsigned mip_minify(unsigned siz
        return val;
  }
  
- static unsigned r600_texture_get_stride(struct pipe_screen *screen,
-                                       struct r600_resource_texture *rtex,
-                                       unsigned level)
+ static unsigned r600_texture_get_nblocksx(struct pipe_screen *screen,
+                                         struct r600_resource_texture *rtex,
+                                         unsigned level)
  {
-       struct pipe_resource *ptex = &rtex->resource.base.b;
-       unsigned width, stride, tile_width;
+       struct pipe_resource *ptex = &rtex->resource.b.b.b;
+       unsigned nblocksx, block_align, width;
+       unsigned blocksize = util_format_get_blocksize(ptex->format);
  
        if (rtex->pitch_override)
-               return rtex->pitch_override;
+               return rtex->pitch_override / blocksize;
  
        width = mip_minify(ptex->width0, level);
-       if (util_format_is_plain(ptex->format)) {
-               tile_width = r600_get_pixel_alignment(screen, ptex->format,
-                                                     rtex->array_mode[level]);
-               width = align(width, tile_width);
-       }
-       stride = util_format_get_stride(ptex->format, width);
+       nblocksx = util_format_get_nblocksx(ptex->format, width);
  
-       return stride;
+       block_align = r600_get_block_alignment(screen, ptex->format,
+                                             rtex->array_mode[level]);
+       nblocksx = align(nblocksx, block_align);
+       return nblocksx;
  }
  
  static unsigned r600_texture_get_nblocksy(struct pipe_screen *screen,
                                          struct r600_resource_texture *rtex,
                                          unsigned level)
  {
-       struct pipe_resource *ptex = &rtex->resource.base.b;
+       struct pipe_resource *ptex = &rtex->resource.b.b.b;
        unsigned height, tile_height;
  
        height = mip_minify(ptex->height0, level);
-       if (util_format_is_plain(ptex->format)) {
-               tile_height = r600_get_height_alignment(screen,
-                                                       rtex->array_mode[level]);
-               height = align(height, tile_height);
-       }
-       return util_format_get_nblocksy(ptex->format, height);
- }
- /* Get a width in pixels from a stride in bytes. */
- static unsigned pitch_to_width(enum pipe_format format, unsigned pitch_in_bytes)
- {
-       return (pitch_in_bytes / util_format_get_blocksize(format)) *
-               util_format_get_blockwidth(format);
+       height = util_format_get_nblocksy(ptex->format, height);
+       tile_height = r600_get_height_alignment(screen,
+                                               rtex->array_mode[level]);
+       height = align(height, tile_height);
+       return height;
  }
  
  static void r600_texture_set_array_mode(struct pipe_screen *screen,
                                        struct r600_resource_texture *rtex,
                                        unsigned level, unsigned array_mode)
  {
-       struct pipe_resource *ptex = &rtex->resource.base.b;
+       struct pipe_resource *ptex = &rtex->resource.b.b.b;
  
        switch (array_mode) {
        case V_0280A0_ARRAY_LINEAR_GENERAL:
                unsigned w, h, tile_height, tile_width;
  
                tile_height = r600_get_height_alignment(screen, array_mode);
-               tile_width = r600_get_pixel_alignment(screen, ptex->format, array_mode);
+               tile_width = r600_get_block_alignment(screen, ptex->format, array_mode);
  
                w = mip_minify(ptex->width0, level);
                h = mip_minify(ptex->height0, level);
@@@ -244,40 -239,128 +239,128 @@@ static void r600_setup_miptree(struct p
                               struct r600_resource_texture *rtex,
                               unsigned array_mode)
  {
-       struct pipe_resource *ptex = &rtex->resource.base.b;
+       struct pipe_resource *ptex = &rtex->resource.b.b.b;
        struct radeon *radeon = (struct radeon *)screen->winsys;
        enum chip_class chipc = r600_get_family_class(radeon);
-       unsigned pitch, size, layer_size, i, offset;
-       unsigned nblocksy;
+       unsigned size, layer_size, i, offset;
+       unsigned nblocksx, nblocksy;
  
        for (i = 0, offset = 0; i <= ptex->last_level; i++) {
+               unsigned blocksize = util_format_get_blocksize(ptex->format);
                r600_texture_set_array_mode(screen, rtex, i, array_mode);
  
-               pitch = r600_texture_get_stride(screen, rtex, i);
+               nblocksx = r600_texture_get_nblocksx(screen, rtex, i);
                nblocksy = r600_texture_get_nblocksy(screen, rtex, i);
  
-               layer_size = pitch * nblocksy;
+               layer_size = nblocksx * nblocksy * blocksize;
                if (ptex->target == PIPE_TEXTURE_CUBE) {
                        if (chipc >= R700)
                                size = layer_size * 8;
                        else
                                size = layer_size * 6;
                }
-               else
+               else if (ptex->target == PIPE_TEXTURE_3D)
                        size = layer_size * u_minify(ptex->depth0, i);
+               else
+                       size = layer_size * ptex->array_size;
                /* align base image and start of miptree */
                if ((i == 0) || (i == 1))
                        offset = align(offset, r600_get_base_alignment(screen, ptex->format, array_mode));
                rtex->offset[i] = offset;
                rtex->layer_size[i] = layer_size;
-               rtex->pitch_in_bytes[i] = pitch;
-               rtex->pitch_in_pixels[i] = pitch_to_width(ptex->format, pitch);
+               rtex->pitch_in_blocks[i] = nblocksx; /* CB talks in elements */
+               rtex->pitch_in_bytes[i] = nblocksx * blocksize;
                offset += size;
        }
        rtex->size = offset;
  }
  
 -          
+ /* Figure out whether u_blitter will fallback to a transfer operation.
+  * If so, don't use a staging resource.
+  */
+ static boolean permit_hardware_blit(struct pipe_screen *screen,
+                                       const struct pipe_resource *res)
+ {
+       unsigned bind;
+       if (util_format_is_depth_or_stencil(res->format))
+               bind = PIPE_BIND_DEPTH_STENCIL;
+       else
+               bind = PIPE_BIND_RENDER_TARGET;
+       /* hackaround for S3TC */
+       if (util_format_is_s3tc(res->format))
+               return TRUE;
++
+       if (!screen->is_format_supported(screen,
+                               res->format,
+                               res->target,
+                               res->nr_samples,
+                               bind, 0))
+               return FALSE;
+       if (!screen->is_format_supported(screen,
+                               res->format,
+                               res->target,
+                               res->nr_samples,
+                               PIPE_BIND_SAMPLER_VIEW, 0))
+               return FALSE;
+       return TRUE;
+ }
+ static boolean r600_texture_get_handle(struct pipe_screen* screen,
+                                       struct pipe_resource *ptex,
+                                       struct winsys_handle *whandle)
+ {
+       struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
+       struct r600_resource *resource = &rtex->resource;
+       struct radeon *radeon = (struct radeon *)screen->winsys;
+       return r600_bo_get_winsys_handle(radeon, resource->bo,
+                       rtex->pitch_in_bytes[0], whandle);
+ }
+ static void r600_texture_destroy(struct pipe_screen *screen,
+                                struct pipe_resource *ptex)
+ {
+       struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
+       struct r600_resource *resource = &rtex->resource;
+       struct radeon *radeon = (struct radeon *)screen->winsys;
+       if (rtex->flushed_depth_texture)
+               pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);
+       if (resource->bo) {
+               r600_bo_reference(radeon, &resource->bo, NULL);
+       }
+       FREE(rtex);
+ }
+ static unsigned int r600_texture_is_referenced(struct pipe_context *context,
+                                               struct pipe_resource *texture,
+                                               unsigned level, int layer)
+ {
+       /* FIXME */
+       return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+ }
+ static const struct u_resource_vtbl r600_texture_vtbl =
+ {
+       r600_texture_get_handle,        /* get_handle */
+       r600_texture_destroy,           /* resource_destroy */
+       r600_texture_is_referenced,     /* is_resource_referenced */
+       r600_texture_get_transfer,      /* get_transfer */
+       r600_texture_transfer_destroy,  /* transfer_destroy */
+       r600_texture_transfer_map,      /* transfer_map */
+       u_default_transfer_flush_region,/* transfer_flush_region */
+       r600_texture_transfer_unmap,    /* transfer_unmap */
+       u_default_transfer_inline_write /* transfer_inline_write */
+ };
  static struct r600_resource_texture *
  r600_texture_create_object(struct pipe_screen *screen,
                           const struct pipe_resource *base,
                return NULL;
  
        resource = &rtex->resource;
-       resource->base.b = *base;
-       resource->base.vtbl = &r600_texture_vtbl;
-       pipe_reference_init(&resource->base.b.reference, 1);
-       resource->base.b.screen = screen;
+       resource->b.b.b = *base;
+       resource->b.b.vtbl = &r600_texture_vtbl;
+       pipe_reference_init(&resource->b.b.b.reference, 1);
+       resource->b.b.b.screen = screen;
        resource->bo = bo;
        rtex->pitch_override = pitch_in_bytes_override;
+       /* only mark depth textures the HW can hit as depth textures */
+       if (util_format_is_depth_or_stencil(base->format) && permit_hardware_blit(screen, base))
+               rtex->depth = 1;
  
-       if (array_mode)
-               rtex->tiled = 1;
        r600_setup_miptree(screen, rtex, array_mode);
  
        resource->size = rtex->size;
  
        if (!resource->bo) {
-               struct pipe_resource *ptex = &rtex->resource.base.b;
+               struct pipe_resource *ptex = &rtex->resource.b.b.b;
                int base_align = r600_get_base_alignment(screen, ptex->format, array_mode);
  
                resource->bo = r600_bo(radeon, rtex->size, base_align, base->bind, base->usage);
        return rtex;
  }
  
- /* Figure out whether u_blitter will fallback to a transfer operation.
-  * If so, don't use a staging resource.
-  */
- static boolean permit_hardware_blit(struct pipe_screen *screen,
-                                       const struct pipe_resource *res)
- {
-       unsigned bind;
-       if (util_format_is_depth_or_stencil(res->format))
-               bind = PIPE_BIND_DEPTH_STENCIL;
-       else
-               bind = PIPE_BIND_RENDER_TARGET;
-       /* See r600_resource_copy_region: there is something wrong
-        * with depth resource copies at the moment so avoid them for
-        * now.
-        */
-       if (util_format_get_component_bits(res->format,
-                               UTIL_FORMAT_COLORSPACE_ZS,
-                               0) != 0)
-               return FALSE;
-       if (!screen->is_format_supported(screen,
-                               res->format,
-                               res->target,
-                               res->nr_samples,
-                               bind, 0))
-               return FALSE;
-       if (!screen->is_format_supported(screen,
-                               res->format,
-                               res->target,
-                               res->nr_samples,
-                               PIPE_BIND_SAMPLER_VIEW, 0))
-               return FALSE;
-       if (res->usage == PIPE_USAGE_STREAM)
-               return FALSE;
-       return TRUE;
- }
  struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
                                                const struct pipe_resource *templ)
  {
                }
        }
  
+       if (!(templ->flags & R600_RESOURCE_FLAG_TRANSFER) &&
+           util_format_is_s3tc(templ->format))
+               array_mode = V_038000_ARRAY_1D_TILED_THIN1;
        return (struct pipe_resource *)r600_texture_create_object(screen, templ, array_mode,
                                                                  0, 0, NULL);
  
  }
  
- static void r600_texture_destroy(struct pipe_screen *screen,
-                                struct pipe_resource *ptex)
- {
-       struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
-       struct r600_resource *resource = &rtex->resource;
-       struct radeon *radeon = (struct radeon *)screen->winsys;
-       if (rtex->flushed_depth_texture)
-               pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);
-       if (resource->bo) {
-               r600_bo_reference(radeon, &resource->bo, NULL);
-       }
-       FREE(rtex);
- }
- static boolean r600_texture_get_handle(struct pipe_screen* screen,
-                                       struct pipe_resource *ptex,
-                                       struct winsys_handle *whandle)
- {
-       struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
-       struct r600_resource *resource = &rtex->resource;
-       struct radeon *radeon = (struct radeon *)screen->winsys;
-       return r600_bo_get_winsys_handle(radeon, resource->bo,
-                       rtex->pitch_in_bytes[0], whandle);
- }
  static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
                                                struct pipe_resource *texture,
                                                const struct pipe_surface *surf_tmpl)
  {
        struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
        struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
-       unsigned tile_height;
        unsigned level = surf_tmpl->u.tex.level;
  
        assert(surf_tmpl->u.tex.first_layer == surf_tmpl->u.tex.last_layer);
        surface->base.u.tex.last_layer = surf_tmpl->u.tex.last_layer;
        surface->base.u.tex.level = level;
  
-       tile_height = r600_get_height_alignment(pipe->screen, rtex->array_mode[level]);
-       surface->aligned_height = align(surface->base.height, tile_height);
+       surface->aligned_height = r600_texture_get_nblocksy(pipe->screen,
+                                                           rtex, level);
        return &surface->base;
  }
  
@@@ -477,16 -494,8 +494,8 @@@ struct pipe_resource *r600_texture_from
                                                                  bo);
  }
  
- static unsigned int r600_texture_is_referenced(struct pipe_context *context,
-                                               struct pipe_resource *texture,
-                                               unsigned level, int layer)
- {
-       /* FIXME */
-       return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
- }
  int r600_texture_depth_flush(struct pipe_context *ctx,
-                            struct pipe_resource *texture)
+                            struct pipe_resource *texture, boolean just_create)
  {
        struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
        struct pipe_resource resource;
        resource.width0 = texture->width0;
        resource.height0 = texture->height0;
        resource.depth0 = 1;
-       resource.last_level = 0;
+       resource.array_size = 1;
+       resource.last_level = texture->last_level;
        resource.nr_samples = 0;
        resource.usage = PIPE_USAGE_DYNAMIC;
        resource.bind = 0;
                return -ENOMEM;
        }
  
+       ((struct r600_resource_texture *)rtex->flushed_depth_texture)->is_flushing_texture = TRUE;
  out:
+       if (just_create)
+               return 0;
        /* XXX: only do this if the depth texture has actually changed:
         */
        r600_blit_uncompress_depth(ctx, rtex);
@@@ -546,7 -560,7 +560,7 @@@ struct pipe_transfer* r600_texture_get_
         * the CPU is much happier reading out of cached system memory
         * than uncached VRAM.
         */
-       if (rtex->tiled)
+       if (R600_TEX_IS_TILED(rtex, level))
                use_staging_texture = TRUE;
  
        if ((usage & PIPE_TRANSFER_READ) && u_box_volume(box) > 1024)
                */
                /* XXX: when discard is true, no need to read back from depth texture
                */
-               r = r600_texture_depth_flush(ctx, texture);
+               r = r600_texture_depth_flush(ctx, texture, FALSE);
                if (r < 0) {
                        R600_ERR("failed to create temporary texture to hold untiled copy\n");
                        pipe_resource_reference(&trans->transfer.resource, NULL);
                        FREE(trans);
                        return NULL;
                }
+               trans->transfer.stride = rtex->flushed_depth_texture->pitch_in_bytes[level];
+               trans->offset = r600_texture_get_offset(rtex->flushed_depth_texture, level, box->z);
+               return &trans->transfer;
        } else if (use_staging_texture) {
                resource.target = PIPE_TEXTURE_2D;
                resource.format = texture->format;
                return &trans->transfer;
        }
        trans->transfer.stride = rtex->pitch_in_bytes[level];
+       trans->transfer.layer_stride = rtex->layer_size[level];
        trans->offset = r600_texture_get_offset(rtex, level, box->z);
        return &trans->transfer;
  }
@@@ -635,7 -653,8 +653,8 @@@ void r600_texture_transfer_destroy(stru
                                   struct pipe_transfer *transfer)
  {
        struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
-       struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
+       struct pipe_resource *texture = transfer->resource;
+       struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
  
        if (rtransfer->staging_texture) {
                if (transfer->usage & PIPE_TRANSFER_WRITE) {
                }
                pipe_resource_reference(&rtransfer->staging_texture, NULL);
        }
-       if (rtex->flushed_depth_texture) {
-               pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);
+       if (rtex->depth && !rtex->is_flushing_texture) {
+               if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtex->flushed_depth_texture)
+                       r600_blit_push_depth(ctx, rtex);
        }
        pipe_resource_reference(&transfer->resource, NULL);
        FREE(transfer);
  }
@@@ -727,19 -749,6 +749,6 @@@ void r600_texture_transfer_unmap(struc
        r600_bo_unmap(radeon, bo);
  }
  
- struct u_resource_vtbl r600_texture_vtbl =
- {
-       r600_texture_get_handle,        /* get_handle */
-       r600_texture_destroy,           /* resource_destroy */
-       r600_texture_is_referenced,     /* is_resource_referenced */
-       r600_texture_get_transfer,      /* get_transfer */
-       r600_texture_transfer_destroy,  /* transfer_destroy */
-       r600_texture_transfer_map,      /* transfer_map */
-       u_default_transfer_flush_region,/* transfer_flush_region */
-       r600_texture_transfer_unmap,    /* transfer_unmap */
-       u_default_transfer_inline_write /* transfer_inline_write */
- };
  void r600_init_surface_functions(struct r600_pipe_context *r600)
  {
        r600->context.create_surface = r600_create_surface;
@@@ -802,6 -811,8 +811,8 @@@ uint32_t r600_translate_texformat(enum 
        uint32_t result = 0, word4 = 0, yuv_format = 0;
        const struct util_format_description *desc;
        boolean uniform = TRUE;
+       static int r600_enable_s3tc = -1;
        int i;
        const uint32_t sign_bit[4] = {
                S_038010_FORMAT_COMP_X(V_038010_SQ_FORMAT_COMP_SIGNED),
  
        case UTIL_FORMAT_COLORSPACE_SRGB:
                word4 |= S_038010_FORCE_DEGAMMA(1);
-               if (format == PIPE_FORMAT_L8A8_SRGB || format == PIPE_FORMAT_L8_SRGB)
-                       goto out_unknown; /* fails for some reason - TODO */
                break;
  
        default:
                break;
        }
  
-       /* S3TC formats. TODO */
-       if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
-               static int r600_enable_s3tc = -1;
+       if (r600_enable_s3tc == -1)
+               r600_enable_s3tc = debug_get_bool_option("R600_ENABLE_S3TC", FALSE);
+       if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC) {
+               if (!r600_enable_s3tc)
+                       goto out_unknown;
+               switch (format) {
+               case PIPE_FORMAT_RGTC1_UNORM:
+               case PIPE_FORMAT_RGTC1_SNORM:
+                       result = FMT_BC4;
+                       goto out_word4;
+               case PIPE_FORMAT_RGTC2_UNORM:
+               case PIPE_FORMAT_RGTC2_SNORM:
+                       result = FMT_BC5;
+                       goto out_word4;
+               default:
+                       goto out_unknown;
+               }
+       }
  
-               if (r600_enable_s3tc == -1)
-                       r600_enable_s3tc =
-                               debug_get_bool_option("R600_ENABLE_S3TC", FALSE);
+       if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
  
                if (!r600_enable_s3tc)
                        goto out_unknown;
  
+               if (!util_format_s3tc_enabled) {
+                       goto out_unknown;
+               }
                switch (format) {
                case PIPE_FORMAT_DXT1_RGB:
                case PIPE_FORMAT_DXT1_RGBA:
+               case PIPE_FORMAT_DXT1_SRGB:
+               case PIPE_FORMAT_DXT1_SRGBA:
                        result = FMT_BC1;
                        goto out_word4;
                case PIPE_FORMAT_DXT3_RGBA:
+               case PIPE_FORMAT_DXT3_SRGBA:
                        result = FMT_BC2;
                        goto out_word4;
                case PIPE_FORMAT_DXT5_RGBA:
+               case PIPE_FORMAT_DXT5_SRGBA:
                        result = FMT_BC3;
                        goto out_word4;
                default:
  
        /* R8G8Bx_SNORM - TODO CxV8U8 */
  
-       /* RGTC - TODO */
        /* See whether the components are of the same size. */
        for (i = 1; i < desc->nr_channels; i++) {
                uniform = uniform && desc->channel[0].size == desc->channel[i].size;
                            desc->channel[1].size == 10 &&
                            desc->channel[2].size == 10 &&
                            desc->channel[3].size == 2) {
-                               result = FMT_10_10_10_2;
+                               result = FMT_2_10_10_10;
                                goto out_word4;
                        }
                        goto out_unknown;
                                result = FMT_16_16_16_16;
                                goto out_word4;
                        }
+                       goto out_unknown;
+               case 32:
+                       switch (desc->nr_channels) {
+                       case 1:
+                               result = FMT_32;
+                               goto out_word4;
+                       case 2:
+                               result = FMT_32_32;
+                               goto out_word4;
+                       case 4:
+                               result = FMT_32_32_32_32;
+                               goto out_word4;
+                       }
                }
                goto out_unknown;
  
index c433405cb66365b105becbcf0d0638da69381be0,6d47fb9628032431a1b89f67974444ba8b42d9db..a06817c573527f677737f7f9a4cf9f044f99c63d
@@@ -39,7 -39,6 +39,7 @@@
  #include "sp_texture.h"
  #include "sp_screen.h"
  #include "sp_context.h"
 +#include "sp_video_context.h"
  #include "sp_fence.h"
  #include "sp_public.h"
  
@@@ -126,6 -125,8 +126,8 @@@ softpipe_get_param(struct pipe_screen *
        return 1;
     case PIPE_CAP_INSTANCED_DRAWING:
        return 1;
+    case PIPE_CAP_ARRAY_TEXTURES:
+       return 1;
     default:
        return 0;
     }
@@@ -186,7 -187,9 +188,9 @@@ softpipe_is_format_supported( struct pi
  
     assert(target == PIPE_BUFFER ||
            target == PIPE_TEXTURE_1D ||
+           target == PIPE_TEXTURE_1D_ARRAY ||
            target == PIPE_TEXTURE_2D ||
+           target == PIPE_TEXTURE_2D_ARRAY ||
            target == PIPE_TEXTURE_RECT ||
            target == PIPE_TEXTURE_3D ||
            target == PIPE_TEXTURE_CUBE);
@@@ -307,7 -310,6 +311,7 @@@ softpipe_create_screen(struct sw_winsy
     screen->base.is_format_supported = softpipe_is_format_supported;
     screen->base.context_create = softpipe_create_context;
     screen->base.flush_frontbuffer = softpipe_flush_frontbuffer;
 +   screen->base.video_context_create = sp_video_create;
  
     util_format_s3tc_init();
  
index a7f8503251b529a90ef14556ed677df433d1bdea,81da4b864487713577ea7a275cdd393cf15717c9..8cf738fa2c081bc76068c9696a42e224eaa6508c
@@@ -225,13 -225,13 +225,13 @@@ enum pipe_transfer_usage 
     /**
      * Discards the memory within the mapped region.
      *
-     * It should not be used with PIPE_TRANSFER_CPU_READ.
+     * It should not be used with PIPE_TRANSFER_READ.
      *
      * See also:
      * - OpenGL's ARB_map_buffer_range extension, MAP_INVALIDATE_RANGE_BIT flag.
-     * - Direct3D's D3DLOCK_DISCARD flag.
      */
-    PIPE_TRANSFER_DISCARD = (1 << 8),
+    PIPE_TRANSFER_DISCARD = (1 << 8), /* DEPRECATED */
+    PIPE_TRANSFER_DISCARD_RANGE = (1 << 8),
  
     /**
      * Fail if the resource cannot be mapped immediately.
     /**
      * Do not attempt to synchronize pending operations on the resource when mapping.
      *
-     * It should not be used with PIPE_TRANSFER_CPU_READ.
+     * It should not be used with PIPE_TRANSFER_READ.
      *
      * See also:
      * - OpenGL's ARB_map_buffer_range extension, MAP_UNSYNCHRONIZED_BIT flag.
      * Written ranges will be notified later with
      * pipe_context::transfer_flush_region.
      *
-     * It should not be used with PIPE_TRANSFER_CPU_READ.
+     * It should not be used with PIPE_TRANSFER_READ.
      *
      * See also:
      * - pipe_context::transfer_flush_region
      * - OpenGL's ARB_map_buffer_range extension, MAP_FLUSH_EXPLICIT_BIT flag.
      */
-    PIPE_TRANSFER_FLUSH_EXPLICIT = (1 << 11)
+    PIPE_TRANSFER_FLUSH_EXPLICIT = (1 << 11),
+    /**
+     * Discards all memory backing the resource.
+     *
+     * It should not be used with PIPE_TRANSFER_READ.
+     *
+     * This is equivalent to:
+     * - OpenGL's ARB_map_buffer_range extension, MAP_INVALIDATE_BUFFER_BIT
+     * - BufferData(NULL) on a GL buffer
+     * - Direct3D's D3DLOCK_DISCARD flag.
+     * - WDDM's D3DDDICB_LOCKFLAGS.Discard flag.
+     * - D3D10 DDI's D3D10_DDI_MAP_WRITE_DISCARD flag
+     * - D3D10's D3D10_MAP_WRITE_DISCARD flag.
+     */
+    PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE = (1 << 12)
  
  };
  
  #define PIPE_QUERY_SO_STATISTICS         5
  #define PIPE_QUERY_GPU_FINISHED          6
  #define PIPE_QUERY_TIMESTAMP_DISJOINT    7
- #define PIPE_QUERY_TYPES                 8
+ #define PIPE_QUERY_OCCLUSION_PREDICATE   8
+ #define PIPE_QUERY_TYPES                 9
  
  
  /**
@@@ -496,32 -512,6 +512,32 @@@ enum pipe_shader_ca
  #define PIPE_REFERENCED_FOR_READ  (1 << 0)
  #define PIPE_REFERENCED_FOR_WRITE (1 << 1)
  
 +enum pipe_video_codec
 +{
 +   PIPE_VIDEO_CODEC_UNKNOWN = 0,
 +   PIPE_VIDEO_CODEC_MPEG12,   /**< MPEG1, MPEG2 */
 +   PIPE_VIDEO_CODEC_MPEG4,    /**< DIVX, XVID */
 +   PIPE_VIDEO_CODEC_VC1,      /**< WMV */
 +   PIPE_VIDEO_CODEC_MPEG4_AVC /**< H.264 */
 +};
 +
 +enum pipe_video_profile
 +{
 +   PIPE_VIDEO_PROFILE_UNKNOWN,
 +   PIPE_VIDEO_PROFILE_MPEG1,
 +   PIPE_VIDEO_PROFILE_MPEG2_SIMPLE,
 +   PIPE_VIDEO_PROFILE_MPEG2_MAIN,
 +   PIPE_VIDEO_PROFILE_MPEG4_SIMPLE,
 +   PIPE_VIDEO_PROFILE_MPEG4_ADVANCED_SIMPLE,
 +   PIPE_VIDEO_PROFILE_VC1_SIMPLE,
 +   PIPE_VIDEO_PROFILE_VC1_MAIN,
 +   PIPE_VIDEO_PROFILE_VC1_ADVANCED,
 +   PIPE_VIDEO_PROFILE_MPEG4_AVC_BASELINE,
 +   PIPE_VIDEO_PROFILE_MPEG4_AVC_MAIN,
 +   PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH
 +};
 +
 +
  /**
   * Composite query types
   */
@@@ -536,7 -526,6 +552,7 @@@ struct pipe_query_data_timestamp_disjoi
     boolean  disjoint;
  };
  
 +
  #ifdef __cplusplus
  }
  #endif
index 3aa11be4b5b9aa26bfa0ab85f1494dc2b8d83280,c8904d4f16b2a629264c43fe2a776c7dda697646..e2cc32222de0fb5b6a596afb01a10100c418f8d1
  extern "C" {
  #endif
  
+ enum pipe_type {
+    PIPE_TYPE_UNORM = 0,
+    PIPE_TYPE_SNORM,
+    PIPE_TYPE_SINT,
+    PIPE_TYPE_UINT,
+    PIPE_TYPE_FLOAT,
+    PIPE_TYPE_COUNT
+ };
  /**
   * Texture/surface image formats (preliminary)
   */
@@@ -186,37 -196,19 +196,37 @@@ enum pipe_format 
     PIPE_FORMAT_R8G8B8X8_UNORM          = 134,
     PIPE_FORMAT_B4G4R4X4_UNORM          = 135,
  
 +   PIPE_FORMAT_YV12                  = 136,
 +   PIPE_FORMAT_YV16                  = 137,
 +   PIPE_FORMAT_IYUV                  = 138,  /**< aka I420 */
 +   PIPE_FORMAT_NV12                  = 139,
 +   PIPE_FORMAT_NV21                  = 140,
 +   PIPE_FORMAT_AYUV                  = PIPE_FORMAT_A8R8G8B8_UNORM,
 +   PIPE_FORMAT_VUYA                  = PIPE_FORMAT_B8G8R8A8_UNORM,
 +   PIPE_FORMAT_XYUV                  = PIPE_FORMAT_X8R8G8B8_UNORM,
 +   PIPE_FORMAT_VUYX                  = PIPE_FORMAT_B8G8R8X8_UNORM,
 +   PIPE_FORMAT_IA44                  = 141,
 +   PIPE_FORMAT_AI44                  = 142,
 +
     /* some stencil samplers formats */
 -   PIPE_FORMAT_X24S8_USCALED           = 136,
 -   PIPE_FORMAT_S8X24_USCALED           = 137,
 -   PIPE_FORMAT_X32_S8X24_USCALED       = 138,
 +   PIPE_FORMAT_X24S8_USCALED           = 143,
 +   PIPE_FORMAT_S8X24_USCALED           = 144,
 +   PIPE_FORMAT_X32_S8X24_USCALED       = 145,
  
 -   PIPE_FORMAT_B2G3R3_UNORM            = 139,
 -   PIPE_FORMAT_L16A16_UNORM            = 140,
 -   PIPE_FORMAT_A16_UNORM               = 141,
 -   PIPE_FORMAT_I16_UNORM               = 142,
 +   PIPE_FORMAT_B2G3R3_UNORM            = 146,
 +   PIPE_FORMAT_L16A16_UNORM            = 147,
 +   PIPE_FORMAT_A16_UNORM               = 148,
 +   PIPE_FORMAT_I16_UNORM               = 149,
  
     PIPE_FORMAT_COUNT
  };
  
 +enum pipe_video_chroma_format
 +{
 +   PIPE_VIDEO_CHROMA_FORMAT_420,
 +   PIPE_VIDEO_CHROMA_FORMAT_422,
 +   PIPE_VIDEO_CHROMA_FORMAT_444
 +};
  
  #ifdef __cplusplus
  }
index 4a835c04d8ad8e173cb1ac2fac8f6c7661eccce1,0000000000000000000000000000000000000000..78de154bdd7b962fee05026ccf95d90f067b1854
mode 100644,000000..100644
--- /dev/null
@@@ -1,452 -1,0 +1,453 @@@
-  * 
 +/**************************************************************************
 + *
 + * Copyright 2009 Younes Manton.
 + * All Rights Reserved.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the
 + * "Software"), to deal in the Software without restriction, including
 + * without limitation the rights to use, copy, modify, merge, publish,
 + * distribute, sub license, and/or sell copies of the Software, and to
 + * permit persons to whom the Software is furnished to do so, subject to
 + * the following conditions:
 + *
 + * The above copyright notice and this permission notice (including the
 + * next paragraph) shall be included in all copies or substantial portions
 + * of the Software.
-               
++ *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 + *
 + **************************************************************************/
 +
 +#include <assert.h>
 +#include <X11/Xlibint.h>
 +#include <X11/extensions/XvMClib.h>
 +#include <xorg/fourcc.h>
 +#include <vl_winsys.h>
 +#include <pipe/p_screen.h>
 +#include <pipe/p_video_context.h>
 +#include <pipe/p_state.h>
 +#include <util/u_memory.h>
 +#include <util/u_math.h>
 +#include "xvmc_private.h"
 +
 +#define FOURCC_RGB 0x0000003
 +
 +static enum pipe_format XvIDToPipe(int xvimage_id)
 +{
 +   switch (xvimage_id) {
 +      case FOURCC_RGB:
 +         return PIPE_FORMAT_B8G8R8X8_UNORM;
 +      default:
 +         XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized Xv image ID 0x%08X.\n", xvimage_id);
 +         return PIPE_FORMAT_NONE;
 +   }
 +}
 +
 +static int PipeToComponentOrder(enum pipe_format format, char *component_order)
 +{
 +   assert(component_order);
 +
 +   switch (format) {
 +      case PIPE_FORMAT_B8G8R8X8_UNORM:
 +         return 0;
 +      default:
 +         XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized PIPE_FORMAT 0x%08X.\n", format);
 +         component_order[0] = 0;
 +         component_order[1] = 0;
 +         component_order[2] = 0;
 +         component_order[3] = 0;
 +   }
 +
 +      return 0;
 +}
 +
 +static Status Validate(Display *dpy, XvPortID port, int surface_type_id, int xvimage_id)
 +{
 +   XvImageFormatValues *subpictures;
 +   int num_subpics;
 +   unsigned int i;
 +
 +   subpictures = XvMCListSubpictureTypes(dpy, port, surface_type_id, &num_subpics);
 +   if (num_subpics < 1) {
 +      if (subpictures)
 +         XFree(subpictures);
 +      return BadMatch;
 +   }
 +   if (!subpictures)
 +      return BadAlloc;
 +
 +   for (i = 0; i < num_subpics; ++i) {
 +      if (subpictures[i].id == xvimage_id) {
 +         XVMC_MSG(XVMC_TRACE, "[XvMC] Found requested subpicture format.\n" \
 +                              "[XvMC]   port=%u\n" \
 +                              "[XvMC]   surface id=0x%08X\n" \
 +                              "[XvMC]   image id=0x%08X\n" \
 +                              "[XvMC]   type=%08X\n" \
 +                              "[XvMC]   byte order=%08X\n" \
 +                              "[XvMC]   bits per pixel=%u\n" \
 +                              "[XvMC]   format=%08X\n" \
 +                              "[XvMC]   num planes=%d\n",
 +                              port, surface_type_id, xvimage_id, subpictures[i].type, subpictures[i].byte_order,
 +                              subpictures[i].bits_per_pixel, subpictures[i].format, subpictures[i].num_planes);
 +         if (subpictures[i].type == XvRGB) {
 +            XVMC_MSG(XVMC_TRACE, "[XvMC]   depth=%d\n" \
 +                                 "[XvMC]   red mask=0x%08X\n" \
 +                                 "[XvMC]   green mask=0x%08X\n" \
 +                                 "[XvMC]   blue mask=0x%08X\n",
 +                                 subpictures[i].depth, subpictures[i].red_mask, subpictures[i].green_mask, subpictures[i].blue_mask);
 +         }
 +         else if (subpictures[i].type == XvYUV) {
 +            XVMC_MSG(XVMC_TRACE, "[XvMC]   y sample bits=0x%08X\n" \
 +                                 "[XvMC]   u sample bits=0x%08X\n" \
 +                                 "[XvMC]   v sample bits=0x%08X\n" \
 +                                 "[XvMC]   horz y period=%u\n" \
 +                                 "[XvMC]   horz u period=%u\n" \
 +                                 "[XvMC]   horz v period=%u\n" \
 +                                 "[XvMC]   vert y period=%u\n" \
 +                                 "[XvMC]   vert u period=%u\n" \
 +                                 "[XvMC]   vert v period=%u\n",
 +                                 subpictures[i].y_sample_bits, subpictures[i].u_sample_bits, subpictures[i].v_sample_bits,
 +                                 subpictures[i].horz_y_period, subpictures[i].horz_u_period, subpictures[i].horz_v_period,
 +                                 subpictures[i].vert_y_period, subpictures[i].vert_u_period, subpictures[i].vert_v_period);
 +         }
 +         break;
 +      }
 +   }
 +
 +   XFree(subpictures);
 +
 +   return i < num_subpics ? Success : BadMatch;
 +}
 +
 +PUBLIC
 +Status XvMCCreateSubpicture(Display *dpy, XvMCContext *context, XvMCSubpicture *subpicture,
 +                            unsigned short width, unsigned short height, int xvimage_id)
 +{
 +   XvMCContextPrivate *context_priv;
 +   XvMCSubpicturePrivate *subpicture_priv;
 +   struct pipe_video_context *vpipe;
 +   struct pipe_resource template;
 +   struct pipe_resource *tex;
 +   struct pipe_surface surf_template;
 +   Status ret;
 +
 +   XVMC_MSG(XVMC_TRACE, "[XvMC] Creating subpicture %p.\n", subpicture);
 +
 +   assert(dpy);
 +
 +   if (!context)
 +      return XvMCBadContext;
 +
 +   context_priv = context->privData;
 +   vpipe = context_priv->vctx->vpipe;
 +
 +   if (!subpicture)
 +      return XvMCBadSubpicture;
 +
 +   if (width > context_priv->subpicture_max_width ||
 +       height > context_priv->subpicture_max_height)
 +      return BadValue;
 +
 +   ret = Validate(dpy, context->port, context->surface_type_id, xvimage_id);
 +   if (ret != Success)
 +      return ret;
 +
 +   subpicture_priv = CALLOC(1, sizeof(XvMCSubpicturePrivate));
 +   if (!subpicture_priv)
 +      return BadAlloc;
 +
 +   memset(&template, 0, sizeof(struct pipe_resource));
 +   template.target = PIPE_TEXTURE_2D;
 +   template.format = XvIDToPipe(xvimage_id);
 +   template.last_level = 0;
 +   if (vpipe->get_param(vpipe, PIPE_CAP_NPOT_TEXTURES)) {
 +      template.width0 = width;
 +      template.height0 = height;
 +   }
 +   else {
 +      template.width0 = util_next_power_of_two(width);
 +      template.height0 = util_next_power_of_two(height);
 +   }
 +   template.depth0 = 1;
++   template.array_size = 1;
 +   template.usage = PIPE_USAGE_DYNAMIC;
 +   template.bind = PIPE_BIND_SAMPLER_VIEW;
 +   template.flags = 0;
 +
 +   subpicture_priv->context = context;
 +   tex = vpipe->screen->resource_create(vpipe->screen, &template);
 +
 +   memset(&surf_template, 0, sizeof(surf_template));
 +   surf_template.format = tex->format;
 +   surf_template.usage = PIPE_BIND_SAMPLER_VIEW;
 +   subpicture_priv->sfc = vpipe->create_surface(vpipe, tex, &surf_template);
 +   pipe_resource_reference(&tex, NULL);
 +   if (!subpicture_priv->sfc) {
 +      FREE(subpicture_priv);
 +      return BadAlloc;
 +   }
 +
 +   subpicture->subpicture_id = XAllocID(dpy);
 +   subpicture->context_id = context->context_id;
 +   subpicture->xvimage_id = xvimage_id;
 +   subpicture->width = width;
 +   subpicture->height = height;
 +   subpicture->num_palette_entries = 0;
 +   subpicture->entry_bytes = PipeToComponentOrder(template.format, subpicture->component_order);
 +   subpicture->privData = subpicture_priv;
 +
 +   SyncHandle();
 +
 +   XVMC_MSG(XVMC_TRACE, "[XvMC] Subpicture %p created.\n", subpicture);
 +
 +   return Success;
 +}
 +
 +PUBLIC
 +Status XvMCClearSubpicture(Display *dpy, XvMCSubpicture *subpicture, short x, short y,
 +                           unsigned short width, unsigned short height, unsigned int color)
 +{
 +   XvMCSubpicturePrivate *subpicture_priv;
 +   XvMCContextPrivate *context_priv;
 +   unsigned int tmp_color;
 +   float color_f[4];
 +
 +   assert(dpy);
 +
 +   if (!subpicture)
 +      return XvMCBadSubpicture;
++
 +   /* Convert color to float */
 +   util_format_read_4f(PIPE_FORMAT_B8G8R8A8_UNORM,
 +                    color_f, 1,
 +                    &color, 4,
 +                    0, 0, 1, 1);
 +
 +   subpicture_priv = subpicture->privData;
 +   context_priv = subpicture_priv->context->privData;
 +   /* TODO: Assert clear rect is within bounds? Or clip? */
 +   context_priv->vctx->vpipe->clear_render_target(context_priv->vctx->vpipe,
 +                                           subpicture_priv->sfc, x, y,
 +                                                                                 color_f,
 +                                           width, height);
 +
 +   return Success;
 +}
 +
 +PUBLIC
 +Status XvMCCompositeSubpicture(Display *dpy, XvMCSubpicture *subpicture, XvImage *image,
 +                               short srcx, short srcy, unsigned short width, unsigned short height,
 +                               short dstx, short dsty)
 +{
 +   XvMCSubpicturePrivate *subpicture_priv;
 +   XvMCContextPrivate *context_priv;
 +   struct pipe_video_context *vpipe;
 +   struct pipe_transfer *xfer;
 +   unsigned char *src, *dst, *dst_line;
 +   unsigned x, y;
 +   struct pipe_box dst_box = {dstx, dsty, 0, width, height, 1};
 +
 +   XVMC_MSG(XVMC_TRACE, "[XvMC] Compositing subpicture %p.\n", subpicture);
 +
 +   assert(dpy);
 +
 +   if (!subpicture)
 +      return XvMCBadSubpicture;
 +
 +   assert(image);
 +
 +   if (subpicture->xvimage_id != image->id)
 +      return BadMatch;
 +
 +   /* No planar support for now */
 +   if (image->num_planes != 1)
 +      return BadMatch;
 +
 +   subpicture_priv = subpicture->privData;
 +   context_priv = subpicture_priv->context->privData;
 +   vpipe = context_priv->vctx->vpipe;
 +
 +   /* TODO: Assert rects are within bounds? Or clip? */
 +
 +   xfer = vpipe->get_transfer(vpipe, subpicture_priv->sfc->texture,
 +                              0, PIPE_TRANSFER_WRITE, &dst_box);
 +   if (!xfer)
 +      return BadAlloc;
 +
 +   src = image->data;
 +   dst = vpipe->transfer_map(vpipe, xfer);
 +   if (!dst) {
 +      vpipe->transfer_destroy(vpipe, xfer);
 +      return BadAlloc;
 +   }
 +
 +   switch (image->id) {
 +      case FOURCC_RGB:
 +         assert(subpicture_priv->sfc->format == XvIDToPipe(image->id));
 +         for (y = 0; y < height; ++y) {
 +            dst_line = dst;
 +            for (x = 0; x < width; ++x, src += 3, dst_line += 4) {
 +               dst_line[0] = src[2]; /* B */
 +               dst_line[1] = src[1]; /* G */
 +               dst_line[2] = src[0]; /* R */
 +            }
 +            dst += xfer->stride;
 +         }
 +         break;
 +      default:
 +         XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized Xv image ID 0x%08X.\n", image->id);
 +   }
 +
 +   vpipe->transfer_unmap(vpipe, xfer);
 +   vpipe->transfer_destroy(vpipe, xfer);
 +
 +   XVMC_MSG(XVMC_TRACE, "[XvMC] Subpicture %p composited.\n", subpicture);
 +
 +   return Success;
 +}
 +
 +PUBLIC
 +Status XvMCDestroySubpicture(Display *dpy, XvMCSubpicture *subpicture)
 +{
 +   XvMCSubpicturePrivate *subpicture_priv;
 +
 +   XVMC_MSG(XVMC_TRACE, "[XvMC] Destroying subpicture %p.\n", subpicture);
 +
 +   assert(dpy);
 +
 +   if (!subpicture)
 +      return XvMCBadSubpicture;
 +
 +   subpicture_priv = subpicture->privData;
 +   pipe_surface_reference(&subpicture_priv->sfc, NULL);
 +   FREE(subpicture_priv);
 +
 +   XVMC_MSG(XVMC_TRACE, "[XvMC] Subpicture %p destroyed.\n", subpicture);
 +
 +   return Success;
 +}
 +
 +PUBLIC
 +Status XvMCSetSubpicturePalette(Display *dpy, XvMCSubpicture *subpicture, unsigned char *palette)
 +{
 +   assert(dpy);
 +
 +   if (!subpicture)
 +      return XvMCBadSubpicture;
 +
 +   assert(palette);
 +
 +   /* We don't support paletted subpictures */
 +   return BadMatch;
 +}
 +
 +PUBLIC
 +Status XvMCBlendSubpicture(Display *dpy, XvMCSurface *target_surface, XvMCSubpicture *subpicture,
 +                           short subx, short suby, unsigned short subw, unsigned short subh,
 +                           short surfx, short surfy, unsigned short surfw, unsigned short surfh)
 +{
 +   XvMCSurfacePrivate *surface_priv;
 +   XvMCSubpicturePrivate *subpicture_priv;
 +
 +   XVMC_MSG(XVMC_TRACE, "[XvMC] Associating subpicture %p with surface %p.\n", subpicture, target_surface);
 +
 +   assert(dpy);
 +
 +   if (!target_surface)
 +      return XvMCBadSurface;
 +
 +   if (!subpicture)
 +      return XvMCBadSubpicture;
 +
 +   if (target_surface->context_id != subpicture->context_id)
 +      return BadMatch;
 +
 +   /* TODO: Verify against subpicture independent scaling */
 +
 +   surface_priv = target_surface->privData;
 +   subpicture_priv = subpicture->privData;
 +
 +   /* TODO: Assert rects are within bounds? Or clip? */
 +
 +   surface_priv->subpicture = subpicture;
 +   surface_priv->subx = subx;
 +   surface_priv->suby = suby;
 +   surface_priv->subw = subw;
 +   surface_priv->subh = subh;
 +   surface_priv->surfx = surfx;
 +   surface_priv->surfy = surfy;
 +   surface_priv->surfw = surfw;
 +   surface_priv->surfh = surfh;
 +   subpicture_priv->surface = target_surface;
 +
 +   return Success;
 +}
 +
 +PUBLIC
 +Status XvMCBlendSubpicture2(Display *dpy, XvMCSurface *source_surface, XvMCSurface *target_surface,
 +                            XvMCSubpicture *subpicture, short subx, short suby, unsigned short subw, unsigned short subh,
 +                            short surfx, short surfy, unsigned short surfw, unsigned short surfh)
 +{
 +   assert(dpy);
 +
 +   if (!source_surface || !target_surface)
 +      return XvMCBadSurface;
 +
 +   if (!subpicture)
 +      return XvMCBadSubpicture;
 +
 +   if (source_surface->context_id != subpicture->context_id)
 +      return BadMatch;
 +
 +   if (source_surface->context_id != subpicture->context_id)
 +      return BadMatch;
 +
 +   /* TODO: Assert rects are within bounds? Or clip? */
 +
 +   return Success;
 +}
 +
 +PUBLIC
 +Status XvMCSyncSubpicture(Display *dpy, XvMCSubpicture *subpicture)
 +{
 +   assert(dpy);
 +
 +   if (!subpicture)
 +      return XvMCBadSubpicture;
 +
 +   return Success;
 +}
 +
 +PUBLIC
 +Status XvMCFlushSubpicture(Display *dpy, XvMCSubpicture *subpicture)
 +{
 +   assert(dpy);
 +
 +   if (!subpicture)
 +      return XvMCBadSubpicture;
 +
 +   return Success;
 +}
 +
 +PUBLIC
 +Status XvMCGetSubpictureStatus(Display *dpy, XvMCSubpicture *subpicture, int *status)
 +{
 +   assert(dpy);
 +
 +   if (!subpicture)
 +      return XvMCBadSubpicture;
 +
 +   assert(status);
 +
 +   /* TODO */
 +   *status = 0;
 +
 +   return Success;
 +}
index d7285a478fb4e473663cb0060e4b52dadf76d6b6,0000000000000000000000000000000000000000..c90ad409c1008f40eabd14b928c08b69ac35a57c
mode 100644,000000..100644
--- /dev/null
@@@ -1,525 -1,0 +1,527 @@@
 +/**************************************************************************
 + *
 + * Copyright 2009 Younes Manton.
 + * All Rights Reserved.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the
 + * "Software"), to deal in the Software without restriction, including
 + * without limitation the rights to use, copy, modify, merge, publish,
 + * distribute, sub license, and/or sell copies of the Software, and to
 + * permit persons to whom the Software is furnished to do so, subject to
 + * the following conditions:
 + *
 + * The above copyright notice and this permission notice (including the
 + * next paragraph) shall be included in all copies or substantial portions
 + * of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 + *
 + **************************************************************************/
 +
 +#include <assert.h>
 +#include <stdio.h>
 +#include <X11/Xlibint.h>
 +#include <vl_winsys.h>
 +#include <pipe/p_video_context.h>
 +#include <pipe/p_video_state.h>
 +#include <pipe/p_state.h>
 +#include <util/u_inlines.h>
 +#include <util/u_memory.h>
 +#include <util/u_math.h>
 +#include "xvmc_private.h"
 +
 +static enum pipe_mpeg12_macroblock_type TypeToPipe(int xvmc_mb_type)
 +{
 +   if (xvmc_mb_type & XVMC_MB_TYPE_INTRA)
 +      return PIPE_MPEG12_MACROBLOCK_TYPE_INTRA;
 +   if ((xvmc_mb_type & (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD)) == XVMC_MB_TYPE_MOTION_FORWARD)
 +      return PIPE_MPEG12_MACROBLOCK_TYPE_FWD;
 +   if ((xvmc_mb_type & (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD)) == XVMC_MB_TYPE_MOTION_BACKWARD)
 +      return PIPE_MPEG12_MACROBLOCK_TYPE_BKWD;
 +   if ((xvmc_mb_type & (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD)) == (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD))
 +      return PIPE_MPEG12_MACROBLOCK_TYPE_BI;
 +
 +   assert(0);
 +
 +   XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized mb type 0x%08X.\n", xvmc_mb_type);
 +
 +   return -1;
 +}
 +
 +static enum pipe_mpeg12_picture_type PictureToPipe(int xvmc_pic)
 +{
 +   switch (xvmc_pic) {
 +      case XVMC_TOP_FIELD:
 +         return PIPE_MPEG12_PICTURE_TYPE_FIELD_TOP;
 +      case XVMC_BOTTOM_FIELD:
 +         return PIPE_MPEG12_PICTURE_TYPE_FIELD_BOTTOM;
 +      case XVMC_FRAME_PICTURE:
 +         return PIPE_MPEG12_PICTURE_TYPE_FRAME;
 +      default:
 +         assert(0);
 +   }
 +
 +   XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized picture type 0x%08X.\n", xvmc_pic);
 +
 +   return -1;
 +}
 +
 +static enum pipe_mpeg12_motion_type MotionToPipe(int xvmc_motion_type, unsigned int xvmc_picture_structure)
 +{
 +   switch (xvmc_motion_type) {
 +      case XVMC_PREDICTION_FRAME:
 +         if (xvmc_picture_structure == XVMC_FRAME_PICTURE)
 +            return PIPE_MPEG12_MOTION_TYPE_FRAME;
 +         else
 +            return PIPE_MPEG12_MOTION_TYPE_16x8;
 +         break;
 +      case XVMC_PREDICTION_FIELD:
 +         return PIPE_MPEG12_MOTION_TYPE_FIELD;
 +      case XVMC_PREDICTION_DUAL_PRIME:
 +         return PIPE_MPEG12_MOTION_TYPE_DUALPRIME;
 +      default:
 +         assert(0);
 +   }
 +
 +   XVMC_MSG(XVMC_ERR, "[XvMC] Unrecognized motion type 0x%08X (with picture structure 0x%08X).\n", xvmc_motion_type, xvmc_picture_structure);
 +
 +   return -1;
 +}
 +
 +#if 0
 +static bool
 +CreateOrResizeBackBuffer(struct vl_context *vctx, unsigned int width, unsigned int height,
 +                         struct pipe_surface **backbuffer)
 +{
 +   struct pipe_video_context *vpipe;
 +   struct pipe_resource template;
 +   struct pipe_resource *tex;
 +
 +   assert(vctx);
 +
 +   vpipe = vctx->vpipe;
 +
 +   if (*backbuffer) {
 +      if ((*backbuffer)->width != width || (*backbuffer)->height != height)
 +         pipe_surface_reference(backbuffer, NULL);
 +      else
 +         return true;
 +   }
 +
 +   memset(&template, 0, sizeof(struct pipe_resource));
 +   template.target = PIPE_TEXTURE_2D;
 +   template.format = vctx->vscreen->format;
 +   template.last_level = 0;
 +   template.width0 = width;
 +   template.height0 = height;
 +   template.depth0 = 1;
++   template.array_size = 1;
 +   template.usage = PIPE_USAGE_DEFAULT;
 +   template.bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_DISPLAY_TARGET | PIPE_BIND_BLIT_SOURCE;
 +   template.flags = 0;
 +
 +   tex = vpipe->screen->resource_create(vpipe->screen, &template);
 +   if (!tex)
 +      return false;
 +
 +   *backbuffer = vpipe->screen->get_tex_surface(vpipe->screen, tex, 0, 0, 0,
 +                                                template.bind);
 +   pipe_resource_reference(&tex, NULL);
 +
 +   if (!*backbuffer)
 +      return false;
 +
 +   /* Clear the backbuffer in case the video doesn't cover the whole window */
 +   /* FIXME: Need to clear every time a frame moves and leaves dirty rects */
 +   vpipe->surface_fill(vpipe, *backbuffer, 0, 0, width, height, 0);
 +
 +   return true;
 +}
 +#endif
 +
 +static void
 +MacroBlocksToPipe(struct pipe_screen *screen,
 +                  unsigned int xvmc_picture_structure,
 +                  const XvMCMacroBlockArray *xvmc_macroblocks,
 +                  const XvMCBlockArray *xvmc_blocks,
 +                  unsigned int first_macroblock,
 +                  unsigned int num_macroblocks,
 +                  struct pipe_mpeg12_macroblock *pipe_macroblocks)
 +{
 +   unsigned int i, j, k, l;
 +   XvMCMacroBlock *xvmc_mb;
 +
 +   assert(xvmc_macroblocks);
 +   assert(xvmc_blocks);
 +   assert(pipe_macroblocks);
 +   assert(num_macroblocks);
 +
 +   xvmc_mb = xvmc_macroblocks->macro_blocks + first_macroblock;
 +
 +   for (i = 0; i < num_macroblocks; ++i) {
 +      pipe_macroblocks->base.codec = PIPE_VIDEO_CODEC_MPEG12;
 +      pipe_macroblocks->mbx = xvmc_mb->x;
 +      pipe_macroblocks->mby = xvmc_mb->y;
 +      pipe_macroblocks->mb_type = TypeToPipe(xvmc_mb->macroblock_type);
 +      if (pipe_macroblocks->mb_type != PIPE_MPEG12_MACROBLOCK_TYPE_INTRA)
 +         pipe_macroblocks->mo_type = MotionToPipe(xvmc_mb->motion_type, xvmc_picture_structure);
 +      /* Get rid of Valgrind 'undefined' warnings */
 +      else
 +         pipe_macroblocks->mo_type = -1;
 +      pipe_macroblocks->dct_type = xvmc_mb->dct_type == XVMC_DCT_TYPE_FIELD ?
 +         PIPE_MPEG12_DCT_TYPE_FIELD : PIPE_MPEG12_DCT_TYPE_FRAME;
 +
 +      for (j = 0; j < 2; ++j)
 +         for (k = 0; k < 2; ++k)
 +            for (l = 0; l < 2; ++l)
 +               pipe_macroblocks->pmv[j][k][l] = xvmc_mb->PMV[j][k][l];
 +
 +      pipe_macroblocks->mvfs[0][0] = xvmc_mb->motion_vertical_field_select & XVMC_SELECT_FIRST_FORWARD;
 +      pipe_macroblocks->mvfs[0][1] = xvmc_mb->motion_vertical_field_select & XVMC_SELECT_FIRST_BACKWARD;
 +      pipe_macroblocks->mvfs[1][0] = xvmc_mb->motion_vertical_field_select & XVMC_SELECT_SECOND_FORWARD;
 +      pipe_macroblocks->mvfs[1][1] = xvmc_mb->motion_vertical_field_select & XVMC_SELECT_SECOND_BACKWARD;
 +
 +      pipe_macroblocks->cbp = xvmc_mb->coded_block_pattern;
 +      pipe_macroblocks->blocks = xvmc_blocks->blocks + xvmc_mb->index * BLOCK_SIZE_SAMPLES;
 +
 +      ++pipe_macroblocks;
 +      ++xvmc_mb;
 +   }
 +}
 +
 +PUBLIC
 +Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surface)
 +{
 +   XvMCContextPrivate *context_priv;
 +   struct pipe_video_context *vpipe;
 +   XvMCSurfacePrivate *surface_priv;
 +   struct pipe_resource template;
 +   struct pipe_resource *vsfc_tex;
 +   struct pipe_surface surf_template;
 +   struct pipe_surface *vsfc;
 +
 +   XVMC_MSG(XVMC_TRACE, "[XvMC] Creating surface %p.\n", surface);
 +
 +   assert(dpy);
 +
 +   if (!context)
 +      return XvMCBadContext;
 +   if (!surface)
 +      return XvMCBadSurface;
 +
 +   context_priv = context->privData;
 +   vpipe = context_priv->vctx->vpipe;
 +
 +   surface_priv = CALLOC(1, sizeof(XvMCSurfacePrivate));
 +   if (!surface_priv)
 +      return BadAlloc;
 +
 +   memset(&template, 0, sizeof(struct pipe_resource));
 +   template.target = PIPE_TEXTURE_2D;
 +   template.format = (enum pipe_format)vpipe->get_param(vpipe, PIPE_CAP_DECODE_TARGET_PREFERRED_FORMAT);
 +   template.last_level = 0;
 +   if (vpipe->is_format_supported(vpipe, template.format,
 +                                  PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET,
 +                                  PIPE_TEXTURE_GEOM_NON_POWER_OF_TWO)) {
 +      template.width0 = context->width;
 +      template.height0 = context->height;
 +   }
 +   else {
 +      assert(vpipe->is_format_supported(vpipe, template.format,
 +                                       PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET,
 +                                       PIPE_TEXTURE_GEOM_NON_SQUARE));
 +      template.width0 = util_next_power_of_two(context->width);
 +      template.height0 = util_next_power_of_two(context->height);
 +   }
 +   template.depth0 = 1;
++   template.array_size = 1;
 +   template.usage = PIPE_USAGE_DEFAULT;
 +   template.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
 +   template.flags = 0;
 +   vsfc_tex = vpipe->screen->resource_create(vpipe->screen, &template);
 +   if (!vsfc_tex) {
 +      FREE(surface_priv);
 +      return BadAlloc;
 +   }
 +
 +   memset(&surf_template, 0, sizeof(surf_template));
 +   surf_template.format = vsfc_tex->format;
 +   surf_template.usage = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
 +   vsfc = vpipe->create_surface(vpipe, vsfc_tex, &surf_template);
 +   pipe_resource_reference(&vsfc_tex, NULL);
 +   if (!vsfc) {
 +      FREE(surface_priv);
 +      return BadAlloc;
 +   }
 +
 +   surface_priv->pipe_vsfc = vsfc;
 +   surface_priv->context = context;
 +
 +   surface->surface_id = XAllocID(dpy);
 +   surface->context_id = context->context_id;
 +   surface->surface_type_id = context->surface_type_id;
 +   surface->width = context->width;
 +   surface->height = context->height;
 +   surface->privData = surface_priv;
 +
 +   SyncHandle();
 +
 +   XVMC_MSG(XVMC_TRACE, "[XvMC] Surface %p created.\n", surface);
 +
 +   return Success;
 +}
 +
 +PUBLIC
 +Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int picture_structure,
 +                         XvMCSurface *target_surface, XvMCSurface *past_surface, XvMCSurface *future_surface,
 +                         unsigned int flags, unsigned int num_macroblocks, unsigned int first_macroblock,
 +                         XvMCMacroBlockArray *macroblocks, XvMCBlockArray *blocks
 +)
 +{
 +   struct pipe_video_context *vpipe;
 +   struct pipe_surface *t_vsfc;
 +   struct pipe_surface *p_vsfc;
 +   struct pipe_surface *f_vsfc;
 +   XvMCContextPrivate *context_priv;
 +   XvMCSurfacePrivate *target_surface_priv;
 +   XvMCSurfacePrivate *past_surface_priv;
 +   XvMCSurfacePrivate *future_surface_priv;
 +   struct pipe_mpeg12_macroblock pipe_macroblocks[num_macroblocks];
 +
 +   XVMC_MSG(XVMC_TRACE, "[XvMC] Rendering to surface %p.\n", target_surface);
 +
 +   assert(dpy);
 +
 +   if (!context || !context->privData)
 +      return XvMCBadContext;
 +   if (!target_surface || !target_surface->privData)
 +      return XvMCBadSurface;
 +
 +   if (picture_structure != XVMC_TOP_FIELD &&
 +       picture_structure != XVMC_BOTTOM_FIELD &&
 +       picture_structure != XVMC_FRAME_PICTURE)
 +      return BadValue;
 +   /* Bkwd pred equivalent to fwd (past && !future) */
 +   if (future_surface && !past_surface)
 +      return BadMatch;
 +
 +   assert(context->context_id == target_surface->context_id);
 +   assert(!past_surface || context->context_id == past_surface->context_id);
 +   assert(!future_surface || context->context_id == future_surface->context_id);
 +
 +   assert(macroblocks);
 +   assert(blocks);
 +
 +   assert(macroblocks->context_id == context->context_id);
 +   assert(blocks->context_id == context->context_id);
 +
 +   assert(flags == 0 || flags == XVMC_SECOND_FIELD);
 +
 +   target_surface_priv = target_surface->privData;
 +   past_surface_priv = past_surface ? past_surface->privData : NULL;
 +   future_surface_priv = future_surface ? future_surface->privData : NULL;
 +
 +   assert(target_surface_priv->context == context);
 +   assert(!past_surface || past_surface_priv->context == context);
 +   assert(!future_surface || future_surface_priv->context == context);
 +
 +   context_priv = context->privData;
 +   vpipe = context_priv->vctx->vpipe;
 +
 +   t_vsfc = target_surface_priv->pipe_vsfc;
 +   p_vsfc = past_surface ? past_surface_priv->pipe_vsfc : NULL;
 +   f_vsfc = future_surface ? future_surface_priv->pipe_vsfc : NULL;
 +
 +   MacroBlocksToPipe(vpipe->screen, picture_structure, macroblocks, blocks, first_macroblock,
 +                     num_macroblocks, pipe_macroblocks);
 +
 +   vpipe->set_decode_target(vpipe, t_vsfc);
 +   vpipe->decode_macroblocks(vpipe, p_vsfc, f_vsfc, num_macroblocks,
 +                             &pipe_macroblocks->base, &target_surface_priv->render_fence);
 +
 +   XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for rendering.\n", target_surface);
 +
 +   return Success;
 +}
 +
 +PUBLIC
 +Status XvMCFlushSurface(Display *dpy, XvMCSurface *surface)
 +{
 +   assert(dpy);
 +
 +   if (!surface)
 +      return XvMCBadSurface;
 +
 +   return Success;
 +}
 +
 +PUBLIC
 +Status XvMCSyncSurface(Display *dpy, XvMCSurface *surface)
 +{
 +   assert(dpy);
 +
 +   if (!surface)
 +      return XvMCBadSurface;
 +
 +   return Success;
 +}
 +
 +PUBLIC
 +Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
 +                      short srcx, short srcy, unsigned short srcw, unsigned short srch,
 +                      short destx, short desty, unsigned short destw, unsigned short desth,
 +                      int flags)
 +{
 +   static int dump_window = -1;
 +
 +   struct pipe_video_context *vpipe;
 +   XvMCSurfacePrivate *surface_priv;
 +   XvMCContextPrivate *context_priv;
 +   XvMCSubpicturePrivate *subpicture_priv;
 +   XvMCContext *context;
 +   struct pipe_video_rect src_rect = {srcx, srcy, srcw, srch};
 +   struct pipe_video_rect dst_rect = {destx, desty, destw, desth};
 +   struct pipe_surface *drawable_surface;
 +
 +   XVMC_MSG(XVMC_TRACE, "[XvMC] Displaying surface %p.\n", surface);
 +
 +   assert(dpy);
 +
 +   if (!surface || !surface->privData)
 +      return XvMCBadSurface;
 +
 +   surface_priv = surface->privData;
 +   context = surface_priv->context;
 +   context_priv = context->privData;
 +
 +   drawable_surface = vl_drawable_surface_get(context_priv->vctx, drawable);
 +   if (!drawable_surface)
 +      return BadDrawable;
 +
 +   assert(flags == XVMC_TOP_FIELD || flags == XVMC_BOTTOM_FIELD || flags == XVMC_FRAME_PICTURE);
 +   assert(srcx + srcw - 1 < surface->width);
 +   assert(srcy + srch - 1 < surface->height);
 +   /*
 +    * Some apps (mplayer) hit these asserts because they call
 +    * this function after the window has been resized by the WM
 +    * but before they've handled the corresponding XEvent and
 +    * know about the new dimensions. The output should be clipped
 +    * until the app updates destw and desth.
 +    */
 +   /*
 +   assert(destx + destw - 1 < drawable_surface->width);
 +   assert(desty + desth - 1 < drawable_surface->height);
 +    */
 +
 +   subpicture_priv = surface_priv->subpicture ? surface_priv->subpicture->privData : NULL;
 +   vpipe = context_priv->vctx->vpipe;
 +
 +#if 0
 +   if (!CreateOrResizeBackBuffer(context_priv->vctx, width, height, &context_priv->backbuffer))
 +      return BadAlloc;
 +#endif
 +
 +   if (subpicture_priv) {
 +      struct pipe_video_rect src_rect = {surface_priv->subx, surface_priv->suby, surface_priv->subw, surface_priv->subh};
 +      struct pipe_video_rect dst_rect = {surface_priv->surfx, surface_priv->surfy, surface_priv->surfw, surface_priv->surfh};
 +      struct pipe_video_rect *src_rects[1] = {&src_rect};
 +      struct pipe_video_rect *dst_rects[1] = {&dst_rect};
 +
 +      XVMC_MSG(XVMC_TRACE, "[XvMC] Surface %p has subpicture %p.\n", surface, surface_priv->subpicture);
 +
 +      assert(subpicture_priv->surface == surface);
 +      vpipe->set_picture_layers(vpipe, &subpicture_priv->sfc, src_rects, dst_rects, 1);
 +
 +      surface_priv->subpicture = NULL;
 +      subpicture_priv->surface = NULL;
 +   }
 +   else
 +      vpipe->set_picture_layers(vpipe, NULL, NULL, NULL, 0);
 +
 +   vpipe->render_picture(vpipe, surface_priv->pipe_vsfc, PictureToPipe(flags), &src_rect,
 +                         drawable_surface, &dst_rect, &surface_priv->disp_fence);
 +
 +   XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for display. Pushing to front buffer.\n", surface);
 +
 +   vpipe->screen->flush_frontbuffer
 +   (
 +      vpipe->screen,
 +      drawable_surface->texture,
 +      0, 0,
 +      vl_contextprivate_get(context_priv->vctx, drawable_surface)
 +   );
 +
 +   pipe_surface_reference(&drawable_surface, NULL);
 +
 +   if(dump_window == -1) {
 +      dump_window = debug_get_num_option("XVMC_DUMP", 0);
 +   }
 +
 +   if(dump_window) {
 +      static unsigned int framenum = 0;
 +      char cmd[256];
 +      sprintf(cmd, "xwd -id %d -out xvmc_frame_%08d.xwd", (int)drawable, ++framenum);
 +      system(cmd);
 +   }
 +
 +   XVMC_MSG(XVMC_TRACE, "[XvMC] Pushed surface %p to front buffer.\n", surface);
 +
 +   return Success;
 +}
 +
 +PUBLIC
 +Status XvMCGetSurfaceStatus(Display *dpy, XvMCSurface *surface, int *status)
 +{
 +   assert(dpy);
 +
 +   if (!surface)
 +      return XvMCBadSurface;
 +
 +   assert(status);
 +
 +   *status = 0;
 +
 +   return Success;
 +}
 +
 +PUBLIC
 +Status XvMCDestroySurface(Display *dpy, XvMCSurface *surface)
 +{
 +   XvMCSurfacePrivate *surface_priv;
 +
 +   XVMC_MSG(XVMC_TRACE, "[XvMC] Destroying surface %p.\n", surface);
 +
 +   assert(dpy);
 +
 +   if (!surface || !surface->privData)
 +      return XvMCBadSurface;
 +
 +   surface_priv = surface->privData;
 +   pipe_surface_reference(&surface_priv->pipe_vsfc, NULL);
 +   FREE(surface_priv);
 +   surface->privData = NULL;
 +
 +   XVMC_MSG(XVMC_TRACE, "[XvMC] Surface %p destroyed.\n", surface);
 +
 +   return Success;
 +}
 +
 +PUBLIC
 +Status XvMCHideSurface(Display *dpy, XvMCSurface *surface)
 +{
 +   assert(dpy);
 +
 +   if (!surface || !surface->privData)
 +      return XvMCBadSurface;
 +
 +   /* No op, only for overlaid rendering */
 +
 +   return Success;
 +}
index 339d5dc47f46a35d409c5b9f85d0f347b4062e17,edd774e0e0052758a2a623a13230ddb6074b570c..06e8f6910f339ed49e439f9c9f52c1a9433cc231
@@@ -63,25 -63,12 +63,25 @@@ struct r600_bo *r600_bo(struct radeon *
         * and are used for uploads and downloads from regular
         * resources.  We generate them internally for some transfers.
         */
 -      if (usage == PIPE_USAGE_STAGING)
 -              bo->domains = RADEON_GEM_DOMAIN_CPU | RADEON_GEM_DOMAIN_GTT;
 -      else
 -              bo->domains = (RADEON_GEM_DOMAIN_CPU |
 +      switch (usage) {
 +        case PIPE_USAGE_DEFAULT:
 +              bo->domains = RADEON_GEM_DOMAIN_CPU |
                                RADEON_GEM_DOMAIN_GTT |
 -                              RADEON_GEM_DOMAIN_VRAM);
 +                              RADEON_GEM_DOMAIN_VRAM;
 +                break;
 +
 +        case PIPE_USAGE_DYNAMIC:
 +        case PIPE_USAGE_STREAM:
 +        case PIPE_USAGE_STAGING:
 +              bo->domains = RADEON_GEM_DOMAIN_CPU |
 +                                RADEON_GEM_DOMAIN_GTT;
 +              break;
 +
 +        case PIPE_USAGE_STATIC:
 +        case PIPE_USAGE_IMMUTABLE:
 +              bo->domains = RADEON_GEM_DOMAIN_VRAM;
 +              break;
 +        }
  
        pipe_reference_init(&bo->reference, 1);
        return bo;
@@@ -108,11 -95,10 +108,10 @@@ struct r600_bo *r600_bo_handle(struct r
        radeon_bo_get_tiling_flags(radeon, rbo, &bo->tiling_flags, &bo->kernel_pitch);
        if (array_mode) {
                if (bo->tiling_flags) {
-                       if (bo->tiling_flags & RADEON_TILING_MICRO)
-                               *array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
-                       if ((bo->tiling_flags & (RADEON_TILING_MICRO | RADEON_TILING_MACRO)) ==
-                           (RADEON_TILING_MICRO | RADEON_TILING_MACRO))
+                       if (bo->tiling_flags & RADEON_TILING_MACRO)
                                *array_mode = V_0280A0_ARRAY_2D_TILED_THIN1;
+                       else if (bo->tiling_flags & RADEON_TILING_MICRO)
+                               *array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
                } else {
                        *array_mode = 0;
                }