Merge remote-tracking branch 'mesa-public/master' into vulkan
author Jason Ekstrand <jason.ekstrand@intel.com>
Wed, 24 Jun 2015 01:05:25 +0000 (18:05 -0700)
committer Jason Ekstrand <jason.ekstrand@intel.com>
Wed, 24 Jun 2015 01:05:25 +0000 (18:05 -0700)
31 files changed:
configure.ac
src/Makefile.am
src/glsl/Makefile.am
src/glsl/ast_to_hir.cpp
src/glsl/glsl_parser_extras.cpp
src/glsl/glsl_types.cpp
src/glsl/glsl_types.h
src/glsl/ir.h
src/glsl/link_uniform_initializers.cpp
src/glsl/nir/glsl_to_nir.cpp
src/glsl/nir/nir_lower_io.c
src/glsl/nir/nir_lower_samplers.cpp
src/glsl/standalone_scaffolding.cpp
src/mesa/drivers/dri/i965/Makefile.am
src/mesa/drivers/dri/i965/brw_context.c
src/mesa/drivers/dri/i965/brw_context.h
src/mesa/drivers/dri/i965/brw_cs.cpp
src/mesa/drivers/dri/i965/brw_defines.h
src/mesa/drivers/dri/i965/brw_fs.cpp
src/mesa/drivers/dri/i965/brw_fs_nir.cpp
src/mesa/drivers/dri/i965/brw_nir.c
src/mesa/drivers/dri/i965/brw_program.c
src/mesa/drivers/dri/i965/brw_shader.cpp
src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp
src/mesa/drivers/dri/i965/brw_wm.c
src/mesa/drivers/dri/i965/intel_debug.c
src/mesa/drivers/dri/i965/intel_extensions.c
src/mesa/drivers/dri/i965/intel_screen.c
src/mesa/main/mtypes.h
src/mesa/program/ir_to_mesa.cpp
src/vulkan/compiler.cpp

diff --combined configure.ac
index 1f23de4d1ce4c360ce2630e8e25cf8e112956d46,ddc757e16294c1436dd67a4e51e0209cc232c21f..33aacd2ec06aae5756661694b4a20f4cf22164ba
@@@ -649,6 -649,7 +649,7 @@@ if test "x$enable_asm" = xyes; then
  fi
  
  AC_CHECK_HEADER([xlocale.h], [DEFINES="$DEFINES -DHAVE_XLOCALE_H"])
+ AC_CHECK_HEADER([sys/sysctl.h], [DEFINES="$DEFINES -DHAVE_SYS_SYSCTL_H"])
  AC_CHECK_FUNC([strtof], [DEFINES="$DEFINES -DHAVE_STRTOF"])
  
  dnl Check to see if dlopen is in default libraries (like Solaris, which
@@@ -713,15 -714,15 +714,15 @@@ AC_ARG_ENABLE([opengl]
      [enable_opengl="$enableval"],
      [enable_opengl=yes])
  AC_ARG_ENABLE([gles1],
-     [AS_HELP_STRING([--enable-gles1],
-         [enable support for OpenGL ES 1.x API @<:@default=disabled@:>@])],
+     [AS_HELP_STRING([--disable-gles1],
+         [disable support for OpenGL ES 1.x API @<:@default=enabled@:>@])],
      [enable_gles1="$enableval"],
-     [enable_gles1=no])
+     [enable_gles1=yes])
  AC_ARG_ENABLE([gles2],
-     [AS_HELP_STRING([--enable-gles2],
-         [enable support for OpenGL ES 2.x API @<:@default=disabled@:>@])],
+     [AS_HELP_STRING([--disable-gles2],
+         [disable support for OpenGL ES 2.x API @<:@default=enabled@:>@])],
      [enable_gles2="$enableval"],
-     [enable_gles2=no])
+     [enable_gles2=yes])
  
  AC_ARG_ENABLE([dri],
      [AS_HELP_STRING([--enable-dri],
@@@ -940,12 -941,6 +941,6 @@@ x*yes*yes*
      ;;
  esac
  
- # Building Xlib-GLX requires shared glapi to be disabled.
- if test "x$enable_xlib_glx" = xyes; then
-     AC_MSG_NOTICE([Shared GLAPI should not used with Xlib-GLX, disabling])
-     enable_shared_glapi=no
- fi
  AM_CONDITIONAL(HAVE_SHARED_GLAPI, test "x$enable_shared_glapi" = xyes)
  
  # Build the pipe-drivers as separate libraries/modules.
@@@ -1158,10 -1153,6 +1153,10 @@@ AC_ARG_ENABLE([driglx-direct]
      [driglx_direct="$enableval"],
      [driglx_direct="yes"])
  
 +# Check for libcaca
 +PKG_CHECK_EXISTS([caca], [have_libcaca=yes], [have_libcaca=no])
 +AM_CONDITIONAL([HAVE_LIBCACA], [test x$have_libcaca = xyes])
 +
  dnl
  dnl libGL configuration per driver
  dnl
@@@ -1516,7 -1507,6 +1511,6 @@@ if test "x$enable_gbm" = xyes; then
      fi
  
      if test "x$enable_dri" = xyes; then
-         GBM_BACKEND_DIRS="$GBM_BACKEND_DIRS dri"
          if test "x$enable_shared_glapi" = xno; then
              AC_MSG_ERROR([gbm_dri requires --enable-shared-glapi])
          fi
@@@ -1539,8 -1529,6 +1533,8 @@@ GBM_PC_LIB_PRIV="$DLOPEN_LIBS"
  AC_SUBST([GBM_PC_REQ_PRIV])
  AC_SUBST([GBM_PC_LIB_PRIV])
  
 +AM_CONDITIONAL(HAVE_VULKAN, true)
 +
  dnl
  dnl EGL configuration
  dnl
@@@ -1553,8 -1541,15 +1547,15 @@@ if test "x$enable_egl" = xyes; then
  
      if test "$enable_static" != yes; then
          if test "x$enable_dri" = xyes; then
-           HAVE_EGL_DRIVER_DRI2=1
-       fi
+             HAVE_EGL_DRIVER_DRI2=1
+             if test "x$enable_shared_glapi" = xno; then
+                 AC_MSG_ERROR([egl_dri2 requires --enable-shared-glapi])
+             fi
+         else
+             # Avoid building an "empty" libEGL. Drop/update this
+             # when other backends (haiku?) come along.
+             AC_MSG_ERROR([egl requires --enable-dri])
+         fi
  
      fi
  fi
@@@ -1782,6 -1777,11 +1783,11 @@@ for plat in $egl_platforms; do
                        AC_MSG_ERROR([EGL platform drm requires libdrm >= $LIBDRM_REQUIRED])
                ;;
  
+       surfaceless)
+               test "x$have_libdrm" != xyes &&
+                       AC_MSG_ERROR([EGL platform surfaceless requires libdrm >= $LIBDRM_REQUIRED])
+               ;;
        android|gdi|null)
                ;;
  
  AM_CONDITIONAL(HAVE_EGL_PLATFORM_X11, echo "$egl_platforms" | grep -q 'x11')
  AM_CONDITIONAL(HAVE_EGL_PLATFORM_WAYLAND, echo "$egl_platforms" | grep -q 'wayland')
  AM_CONDITIONAL(HAVE_EGL_PLATFORM_DRM, echo "$egl_platforms" | grep -q 'drm')
+ AM_CONDITIONAL(HAVE_EGL_PLATFORM_SURFACELESS, echo "$egl_platforms" | grep -q 'surfaceless')
  AM_CONDITIONAL(HAVE_EGL_PLATFORM_NULL, echo "$egl_platforms" | grep -q 'null')
  
  AM_CONDITIONAL(HAVE_EGL_DRIVER_DRI2, test "x$HAVE_EGL_DRIVER_DRI2" != "x")
@@@ -1926,10 -1927,7 +1933,7 @@@ if test "x$enable_gallium_llvm" = xyes
              AC_MSG_ERROR([LLVM $LLVM_REQUIRED_VERSION_MAJOR.$LLVM_REQUIRED_VERSION_MINOR or newer is required])
          fi
  
-         LLVM_COMPONENTS="engine bitwriter"
-         if $LLVM_CONFIG --components | grep -qw 'mcjit'; then
-             LLVM_COMPONENTS="${LLVM_COMPONENTS} mcjit"
-         fi
+         LLVM_COMPONENTS="engine bitwriter mcjit mcdisassembler"
  
          if test "x$enable_opencl" = xyes; then
              llvm_check_version_for "3" "5" "0" "opencl"
              LLVM_COMPONENTS="${LLVM_COMPONENTS} all-targets ipo linker instrumentation"
              LLVM_COMPONENTS="${LLVM_COMPONENTS} irreader option objcarcopts profiledata"
          fi
-         DEFINES="${DEFINES} -DHAVE_LLVM=0x0$LLVM_VERSION_INT -DLLVM_VERSION_PATCH=$LLVM_VERSION_PATCH"
+         DEFINES="${DEFINES} -DHAVE_LLVM=0x0$LLVM_VERSION_INT -DMESA_LLVM_VERSION_PATCH=$LLVM_VERSION_PATCH"
          MESA_LLVM=1
  
          dnl Check for Clang internal headers
@@@ -2056,16 -2054,19 +2060,19 @@@ require_egl_drm() 
  }
  
  radeon_llvm_check() {
+     if test ${LLVM_VERSION_INT} -lt 307; then
+         amdgpu_llvm_target_name='r600'
+     else
+         amdgpu_llvm_target_name='amdgpu'
+     fi
      if test "x$enable_gallium_llvm" != "xyes"; then
          AC_MSG_ERROR([--enable-gallium-llvm is required when building $1])
      fi
      llvm_check_version_for "3" "4" "2" $1 
-     if test true && $LLVM_CONFIG --targets-built | grep -qvw 'R600' ; then
-         AC_MSG_ERROR([LLVM R600 Target not enabled.  You can enable it when building the LLVM
-                       sources with the --enable-experimental-targets=R600
-                       configure flag])
+     if test true && $LLVM_CONFIG --targets-built | grep -iqvw $amdgpu_llvm_target_name ; then
+         AC_MSG_ERROR([LLVM $amdgpu_llvm_target_name not enabled in your LLVM build.])
      fi
-     LLVM_COMPONENTS="${LLVM_COMPONENTS} r600 bitreader ipo"
+     LLVM_COMPONENTS="${LLVM_COMPONENTS} $amdgpu_llvm_target_name bitreader ipo"
      NEED_RADEON_LLVM=yes
      if test "x$have_libelf" != xyes; then
         AC_MSG_ERROR([$1 requires libelf when using llvm])
@@@ -2317,13 -2318,6 +2324,13 @@@ AC_SUBST([XA_MINOR], $XA_MINOR)
  AC_SUBST([XA_TINY], $XA_TINY)
  AC_SUBST([XA_VERSION], "$XA_MAJOR.$XA_MINOR.$XA_TINY")
  
 +PKG_CHECK_MODULES(VALGRIND, [valgrind],
 +                  [have_valgrind=yes], [have_valgrind=no])
 +if test "x$have_valgrind" = "xyes"; then
 +    AC_DEFINE([HAVE_VALGRIND], 1,
 +              [Use valgrind intrinsics to suppress false warnings])
 +fi
 +
  dnl Restore LDFLAGS and CPPFLAGS
  LDFLAGS="$_SAVE_LDFLAGS"
  CPPFLAGS="$_SAVE_CPPFLAGS"
@@@ -2365,7 -2359,6 +2372,6 @@@ AC_CONFIG_FILES([Makefile
                src/gallium/drivers/svga/Makefile
                src/gallium/drivers/trace/Makefile
                src/gallium/drivers/vc4/Makefile
-               src/gallium/drivers/vc4/kernel/Makefile
                src/gallium/state_trackers/clover/Makefile
                src/gallium/state_trackers/dri/Makefile
                src/gallium/state_trackers/glx/xlib/Makefile
                src/mesa/drivers/osmesa/osmesa.pc
                src/mesa/drivers/x11/Makefile
                src/mesa/main/tests/Makefile
 +              src/vulkan/Makefile
                src/util/Makefile
                src/util/tests/hash_table/Makefile])
  
diff --combined src/Makefile.am
index bf76e35f144b7f0d7d6975629933c266be187ab5,5d69abd996d658bb8c5e7ac0de7fbc6cb0163088..d41a087ae1c1cafc3f4a807a992fdd53180581e7
@@@ -61,10 -61,6 +61,10 @@@ EXTRA_DIST = 
  AM_CFLAGS = $(VISIBILITY_CFLAGS)
  AM_CXXFLAGS = $(VISIBILITY_CXXFLAGS)
  
 +if HAVE_VULKAN
 +SUBDIRS += vulkan
 +endif
 +
  AM_CPPFLAGS = \
        -I$(top_srcdir)/include/ \
        -I$(top_srcdir)/src/mapi/ \
@@@ -76,4 -72,5 +76,5 @@@ noinst_LTLIBRARIES = libglsl_util.la
  libglsl_util_la_SOURCES = \
        mesa/main/imports.c \
        mesa/program/prog_hash_table.c \
-       mesa/program/symbol_table.c
+       mesa/program/symbol_table.c \
+       mesa/program/dummy_errors.c
diff --combined src/glsl/Makefile.am
index 7af9a709d5adcd7a62b109ece5e0249f098d433a,fa8c9f5d3ca63404477678831e98c25e7d0213f6..74da9e5b979987efc852d9908986ca1c74c90a3d
@@@ -77,7 -77,7 +77,7 @@@ check_PROGRAMS =                                      
        tests/sampler-types-test                        \
        tests/uniform-initializer-test
  
 -noinst_PROGRAMS = glsl_compiler
 +noinst_PROGRAMS = glsl_compiler spirv2nir
  
  tests_blob_test_SOURCES =                             \
        tests/blob_test.c
@@@ -89,8 -89,7 +89,7 @@@ tests_general_ir_test_SOURCES =               
        tests/builtin_variable_test.cpp                 \
        tests/invalidate_locations_test.cpp             \
        tests/general_ir_test.cpp                       \
-       tests/varyings_test.cpp                         \
-       tests/common.c
+       tests/varyings_test.cpp
  tests_general_ir_test_CFLAGS =                                \
        $(PTHREAD_CFLAGS)
  tests_general_ir_test_LDADD =                         \
@@@ -103,8 -102,7 +102,7 @@@ tests_uniform_initializer_test_SOURCES 
        tests/copy_constant_to_storage_tests.cpp        \
        tests/set_uniform_initializer_tests.cpp         \
        tests/uniform_initializer_utils.cpp             \
-       tests/uniform_initializer_utils.h               \
-       tests/common.c
+       tests/uniform_initializer_utils.h
  tests_uniform_initializer_test_CFLAGS =                       \
        $(PTHREAD_CFLAGS)
  tests_uniform_initializer_test_LDADD =                        \
        $(PTHREAD_LIBS)
  
  tests_sampler_types_test_SOURCES =                    \
-       tests/sampler_types_test.cpp                    \
-       tests/common.c
+       tests/sampler_types_test.cpp
  tests_sampler_types_test_CFLAGS =                     \
        $(PTHREAD_CFLAGS)
  tests_sampler_types_test_LDADD =                      \
@@@ -133,8 -130,7 +130,7 @@@ libglcpp_la_SOURCES =                                      
        $(LIBGLCPP_FILES)
  
  glcpp_glcpp_SOURCES =                                 \
-       glcpp/glcpp.c                                   \
-       tests/common.c
+       glcpp/glcpp.c
  glcpp_glcpp_LDADD =                                   \
        libglcpp.la                                     \
        $(top_builddir)/src/libglsl_util.la             \
@@@ -162,19 -158,8 +158,18 @@@ glsl_compiler_LDADD =                                    
        $(top_builddir)/src/libglsl_util.la             \
        $(PTHREAD_LIBS)
  
 +spirv2nir_SOURCES = \
 +      standalone_scaffolding.cpp \
 +      standalone_scaffolding.h \
 +      nir/spirv2nir.c
 +
 +spirv2nir_LDADD =                                     \
 +      libglsl.la                                      \
 +      $(top_builddir)/src/libglsl_util.la             \
 +      $(PTHREAD_LIBS)
 +
  glsl_test_SOURCES = \
        standalone_scaffolding.cpp \
-       tests/common.c \
        test.cpp \
        test_optpass.cpp \
        test_optpass.h
@@@ -257,21 -242,21 +252,21 @@@ dist-hook
        $(RM) glcpp/tests/subtest*/*.out
  
  nir/nir_builder_opcodes.h: nir/nir_opcodes.py nir/nir_builder_opcodes_h.py
-       $(MKDIR_P) nir;                                                 \
-       $(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_builder_opcodes_h.py > $@
+       $(AM_V_at)$(MKDIR_P) nir
+       $(AM_V_GEN)$(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_builder_opcodes_h.py > $@
  
  nir/nir_constant_expressions.c: nir/nir_opcodes.py nir/nir_constant_expressions.py nir/nir_constant_expressions.h
-       $(MKDIR_P) nir;                                                 \
-       $(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_constant_expressions.py > $@
+       $(AM_V_at)$(MKDIR_P) nir
+       $(AM_V_GEN)$(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_constant_expressions.py > $@
  
  nir/nir_opcodes.h: nir/nir_opcodes.py nir/nir_opcodes_h.py
-       $(MKDIR_P) nir;                                                 \
-       $(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_opcodes_h.py > $@
+       $(AM_V_at)$(MKDIR_P) nir
+       $(AM_V_GEN)$(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_opcodes_h.py > $@
  
  nir/nir_opcodes.c: nir/nir_opcodes.py nir/nir_opcodes_c.py
-       $(MKDIR_P) nir;                                                 \
-       $(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_opcodes_c.py > $@
+       $(AM_V_at)$(MKDIR_P) nir
+       $(AM_V_GEN)$(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_opcodes_c.py > $@
  
  nir/nir_opt_algebraic.c: nir/nir_opt_algebraic.py nir/nir_algebraic.py
-       $(MKDIR_P) nir;                                                 \
-       $(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_opt_algebraic.py > $@
+       $(AM_V_at)$(MKDIR_P) nir
+       $(AM_V_GEN)$(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_opt_algebraic.py > $@
diff --combined src/glsl/ast_to_hir.cpp
index cd6a068e97d87743e35929d05789a214e7a47d87,8cb46beab1ecea34e42c21405fea23de10baca89..6896b700cd6a0216a3df41de9b2326aa53364e8c
@@@ -678,7 -678,7 +678,7 @@@ validate_assignment(struct _mesa_glsl_parse_state
      * is handled by ir_dereference::is_lvalue.
      */
     if (lhs_type->is_unsized_array() && rhs->type->is_array()
-        && (lhs_type->element_type() == rhs->type->element_type())) {
+        && (lhs_type->fields.array == rhs->type->fields.array)) {
        if (is_initializer) {
           return rhs;
        } else {
@@@ -820,7 -820,7 +820,7 @@@ do_assignment(exec_list *instructions, 
                               var->data.max_array_access);
           }
  
-          var->type = glsl_type::get_array_instance(lhs->type->element_type(),
+          var->type = glsl_type::get_array_instance(lhs->type->fields.array,
                                                     rhs->type->array_size());
           d->type = var->type;
        }
@@@ -970,7 -970,6 +970,7 @@@ do_comparison(void *mem_ctx, int operat
     case GLSL_TYPE_SAMPLER:
     case GLSL_TYPE_IMAGE:
     case GLSL_TYPE_INTERFACE:
 +   case GLSL_TYPE_FUNCTION:
     case GLSL_TYPE_ATOMIC_UINT:
        /* I assume a comparison of a struct containing a sampler just
         * ignores the sampler present in the type.
@@@ -2087,7 -2086,7 +2087,7 @@@ validate_binding_qualifier(struct _mesa
         *  with an array of size N, all elements of the array from binding
         *  through binding + N - 1 must be within this range."
         */
-       unsigned limit = ctx->Const.Program[state->stage].MaxTextureImageUnits;
+       unsigned limit = ctx->Const.MaxCombinedTextureImageUnits;
  
        if (max_index >= limit) {
           _mesa_glsl_error(loc, state, "layout(binding = %d) for %d samplers "
@@@ -2331,8 -2330,7 +2331,7 @@@ apply_image_qualifier_to_variable(const
                                    struct _mesa_glsl_parse_state *state,
                                    YYLTYPE *loc)
  {
-    const glsl_type *base_type =
-       (var->type->is_array() ? var->type->element_type() : var->type);
+    const glsl_type *base_type = var->type->without_array();
  
     if (base_type->is_image()) {
        if (var->data.mode != ir_var_uniform &&
@@@ -2646,16 -2644,7 +2645,16 @@@ apply_type_qualifier_to_variable(const 
           state->fs_redeclares_gl_fragcoord_with_no_layout_qualifiers;
     }
  
 -   if (qual->flags.q.explicit_location) {
 +   if (qual->flags.q.vk_set) {
 +      if (!qual->flags.q.explicit_binding)
 +         _mesa_glsl_error(loc, state,
 +                          "Vulkan descriptor set layout requires both set "
 +                          "and binding qualifiers");
 +
 +      var->data.vk_set = true;
 +      var->data.set = qual->set;
 +      var->data.binding = qual->binding;
 +   } else if (qual->flags.q.explicit_location) {
        validate_explicit_location(qual, var, state, loc);
     } else if (qual->flags.q.explicit_index) {
        _mesa_glsl_error(loc, state, "explicit index requires explicit location");
      *    GL_ARB_conservative_depth
      *    GL_ARB_gpu_shader5
      *    GL_ARB_separate_shader_objects
-     *    GL_ARB_tesselation_shader
+     *    GL_ARB_tessellation_shader
      *    GL_ARB_transform_feedback3
      *    GL_ARB_uniform_buffer_object
      *
@@@ -2865,7 -2854,7 +2864,7 @@@ get_variable_being_redeclared(ir_variable
      *  type and specify a size."
      */
     if (earlier->type->is_unsized_array() && var->type->is_array()
-        && (var->type->element_type() == earlier->type->element_type())) {
+        && (var->type->fields.array == earlier->type->fields.array)) {
        /* FINISHME: This doesn't match the qualifiers on the two
         * FINISHME: declarations.  It's not 100% clear whether this is
         * FINISHME: required or not.
@@@ -3618,6 -3607,51 +3617,51 @@@ ast_declarator_list::hir(exec_list *instructions
              }
  
              handle_geometry_shader_input_decl(state, loc, var);
+          } else if (state->stage == MESA_SHADER_FRAGMENT) {
+             /* From section 4.3.4 (Input Variables) of the GLSL ES 3.10 spec:
+              *
+              *     It is a compile-time error to declare a fragment shader
+              *     input with, or that contains, any of the following types:
+              *
+              *     * A boolean type
+              *     * An opaque type
+              *     * An array of arrays
+              *     * An array of structures
+              *     * A structure containing an array
+              *     * A structure containing a structure
+              */
+             if (state->es_shader) {
+                const glsl_type *check_type = var->type->without_array();
+                if (check_type->is_boolean() ||
+                    check_type->contains_opaque()) {
+                   _mesa_glsl_error(&loc, state,
+                                    "fragment shader input cannot have type %s",
+                                    check_type->name);
+                }
+                if (var->type->is_array() &&
+                    var->type->fields.array->is_array()) {
+                   _mesa_glsl_error(&loc, state,
+                                    "%s shader input "
+                                    "cannot have an array of arrays",
+                                    _mesa_shader_stage_to_string(state->stage));
+                }
+                if (var->type->is_array() &&
+                    var->type->fields.array->is_record()) {
+                   _mesa_glsl_error(&loc, state,
+                                    "fragment shader input "
+                                    "cannot have an array of structs");
+                }
+                if (var->type->is_record()) {
+                   for (unsigned i = 0; i < var->type->length; i++) {
+                      if (var->type->fields.structure[i].type->is_array() ||
+                          var->type->fields.structure[i].type->is_record())
+                         _mesa_glsl_error(&loc, state,
+                                          "fragment shader input cannot have "
+                                          "a struct that contains an "
+                                          "array or struct");
+                   }
+                }
+             }
           }
        } else if (var->data.mode == ir_var_shader_out) {
           const glsl_type *check_type = var->type->without_array();
              if (check_type->is_record() || check_type->is_matrix())
                 _mesa_glsl_error(&loc, state,
                                  "fragment shader output "
-                                 "cannot have struct or array type");
+                                 "cannot have struct or matrix type");
              switch (check_type->base_type) {
              case GLSL_TYPE_UINT:
              case GLSL_TYPE_INT:
                                  "type %s", check_type->name);
              }
           }
+          /* From section 4.3.6 (Output Variables) of the GLSL ES 3.10 spec:
+           *
+           *     It is a compile-time error to declare a vertex shader output
+           *     with, or that contains, any of the following types:
+           *
+           *     * A boolean type
+           *     * An opaque type
+           *     * An array of arrays
+           *     * An array of structures
+           *     * A structure containing an array
+           *     * A structure containing a structure
+           *
+           *     It is a compile-time error to declare a fragment shader output
+           *     with, or that contains, any of the following types:
+           *
+           *     * A boolean type
+           *     * An opaque type
+           *     * A matrix
+           *     * A structure
+           *     * An array of array
+           */
+          if (state->es_shader) {
+             if (var->type->is_array() &&
+                 var->type->fields.array->is_array()) {
+                _mesa_glsl_error(&loc, state,
+                                 "%s shader output "
+                                 "cannot have an array of arrays",
+                                 _mesa_shader_stage_to_string(state->stage));
+             }
+             if (state->stage == MESA_SHADER_VERTEX) {
+                if (var->type->is_array() &&
+                    var->type->fields.array->is_record()) {
+                   _mesa_glsl_error(&loc, state,
+                                    "vertex shader output "
+                                    "cannot have an array of structs");
+                }
+                if (var->type->is_record()) {
+                   for (unsigned i = 0; i < var->type->length; i++) {
+                      if (var->type->fields.structure[i].type->is_array() ||
+                          var->type->fields.structure[i].type->is_record())
+                         _mesa_glsl_error(&loc, state,
+                                          "vertex shader output cannot have a "
+                                          "struct that contains an "
+                                          "array or struct");
+                   }
+                }
+             }
+          }
        }
  
        /* Integer fragment inputs must be qualified with 'flat'.  In GLSL ES,
@@@ -5756,6 -5839,17 +5849,17 @@@ ast_interface_block::hir(exec_list *instructions
           const glsl_type *block_array_type =
              process_array_type(&loc, block_type, this->array_specifier, state);
  
+           /* From section 4.3.9 (Interface Blocks) of the GLSL ES 3.10 spec:
+           *
+           *     * Arrays of arrays of blocks are not allowed
+           */
+          if (state->es_shader && block_array_type->is_array() &&
+              block_array_type->fields.array->is_array()) {
+             _mesa_glsl_error(&loc, state,
+                              "arrays of arrays of interface blocks are "
+                              "not allowed");
+          }
           var = new(state) ir_variable(block_array_type,
                                        this->instance_name,
                                        var_mode);
           var->data.explicit_binding = this->layout.flags.q.explicit_binding;
           var->data.binding = this->layout.binding;
  
 +         var->data.vk_set = this->layout.flags.q.vk_set;
 +         var->data.set = this->layout.set;
 +         var->data.binding = this->layout.binding;
 +
           state->symbols->add_variable(var);
           instructions->push_tail(var);
        }
           var->data.explicit_binding = this->layout.flags.q.explicit_binding;
           var->data.binding = this->layout.binding;
  
 +         var->data.vk_set = this->layout.flags.q.vk_set;
 +         var->data.set = this->layout.set;
 +         var->data.binding = this->layout.binding;
 +
           state->symbols->add_variable(var);
           instructions->push_tail(var);
        }
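The vk_set path above only records the qualifier pair on the ir_variable (data.vk_set, data.set, data.binding) and defers everything else to later passes. A minimal sketch of shader text that would exercise it, assuming the vulkan-branch grammar accepts the set layout qualifier (hypothetical example, not taken from this diff):

   static const char *frag_src =
      "#version 430\n"
      "layout(set = 0, binding = 1) uniform sampler2D tex;\n"
      "layout(location = 0) in vec2 uv;\n"
      "layout(location = 0) out vec4 color;\n"
      "void main() { color = texture(tex, uv); }\n";
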
diff --combined src/glsl/glsl_parser_extras.cpp
index 982ade6a70a9f10abc373b9b72719cce901ed1e8,046d5d7b5bf3b0cfdee56826b246e37a3f448f78..e26931de42f80cb8b2a3d565bc80b61ba76697c3
@@@ -87,8 -87,6 +87,8 @@@ _mesa_glsl_parse_state::_mesa_glsl_parse_state
  
     this->extensions = &ctx->Extensions;
  
 +   this->ARB_compute_shader_enable = true;
 +
     this->Const.MaxLights = ctx->Const.MaxLights;
     this->Const.MaxClipPlanes = ctx->Const.MaxClipPlanes;
     this->Const.MaxTextureUnits = ctx->Const.MaxTextureUnits;
@@@ -780,7 -778,7 +780,7 @@@ _mesa_ast_set_aggregate_type(const glsl_type
  
     /* If the aggregate is an array, recursively set its elements' types. */
     if (type->is_array()) {
-       /* Each array element has the type type->element_type().
+       /* Each array element has the type type->fields.array.
         *
         * E.g., if <type> if struct S[2] we want to set each element's type to
         * struct S.
                                                 link);
  
           if (expr->oper == ast_aggregate)
-             _mesa_ast_set_aggregate_type(type->element_type(), expr);
+             _mesa_ast_set_aggregate_type(type->fields.array, expr);
        }
  
     /* If the aggregate is a struct, recursively set its fields' types. */
diff --combined src/glsl/glsl_types.cpp
index 0d83ee68e427d5d095a8ba0831ff99769ab2da72,f675e90cb0d800a331f9dbd971e5cbc972aa6a7a..37406b8073efb9acb26ea3316342f2c9e26d4db7
@@@ -32,7 -32,6 +32,7 @@@ mtx_t glsl_type::mutex = _MTX_INITIALIZER_NP
  hash_table *glsl_type::array_types = NULL;
  hash_table *glsl_type::record_types = NULL;
  hash_table *glsl_type::interface_types = NULL;
 +hash_table *glsl_type::function_types = NULL;
  void *glsl_type::mem_ctx = NULL;
  
  void
@@@ -160,39 -159,6 +160,39 @@@ glsl_type::glsl_type(const glsl_struct_field
     mtx_unlock(&glsl_type::mutex);
  }
  
 +glsl_type::glsl_type(const glsl_type *return_type,
 +                     const glsl_function_param *params, unsigned num_params) :
 +   gl_type(0),
 +   base_type(GLSL_TYPE_FUNCTION),
 +   sampler_dimensionality(0), sampler_shadow(0), sampler_array(0),
 +   sampler_type(0), interface_packing(0),
 +   vector_elements(0), matrix_columns(0),
 +   length(num_params)
 +{
 +   unsigned int i;
 +
 +   mtx_lock(&glsl_type::mutex);
 +
 +   init_ralloc_type_ctx();
 +
 +   this->fields.parameters = rzalloc_array(this->mem_ctx,
 +                                           glsl_function_param, num_params + 1);
 +
 +   /* We store the return type as the first parameter */
 +   this->fields.parameters[0].type = return_type;
 +   this->fields.parameters[0].in = false;
 +   this->fields.parameters[0].out = true;
 +
 +   /* We store the i'th parameter in slot i+1 */
 +   for (i = 0; i < length; i++) {
 +      this->fields.parameters[i + 1].type = params[i].type;
 +      this->fields.parameters[i + 1].in = params[i].in;
 +      this->fields.parameters[i + 1].out = params[i].out;
 +   }
 +
 +   mtx_unlock(&glsl_type::mutex);
 +}
 +
  
  bool
  glsl_type::contains_sampler() const
@@@ -251,7 -217,7 +251,7 @@@ glsl_type::contains_opaque() const 
     case GLSL_TYPE_ATOMIC_UINT:
        return true;
     case GLSL_TYPE_ARRAY:
-       return element_type()->contains_opaque();
+       return fields.array->contains_opaque();
     case GLSL_TYPE_STRUCT:
        for (unsigned int i = 0; i < length; i++) {
           if (fields.structure[i].type->contains_opaque())
@@@ -861,72 -827,6 +861,72 @@@ glsl_type::get_interface_instance(const
  }
  
  
 +static int
 +function_key_compare(const void *a, const void *b)
 +{
 +   const glsl_type *const key1 = (glsl_type *) a;
 +   const glsl_type *const key2 = (glsl_type *) b;
 +
 +   if (key1->length != key2->length)
 +      return 1;
 +
 +   return memcmp(key1->fields.parameters, key2->fields.parameters,
 +                 (key1->length + 1) * sizeof(*key1->fields.parameters));
 +}
 +
 +
 +static unsigned
 +function_key_hash(const void *a)
 +{
 +   const glsl_type *const key = (glsl_type *) a;
 +   char hash_key[128];
 +   unsigned size = 0;
 +
 +   size = snprintf(hash_key, sizeof(hash_key), "%08x", key->length);
 +
 +   for (unsigned i = 0; i < key->length; i++) {
 +      if (size >= sizeof(hash_key))
 +       break;
 +
 +      size += snprintf(& hash_key[size], sizeof(hash_key) - size,
 +                     "%p", (void *) key->fields.parameters[i].type);
 +   }
 +
 +   return hash_table_string_hash(& hash_key);
 +}
 +
 +const glsl_type *
 +glsl_type::get_function_instance(const glsl_type *return_type,
 +                                 const glsl_function_param *params,
 +                                 unsigned num_params)
 +{
 +   const glsl_type key(return_type, params, num_params);
 +
 +   mtx_lock(&glsl_type::mutex);
 +
 +   if (function_types == NULL) {
 +      function_types = hash_table_ctor(64, function_key_hash,
 +                                       function_key_compare);
 +   }
 +
 +   const glsl_type *t = (glsl_type *) hash_table_find(function_types, &key);
 +   if (t == NULL) {
 +      mtx_unlock(&glsl_type::mutex);
 +      t = new glsl_type(return_type, params, num_params);
 +      mtx_lock(&glsl_type::mutex);
 +
 +      hash_table_insert(function_types, (void *) t, t);
 +   }
 +
 +   assert(t->base_type == GLSL_TYPE_FUNCTION);
 +   assert(t->length == num_params);
 +
 +   mtx_unlock(&glsl_type::mutex);
 +
 +   return t;
 +}
 +
 +
  const glsl_type *
  glsl_type::get_mul_type(const glsl_type *type_a, const glsl_type *type_b)
  {
@@@ -1055,7 -955,6 +1055,7 @@@ glsl_type::component_slots() const
     case GLSL_TYPE_IMAGE:
        return 1;
  
 +   case GLSL_TYPE_FUNCTION:
     case GLSL_TYPE_SAMPLER:
     case GLSL_TYPE_ATOMIC_UINT:
     case GLSL_TYPE_VOID:
@@@ -1427,7 -1326,6 +1427,7 @@@ glsl_type::count_attribute_slots() const
     case GLSL_TYPE_ARRAY:
        return this->length * this->fields.array->count_attribute_slots();
  
 +   case GLSL_TYPE_FUNCTION:
     case GLSL_TYPE_SAMPLER:
     case GLSL_TYPE_IMAGE:
     case GLSL_TYPE_ATOMIC_UINT:
diff --combined src/glsl/glsl_types.h
index 2d4718572afd498dd1fdad9404baee0617894147,f54a9393e7335f1240c876a0ebf3679f8b8aae95..836259a506cb1724b45c4f449c33c65149c09558
@@@ -56,7 -56,6 +56,7 @@@ enum glsl_base_type 
     GLSL_TYPE_IMAGE,
     GLSL_TYPE_ATOMIC_UINT,
     GLSL_TYPE_STRUCT,
 +   GLSL_TYPE_FUNCTION,
     GLSL_TYPE_INTERFACE,
     GLSL_TYPE_ARRAY,
     GLSL_TYPE_VOID,
@@@ -179,7 -178,7 +179,7 @@@ struct glsl_type 
      */
     union {
        const struct glsl_type *array;            /**< Type of array elements. */
 -      const struct glsl_type *parameters;       /**< Parameters to function. */
 +      struct glsl_function_param *parameters;   /**< Parameters to function. */
        struct glsl_struct_field *structure;      /**< List of struct fields. */
     } fields;
  
      */
     const glsl_type *get_scalar_type() const;
  
-    /**
-     * Query the type of elements in an array
-     *
-     * \return
-     * Pointer to the type of elements in the array for array types, or \c NULL
-     * for non-array types.
-     */
-    const glsl_type *element_type() const
-    {
-       return is_array() ? fields.array : NULL;
-    }
     /**
      * Get the instance of a built-in scalar, vector, or matrix type
      */
                                                  enum glsl_interface_packing packing,
                                                  const char *block_name);
  
 +   /**
 +    * Get the instance of a function type
 +    */
 +   static const glsl_type *get_function_instance(const struct glsl_type *return_type,
 +                                                 const glsl_function_param *parameters,
 +                                                 unsigned num_params);
 +
     /**
      * Get the type resulting from a multiplication of \p type_a * \p type_b
      */
        if (base_type == GLSL_TYPE_ATOMIC_UINT)
           return ATOMIC_COUNTER_SIZE;
        else if (is_array())
-          return length * element_type()->atomic_size();
+          return length * fields.array->atomic_size();
        else
           return 0;
     }
@@@ -696,10 -676,6 +684,10 @@@ private
     glsl_type(const glsl_struct_field *fields, unsigned num_fields,
             enum glsl_interface_packing packing, const char *name);
  
 +   /** Constructor for function types */
 +   glsl_type(const glsl_type *return_type,
 +             const glsl_function_param *params, unsigned num_params);
 +
     /** Constructor for array types */
     glsl_type(const glsl_type *array, unsigned length);
  
     /** Hash table containing the known interface types. */
     static struct hash_table *interface_types;
  
 +   /** Hash table containing the known function types. */
 +   static struct hash_table *function_types;
 +
     static int record_key_compare(const void *a, const void *b);
     static unsigned record_key_hash(const void *key);
  
     /*@}*/
  };
  
 +#undef DECL_TYPE
 +#undef STRUCT_TYPE
 +#endif /* __cplusplus */
 +
  struct glsl_struct_field {
     const struct glsl_type *type;
     const char *name;
     int stream;
  };
  
 +struct glsl_function_param {
 +   const struct glsl_type *type;
 +
 +   bool in;
 +   bool out;
 +};
 +
  static inline unsigned int
  glsl_align(unsigned int a, unsigned int align)
  {
     return (a + align - 1) / align * align;
  }
  
 -#undef DECL_TYPE
 -#undef STRUCT_TYPE
 -#endif /* __cplusplus */
 -
  #endif /* GLSL_TYPES_H */
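Note that the #undef/#endif block moves above glsl_struct_field, glsl_function_param, and glsl_align(), presumably so plain-C users (the NIR code) can see them. For reference, glsl_align() rounds a up to the next multiple of align:

   assert(glsl_align(13, 8) == 16);
   assert(glsl_align(16, 8) == 16);   /* already aligned: unchanged */
   assert(glsl_align(0, 8) == 0);
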
diff --combined src/glsl/ir.h
index fdb595106c2bb7c6ea05583a2646d0af795700b2,f904555350156915ac335078332c5ec2e977674d..5af029b97655f1d9675a002ae3bbf98d4ebb3b9b
@@@ -78,6 -78,7 +78,7 @@@ enum ir_node_type 
     ir_type_discard,
     ir_type_emit_vertex,
     ir_type_end_primitive,
+    ir_type_barrier,
     ir_type_max, /**< maximum ir_type enum number, for validation */
     ir_type_unset = ir_type_max
  };
@@@ -682,11 -683,6 +683,11 @@@ public
        unsigned explicit_location:1;
        unsigned explicit_index:1;
  
 +      /**
 +       * Do we have a Vulkan (set, binding) qualifier for this variable?
 +       */
 +      unsigned vk_set:1;
 +
        /**
         * Was an initial binding explicitly set in the shader?
         *
         * \note
         * The GLSL spec only allows the values 0 or 1 for the index in \b dual
         * source blending.
 +       *
 +       * This is now also used for the Vulkan descriptor set index.
         */
 -      unsigned index:1;
 +      int16_t index;
  
        /**
         * \brief Layout qualifier for gl_FragDepth.
         */
        int16_t binding;
  
 +      /**
 +       * Vulkan descriptor set for the resource.
 +       */
 +      int16_t set;
 +
        /**
         * Storage location of the base of this variable
         *
@@@ -2408,6 -2397,29 +2409,29 @@@ public
     ir_rvalue *stream;
  };
  
+ /**
+  * IR instruction for tessellation control and compute shader barrier.
+  */
+ class ir_barrier : public ir_instruction {
+ public:
+    ir_barrier()
+       : ir_instruction(ir_type_barrier)
+    {
+    }
+    virtual void accept(ir_visitor *v)
+    {
+       v->visit(this);
+    }
+    virtual ir_barrier *clone(void *mem_ctx, struct hash_table *) const
+    {
+       return new(mem_ctx) ir_barrier();
+    }
+    virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+ };
  /*@}*/
  
  /**
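ir_barrier carries no operands, so creating and emitting one is trivial; a minimal sketch (mem_ctx and instructions are the usual ralloc context and instruction exec_list, assumed from surrounding code):

   void
   emit_barrier(void *mem_ctx, exec_list *instructions)
   {
      ir_barrier *bar = new(mem_ctx) ir_barrier();
      instructions->push_tail(bar);
   }
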
diff --combined src/glsl/link_uniform_initializers.cpp
index 60bfc9c15c9458452c341946b230bc3b5ddbff86,204acfa22b2d133003be2547be1103de00bcf45b..5f57079d1b8ccaf422cf147f7865fedf1c32ffc0
@@@ -88,7 -88,6 +88,7 @@@ copy_constant_to_storage(union gl_constant_value
        case GLSL_TYPE_IMAGE:
        case GLSL_TYPE_ATOMIC_UINT:
        case GLSL_TYPE_INTERFACE:
 +      case GLSL_TYPE_FUNCTION:
        case GLSL_TYPE_VOID:
        case GLSL_TYPE_ERROR:
         /* All other types should have already been filtered by other
@@@ -104,7 -103,7 +104,7 @@@ void
  set_sampler_binding(gl_shader_program *prog, const char *name, int binding)
  {
     struct gl_uniform_storage *const storage =
-       get_storage(prog->UniformStorage, prog->NumUserUniformStorage, name);
+       get_storage(prog->UniformStorage, prog->NumUniformStorage, name);
  
     if (storage == NULL) {
        assert(storage != NULL);
@@@ -194,7 -193,7 +194,7 @@@ set_uniform_initializer(void *mem_ctx, 
  
     struct gl_uniform_storage *const storage =
        get_storage(prog->UniformStorage,
-                 prog->NumUserUniformStorage,
+                   prog->NumUniformStorage,
                  name);
     if (storage == NULL) {
        assert(storage != NULL);
diff --combined src/glsl/nir/glsl_to_nir.cpp
index 7c30be3fa720ff25a72f4ac0601243c28f502ef1,95531bbcd8fb22ed73cd2a0f6b9cd29bab07494e..0338af67567e537dfb2a00a19e67493706ce5b10
@@@ -65,6 -65,7 +65,7 @@@ public
     virtual void visit(ir_dereference_variable *);
     virtual void visit(ir_dereference_record *);
     virtual void visit(ir_dereference_array *);
+    virtual void visit(ir_barrier *);
  
     void create_function(ir_function *ir);
  
@@@ -321,7 -322,6 +322,7 @@@ nir_visitor::visit(ir_variable *ir)
     }
  
     var->data.index = ir->data.index;
 +   var->data.descriptor_set = ir->data.set;
     var->data.binding = ir->data.binding;
     /* XXX Get rid of buffer_index */
     var->data.atomic.buffer_index = ir->data.binding;
@@@ -615,27 -615,135 +616,135 @@@ nir_visitor::visit(ir_call *ir)
           op = nir_intrinsic_atomic_counter_inc_var;
        } else if (strcmp(ir->callee_name(), "__intrinsic_atomic_predecrement") == 0) {
           op = nir_intrinsic_atomic_counter_dec_var;
+       } else if (strcmp(ir->callee_name(), "__intrinsic_image_load") == 0) {
+          op = nir_intrinsic_image_load;
+       } else if (strcmp(ir->callee_name(), "__intrinsic_image_store") == 0) {
+          op = nir_intrinsic_image_store;
+       } else if (strcmp(ir->callee_name(), "__intrinsic_image_atomic_add") == 0) {
+          op = nir_intrinsic_image_atomic_add;
+       } else if (strcmp(ir->callee_name(), "__intrinsic_image_atomic_min") == 0) {
+          op = nir_intrinsic_image_atomic_min;
+       } else if (strcmp(ir->callee_name(), "__intrinsic_image_atomic_max") == 0) {
+          op = nir_intrinsic_image_atomic_max;
+       } else if (strcmp(ir->callee_name(), "__intrinsic_image_atomic_and") == 0) {
+          op = nir_intrinsic_image_atomic_and;
+       } else if (strcmp(ir->callee_name(), "__intrinsic_image_atomic_or") == 0) {
+          op = nir_intrinsic_image_atomic_or;
+       } else if (strcmp(ir->callee_name(), "__intrinsic_image_atomic_xor") == 0) {
+          op = nir_intrinsic_image_atomic_xor;
+       } else if (strcmp(ir->callee_name(), "__intrinsic_image_atomic_exchange") == 0) {
+          op = nir_intrinsic_image_atomic_exchange;
+       } else if (strcmp(ir->callee_name(), "__intrinsic_image_atomic_comp_swap") == 0) {
+          op = nir_intrinsic_image_atomic_comp_swap;
+       } else if (strcmp(ir->callee_name(), "__intrinsic_memory_barrier") == 0) {
+          op = nir_intrinsic_memory_barrier;
        } else {
           unreachable("not reached");
        }
  
        nir_intrinsic_instr *instr = nir_intrinsic_instr_create(shader, op);
-       ir_dereference *param =
-          (ir_dereference *) ir->actual_parameters.get_head();
-       instr->variables[0] = evaluate_deref(&instr->instr, param);
-       nir_ssa_dest_init(&instr->instr, &instr->dest, 1, NULL);
+       switch (op) {
+       case nir_intrinsic_atomic_counter_read_var:
+       case nir_intrinsic_atomic_counter_inc_var:
+       case nir_intrinsic_atomic_counter_dec_var: {
+          ir_dereference *param =
+             (ir_dereference *) ir->actual_parameters.get_head();
+          instr->variables[0] = evaluate_deref(&instr->instr, param);
+          nir_ssa_dest_init(&instr->instr, &instr->dest, 1, NULL);
+          break;
+       }
+       case nir_intrinsic_image_load:
+       case nir_intrinsic_image_store:
+       case nir_intrinsic_image_atomic_add:
+       case nir_intrinsic_image_atomic_min:
+       case nir_intrinsic_image_atomic_max:
+       case nir_intrinsic_image_atomic_and:
+       case nir_intrinsic_image_atomic_or:
+       case nir_intrinsic_image_atomic_xor:
+       case nir_intrinsic_image_atomic_exchange:
+       case nir_intrinsic_image_atomic_comp_swap: {
+          nir_ssa_undef_instr *instr_undef =
+             nir_ssa_undef_instr_create(shader, 1);
+          nir_instr_insert_after_cf_list(this->cf_node_list,
+                                         &instr_undef->instr);
+          /* Set the image variable dereference. */
+          exec_node *param = ir->actual_parameters.get_head();
+          ir_dereference *image = (ir_dereference *)param;
+          const glsl_type *type =
+             image->variable_referenced()->type->without_array();
+          instr->variables[0] = evaluate_deref(&instr->instr, image);
+          param = param->get_next();
+          /* Set the address argument, extending the coordinate vector to four
+           * components.
+           */
+          const nir_src src_addr = evaluate_rvalue((ir_dereference *)param);
+          nir_alu_instr *instr_addr = nir_alu_instr_create(shader, nir_op_vec4);
+          nir_ssa_dest_init(&instr_addr->instr, &instr_addr->dest.dest, 4, NULL);
+          for (int i = 0; i < 4; i++) {
+             if (i < type->coordinate_components()) {
+                instr_addr->src[i].src = src_addr;
+                instr_addr->src[i].swizzle[0] = i;
+             } else {
+                instr_addr->src[i].src = nir_src_for_ssa(&instr_undef->def);
+             }
+          }
+          nir_instr_insert_after_cf_list(cf_node_list, &instr_addr->instr);
+          instr->src[0] = nir_src_for_ssa(&instr_addr->dest.dest.ssa);
+          param = param->get_next();
+          /* Set the sample argument, which is undefined for single-sample
+           * images.
+           */
+          if (type->sampler_dimensionality == GLSL_SAMPLER_DIM_MS) {
+             instr->src[1] = evaluate_rvalue((ir_dereference *)param);
+             param = param->get_next();
+          } else {
+             instr->src[1] = nir_src_for_ssa(&instr_undef->def);
+          }
+          /* Set the intrinsic parameters. */
+          if (!param->is_tail_sentinel()) {
+             instr->src[2] = evaluate_rvalue((ir_dereference *)param);
+             param = param->get_next();
+          }
+          if (!param->is_tail_sentinel()) {
+             instr->src[3] = evaluate_rvalue((ir_dereference *)param);
+             param = param->get_next();
+          }
+          /* Set the intrinsic destination. */
+          if (ir->return_deref)
+             nir_ssa_dest_init(&instr->instr, &instr->dest,
+                               ir->return_deref->type->vector_elements, NULL);
+          break;
+       }
+       case nir_intrinsic_memory_barrier:
+          break;
+       default:
+          unreachable("not reached");
+       }
  
        nir_instr_insert_after_cf_list(this->cf_node_list, &instr->instr);
  
-       nir_intrinsic_instr *store_instr =
-          nir_intrinsic_instr_create(shader, nir_intrinsic_store_var);
-       store_instr->num_components = 1;
+       if (ir->return_deref) {
+          nir_intrinsic_instr *store_instr =
+             nir_intrinsic_instr_create(shader, nir_intrinsic_store_var);
+          store_instr->num_components = ir->return_deref->type->vector_elements;
  
-       store_instr->variables[0] = evaluate_deref(&store_instr->instr, ir->return_deref);
-       store_instr->src[0].is_ssa = true;
-       store_instr->src[0].ssa = &instr->dest.ssa;
+          store_instr->variables[0] =
+             evaluate_deref(&store_instr->instr, ir->return_deref);
+          store_instr->src[0] = nir_src_for_ssa(&instr->dest.ssa);
  
-       nir_instr_insert_after_cf_list(this->cf_node_list, &store_instr->instr);
+          nir_instr_insert_after_cf_list(this->cf_node_list,
+                                         &store_instr->instr);
+       }
  
        return;
     }
@@@ -823,13 -931,9 +932,9 @@@ nir_visitor::evaluate_rvalue(ir_rvalue *ir)
     }
  
     nir_dest *dest = get_instr_dest(this->result);
     assert(dest->is_ssa);
-    nir_src src = NIR_SRC_INIT;
-    src.is_ssa = true;
-    src.ssa = &dest->ssa;
  
-    return src;
+    return nir_src_for_ssa(&dest->ssa);
  }
  
  nir_alu_instr *
@@@ -1786,3 -1890,11 +1891,11 @@@ nir_visitor::visit(ir_dereference_array
     ralloc_steal(this->deref_tail, deref);
     this->deref_tail = &deref->deref;
  }
+ void
+ nir_visitor::visit(ir_barrier *ir)
+ {
+    nir_intrinsic_instr *instr =
+       nir_intrinsic_instr_create(this->shader, nir_intrinsic_barrier);
+    nir_instr_insert_after_cf_list(this->cf_node_list, &instr->instr);
+ }
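Several hunks above replace the open-coded three-line nir_src initialization with the nir_src_for_ssa() helper; the two spellings are equivalent:

   /* before */
   nir_src src = NIR_SRC_INIT;
   src.is_ssa = true;
   src.ssa = &dest->ssa;

   /* after */
   nir_src src = nir_src_for_ssa(&dest->ssa);
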
diff --combined src/glsl/nir/nir_lower_io.c
index 561bebd3a9c114a585255b4706bb06de6cac7cff,6761d5bad333f006c5951f7eaf890403f582ea0e..4c59298ecb7ccf332f29679621187b6ac93258bf
@@@ -67,7 -67,6 +67,7 @@@ type_size(const struct glsl_type *type)
        return 0;
     case GLSL_TYPE_IMAGE:
        return 0;
 +   case GLSL_TYPE_FUNCTION:
     case GLSL_TYPE_VOID:
     case GLSL_TYPE_ERROR:
     case GLSL_TYPE_DOUBLE:
@@@ -289,7 -288,6 +289,6 @@@ nir_lower_io_block(nir_block *block, void
           offset += intrin->variables[0]->var->data.driver_location;
  
           load->const_index[0] = offset;
-          load->const_index[1] = 1;
  
           if (has_indirect)
              load->src[0] = indirect;
           offset += intrin->variables[0]->var->data.driver_location;
  
           store->const_index[0] = offset;
-          store->const_index[1] = 1;
  
           nir_src_copy(&store->src[0], &intrin->src[0], state->mem_ctx);
  
diff --combined src/glsl/nir/nir_lower_samplers.cpp
index 6ed5a4cb2b5814c65b558b8ea5735299e880f479,7a0b0a09ffeb7dad0b11e8fc7d53a96cd22ba4e9..9a9cdd16a9ac1be49a439b8ae2a00e4f3ead75a9
@@@ -35,30 -35,6 +35,30 @@@ extern "C" 
  #include "program/program.h"
  }
  
 +static void
 +add_indirect_to_tex(nir_tex_instr *instr, nir_src indirect)
 +{
 +   /* First, we have to resize the array of texture sources */
 +   nir_tex_src *new_srcs = rzalloc_array(instr, nir_tex_src,
 +                                         instr->num_srcs + 1);
 +
 +   for (unsigned i = 0; i < instr->num_srcs; i++) {
 +      new_srcs[i].src_type = instr->src[i].src_type;
 +      nir_instr_move_src(&instr->instr, &new_srcs[i].src, &instr->src[i].src);
 +   }
 +
 +   ralloc_free(instr->src);
 +   instr->src = new_srcs;
 +
 +   /* Now we can go ahead and move the source over to being a
 +    * first-class texture source.
 +    */
 +   instr->src[instr->num_srcs].src_type = nir_tex_src_sampler_offset;
 +   instr->num_srcs++;
 +   nir_instr_rewrite_src(&instr->instr, &instr->src[instr->num_srcs - 1].src,
 +                         indirect);
 +}
 +
  static unsigned
  get_sampler_index(const struct gl_shader_program *shader_program,
                    gl_shader_stage stage, const char *name)
@@@ -94,34 -70,45 +94,45 @@@ lower_sampler(nir_tex_instr *instr, const
        case nir_deref_type_array: {
           nir_deref_array *deref_array = nir_deref_as_array(deref->child);
  
+          assert(deref_array->deref_array_type != nir_deref_array_type_wildcard);
+          if (deref_array->deref.child) {
+             ralloc_asprintf_append(&name, "[%u]",
+                deref_array->deref_array_type == nir_deref_array_type_direct ?
+                   deref_array->base_offset : 0);
+          } else {
+             assert(deref->child->type->base_type == GLSL_TYPE_SAMPLER);
+             instr->sampler_index = deref_array->base_offset;
+          }
           /* XXX: We're assuming here that the indirect is the last array
            * thing we have.  This should be ok for now as we don't support
            * arrays_of_arrays yet.
            */
-          instr->sampler_index *= glsl_get_length(deref->type);
-          switch (deref_array->deref_array_type) {
-          case nir_deref_array_type_direct:
-             instr->sampler_index += deref_array->base_offset;
-             if (deref_array->deref.child)
-                ralloc_asprintf_append(&name, "[%u]", deref_array->base_offset);
-             break;
-          case nir_deref_array_type_indirect: {
-             add_indirect_to_tex(instr, deref_array->indirect);
-             nir_instr_rewrite_src(&instr->instr, &deref_array->indirect,
-                                   NIR_SRC_INIT);
+          if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
+             /* First, we have to resize the array of texture sources */
+             nir_tex_src *new_srcs = rzalloc_array(instr, nir_tex_src,
+                                                   instr->num_srcs + 1);
+             for (unsigned i = 0; i < instr->num_srcs; i++) {
+                new_srcs[i].src_type = instr->src[i].src_type;
+                nir_instr_move_src(&instr->instr, &new_srcs[i].src,
+                                   &instr->src[i].src);
+             }
+             ralloc_free(instr->src);
+             instr->src = new_srcs;
+             /* Now we can go ahead and move the source over to being a
+              * first-class texture source.
+              */
+             instr->src[instr->num_srcs].src_type = nir_tex_src_sampler_offset;
+             instr->num_srcs++;
+             nir_instr_move_src(&instr->instr,
+                                &instr->src[instr->num_srcs - 1].src,
+                                &deref_array->indirect);
  
              instr->sampler_array_size = glsl_get_length(deref->type);
-             if (deref_array->deref.child)
-                ralloc_strcat(&name, "[0]");
-             break;
-          }
-          case nir_deref_array_type_wildcard:
-             unreachable("Cannot copy samplers");
-          default:
-             unreachable("Invalid deref array type");
           }
           break;
        }
@@@ -189,49 -176,3 +200,49 @@@ nir_lower_samplers(nir_shader *shader, 
           lower_impl(overload->impl, shader_program, stage);
     }
  }
 +
 +static bool
 +lower_samplers_for_vk_block(nir_block *block, void *data)
 +{
 +   nir_foreach_instr(block, instr) {
 +      if (instr->type != nir_instr_type_tex)
 +         continue;
 +
 +      nir_tex_instr *tex = nir_instr_as_tex(instr);
 +
 +      assert(tex->sampler);
 +
 +      tex->sampler_set = tex->sampler->var->data.descriptor_set;
 +      tex->sampler_index = tex->sampler->var->data.binding;
 +
 +      if (tex->sampler->deref.child) {
 +         assert(tex->sampler->deref.child->deref_type == nir_deref_type_array);
 +         nir_deref_array *arr = nir_deref_as_array(tex->sampler->deref.child);
 +
 +         /* Only one-level arrays are allowed in vulkan */
 +         assert(arr->deref.child == NULL);
 +
 +         tex->sampler_index += arr->base_offset;
 +         if (arr->deref_array_type == nir_deref_array_type_indirect) {
 +            add_indirect_to_tex(tex, arr->indirect);
 +            nir_instr_rewrite_src(instr, &arr->indirect, NIR_SRC_INIT);
 +
 +            tex->sampler_array_size = glsl_get_length(tex->sampler->deref.type);
 +         }
 +      }
 +
 +      tex->sampler = NULL;
 +   }
 +
 +   return true;
 +}
 +
 +extern "C" void
 +nir_lower_samplers_for_vk(nir_shader *shader)
 +{
 +   nir_foreach_overload(shader, overload) {
 +      if (overload->impl) {
 +         nir_foreach_block(overload->impl, lower_samplers_for_vk_block, NULL);
 +      }
 +   }
 +}
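Unlike nir_lower_samplers(), which resolves a sampler index by name through the gl_shader_program, the Vulkan variant above reads the (descriptor set, binding) pair straight off the variable. A minimal usage sketch, assuming the shader came through glsl_to_nir with vk_set qualifiers applied:

   nir_lower_samplers_for_vk(shader);
   /* Every nir_tex_instr now has sampler == NULL and carries
    * sampler_set / sampler_index instead (plus a sampler_offset
    * source when the array index was indirect). */
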
diff --combined src/glsl/standalone_scaffolding.cpp
index 6e1ecec32350d4a6f46ca686ad094314b88144d2,00db61e409be188e766f3005a0cd0cfac3514185..050e733d549cc3adf9b025e8d36f7502f23f1b40
  #include <string.h>
  #include "util/ralloc.h"
  
 +extern "C" void
 +_mesa_error_no_memory(const char *caller)
 +{
 +   fprintf(stderr, "Mesa error: out of memory in %s\n", caller);
 +}
 +
  void
  _mesa_warning(struct gl_context *ctx, const char *fmt, ...)
  {
@@@ -95,7 -89,7 +95,7 @@@ _mesa_clear_shader_program_data(struct gl_shader_program
  {
     unsigned i;
  
-    shProg->NumUserUniformStorage = 0;
+    shProg->NumUniformStorage = 0;
     shProg->UniformStorage = NULL;
     shProg->NumUniformRemapTable = 0;
     shProg->UniformRemapTable = NULL;
diff --combined src/mesa/drivers/dri/i965/Makefile.am
index cf2424e34b411939dcd47535019b445d010ace44,566f2ddd98ffdcf164c22491899b6dcc66bb7626..9c947be88a08cbf4db6dd817dc624a8366eb9115
@@@ -41,28 -41,20 +41,29 @@@ AM_CFLAGS = 
  
  AM_CXXFLAGS = $(AM_CFLAGS)
  
 -noinst_LTLIBRARIES = libi965_dri.la
 +noinst_LTLIBRARIES = libi965_dri.la libi965_compiler.la
  libi965_dri_la_SOURCES = $(i965_FILES)
  libi965_dri_la_LIBADD = $(INTEL_LIBS)
  
  TEST_LIBS = \
        libi965_dri.la \
        ../common/libdricommon.la \
+       ../common/libxmlconfig.la \
        ../common/libmegadriver_stub.la \
          ../../../libmesa.la \
        $(DRI_LIB_DEPS) \
          $(CLOCK_LIB) \
        ../common/libdri_test_stubs.la
  
 +libi965_compiler_la_SOURCES = $(i965_FILES)
 +libi965_compiler_la_LIBADD = $(INTEL_LIBS) \
 +      ../common/libdricommon.la \
 +      ../common/libmegadriver_stub.la \
 +        ../../../libmesa.la \
 +      $(DRI_LIB_DEPS) \
 +        $(CLOCK_LIB) \
 +      ../common/libdri_test_stubs.la -lm
 +
  TESTS = \
        test_fs_cmod_propagation \
        test_fs_saturate_propagation \
diff --combined src/mesa/drivers/dri/i965/brw_context.c
index 23838056690fa5a1422cab8c3e803fd3212737c7,cf4088306209ac459fc9ff1c42fd9a8ef65bfeed..ebf12fab69e6f5aacc6065581bbfaa2769533752
@@@ -50,6 -50,7 +50,7 @@@
  
  #include "brw_context.h"
  #include "brw_defines.h"
+ #include "brw_shader.h"
  #include "brw_draw.h"
  #include "brw_state.h"
  
@@@ -68,8 -69,6 +69,6 @@@
  #include "tnl/t_pipeline.h"
  #include "util/ralloc.h"
  
- #include "glsl/nir/nir.h"
  /***************************************
   * Mesa's Driver Functions
   ***************************************/
@@@ -289,6 -288,8 +288,8 @@@ brw_init_driver_functions(struct brw_context
     else
        gen4_init_queryobj_functions(functions);
     brw_init_compute_functions(functions);
+    if (brw->gen >= 7)
+       brw_init_conditional_render_functions(functions);
  
     functions->QuerySamplesForFormat = brw_query_samples_for_format;
  
        functions->GetSamplePosition = gen6_get_sample_position;
  }
  
 -static void
 +void
  brw_initialize_context_constants(struct brw_context *brw)
  {
     struct gl_context *ctx = &brw->ctx;
     int max_samples;
     const int *msaa_modes = intel_supported_msaa_modes(brw->intelScreen);
     const int clamp_max_samples =
 -      driQueryOptioni(&brw->optionCache, "clamp_max_samples");
 +      brw->optionCache.info != NULL ?
 +         driQueryOptioni(&brw->optionCache, "clamp_max_samples") : -1;
  
     if (clamp_max_samples < 0) {
        max_samples = msaa_modes[0];
  
     ctx->Const.MinLineWidth = 1.0;
     ctx->Const.MinLineWidthAA = 1.0;
-    if (brw->gen >= 9 || brw->is_cherryview) {
-       ctx->Const.MaxLineWidth = 40.0;
-       ctx->Const.MaxLineWidthAA = 40.0;
-       ctx->Const.LineWidthGranularity = 0.125;
-    } else if (brw->gen >= 6) {
+    if (brw->gen >= 6) {
        ctx->Const.MaxLineWidth = 7.375;
        ctx->Const.MaxLineWidthAA = 7.375;
        ctx->Const.LineWidthGranularity = 0.125;
        ctx->Const.LineWidthGranularity = 0.5;
     }
  
+    /* For non-antialiased lines, we have to round the line width to the
+     * nearest whole number. Make sure that we don't advertise a line
+     * width that, when rounded, will be beyond the actual hardware
+     * maximum.
+     */
+    assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);
     ctx->Const.MinPointSize = 1.0;
     ctx->Const.MinPointSizeAA = 1.0;
     ctx->Const.MaxPointSize = 255.0;
      */
     ctx->Const.UniformBufferOffsetAlignment = 16;
     ctx->Const.TextureBufferOffsetAlignment = 16;
+    ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;
  
     if (brw->gen >= 6) {
        ctx->Const.MaxVarying = 32;
        ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
     }
  
-    static const nir_shader_compiler_options nir_options = {
-       .native_integers = true,
-       /* In order to help allow for better CSE at the NIR level we tell NIR
-        * to split all ffma instructions during opt_algebraic and we then
-        * re-combine them as a later step.
-        */
-       .lower_ffma = true,
-       .lower_sub = true,
-    };
     /* We want the GLSL compiler to emit code that uses condition codes */
     for (int i = 0; i < MESA_SHADER_STAGES; i++) {
-       ctx->Const.ShaderCompilerOptions[i].MaxIfDepth = brw->gen < 6 ? 16 : UINT_MAX;
-       ctx->Const.ShaderCompilerOptions[i].EmitCondCodes = true;
-       ctx->Const.ShaderCompilerOptions[i].EmitNoNoise = true;
-       ctx->Const.ShaderCompilerOptions[i].EmitNoMainReturn = true;
-       ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectInput = true;
-       ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectOutput =
-        (i == MESA_SHADER_FRAGMENT);
-       ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectTemp =
-        (i == MESA_SHADER_FRAGMENT);
-       ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectUniform = false;
-       ctx->Const.ShaderCompilerOptions[i].LowerClipDistance = true;
+       ctx->Const.ShaderCompilerOptions[i] =
+          brw->intelScreen->compiler->glsl_compiler_options[i];
     }
  
-    ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = true;
-    ctx->Const.ShaderCompilerOptions[MESA_SHADER_GEOMETRY].OptimizeForAOS = true;
-    if (brw->scalar_vs) {
-       /* If we're using the scalar backend for vertex shaders, we need to
-        * configure these accordingly.
-        */
-       ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitNoIndirectOutput = true;
-       ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitNoIndirectTemp = true;
-       ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = false;
-       if (brw_env_var_as_boolean("INTEL_USE_NIR", true))
-          ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].NirOptions = &nir_options;
-    }
-    if (brw_env_var_as_boolean("INTEL_USE_NIR", true))
-       ctx->Const.ShaderCompilerOptions[MESA_SHADER_FRAGMENT].NirOptions = &nir_options;
-    ctx->Const.ShaderCompilerOptions[MESA_SHADER_COMPUTE].NirOptions = &nir_options;
     /* ARB_viewport_array */
     if (brw->gen >= 6 && ctx->API == API_OPENGL_CORE) {
        ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
     /* ARB_gpu_shader5 */
     if (brw->gen >= 7)
        ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);
+    /* ARB_framebuffer_no_attachments */
+    ctx->Const.MaxFramebufferWidth = ctx->Const.MaxViewportWidth;
+    ctx->Const.MaxFramebufferHeight = ctx->Const.MaxViewportHeight;
+    ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
+    ctx->Const.MaxFramebufferSamples = max_samples;
  }
  
  static void
@@@ -814,10 -785,9 +786,9 @@@ brwCreateContext(gl_api api
     _mesa_meta_init(ctx);
  
     brw_process_driconf_options(brw);
-    brw_process_intel_debug_variable(brw);
  
-    if (brw->gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS))
-       brw->scalar_vs = true;
+    if (INTEL_DEBUG & DEBUG_PERF)
+       brw->perf_debug = true;
  
     brw_initialize_context_constants(brw);
  
  
     intel_batchbuffer_init(brw);
  
 +#if 0
     if (brw->gen >= 6) {
        /* Create a new hardware context.  Using a hardware context means that
         * our GPU state will be saved/restored on context switch, allowing us
     }
  
     brw_init_state(brw);
 +#endif
  
     intelInitExtensions(ctx);
  
     brw->gs.enabled = false;
     brw->sf.viewport_transform_enable = true;
  
+    brw->predicate.state = BRW_PREDICATE_STATE_RENDER;
     ctx->VertexProgram._MaintainTnlProgram = true;
     ctx->FragmentProgram._MaintainTexEnvProgram = true;
  
  
     _mesa_compute_version(ctx);
  
 +#if 0
     _mesa_initialize_dispatch_tables(ctx);
     _mesa_initialize_vbo_vtxfmt(ctx);
 +#endif
  
     if (ctx->Extensions.AMD_performance_monitor) {
        brw_init_performance_monitors(brw);
diff --combined src/mesa/drivers/dri/i965/brw_context.h
index cb4cc7fb36b0dacafb06d72c28adf792d8c1e36e,a7d83f8d7b4091cb2b27002c74284318f661fb0b..9e1f722df9e7a29ec3f95a250c65a823aab9279d
@@@ -359,12 -359,6 +359,12 @@@ struct brw_stage_prog_data 
        /** @} */
     } binding_table;
  
 +   uint32_t *map_entries;
 +   struct {
 +      uint32_t index_count;
 +      uint32_t *index;
 +   } bind_map[8]; /* MAX_SETS from vulkan/private.h */
 +
     GLuint nr_params;       /**< number of float params/constants */
     GLuint nr_pull_params;
  
@@@ -611,6 -605,12 +611,12 @@@ struct brw_ff_gs_prog_data 
     unsigned svbi_postincrement_value;
  };
  
+ enum shader_dispatch_mode {
+    DISPATCH_MODE_4X1_SINGLE = 0,
+    DISPATCH_MODE_4X2_DUAL_INSTANCE = 1,
+    DISPATCH_MODE_4X2_DUAL_OBJECT = 2,
+    DISPATCH_MODE_SIMD8 = 3,
+ };
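+ 
+ /* Editorial sketch (not in the original patch): a state-emit path can
+  * pack this enum into the relevant 3DSTATE_GS dword using the SET_FIELD
+  * helper together with the GEN7_GS_DISPATCH_MODE_{SHIFT,MASK} pair added
+  * in brw_defines.h; 'dw5' is a placeholder name for that dword:
+  *
+  *    uint32_t dw5 = 0;
+  *    dw5 |= SET_FIELD(prog_data->dispatch_mode, GEN7_GS_DISPATCH_MODE);
+  */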
  
  /* Note: brw_vue_prog_data_compare() must be updated when adding fields to
   * this struct!
@@@ -628,7 -628,7 +634,7 @@@ struct brw_vue_prog_data 
      */
     GLuint urb_entry_size;
  
-    bool simd8;
+    enum shader_dispatch_mode dispatch_mode;
  };
  
  
@@@ -725,14 -725,6 +731,6 @@@ struct brw_gs_prog_dat
  
     int invocations;
  
-    /**
-     * Dispatch mode, can be any of:
-     * GEN7_GS_DISPATCH_MODE_DUAL_OBJECT
-     * GEN7_GS_DISPATCH_MODE_DUAL_INSTANCE
-     * GEN7_GS_DISPATCH_MODE_SINGLE
-     */
-    int dispatch_mode;
     /**
      * Gen6 transform feedback enabled flag.
      */
@@@ -829,20 -821,10 +827,10 @@@ struct brw_tracked_state 
  enum shader_time_shader_type {
     ST_NONE,
     ST_VS,
-    ST_VS_WRITTEN,
-    ST_VS_RESET,
     ST_GS,
-    ST_GS_WRITTEN,
-    ST_GS_RESET,
     ST_FS8,
-    ST_FS8_WRITTEN,
-    ST_FS8_RESET,
     ST_FS16,
-    ST_FS16_WRITTEN,
-    ST_FS16_RESET,
     ST_CS,
-    ST_CS_WRITTEN,
-    ST_CS_RESET,
  };
  
  struct brw_vertex_buffer {
@@@ -972,6 -954,22 +960,22 @@@ struct brw_stage_stat
     uint32_t sampler_offset;
  };
  
+ enum brw_predicate_state {
+    /* The first two states are used if we can determine whether to draw
+     * without having to look at the values in the query object buffer. This
+     * will happen if there is no conditional render in progress, if the query
+     * object is already completed, or if something else has already added
+     * samples to the preliminary result, such as via a BLT command.
+     */
+    BRW_PREDICATE_STATE_RENDER,
+    BRW_PREDICATE_STATE_DONT_RENDER,
+    /* In this case, whether to draw or not depends on the result of an
+     * MI_PREDICATE command, so the predicate enable bit needs to be checked.
+     */
+    BRW_PREDICATE_STATE_USE_BIT
+ };
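+ 
+ /* Editorial sketch (not in the original patch) of how a draw path can act
+  * on this enum; brw_check_conditional_render(), declared further down,
+  * is the real entry point, and 'dw0' stands in for the 3DPRIMITIVE
+  * header dword:
+  *
+  *    switch (brw->predicate.state) {
+  *    case BRW_PREDICATE_STATE_RENDER:      emit_draw = true;  break;
+  *    case BRW_PREDICATE_STATE_DONT_RENDER: emit_draw = false; break;
+  *    case BRW_PREDICATE_STATE_USE_BIT:
+  *       dw0 |= GEN7_3DPRIM_PREDICATE_ENABLE;   (MI_PREDICATE decides)
+  *       emit_draw = true;
+  *       break;
+  *    }
+  */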
+ struct shader_times;
  
  /**
   * brw_context is derived from gl_context.
@@@ -1131,7 -1129,6 +1135,6 @@@ struct brw_contex
     bool has_pln;
     bool no_simd8;
     bool use_rep_send;
-    bool scalar_vs;
  
     /**
      * Some versions of Gen hardware don't do centroid interpolation correctly
        bool begin_emitted;
     } query;
  
+    struct {
+       enum brw_predicate_state state;
+       bool supported;
+    } predicate;
     struct {
        /** A map from pipeline statistics counter IDs to MMIO addresses. */
        const int *statistics_registers;
        uint32_t offset;
        uint32_t size;
        enum aub_state_struct_type type;
+       int index;
     } *state_batch_list;
     int state_batch_count;
  
        const char **names;
        int *ids;
        enum shader_time_shader_type *types;
-       uint64_t *cumulative;
+       struct shader_times *cumulative;
        int num_entries;
        int max_entries;
        double report_time;
@@@ -1606,12 -1609,21 +1615,21 @@@ void brw_write_depth_count(struct brw_c
  void brw_store_register_mem64(struct brw_context *brw,
                                drm_intel_bo *bo, uint32_t reg, int idx);
  
+ /** brw_conditional_render.c */
+ void brw_init_conditional_render_functions(struct dd_function_table *functions);
+ bool brw_check_conditional_render(struct brw_context *brw);
  /** intel_batchbuffer.c */
  void brw_load_register_mem(struct brw_context *brw,
                             uint32_t reg,
                             drm_intel_bo *bo,
                             uint32_t read_domains, uint32_t write_domain,
                             uint32_t offset);
+ void brw_load_register_mem64(struct brw_context *brw,
+                              uint32_t reg,
+                              drm_intel_bo *bo,
+                              uint32_t read_domains, uint32_t write_domain,
+                              uint32_t offset);
  
  /*======================================================================
   * brw_state_dump.c
@@@ -1982,15 -1994,10 +2000,19 @@@ gen6_upload_push_constants(struct brw_c
                             struct brw_stage_state *stage_state,
                             enum aub_state_struct_type type);
  
 +struct intel_screen *intel_screen_create(int fd);
 +void intel_screen_destroy(struct intel_screen *screen);
 +
 +struct brw_context *intel_context_create(struct intel_screen *screen);
 +void intel_context_destroy(struct brw_context *brw);
 +
 +void
 +brw_initialize_context_constants(struct brw_context *brw);
 +
+ bool
+ gen9_use_linear_1d_layout(const struct brw_context *brw,
+                           const struct intel_mipmap_tree *mt);
  #ifdef __cplusplus
  }
  #endif
diff --combined src/mesa/drivers/dri/i965/brw_cs.cpp
index 2432875d0f492e726a0f5192e7b7f880fa277a87,4c5082c82c4dcc966c8bd316be897a7319f60935..42a082b57b663b9d165e60d17ad41365990eb694
@@@ -55,7 -55,7 +55,7 @@@ brw_cs_prog_data_compare(const void *in
  }
  
  
 -static const unsigned *
 +const unsigned *
  brw_cs_emit(struct brw_context *brw,
              void *mem_ctx,
              const struct brw_cs_prog_key *key,
     cfg_t *cfg = NULL;
     const char *fail_msg = NULL;
  
+    int st_index = -1;
+    if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+       st_index = brw_get_shader_time_index(brw, prog, &cp->Base, ST_CS);
     /* Now the main event: Visit the shader IR and generate our CS IR for it.
      */
-    fs_visitor v8(brw, mem_ctx, key, prog_data, prog, cp, 8);
+    fs_visitor v8(brw->intelScreen->compiler, brw,
+                  mem_ctx, MESA_SHADER_COMPUTE, key, &prog_data->base, prog,
+                  &cp->Base, 8, st_index);
     if (!v8.run_cs()) {
        fail_msg = v8.fail_msg;
     } else if (local_workgroup_size <= 8 * brw->max_cs_threads) {
        prog_data->simd_size = 8;
     }
  
-    fs_visitor v16(brw, mem_ctx, key, prog_data, prog, cp, 16);
+    fs_visitor v16(brw->intelScreen->compiler, brw,
+                   mem_ctx, MESA_SHADER_COMPUTE, key, &prog_data->base, prog,
+                   &cp->Base, 16, st_index);
     if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
         !fail_msg && !v8.simd16_unsupported &&
         local_workgroup_size <= 16 * brw->max_cs_threads) {
        return NULL;
     }
  
-    fs_generator g(brw, mem_ctx, (void*) key, &prog_data->base, &cp->Base,
+    fs_generator g(brw->intelScreen->compiler, brw,
+                   mem_ctx, (void*) key, &prog_data->base, &cp->Base,
                    v8.promoted_constants, v8.runtime_check_aads_emit, "CS");
     if (INTEL_DEBUG & DEBUG_CS) {
        char *name = ralloc_asprintf(mem_ctx, "%s compute shader %d",
@@@ -368,9 -377,11 +377,11 @@@ brw_upload_cs_state(struct brw_context 
  
  extern "C"
  const struct brw_tracked_state brw_cs_state = {
-    .dirty = {
-       .mesa  = 0,
-       .brw   = BRW_NEW_CS_PROG_DATA,
+    /* Designated initialisers aren't valid C++, so the field names
+     * are kept as comments for documentation purposes. */
+    /* .dirty = */{
+       /* .mesa = */ 0,
+       /* .brw = */  BRW_NEW_CS_PROG_DATA,
     },
-    .emit = brw_upload_cs_state
+    /* .emit = */ brw_upload_cs_state
  };
diff --combined src/mesa/drivers/dri/i965/brw_defines.h
index 3c704ee9d08a8c958906adcb897d3af54fab2ff5,66b9abc99910a329cb83d5cba110442ff9ba7bd5..c113d52a3d3aa8f9cae45108ac1ca830956ef720
@@@ -38,6 -38,7 +38,7 @@@
        fieldval & field ## _MASK;                                        \
     })
  
+ #define GET_BITS(data, high, low) ((data & INTEL_MASK((high), (low))) >> (low))
  #define GET_FIELD(word, field) (((word)  & field ## _MASK) >> field ## _SHIFT)
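  /* Editorial example (not in the original patch): with
   * GEN8_SURFACE_QPITCH_MASK == INTEL_MASK(14, 0) and a shift of 0,
   * GET_BITS(dw1, 14, 0) extracts the same field as
   * GET_FIELD(dw1, GEN8_SURFACE_QPITCH).
   */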
  
  #ifndef BRW_DEFINES_H
  # define GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL (0 << 15)
  # define GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM     (1 << 15)
  # define GEN7_3DPRIM_INDIRECT_PARAMETER_ENABLE      (1 << 10)
+ # define GEN7_3DPRIM_PREDICATE_ENABLE               (1 << 8)
  /* DW1 */
  # define GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL (0 << 8)
  # define GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM     (1 << 8)
  
 +#ifndef _3DPRIM_POINTLIST /* FIXME: Avoid clashing with defines from bdw_pack.h */
  #define _3DPRIM_POINTLIST         0x01
  #define _3DPRIM_LINELIST          0x02
  #define _3DPRIM_LINESTRIP         0x03
@@@ -77,7 -78,6 +79,7 @@@
  #define _3DPRIM_LINESTRIP_BF      0x13
  #define _3DPRIM_LINESTRIP_CONT_BF 0x14
  #define _3DPRIM_TRIFAN_NOSTIPPLE  0x15
 +#endif
  
  /* We use this offset to be able to pass native primitive types in struct
   * _mesa_prim::mode.  Native primitive types are BRW_PRIM_OFFSET +
  #define GEN7_SURFACE_ARYSPC_FULL      (0 << 10)
  #define GEN7_SURFACE_ARYSPC_LOD0      (1 << 10)
  
- /* Surface state DW0 */
+ /* Surface state DW1 */
  #define GEN8_SURFACE_MOCS_SHIFT         24
  #define GEN8_SURFACE_MOCS_MASK          INTEL_MASK(30, 24)
+ #define GEN8_SURFACE_QPITCH_SHIFT       0
+ #define GEN8_SURFACE_QPITCH_MASK        INTEL_MASK(14, 0)
  
  /* Surface state DW2 */
  #define BRW_SURFACE_HEIGHT_SHIFT      19
  #define GEN7_SURFACE_MOCS_SHIFT                 16
  #define GEN7_SURFACE_MOCS_MASK                  INTEL_MASK(19, 16)
  
+ #define GEN9_SURFACE_TRMODE_SHIFT          18
+ #define GEN9_SURFACE_TRMODE_MASK           INTEL_MASK(19, 18)
+ #define GEN9_SURFACE_TRMODE_NONE           0
+ #define GEN9_SURFACE_TRMODE_TILEYF         1
+ #define GEN9_SURFACE_TRMODE_TILEYS         2
+ #define GEN9_SURFACE_MIP_TAIL_START_LOD_SHIFT      8
+ #define GEN9_SURFACE_MIP_TAIL_START_LOD_MASK       INTEL_MASK(11, 8)
  /* Surface state DW6 */
  #define GEN7_SURFACE_MCS_ENABLE                 (1 << 0)
  #define GEN7_SURFACE_MCS_PITCH_SHIFT            3
  #define GEN8_SURFACE_AUX_MODE_HIZ               3
  
  /* Surface state DW7 */
+ #define GEN9_SURFACE_RT_COMPRESSION_SHIFT       30
+ #define GEN9_SURFACE_RT_COMPRESSION_MASK        INTEL_MASK(30, 30)
  #define GEN7_SURFACE_CLEAR_COLOR_SHIFT                28
  #define GEN7_SURFACE_SCS_R_SHIFT                25
  #define GEN7_SURFACE_SCS_R_MASK                 INTEL_MASK(27, 25)
@@@ -1131,6 -1144,11 +1146,11 @@@ enum opcode 
      * Terminate the compute shader.
      */
     CS_OPCODE_CS_TERMINATE,
+    /**
+     * GLSL barrier()
+     */
+    SHADER_OPCODE_BARRIER,
  };
  
  enum brw_urb_write_flags {
@@@ -1592,6 -1610,14 +1612,14 @@@ enum brw_message_target 
  #define BRW_SCRATCH_SPACE_SIZE_1M     10
  #define BRW_SCRATCH_SPACE_SIZE_2M     11
  
+ #define BRW_MESSAGE_GATEWAY_SFID_OPEN_GATEWAY         0
+ #define BRW_MESSAGE_GATEWAY_SFID_CLOSE_GATEWAY        1
+ #define BRW_MESSAGE_GATEWAY_SFID_FORWARD_MSG          2
+ #define BRW_MESSAGE_GATEWAY_SFID_GET_TIMESTAMP        3
+ #define BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG          4
+ #define BRW_MESSAGE_GATEWAY_SFID_UPDATE_GATEWAY_STATE 5
+ #define BRW_MESSAGE_GATEWAY_SFID_MMIO_READ_WRITE      6
  
  #define CMD_URB_FENCE                 0x6000
  #define CMD_CS_URB_STATE              0x6001
  # define GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID                1
  # define GEN7_GS_CONTROL_DATA_HEADER_SIZE_SHIFT               20
  # define GEN7_GS_INSTANCE_CONTROL_SHIFT                       15
- # define GEN7_GS_DISPATCH_MODE_SINGLE                 (0 << 11)
- # define GEN7_GS_DISPATCH_MODE_DUAL_INSTANCE          (1 << 11)
- # define GEN7_GS_DISPATCH_MODE_DUAL_OBJECT            (2 << 11)
+ # define GEN7_GS_DISPATCH_MODE_SHIFT                    11
+ # define GEN7_GS_DISPATCH_MODE_MASK                     INTEL_MASK(12, 11)
  # define GEN6_GS_STATISTICS_ENABLE                    (1 << 10)
  # define GEN6_GS_SO_STATISTICS_ENABLE                 (1 << 9)
  # define GEN6_GS_RENDERING_ENABLE                     (1 << 8)
@@@ -2470,8 -2495,8 +2497,8 @@@ enum brw_wm_barycentric_interp_mode 
   * cache settings.  We still use only either write-back or write-through; and
   * rely on the documented default values.
   */
- #define SKL_MOCS_WB 9
- #define SKL_MOCS_WT 5
+ #define SKL_MOCS_WB (0b001001 << 1)
+ #define SKL_MOCS_WT (0b000101 << 1)
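+ /* Editorial note (not in the original patch): 0b001001 == 9 and
+  * 0b000101 == 5 are the raw table indices used previously; the "<< 1"
+  * pre-shifts each index into the bit position the MOCS field occupies,
+  * so the macros now expand to 0x12 and 0x0a instead of 9 and 5.
+  */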
  
  #define MEDIA_VFE_STATE                         0x7000
  /* GEN7 DW2, GEN8+ DW3 */
diff --combined src/mesa/drivers/dri/i965/brw_fs.cpp
index 5ce1dfc663305a4ddc6cb06c6623c009fbc9ecd3,4292aa6b9fbcd24a9bba235151c7d1af6cef5f3e..2c0ff961182cea05f8998dfeeeeab99874db5fa5
@@@ -49,6 -49,8 +49,8 @@@
  #include "glsl/glsl_types.h"
  #include "program/sampler.h"
  
+ using namespace brw;
  void
  fs_inst::init(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
                const fs_reg *src, unsigned sources)
@@@ -212,152 -214,13 +214,13 @@@ fs_inst::resize_sources(uint8_t num_sou
     }
  }
  
- #define ALU1(op)                                                        \
-    fs_inst *                                                            \
-    fs_visitor::op(const fs_reg &dst, const fs_reg &src0)                \
-    {                                                                    \
-       return new(mem_ctx) fs_inst(BRW_OPCODE_##op, dst, src0);          \
-    }
- #define ALU2(op)                                                        \
-    fs_inst *                                                            \
-    fs_visitor::op(const fs_reg &dst, const fs_reg &src0,                \
-                   const fs_reg &src1)                                   \
-    {                                                                    \
-       return new(mem_ctx) fs_inst(BRW_OPCODE_##op, dst, src0, src1);    \
-    }
- #define ALU2_ACC(op)                                                    \
-    fs_inst *                                                            \
-    fs_visitor::op(const fs_reg &dst, const fs_reg &src0,                \
-                   const fs_reg &src1)                                   \
-    {                                                                    \
-       fs_inst *inst = new(mem_ctx) fs_inst(BRW_OPCODE_##op, dst, src0, src1);\
-       inst->writes_accumulator = true;                                  \
-       return inst;                                                      \
-    }
- #define ALU3(op)                                                        \
-    fs_inst *                                                            \
-    fs_visitor::op(const fs_reg &dst, const fs_reg &src0,                \
-                   const fs_reg &src1, const fs_reg &src2)               \
-    {                                                                    \
-       return new(mem_ctx) fs_inst(BRW_OPCODE_##op, dst, src0, src1, src2);\
-    }
- ALU1(NOT)
- ALU1(MOV)
- ALU1(FRC)
- ALU1(RNDD)
- ALU1(RNDE)
- ALU1(RNDZ)
- ALU2(ADD)
- ALU2(MUL)
- ALU2_ACC(MACH)
- ALU2(AND)
- ALU2(OR)
- ALU2(XOR)
- ALU2(SHL)
- ALU2(SHR)
- ALU2(ASR)
- ALU3(LRP)
- ALU1(BFREV)
- ALU3(BFE)
- ALU2(BFI1)
- ALU3(BFI2)
- ALU1(FBH)
- ALU1(FBL)
- ALU1(CBIT)
- ALU3(MAD)
- ALU2_ACC(ADDC)
- ALU2_ACC(SUBB)
- ALU2(SEL)
- ALU2(MAC)
- /** Gen4 predicated IF. */
- fs_inst *
- fs_visitor::IF(enum brw_predicate predicate)
- {
-    fs_inst *inst = new(mem_ctx) fs_inst(BRW_OPCODE_IF, dispatch_width);
-    inst->predicate = predicate;
-    return inst;
- }
- /** Gen6 IF with embedded comparison. */
- fs_inst *
- fs_visitor::IF(const fs_reg &src0, const fs_reg &src1,
-                enum brw_conditional_mod condition)
- {
-    assert(devinfo->gen == 6);
-    fs_inst *inst = new(mem_ctx) fs_inst(BRW_OPCODE_IF, dispatch_width,
-                                         reg_null_d, src0, src1);
-    inst->conditional_mod = condition;
-    return inst;
- }
- /**
-  * CMP: Sets the low bit of the destination channels with the result
-  * of the comparison, while the upper bits are undefined, and updates
-  * the flag register with the packed 16 bits of the result.
-  */
- fs_inst *
- fs_visitor::CMP(fs_reg dst, fs_reg src0, fs_reg src1,
-                 enum brw_conditional_mod condition)
- {
-    fs_inst *inst;
-    /* Take the instruction:
-     *
-     * CMP null<d> src0<f> src1<f>
-     *
-     * Original gen4 does type conversion to the destination type before
-     * comparison, producing garbage results for floating point comparisons.
-     *
-     * The destination type doesn't matter on newer generations, so we set the
-     * type to match src0 so we can compact the instruction.
-     */
-    dst.type = src0.type;
-    if (dst.file == HW_REG)
-       dst.fixed_hw_reg.type = dst.type;
-    resolve_ud_negate(&src0);
-    resolve_ud_negate(&src1);
-    inst = new(mem_ctx) fs_inst(BRW_OPCODE_CMP, dst, src0, src1);
-    inst->conditional_mod = condition;
-    return inst;
- }
- fs_inst *
- fs_visitor::LOAD_PAYLOAD(const fs_reg &dst, fs_reg *src, int sources,
-                          int header_size)
- {
-    assert(dst.width % 8 == 0);
-    fs_inst *inst = new(mem_ctx) fs_inst(SHADER_OPCODE_LOAD_PAYLOAD, dst.width,
-                                         dst, src, sources);
-    inst->header_size = header_size;
-    for (int i = 0; i < header_size; i++)
-       assert(src[i].file != GRF || src[i].width * type_sz(src[i].type) == 32);
-    inst->regs_written = header_size;
-    for (int i = header_size; i < sources; ++i)
-       assert(src[i].file != GRF || src[i].width == dst.width);
-    inst->regs_written += (sources - header_size) * (dst.width / 8);
-    return inst;
- }
- exec_list
- fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_reg &dst,
+ void
+ fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_builder &bld,
+                                        const fs_reg &dst,
                                         const fs_reg &surf_index,
                                         const fs_reg &varying_offset,
                                         uint32_t const_offset)
  {
-    exec_list instructions;
-    fs_inst *inst;
     /* We have our constant surface use a pitch of 4 bytes, so our index can
      * be any component of a vector, and then we load 4 contiguous
      * components starting from that.
      * the redundant ones.
      */
     fs_reg vec4_offset = vgrf(glsl_type::int_type);
-    instructions.push_tail(ADD(vec4_offset,
-                               varying_offset, fs_reg(const_offset & ~3)));
+    bld.ADD(vec4_offset, varying_offset, fs_reg(const_offset & ~3));
  
     int scale = 1;
     if (devinfo->gen == 4 && dst.width == 8) {
     int regs_written = 4 * (dst.width / 8) * scale;
     fs_reg vec4_result = fs_reg(GRF, alloc.allocate(regs_written),
                                 dst.type, dst.width);
-    inst = new(mem_ctx) fs_inst(op, vec4_result, surf_index, vec4_offset);
+    fs_inst *inst = bld.emit(op, vec4_result, surf_index, vec4_offset);
     inst->regs_written = regs_written;
-    instructions.push_tail(inst);
  
     if (devinfo->gen < 7) {
        inst->base_mrf = 13;
           inst->mlen = 1 + dispatch_width / 8;
     }
  
-    fs_reg result = offset(vec4_result, (const_offset & 3) * scale);
-    instructions.push_tail(MOV(dst, result));
-    return instructions;
+    bld.MOV(dst, offset(vec4_result, (const_offset & 3) * scale));
  }
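  
  /* Editorial example (not in the original patch) of the offset split in
   * VARYING_PULL_CONSTANT_LOAD above: for a hypothetical const_offset of 6,
   * the aligned base is (6 & ~3) == 4, and the final MOV selects component
   * (6 & 3) == 2 of the returned vec4 (times 'scale' on gen4 SIMD8).
   */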
  
  /**
   * A helper for MOV generation for fixing up broken hardware SEND dependency
   * handling.
   */
- fs_inst *
- fs_visitor::DEP_RESOLVE_MOV(int grf)
+ void
+ fs_visitor::DEP_RESOLVE_MOV(const fs_builder &bld, int grf)
  {
-    fs_inst *inst = MOV(brw_null_reg(), fs_reg(GRF, grf, BRW_REGISTER_TYPE_F));
-    inst->ir = NULL;
-    inst->annotation = "send dependency resolve";
     /* The caller always wants this MOV uncompressed, to emit the minimal
      * extra dependencies and to avoid having to deal with aligning its regs to 2.
      */
-    inst->exec_size = 8;
+    const fs_builder ubld = bld.annotate("send dependency resolve")
+                               .half(0);
  
-    return inst;
+    ubld.MOV(ubld.null_reg_f(), fs_reg(GRF, grf, BRW_REGISTER_TYPE_F));
  }
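  
  /* Editorial note (not in the original patch): the conversion pattern used
   * throughout this file replaces flag pokes on a returned fs_inst with a
   * chain of fs_builder modifiers, e.g.
   *
   *    fs_inst *mov = MOV(dst, src);      (old style)
   *    mov->force_writemask_all = true;
   *    emit(mov);
   *
   * becomes
   *
   *    bld.exec_all().MOV(dst, src);      (builder style)
   *
   * with annotate(), half() and at() composing the same way.
   */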
  
  bool
@@@ -671,7 -525,6 +525,7 @@@ fs_visitor::type_size(const struct glsl
     case GLSL_TYPE_ERROR:
     case GLSL_TYPE_INTERFACE:
     case GLSL_TYPE_DOUBLE:
 +   case GLSL_TYPE_FUNCTION:
        unreachable("not reached");
     }
  
   * the destination of the MOV, with extra parameters set.
   */
  fs_reg
- fs_visitor::get_timestamp(fs_inst **out_mov)
+ fs_visitor::get_timestamp(const fs_builder &bld)
  {
     assert(devinfo->gen >= 7);
  
  
     fs_reg dst = fs_reg(GRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD, 4);
  
-    fs_inst *mov = MOV(dst, ts);
     /* We want to read the 3 fields we care about even if it's not enabled in
      * the dispatch.
      */
-    mov->force_writemask_all = true;
+    bld.exec_all().MOV(dst, ts);
  
     /* The caller wants the low 32 bits of the timestamp.  Since it's running
      * at the GPU clock rate of ~1.2 GHz, it will roll over every ~3 seconds.
      */
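     /* Editorial check (not in the original patch): 2^32 cycles at
      * ~1.2 GHz is roughly 3.6 seconds between rollovers. */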
     dst.set_smear(0);
  
-    *out_mov = mov;
     return dst;
  }
  
  void
  fs_visitor::emit_shader_time_begin()
  {
-    current_annotation = "shader time start";
-    fs_inst *mov;
-    shader_start_time = get_timestamp(&mov);
-    emit(mov);
+    shader_start_time = get_timestamp(bld.annotate("shader time start"));
  }
  
  void
  fs_visitor::emit_shader_time_end()
  {
-    current_annotation = "shader time end";
-    enum shader_time_shader_type type, written_type, reset_type;
-    switch (stage) {
-    case MESA_SHADER_VERTEX:
-       type = ST_VS;
-       written_type = ST_VS_WRITTEN;
-       reset_type = ST_VS_RESET;
-       break;
-    case MESA_SHADER_GEOMETRY:
-       type = ST_GS;
-       written_type = ST_GS_WRITTEN;
-       reset_type = ST_GS_RESET;
-       break;
-    case MESA_SHADER_FRAGMENT:
-       if (dispatch_width == 8) {
-          type = ST_FS8;
-          written_type = ST_FS8_WRITTEN;
-          reset_type = ST_FS8_RESET;
-       } else {
-          assert(dispatch_width == 16);
-          type = ST_FS16;
-          written_type = ST_FS16_WRITTEN;
-          reset_type = ST_FS16_RESET;
-       }
-       break;
-    case MESA_SHADER_COMPUTE:
-       type = ST_CS;
-       written_type = ST_CS_WRITTEN;
-       reset_type = ST_CS_RESET;
-       break;
-    default:
-       unreachable("fs_visitor::emit_shader_time_end missing code");
-    }
     /* Insert our code just before the final SEND with EOT. */
     exec_node *end = this->instructions.get_tail();
     assert(end && ((fs_inst *) end)->eot);
+    const fs_builder ibld = bld.annotate("shader time end")
+                               .exec_all().at(NULL, end);
  
-    fs_inst *tm_read;
-    fs_reg shader_end_time = get_timestamp(&tm_read);
-    end->insert_before(tm_read);
+    fs_reg shader_end_time = get_timestamp(ibld);
  
     /* Check that there weren't any timestamp reset events (assuming these
      * were the only two timestamp reads that happened).
      */
     fs_reg reset = shader_end_time;
     reset.set_smear(2);
-    fs_inst *test = AND(reg_null_d, reset, fs_reg(1u));
-    test->conditional_mod = BRW_CONDITIONAL_Z;
-    test->force_writemask_all = true;
-    end->insert_before(test);
-    end->insert_before(IF(BRW_PREDICATE_NORMAL));
+    set_condmod(BRW_CONDITIONAL_Z,
+                ibld.AND(ibld.null_reg_ud(), reset, fs_reg(1u)));
+    ibld.IF(BRW_PREDICATE_NORMAL);
  
     fs_reg start = shader_start_time;
     start.negate = true;
     fs_reg diff = fs_reg(GRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD, 1);
     diff.set_smear(0);
-    fs_inst *add = ADD(diff, start, shader_end_time);
-    add->force_writemask_all = true;
-    end->insert_before(add);
+    ibld.ADD(diff, start, shader_end_time);
  
     /* If there were no instructions between the two timestamp gets, the diff
      * is 2 cycles.  Remove that overhead, so I can forget about that when
      * trying to determine the time taken for single instructions.
      */
-    add = ADD(diff, diff, fs_reg(-2u));
-    add->force_writemask_all = true;
-    end->insert_before(add);
-    end->insert_before(SHADER_TIME_ADD(type, diff));
-    end->insert_before(SHADER_TIME_ADD(written_type, fs_reg(1u)));
-    end->insert_before(new(mem_ctx) fs_inst(BRW_OPCODE_ELSE, dispatch_width));
-    end->insert_before(SHADER_TIME_ADD(reset_type, fs_reg(1u)));
-    end->insert_before(new(mem_ctx) fs_inst(BRW_OPCODE_ENDIF, dispatch_width));
+    ibld.ADD(diff, diff, fs_reg(-2u));
+    SHADER_TIME_ADD(ibld, 0, diff);
+    SHADER_TIME_ADD(ibld, 1, fs_reg(1u));
+    ibld.emit(BRW_OPCODE_ELSE);
+    SHADER_TIME_ADD(ibld, 2, fs_reg(1u));
+    ibld.emit(BRW_OPCODE_ENDIF);
  }
  
- fs_inst *
- fs_visitor::SHADER_TIME_ADD(enum shader_time_shader_type type, fs_reg value)
+ void
+ fs_visitor::SHADER_TIME_ADD(const fs_builder &bld,
+                             int shader_time_subindex,
+                             fs_reg value)
  {
-    int shader_time_index =
-       brw_get_shader_time_index(brw, shader_prog, prog, type);
-    fs_reg offset = fs_reg(shader_time_index * SHADER_TIME_STRIDE);
+    int index = shader_time_index * 3 + shader_time_subindex;
+    fs_reg offset = fs_reg(index * SHADER_TIME_STRIDE);
  
     fs_reg payload;
     if (dispatch_width == 8)
     else
        payload = vgrf(glsl_type::uint_type);
  
-    return new(mem_ctx) fs_inst(SHADER_OPCODE_SHADER_TIME_ADD,
-                                fs_reg(), payload, offset, value);
+    bld.emit(SHADER_OPCODE_SHADER_TIME_ADD, fs_reg(), payload, offset, value);
  }
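  
  /* Editorial note (not in the original patch): the subindices passed by
   * emit_shader_time_end() above replace the removed ST_*_WRITTEN and
   * ST_*_RESET enum values: 0 accumulates the cycle delta, 1 counts
   * shaders that recorded a time, and 2 counts timestamp resets. This is
   * why each shader's slot is now shader_time_index * 3 entries wide.
   */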
  
  void
@@@ -864,65 -670,16 +671,16 @@@ fs_visitor::fail(const char *format, ..
   * During a SIMD16 compile (if one happens anyway), this just calls fail().
   */
  void
- fs_visitor::no16(const char *format, ...)
+ fs_visitor::no16(const char *msg)
  {
-    va_list va;
-    va_start(va, format);
     if (dispatch_width == 16) {
-       vfail(format, va);
+       fail("%s", msg);
     } else {
        simd16_unsupported = true;
  
-       if (brw->perf_debug) {
-          if (no16_msg)
-             ralloc_vasprintf_append(&no16_msg, format, va);
-          else
-             no16_msg = ralloc_vasprintf(mem_ctx, format, va);
-       }
+       compiler->shader_perf_log(log_data,
+                                 "SIMD16 shader failed to compile: %s", msg);
     }
-    va_end(va);
- }
- fs_inst *
- fs_visitor::emit(enum opcode opcode)
- {
-    return emit(new(mem_ctx) fs_inst(opcode, dispatch_width));
- }
- fs_inst *
- fs_visitor::emit(enum opcode opcode, const fs_reg &dst)
- {
-    return emit(new(mem_ctx) fs_inst(opcode, dst));
- }
- fs_inst *
- fs_visitor::emit(enum opcode opcode, const fs_reg &dst, const fs_reg &src0)
- {
-    return emit(new(mem_ctx) fs_inst(opcode, dst, src0));
- }
- fs_inst *
- fs_visitor::emit(enum opcode opcode, const fs_reg &dst, const fs_reg &src0,
-                  const fs_reg &src1)
- {
-    return emit(new(mem_ctx) fs_inst(opcode, dst, src0, src1));
- }
- fs_inst *
- fs_visitor::emit(enum opcode opcode, const fs_reg &dst, const fs_reg &src0,
-                  const fs_reg &src1, const fs_reg &src2)
- {
-    return emit(new(mem_ctx) fs_inst(opcode, dst, src0, src1, src2));
- }
- fs_inst *
- fs_visitor::emit(enum opcode opcode, const fs_reg &dst,
-                  fs_reg src[], int sources)
- {
-    return emit(new(mem_ctx) fs_inst(opcode, dst, src, sources));
  }
  
  /**
@@@ -1051,7 -808,7 +809,7 @@@ fs_visitor::implied_mrf_writes(fs_inst 
     case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
        return inst->mlen;
     case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
-       return 2;
+       return inst->mlen;
     case SHADER_OPCODE_UNTYPED_ATOMIC:
     case SHADER_OPCODE_UNTYPED_SURFACE_READ:
     case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
@@@ -1077,14 -834,6 +835,6 @@@ fs_visitor::vgrf(const glsl_type *cons
                   brw_type_for_base_type(type), dispatch_width);
  }
  
- fs_reg
- fs_visitor::vgrf(int num_components)
- {
-    int reg_width = dispatch_width / 8;
-    return fs_reg(GRF, alloc.allocate(num_components * reg_width),
-                  BRW_REGISTER_TYPE_F, dispatch_width);
- }
  /** Fixed HW reg constructor. */
  fs_reg::fs_reg(enum register_file file, int reg)
  {
@@@ -1130,117 -879,18 +880,18 @@@ fs_reg::fs_reg(enum register_file file
     this->width = width;
  }
  
- fs_reg *
- fs_visitor::variable_storage(ir_variable *var)
- {
-    return (fs_reg *)hash_table_find(this->variable_ht, var);
- }
- void
- import_uniforms_callback(const void *key,
-                        void *data,
-                        void *closure)
- {
-    struct hash_table *dst_ht = (struct hash_table *)closure;
-    const fs_reg *reg = (const fs_reg *)data;
-    if (reg->file != UNIFORM)
-       return;
-    hash_table_insert(dst_ht, data, key);
- }
  /* For SIMD16, we need to follow from the uniform setup of SIMD8 dispatch.
   * This brings in those uniform definitions
   */
  void
  fs_visitor::import_uniforms(fs_visitor *v)
  {
-    hash_table_call_foreach(v->variable_ht,
-                          import_uniforms_callback,
-                          variable_ht);
     this->push_constant_loc = v->push_constant_loc;
     this->pull_constant_loc = v->pull_constant_loc;
     this->uniforms = v->uniforms;
     this->param_size = v->param_size;
  }
  
- /* Our support for uniforms is piggy-backed on the struct
-  * gl_fragment_program, because that's where the values actually
-  * get stored, rather than in some global gl_shader_program uniform
-  * store.
-  */
- void
- fs_visitor::setup_uniform_values(ir_variable *ir)
- {
-    int namelen = strlen(ir->name);
-    /* The data for our (non-builtin) uniforms is stored in a series of
-     * gl_uniform_driver_storage structs for each subcomponent that
-     * glGetUniformLocation() could name.  We know it's been set up in the same
-     * order we'd walk the type, so walk the list of storage and find anything
-     * with our name, or the prefix of a component that starts with our name.
-     */
-    unsigned params_before = uniforms;
-    for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
-       struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];
-       if (strncmp(ir->name, storage->name, namelen) != 0 ||
-           (storage->name[namelen] != 0 &&
-            storage->name[namelen] != '.' &&
-            storage->name[namelen] != '[')) {
-          continue;
-       }
-       unsigned slots = storage->type->component_slots();
-       if (storage->array_elements)
-          slots *= storage->array_elements;
-       for (unsigned i = 0; i < slots; i++) {
-          stage_prog_data->param[uniforms++] = &storage->storage[i];
-       }
-    }
-    /* Make sure we actually initialized the right amount of stuff here. */
-    assert(params_before + ir->type->component_slots() == uniforms);
-    (void)params_before;
- }
- /* Our support for builtin uniforms is even scarier than non-builtin.
-  * It sits on top of the PROG_STATE_VAR parameters that are
-  * automatically updated from GL context state.
-  */
- void
- fs_visitor::setup_builtin_uniform_values(ir_variable *ir)
- {
-    const ir_state_slot *const slots = ir->get_state_slots();
-    assert(slots != NULL);
-    for (unsigned int i = 0; i < ir->get_num_state_slots(); i++) {
-       /* This state reference has already been setup by ir_to_mesa, but we'll
-        * get the same index back here.
-        */
-       int index = _mesa_add_state_reference(this->prog->Parameters,
-                                           (gl_state_index *)slots[i].tokens);
-       /* Add each of the unique swizzles of the element as a parameter.
-        * This'll end up matching the expected layout of the
-        * array/matrix/structure we're trying to fill in.
-        */
-       int last_swiz = -1;
-       for (unsigned int j = 0; j < 4; j++) {
-        int swiz = GET_SWZ(slots[i].swizzle, j);
-        if (swiz == last_swiz)
-           break;
-        last_swiz = swiz;
-          stage_prog_data->param[uniforms++] =
-             &prog->Parameters->ParameterValues[index][swiz];
-       }
-    }
- }
  fs_reg *
  fs_visitor::emit_fragcoord_interpolation(bool pixel_center_integer,
                                           bool origin_upper_left)
  
     /* gl_FragCoord.x */
     if (pixel_center_integer) {
-       emit(MOV(wpos, this->pixel_x));
+       bld.MOV(wpos, this->pixel_x);
     } else {
-       emit(ADD(wpos, this->pixel_x, fs_reg(0.5f)));
+       bld.ADD(wpos, this->pixel_x, fs_reg(0.5f));
     }
     wpos = offset(wpos, 1);
  
     /* gl_FragCoord.y */
     if (!flip && pixel_center_integer) {
-       emit(MOV(wpos, this->pixel_y));
+       bld.MOV(wpos, this->pixel_y);
     } else {
        fs_reg pixel_y = this->pixel_y;
        float offset = (pixel_center_integer ? 0.0 : 0.5);
         offset += key->drawable_height - 1.0;
        }
  
-       emit(ADD(wpos, pixel_y, fs_reg(offset)));
+       bld.ADD(wpos, pixel_y, fs_reg(offset));
     }
     wpos = offset(wpos, 1);
  
     /* gl_FragCoord.z */
     if (devinfo->gen >= 6) {
-       emit(MOV(wpos, fs_reg(brw_vec8_grf(payload.source_depth_reg, 0))));
+       bld.MOV(wpos, fs_reg(brw_vec8_grf(payload.source_depth_reg, 0)));
     } else {
-       emit(FS_OPCODE_LINTERP, wpos,
+       bld.emit(FS_OPCODE_LINTERP, wpos,
             this->delta_xy[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
             interp_reg(VARYING_SLOT_POS, 2));
     }
     wpos = offset(wpos, 1);
  
     /* gl_FragCoord.w: Already set up in emit_interpolation */
-    emit(BRW_OPCODE_MOV, wpos, this->wpos_w);
+    bld.MOV(wpos, this->wpos_w);
  
     return reg;
  }
@@@ -1321,8 -971,8 +972,8 @@@ fs_visitor::emit_linterp(const fs_reg &
         */
        barycoord_mode = BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
     }
-    return emit(FS_OPCODE_LINTERP, attr,
-                this->delta_xy[barycoord_mode], interp);
+    return bld.emit(FS_OPCODE_LINTERP, attr,
+                    this->delta_xy[barycoord_mode], interp);
  }
  
  void
@@@ -1380,7 -1030,7 +1031,7 @@@ fs_visitor::emit_general_interpolation(
               struct brw_reg interp = interp_reg(location, k);
               interp = suboffset(interp, 3);
                 interp.type = attr.type;
-              emit(FS_OPCODE_CINTERP, attr, fs_reg(interp));
+                bld.emit(FS_OPCODE_CINTERP, attr, fs_reg(interp));
               attr = offset(attr, 1);
            }
         } else {
                     * unlit, replace the centroid data with non-centroid
                     * data.
                     */
-                   emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
+                   bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
  
                    fs_inst *inst;
                    inst = emit_linterp(attr, fs_reg(interp), interpolation_mode,
                                 mod_sample || key->persample_shading);
                 }
                 if (devinfo->gen < 6 && interpolation_mode == INTERP_QUALIFIER_SMOOTH) {
-                   emit(BRW_OPCODE_MUL, attr, attr, this->pixel_w);
+                   bld.MUL(attr, attr, this->pixel_w);
                 }
               attr = offset(attr, 1);
            }
@@@ -1448,7 -1098,7 +1099,7 @@@ fs_visitor::emit_frontfacing_interpolat
        fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
        g0.negate = true;
  
-       emit(ASR(*reg, g0, fs_reg(15)));
+       bld.ASR(*reg, g0, fs_reg(15));
     } else {
        /* Bit 31 of g1.6 is 0 if the polygon is front facing. We want to create
         * a boolean result from this (1/true or 0/false).
        fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
        g1_6.negate = true;
  
-       emit(ASR(*reg, g1_6, fs_reg(31)));
+       bld.ASR(*reg, g1_6, fs_reg(31));
     }
  
     return reg;
@@@ -1478,9 -1128,9 +1129,9 @@@ fs_visitor::compute_sample_position(fs_
  
     if (key->compute_pos_offset) {
        /* Convert int_sample_pos to floating point */
-       emit(MOV(dst, int_sample_pos));
+       bld.MOV(dst, int_sample_pos);
        /* Scale to the range [0, 1] */
-       emit(MUL(dst, dst, fs_reg(1 / 16.0f)));
+       bld.MUL(dst, dst, fs_reg(1 / 16.0f));
     }
     else {
        /* From ARB_sample_shading specification:
         *  rasterization is disabled, gl_SamplePosition will always be
         *  (0.5, 0.5).
         */
-       emit(MOV(dst, fs_reg(0.5f)));
+       bld.MOV(dst, fs_reg(0.5f));
     }
  }
  
@@@ -1497,7 -1147,7 +1148,7 @@@ fs_visitor::emit_samplepos_setup(
  {
     assert(devinfo->gen >= 6);
  
-    this->current_annotation = "compute sample position";
+    const fs_builder abld = bld.annotate("compute sample position");
     fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::vec2_type));
     fs_reg pos = *reg;
     fs_reg int_sample_x = vgrf(glsl_type::int_type);
                      BRW_REGISTER_TYPE_B), 16, 8, 2);
  
     if (dispatch_width == 8) {
-       emit(MOV(int_sample_x, fs_reg(sample_pos_reg)));
+       abld.MOV(int_sample_x, fs_reg(sample_pos_reg));
     } else {
-       emit(MOV(half(int_sample_x, 0), fs_reg(sample_pos_reg)));
-       emit(MOV(half(int_sample_x, 1), fs_reg(suboffset(sample_pos_reg, 16))))
-          ->force_sechalf = true;
+       abld.half(0).MOV(half(int_sample_x, 0), fs_reg(sample_pos_reg));
+       abld.half(1).MOV(half(int_sample_x, 1),
+                        fs_reg(suboffset(sample_pos_reg, 16)));
     }
     /* Compute gl_SamplePosition.x */
     compute_sample_position(pos, int_sample_x);
     pos = offset(pos, 1);
     if (dispatch_width == 8) {
-       emit(MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1))));
+       abld.MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1)));
     } else {
-       emit(MOV(half(int_sample_y, 0),
-                fs_reg(suboffset(sample_pos_reg, 1))));
-       emit(MOV(half(int_sample_y, 1), fs_reg(suboffset(sample_pos_reg, 17))))
-          ->force_sechalf = true;
+       abld.half(0).MOV(half(int_sample_y, 0),
+                        fs_reg(suboffset(sample_pos_reg, 1)));
+       abld.half(1).MOV(half(int_sample_y, 1),
+                        fs_reg(suboffset(sample_pos_reg, 17)));
     }
     /* Compute gl_SamplePosition.y */
     compute_sample_position(pos, int_sample_y);
@@@ -1548,7 -1198,7 +1199,7 @@@ fs_visitor::emit_sampleid_setup(
     brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
     assert(devinfo->gen >= 6);
  
-    this->current_annotation = "compute sample id";
+    const fs_builder abld = bld.annotate("compute sample id");
     fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));
  
     if (key->compute_sample_id) {
         * are sample 1 of subspan 0; the third group is sample 0 of
         * subspan 1, and finally sample 1 of subspan 1.
         */
-       fs_inst *inst;
-       inst = emit(BRW_OPCODE_AND, t1,
-                   fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
-                   fs_reg(0xc0));
-       inst->force_writemask_all = true;
-       inst = emit(BRW_OPCODE_SHR, t1, t1, fs_reg(5));
-       inst->force_writemask_all = true;
+       abld.exec_all()
+           .AND(t1, fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
+                fs_reg(0xc0));
+       abld.exec_all().SHR(t1, t1, fs_reg(5));
        /* This works for both SIMD8 and SIMD16 */
-       inst = emit(MOV(t2, brw_imm_v(key->persample_2x ? 0x1010 : 0x3210)));
-       inst->force_writemask_all = true;
+       abld.exec_all()
+           .MOV(t2, brw_imm_v(key->persample_2x ? 0x1010 : 0x3210));
        /* This special instruction takes care of setting vstride=1,
         * width=4, hstride=0 of t2 during an ADD instruction.
         */
-       emit(FS_OPCODE_SET_SAMPLE_ID, *reg, t1, t2);
+       abld.emit(FS_OPCODE_SET_SAMPLE_ID, *reg, t1, t2);
     } else {
        /* As per GL_ARB_sample_shading specification:
         * "When rendering to a non-multisample buffer, or if multisample
         *  rasterization is disabled, gl_SampleID will always be zero."
         */
-       emit(BRW_OPCODE_MOV, *reg, fs_reg(0));
+       abld.MOV(*reg, fs_reg(0));
     }
  
     return reg;
@@@ -1606,111 -1255,11 +1256,11 @@@ fs_visitor::resolve_source_modifiers(fs
     if (!src->abs && !src->negate)
        return;
  
-    fs_reg temp = retype(vgrf(1), src->type);
-    emit(MOV(temp, *src));
+    fs_reg temp = bld.vgrf(src->type);
+    bld.MOV(temp, *src);
     *src = temp;
  }
  
- fs_reg
- fs_visitor::fix_math_operand(fs_reg src)
- {
-    /* Can't do hstride == 0 args on gen6 math, so expand it out. We
-     * might be able to do better by doing execsize = 1 math and then
-     * expanding that result out, but we would need to be careful with
-     * masking.
-     *
-     * The hardware ignores source modifiers (negate and abs) on math
-     * instructions, so we also move to a temp to set those up.
-     */
-    if (devinfo->gen == 6 && src.file != UNIFORM && src.file != IMM &&
-        !src.abs && !src.negate)
-       return src;
-    /* Gen7 relaxes most of the above restrictions, but still can't use IMM
-     * operands to math
-     */
-    if (devinfo->gen >= 7 && src.file != IMM)
-       return src;
-    fs_reg expanded = vgrf(glsl_type::float_type);
-    expanded.type = src.type;
-    emit(BRW_OPCODE_MOV, expanded, src);
-    return expanded;
- }
- fs_inst *
- fs_visitor::emit_math(enum opcode opcode, fs_reg dst, fs_reg src)
- {
-    switch (opcode) {
-    case SHADER_OPCODE_RCP:
-    case SHADER_OPCODE_RSQ:
-    case SHADER_OPCODE_SQRT:
-    case SHADER_OPCODE_EXP2:
-    case SHADER_OPCODE_LOG2:
-    case SHADER_OPCODE_SIN:
-    case SHADER_OPCODE_COS:
-       break;
-    default:
-       unreachable("not reached: bad math opcode");
-    }
-    /* Can't do hstride == 0 args to gen6 math, so expand it out.  We
-     * might be able to do better by doing execsize = 1 math and then
-     * expanding that result out, but we would need to be careful with
-     * masking.
-     *
-     * Gen 6 hardware ignores source modifiers (negate and abs) on math
-     * instructions, so we also move to a temp to set those up.
-     */
-    if (devinfo->gen == 6 || devinfo->gen == 7)
-       src = fix_math_operand(src);
-    fs_inst *inst = emit(opcode, dst, src);
-    if (devinfo->gen < 6) {
-       inst->base_mrf = 2;
-       inst->mlen = dispatch_width / 8;
-    }
-    return inst;
- }
- fs_inst *
- fs_visitor::emit_math(enum opcode opcode, fs_reg dst, fs_reg src0, fs_reg src1)
- {
-    int base_mrf = 2;
-    fs_inst *inst;
-    if (devinfo->gen >= 8) {
-       inst = emit(opcode, dst, src0, src1);
-    } else if (devinfo->gen >= 6) {
-       src0 = fix_math_operand(src0);
-       src1 = fix_math_operand(src1);
-       inst = emit(opcode, dst, src0, src1);
-    } else {
-       /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
-        * "Message Payload":
-        *
-        * "Operand0[7].  For the INT DIV functions, this operand is the
-        *  denominator."
-        *  ...
-        * "Operand1[7].  For the INT DIV functions, this operand is the
-        *  numerator."
-        */
-       bool is_int_div = opcode != SHADER_OPCODE_POW;
-       fs_reg &op0 = is_int_div ? src1 : src0;
-       fs_reg &op1 = is_int_div ? src0 : src1;
-       emit(MOV(fs_reg(MRF, base_mrf + 1, op1.type, dispatch_width), op1));
-       inst = emit(opcode, dst, op0, reg_null_f);
-       inst->base_mrf = base_mrf;
-       inst->mlen = 2 * dispatch_width / 8;
-    }
-    return inst;
- }
  void
  fs_visitor::emit_discard_jump()
  {
     /* For performance, after a discard, jump to the end of the
      * shader if all relevant channels have been discarded.
      */
-    fs_inst *discard_jump = emit(FS_OPCODE_DISCARD_JUMP);
+    fs_inst *discard_jump = bld.emit(FS_OPCODE_DISCARD_JUMP);
     discard_jump->flag_subreg = 1;
  
     discard_jump->predicate = (dispatch_width == 8)
@@@ -1911,10 -1460,6 +1461,10 @@@ fs_visitor::assign_vs_urb_setup(
     unsigned vue_entries =
        MAX2(count, vs_prog_data->base.vue_map.num_slots);
  
 +   /* URB entry size is counted in units of 64 bytes (for the 3DSTATE_URB_VS
 +    * command).  Each attribute is 16 bytes (4 floats/dwords), so each unit
 +    * fits four attributes.
 +    */
     vs_prog_data->base.urb_entry_size = ALIGN(vue_entries, 4) / 4;
     vs_prog_data->base.urb_read_length = (count + 1) / 2;
  
@@@ -2317,26 -1862,22 +1867,22 @@@ fs_visitor::demote_pull_constants(
            continue;
  
           /* Set up the annotation tracking for new generated instructions. */
-          base_ir = inst->ir;
-          current_annotation = inst->annotation;
+          const fs_builder ibld = bld.annotate(inst->annotation, inst->ir)
+                                     .at(block, inst);
           fs_reg surf_index(stage_prog_data->binding_table.pull_constants_start);
           fs_reg dst = vgrf(glsl_type::float_type);
  
           /* Generate a pull load into dst. */
           if (inst->src[i].reladdr) {
-             exec_list list = VARYING_PULL_CONSTANT_LOAD(dst,
-                                                         surf_index,
-                                                         *inst->src[i].reladdr,
-                                                         pull_index);
-             inst->insert_before(block, &list);
+             VARYING_PULL_CONSTANT_LOAD(ibld, dst,
+                                        surf_index,
+                                        *inst->src[i].reladdr,
+                                        pull_index);
              inst->src[i].reladdr = NULL;
           } else {
              fs_reg offset = fs_reg((unsigned)(pull_index * 4) & ~15);
-             fs_inst *pull =
-                new(mem_ctx) fs_inst(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, 8,
-                                     dst, surf_index, offset);
-             inst->insert_before(block, pull);
+             ibld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
+                       dst, surf_index, offset);
              inst->src[i].set_smear(pull_index & 3);
           }
  
@@@ -2663,6 -2204,16 +2209,16 @@@ fs_visitor::opt_sampler_eot(
     if (unlikely(tex_inst->is_head_sentinel()) || !tex_inst->is_tex())
        return false;
  
+    /* This optimisation doesn't seem to work for textureGather for some
+     * reason. I can't find any documentation or known workarounds to indicate
+     * that this is expected, but since it is unlikely that a shader
+     * would directly write out the results from textureGather, we might
+     * as well just disable it.
+     */
+    if (tex_inst->opcode == SHADER_OPCODE_TG4 ||
+        tex_inst->opcode == SHADER_OPCODE_TG4_OFFSET)
+       return false;
     /* If there's no header present, we need to munge the LOAD_PAYLOAD as well.
      * It's very likely to be the previous instruction.
      */
  
     tex_inst->offset |= fb_write->target << 24;
     tex_inst->eot = true;
-    tex_inst->dst = reg_null_ud;
+    tex_inst->dst = bld.null_reg_ud();
     fb_write->remove(cfg->blocks[cfg->num_blocks - 1]);
  
     /* If a header is present, marking the eot is sufficient. Otherwise, we need
     if (tex_inst->header_size != 0)
        return true;
  
-    fs_reg send_header = vgrf(load_payload->sources + 1);
+    fs_reg send_header = bld.vgrf(BRW_REGISTER_TYPE_F,
+                                  load_payload->sources + 1);
     fs_reg *new_sources =
        ralloc_array(mem_ctx, fs_reg, load_payload->sources + 1);
  
@@@ -3038,27 -2590,13 +2595,26 @@@ fs_visitor::emit_repclear_shader(
     brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
     int base_mrf = 1;
     int color_mrf = base_mrf + 2;
 +   fs_inst *mov;
  
 -   fs_inst *mov = bld.exec_all().MOV(vec4(brw_message_reg(color_mrf)),
 -                                     fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F));
 +   if (uniforms == 1) {
-       mov = emit(MOV(vec4(brw_message_reg(color_mrf)),
-                      fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F)));
++      mov = bld.exec_all().MOV(vec4(brw_message_reg(color_mrf)),
++                               fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F));
 +   } else {
 +      struct brw_reg reg =
 +         brw_reg(BRW_GENERAL_REGISTER_FILE,
 +                 2, 3, 0, 0, BRW_REGISTER_TYPE_F,
 +                 BRW_VERTICAL_STRIDE_8,
 +                 BRW_WIDTH_2,
 +                 BRW_HORIZONTAL_STRIDE_4, BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
 +
-       mov = emit(MOV(vec4(brw_message_reg(color_mrf)), fs_reg(reg)));
++      mov = bld.exec_all().MOV(vec4(brw_message_reg(color_mrf)),
++                               fs_reg(reg));
 +   }
  
-    mov->force_writemask_all = true;
     fs_inst *write;
     if (key->nr_color_regions == 1) {
-       write = emit(FS_OPCODE_REP_FB_WRITE);
+       write = bld.emit(FS_OPCODE_REP_FB_WRITE);
        write->saturate = key->clamp_fragment_color;
        write->base_mrf = color_mrf;
        write->target = 0;
     } else {
        assume(key->nr_color_regions > 0);
        for (int i = 0; i < key->nr_color_regions; ++i) {
-          write = emit(FS_OPCODE_REP_FB_WRITE);
+          write = bld.emit(FS_OPCODE_REP_FB_WRITE);
           write->saturate = key->clamp_fragment_color;
           write->base_mrf = base_mrf;
           write->target = i;
     assign_curb_setup();
  
     /* Now that we have the uniform assigned, go ahead and force it to a vec4. */
 -   assert(mov->src[0].file == HW_REG);
 -   mov->src[0] = brw_vec4_grf(mov->src[0].fixed_hw_reg.nr, 0);
 +   if (uniforms == 1) {
 +      assert(mov->src[0].file == HW_REG);
 +      mov->src[0] = brw_vec4_grf(mov->src[0].fixed_hw_reg.nr, 0);
 +   }
  }
  
  /**
@@@ -3223,9 -2759,8 +2779,8 @@@ fs_visitor::insert_gen4_pre_send_depend
         */
        if (block->start() == scan_inst) {
           for (int i = 0; i < write_len; i++) {
-             if (needs_dep[i]) {
-                inst->insert_before(block, DEP_RESOLVE_MOV(first_write_grf + i));
-             }
+             if (needs_dep[i])
+                DEP_RESOLVE_MOV(bld.at(block, inst), first_write_grf + i);
           }
           return;
        }
              if (reg >= first_write_grf &&
                  reg < first_write_grf + write_len &&
                  needs_dep[reg - first_write_grf]) {
-                inst->insert_before(block, DEP_RESOLVE_MOV(reg));
+                DEP_RESOLVE_MOV(bld.at(block, inst), reg);
                 needs_dep[reg - first_write_grf] = false;
                 if (scan_inst->exec_size == 16)
                    needs_dep[reg - first_write_grf + 1] = false;
@@@ -3288,8 -2823,7 +2843,7 @@@ fs_visitor::insert_gen4_post_send_depen
        if (block->end() == scan_inst) {
           for (int i = 0; i < write_len; i++) {
              if (needs_dep[i])
-                scan_inst->insert_before(block,
-                                         DEP_RESOLVE_MOV(first_write_grf + i));
+                DEP_RESOLVE_MOV(bld.at(block, scan_inst), first_write_grf + i);
           }
           return;
        }
            scan_inst->dst.reg >= first_write_grf &&
            scan_inst->dst.reg < first_write_grf + write_len &&
            needs_dep[scan_inst->dst.reg - first_write_grf]) {
-          scan_inst->insert_before(block, DEP_RESOLVE_MOV(scan_inst->dst.reg));
+          DEP_RESOLVE_MOV(bld.at(block, scan_inst), scan_inst->dst.reg);
           needs_dep[scan_inst->dst.reg - first_write_grf] = false;
        }
  
@@@ -3429,6 -2963,9 +2983,9 @@@ fs_visitor::lower_load_payload(
        assert(inst->dst.file == MRF || inst->dst.file == GRF);
        assert(inst->saturate == false);
  
+       const fs_builder ibld = bld.group(inst->exec_size, inst->force_sechalf)
+                                  .exec_all(inst->force_writemask_all)
+                                  .at(block, inst);
        fs_reg dst = inst->dst;
  
        /* Get rid of COMPR4.  We'll add it back in if we need it */
              fs_reg mov_dst = retype(dst, BRW_REGISTER_TYPE_UD);
              fs_reg mov_src = retype(inst->src[i], BRW_REGISTER_TYPE_UD);
              mov_src.width = 8;
-             fs_inst *mov = MOV(mov_dst, mov_src);
-             mov->force_writemask_all = true;
-             inst->insert_before(block, mov);
+             ibld.exec_all().MOV(mov_dst, mov_src);
           }
           dst = offset(dst, 1);
        }
                 if (devinfo->has_compr4) {
                    fs_reg compr4_dst = retype(dst, inst->src[i].type);
                    compr4_dst.reg |= BRW_MRF_COMPR4;
-                   fs_inst *mov = MOV(compr4_dst, inst->src[i]);
-                   mov->force_writemask_all = inst->force_writemask_all;
-                   inst->insert_before(block, mov);
+                   ibld.MOV(compr4_dst, inst->src[i]);
                 } else {
                    /* Platform doesn't have COMPR4.  We have to fake it */
                    fs_reg mov_dst = retype(dst, inst->src[i].type);
                    mov_dst.width = 8;
-                   fs_inst *mov = MOV(mov_dst, half(inst->src[i], 0));
-                   mov->force_writemask_all = inst->force_writemask_all;
-                   inst->insert_before(block, mov);
-                   mov = MOV(offset(mov_dst, 4), half(inst->src[i], 1));
-                   mov->force_writemask_all = inst->force_writemask_all;
-                   mov->force_sechalf = true;
-                   inst->insert_before(block, mov);
+                   ibld.half(0).MOV(mov_dst, half(inst->src[i], 0));
+                   ibld.half(1).MOV(offset(mov_dst, 4), half(inst->src[i], 1));
                 }
              }
  
        }
  
        for (uint8_t i = inst->header_size; i < inst->sources; i++) {
-          if (inst->src[i].file != BAD_FILE) {
-             fs_inst *mov = MOV(retype(dst, inst->src[i].type),
-                                inst->src[i]);
-             mov->force_writemask_all = inst->force_writemask_all;
-             inst->insert_before(block, mov);
-          }
+          if (inst->src[i].file != BAD_FILE)
+             ibld.MOV(retype(dst, inst->src[i].type), inst->src[i]);
           dst = offset(dst, 1);
        }
  
     return progress;
  }
  
+ bool
+ fs_visitor::lower_integer_multiplication()
+ {
+    bool progress = false;
+    /* Gen8's MUL instruction can do a 32-bit x 32-bit -> 32-bit operation
+     * directly, but Cherryview cannot.
+     */
+    if (devinfo->gen >= 8 && !devinfo->is_cherryview)
+       return false;
+    foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
+       if (inst->opcode != BRW_OPCODE_MUL ||
+           inst->dst.is_accumulator() ||
+           (inst->dst.type != BRW_REGISTER_TYPE_D &&
+            inst->dst.type != BRW_REGISTER_TYPE_UD))
+          continue;
+       const fs_builder ibld = bld.at(block, inst);
+       /* The MUL instruction isn't commutative. On Gen <= 6, only the low
+        * 16 bits of src0 are read, and on Gen >= 7 only the low 16 bits of
+        * src1 are read.
+        *
+        * If multiplying by an immediate value that fits in 16 bits, emit a
+        * single MUL instruction with that value in the proper location.
+        */
+       if (inst->src[1].file == IMM &&
+           inst->src[1].fixed_hw_reg.dw1.ud < (1 << 16)) {
+          if (devinfo->gen < 7) {
+             fs_reg imm(GRF, alloc.allocate(dispatch_width / 8),
+                        inst->dst.type, dispatch_width);
+             ibld.MOV(imm, inst->src[1]);
+             ibld.MUL(inst->dst, imm, inst->src[0]);
+          } else {
+             ibld.MUL(inst->dst, inst->src[0], inst->src[1]);
+          }
+       } else {
+          /* Gen < 8 (and some Gen8+ low-power parts like Cherryview) cannot
+           * do 32-bit integer multiplication in one instruction, but instead
+           * must do a sequence (which actually calculates a 64-bit result):
+           *
+           *    mul(8)  acc0<1>D   g3<8,8,1>D      g4<8,8,1>D
+           *    mach(8) null       g3<8,8,1>D      g4<8,8,1>D
+           *    mov(8)  g2<1>D     acc0<8,8,1>D
+           *
+           * But on Gen > 6, the ability to use the second accumulator
+           * register (acc1) for non-float data types was removed, preventing
+           * a simple implementation in SIMD16.  A 16-channel result can
+           * instead be calculated by executing the three instructions twice
+           * in SIMD8, once with quarter control of 1Q for the first eight
+           * channels and again with 2Q for the second eight channels.
+           *
+           * Which accumulator register is implicitly accessed (by AccWrEnable
+           * for instance) is determined by the quarter control. Unfortunately
+           * Ivybridge (and presumably Baytrail) has a hardware bug in which an
+           * implicit accumulator access by an instruction with 2Q will access
+           * acc1 regardless of whether the data type is usable in acc1.
+           *
+           * Specifically, the 2Q mach(8) writes acc1, which does not exist
+           * for integer data types.
+           *
+           * Since we only want the low 32 bits of the result, we can do two
+           * 32-bit x 16-bit multiplies (one per 16-bit half of one source),
+           * shift the "high" partial product left by 16 and add them:
+           *
+           *    mul(8)  g7<1>D     g3<8,8,1>D      g4.0<8,8,1>UW
+           *    mul(8)  g8<1>D     g3<8,8,1>D      g4.1<8,8,1>UW
+           *    shl(8)  g9<1>D     g8<8,8,1>D      16D
+           *    add(8)  g2<1>D     g7<8,8,1>D      g8<8,8,1>D
+           *
+           * We can avoid the shl instruction by realizing that we only need
+           * to add the low 16 bits of the "high" result to the high 16 bits
+           * of the "low" result, using proper regioning on the add:
+           *
+           *    mul(8)  g7<1>D     g3<8,8,1>D      g4.0<16,8,2>UW
+           *    mul(8)  g8<1>D     g3<8,8,1>D      g4.1<16,8,2>UW
+           *    add(8)  g7.1<2>UW  g7.1<16,8,2>UW  g8<16,8,2>UW
+           *
+           * Since it does not use the (single) accumulator register, we can
+           * schedule multi-component multiplications much better.
+           */
+          if (inst->conditional_mod && inst->dst.is_null()) {
+             inst->dst = fs_reg(GRF, alloc.allocate(dispatch_width / 8),
+                                inst->dst.type, dispatch_width);
+          }
+          fs_reg low = inst->dst;
+          fs_reg high(GRF, alloc.allocate(dispatch_width / 8),
+                      inst->dst.type, dispatch_width);
+          if (devinfo->gen >= 7) {
+             fs_reg src1_0_w = inst->src[1];
+             fs_reg src1_1_w = inst->src[1];
+             if (inst->src[1].file == IMM) {
+                src1_0_w.fixed_hw_reg.dw1.ud &= 0xffff;
+                src1_1_w.fixed_hw_reg.dw1.ud >>= 16;
+             } else {
+                src1_0_w.type = BRW_REGISTER_TYPE_UW;
+                if (src1_0_w.stride != 0) {
+                   assert(src1_0_w.stride == 1);
+                   src1_0_w.stride = 2;
+                }
+                src1_1_w.type = BRW_REGISTER_TYPE_UW;
+                if (src1_1_w.stride != 0) {
+                   assert(src1_1_w.stride == 1);
+                   src1_1_w.stride = 2;
+                }
+                src1_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
+             }
+             ibld.MUL(low, inst->src[0], src1_0_w);
+             ibld.MUL(high, inst->src[0], src1_1_w);
+          } else {
+             fs_reg src0_0_w = inst->src[0];
+             fs_reg src0_1_w = inst->src[0];
+             src0_0_w.type = BRW_REGISTER_TYPE_UW;
+             if (src0_0_w.stride != 0) {
+                assert(src0_0_w.stride == 1);
+                src0_0_w.stride = 2;
+             }
+             src0_1_w.type = BRW_REGISTER_TYPE_UW;
+             if (src0_1_w.stride != 0) {
+                assert(src0_1_w.stride == 1);
+                src0_1_w.stride = 2;
+             }
+             src0_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
+             ibld.MUL(low, src0_0_w, inst->src[1]);
+             ibld.MUL(high, src0_1_w, inst->src[1]);
+          }
+          fs_reg dst = inst->dst;
+          dst.type = BRW_REGISTER_TYPE_UW;
+          dst.subreg_offset = 2;
+          dst.stride = 2;
+          high.type = BRW_REGISTER_TYPE_UW;
+          high.stride = 2;
+          low.type = BRW_REGISTER_TYPE_UW;
+          low.subreg_offset = 2;
+          low.stride = 2;
+          ibld.ADD(dst, low, high);
+          if (inst->conditional_mod) {
+             fs_reg null(retype(ibld.null_reg_f(), inst->dst.type));
+             set_condmod(inst->conditional_mod,
+                         ibld.MOV(null, inst->dst));
+          }
+       }
+       inst->remove(block);
+       progress = true;
+    }
+    if (progress)
+       invalidate_live_intervals();
+    return progress;
+ }
+
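For reference, the low 32 bits come out exactly right from the word split
above; a minimal C model of the arithmetic (a sketch with made-up names,
not driver code):

   #include <stdint.h>

   /* Low 32 bits of a 32x32 multiply, split the way the pass splits it:
    * one 32x16 multiply per 16-bit half of one source, then fold the low
    * 16 bits of the "high" product into the high 16 bits of the "low"
    * product, which is what the <16,8,2>UW regioning on the add expresses.
    */
   static uint32_t mul32_low_model(uint32_t a, uint32_t b)
   {
      uint32_t low  = a * (b & 0xffff);   /* mul g7, g3, g4.0<16,8,2>UW */
      uint32_t high = a * (b >> 16);      /* mul g8, g3, g4.1<16,8,2>UW */
      return low + (high << 16);          /* add g7.1<2>UW, g7.1, g8    */
   }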
  void
  fs_visitor::dump_instructions()
  {
@@@ -3602,6 -3289,9 +3309,9 @@@ fs_visitor::dump_instruction(backend_in
     }
     fprintf(file, "(%d) ", inst->exec_size);
  
+    if (inst->mlen) {
+       fprintf(file, "(mlen: %d) ", inst->mlen);
+    }
  
     switch (inst->dst.file) {
     case GRF:
@@@ -3895,7 -3585,7 +3605,7 @@@ fs_visitor::setup_vs_payload(
  void
  fs_visitor::setup_cs_payload()
  {
-    assert(brw->gen >= 7);
+    assert(devinfo->gen >= 7);
  
     payload.num_regs = 1;
  }
@@@ -3938,6 -3628,17 +3648,17 @@@ fs_visitor::calculate_register_pressure
  void
  fs_visitor::optimize()
  {
+    /* bld is the common builder object that pointed at the end of the
+     * program while we translated it into i965 IR.  For the optimization
+     * and lowering passes coming next, any code added after the end of the
+     * program without an explicit fs_builder::at() call clearly points at a
+     * mistake.  Ideally the optimization passes wouldn't be part of the
+     * visitor, so they wouldn't have access to bld at all; but they do, so
+     * just in case some pass forgets to ask for a location explicitly,
+     * point bld at a NULL location here to make any such mistake trip.
+     */
+    bld = bld.at(NULL, NULL);
+
     split_virtual_grfs();
  
     move_uniform_array_access_to_pull_constants();
           snprintf(filename, 64, "%s%d-%04d-%02d-%02d-" #pass,              \
                    stage_abbrev, dispatch_width, shader_prog ? shader_prog->Name : 0, iteration, pass_num); \
                                                                          \
-          backend_visitor::dump_instructions(filename);                  \
+          backend_shader::dump_instructions(filename);                   \
        }                                                                 \
                                                                          \
        progress = progress || this_progress;                             \
                 stage_abbrev, dispatch_width,
                 shader_prog ? shader_prog->Name : 0);
  
-       backend_visitor::dump_instructions(filename);
+       backend_shader::dump_instructions(filename);
     }
  
     bool progress;
     }
  
     OPT(opt_combine_constants);
+    OPT(lower_integer_multiplication);
  
     lower_uniform_pull_constant_loads();
  }
@@@ -4066,9 -3768,11 +3788,11 @@@ fs_visitor::allocate_registers(
           fail("Failure to register allocate.  Reduce number of "
                "live scalar values to avoid this.");
        } else {
-          perf_debug("%s shader triggered register spilling.  "
-                     "Try reducing the number of live scalar values to "
-                     "improve performance.\n", stage_name);
+          compiler->shader_perf_log(log_data,
+                                    "%s shader triggered register spilling.  "
+                                    "Try reducing the number of live scalar "
+                                    "values to improve performance.\n",
+                                    stage_name);
        }
  
        /* Since we're out of heuristics, just go spill registers until we
  }
  
  bool
- fs_visitor::run_vs()
+ fs_visitor::run_vs(gl_clip_plane *clip_planes)
  {
     assert(stage == MESA_SHADER_VERTEX);
  
 -   assign_common_binding_table_offsets(0);
 +   if (prog_data->map_entries == NULL)
 +      assign_common_binding_table_offsets(0);
     setup_vs_payload();
  
-    if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+    if (shader_time_index >= 0)
        emit_shader_time_begin();
  
-    if (brw->ctx.Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].NirOptions) {
-       emit_nir_code();
-    } else {
-       foreach_in_list(ir_instruction, ir, shader->base.ir) {
-          base_ir = ir;
-          this->result = reg_undef;
-          ir->accept(this);
-       }
-       base_ir = NULL;
-    }
+    emit_nir_code();
  
     if (failed)
        return false;
  
-    emit_urb_writes();
+    emit_urb_writes(clip_planes);
  
-    if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+    if (shader_time_index >= 0)
        emit_shader_time_end();
  
     calculate_cfg();
  }
  
  bool
- fs_visitor::run_fs()
+ fs_visitor::run_fs(bool do_rep_send)
  {
     brw_wm_prog_data *wm_prog_data = (brw_wm_prog_data *) this->prog_data;
     brw_wm_prog_key *wm_key = (brw_wm_prog_key *) this->key;
  
     sanity_param_count = prog->Parameters->NumParameters;
  
 -   assign_binding_table_offsets();
 +   if (prog_data->map_entries == NULL)
 +      assign_binding_table_offsets();
  
     if (devinfo->gen >= 6)
        setup_payload_gen6();
  
     if (0) {
        emit_dummy_fs();
-    } else if (brw->use_rep_send && dispatch_width == 16) {
+    } else if (do_rep_send) {
+       assert(dispatch_width == 16);
        emit_repclear_shader();
     } else {
-       if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+       if (shader_time_index >= 0)
           emit_shader_time_begin();
  
        calculate_urb_setup();
         * Initialize it with the dispatched pixels.
         */
        if (wm_prog_data->uses_kill) {
-          fs_inst *discard_init = emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
+          fs_inst *discard_init = bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
           discard_init->flag_subreg = 1;
        }
  
        /* Generate FS IR for main().  (the visitor only descends into
         * functions called "main").
         */
-       if (brw->ctx.Const.ShaderCompilerOptions[MESA_SHADER_FRAGMENT].NirOptions) {
-          emit_nir_code();
-       } else if (shader) {
-          foreach_in_list(ir_instruction, ir, shader->base.ir) {
-             base_ir = ir;
-             this->result = reg_undef;
-             ir->accept(this);
-          }
-       } else {
-          emit_fragment_program_code();
-       }
-       base_ir = NULL;
+       emit_nir_code();
        if (failed)
         return false;
  
        if (wm_prog_data->uses_kill)
-          emit(FS_OPCODE_PLACEHOLDER_HALT);
+          bld.emit(FS_OPCODE_PLACEHOLDER_HALT);
  
        if (wm_key->alpha_test_func)
           emit_alpha_test();
  
        emit_fb_writes();
  
-       if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+       if (shader_time_index >= 0)
           emit_shader_time_end();
  
        calculate_cfg();
@@@ -4252,7 -3936,7 +3958,7 @@@ fs_visitor::run_cs(
  
     setup_cs_payload();
  
-    if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+    if (shader_time_index >= 0)
        emit_shader_time_begin();
  
     emit_nir_code();
  
     emit_cs_terminate();
  
-    if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+    if (shader_time_index >= 0)
        emit_shader_time_end();
  
     calculate_cfg();
@@@ -4312,10 -3996,18 +4018,18 @@@ brw_wm_fs_emit(struct brw_context *brw
     if (unlikely(INTEL_DEBUG & DEBUG_WM))
        brw_dump_ir("fragment", prog, &shader->base, &fp->Base);
  
+    int st_index8 = -1, st_index16 = -1;
+    if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
+       st_index8 = brw_get_shader_time_index(brw, prog, &fp->Base, ST_FS8);
+       st_index16 = brw_get_shader_time_index(brw, prog, &fp->Base, ST_FS16);
+    }
+
     /* Now the main event: Visit the shader IR and generate our FS IR for it.
      */
-    fs_visitor v(brw, mem_ctx, key, prog_data, prog, fp, 8);
-    if (!v.run_fs()) {
+    fs_visitor v(brw->intelScreen->compiler, brw,
+                 mem_ctx, MESA_SHADER_FRAGMENT, key, &prog_data->base,
+                 prog, &fp->Base, 8, st_index8);
+    if (!v.run_fs(false /* do_rep_send */)) {
        if (prog) {
           prog->LinkStatus = false;
           ralloc_strcat(&prog->InfoLog, v.fail_msg);
     }
  
     cfg_t *simd16_cfg = NULL;
-    fs_visitor v2(brw, mem_ctx, key, prog_data, prog, fp, 16);
+    fs_visitor v2(brw->intelScreen->compiler, brw,
+                  mem_ctx, MESA_SHADER_FRAGMENT, key, &prog_data->base,
+                  prog, &fp->Base, 16, st_index16);
     if (likely(!(INTEL_DEBUG & DEBUG_NO16) || brw->use_rep_send)) {
        if (!v.simd16_unsupported) {
           /* Try a SIMD16 compile */
           v2.import_uniforms(&v);
-          if (!v2.run_fs()) {
-             perf_debug("SIMD16 shader failed to compile, falling back to "
-                        "SIMD8 at a 10-20%% performance cost: %s", v2.fail_msg);
+          if (!v2.run_fs(brw->use_rep_send)) {
+             perf_debug("SIMD16 shader failed to compile: %s", v2.fail_msg);
           } else {
              simd16_cfg = v2.cfg;
           }
-       } else {
-          perf_debug("SIMD16 shader unsupported, falling back to "
-                     "SIMD8 at a 10-20%% performance cost: %s", v.no16_msg);
        }
     }
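   /* Flow sketch: the SIMD8 compile (v) always runs; the SIMD16 compile
    * (v2) reuses v's uniform layout via import_uniforms() and is purely an
    * optimization, so a failed SIMD16 compile costs speed, not correctness.
    */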
  
        prog_data->no_8 = false;
     }
  
-    fs_generator g(brw, mem_ctx, (void *) key, &prog_data->base,
+    fs_generator g(brw->intelScreen->compiler, brw,
+                   mem_ctx, (void *) key, &prog_data->base,
                    &fp->Base, v.promoted_constants, v.runtime_check_aads_emit, "FS");
  
     if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
index 270131a73d15c979e8804380e42dd8726414fa94,59081eab8770345ee8f6684e3698a321fab6b6f3..a378019af5b3c258751e2e7f4880e98c4a4311bf
@@@ -28,6 -28,8 +28,8 @@@
  #include "brw_fs.h"
  #include "brw_nir.h"
  
+ using namespace brw;
+
  void
  fs_visitor::emit_nir_code()
  {
      */
  
     if (nir->num_inputs > 0) {
-       nir_inputs = vgrf(nir->num_inputs);
+       nir_inputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_inputs);
        nir_setup_inputs(nir);
     }
  
     if (nir->num_outputs > 0) {
-       nir_outputs = vgrf(nir->num_outputs);
+       nir_outputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_outputs);
        nir_setup_outputs(nir);
     }
  
@@@ -58,7 -60,7 +60,7 @@@
        unsigned array_elems =
           reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
        unsigned size = array_elems * reg->num_components;
-       nir_globals[reg->index] = vgrf(size);
+       nir_globals[reg->index] = bld.vgrf(BRW_REGISTER_TYPE_F, size);
     }
  
     /* get the main function and emit it */
@@@ -93,8 -95,8 +95,8 @@@ fs_visitor::nir_setup_inputs(nir_shade
           unsigned array_length = var->type->is_array() ? var->type->length : 1;
           for (unsigned i = 0; i < array_length; i++) {
              for (unsigned j = 0; j < components; j++) {
-                emit(MOV(retype(offset(input, components * i + j), type),
-                         offset(fs_reg(ATTR, var->data.location + i, type), j)));
+                bld.MOV(retype(offset(input, components * i + j), type),
+                        offset(fs_reg(ATTR, var->data.location + i, type), j));
              }
           }
           break;
           if (var->data.location == VARYING_SLOT_POS) {
              reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
                                                  var->data.origin_upper_left);
-             emit_percomp(MOV(input, reg), 0xF);
+             emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, input, reg), 0xF);
           } else {
              emit_general_interpolation(input, var->name, var->type,
                                         (glsl_interp_qualifier) var->data.interpolation,
@@@ -218,9 -220,12 +220,12 @@@ fs_visitor::nir_setup_uniform(nir_varia
        * our name.
        */
     unsigned index = var->data.driver_location;
-    for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
+    for (unsigned u = 0; u < shader_prog->NumUniformStorage; u++) {
        struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];
  
+       if (storage->builtin)
+          continue;
+
        if (strncmp(var->name, storage->name, namelen) != 0 ||
           (storage->name[namelen] != 0 &&
           storage->name[namelen] != '.' &&
@@@ -358,7 -363,7 +363,7 @@@ fs_visitor::nir_emit_impl(nir_function_
        unsigned array_elems =
           reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
        unsigned size = array_elems * reg->num_components;
-       nir_locals[reg->index] = vgrf(size);
+       nir_locals[reg->index] = bld.vgrf(BRW_REGISTER_TYPE_F, size);
     }
  
     nir_emit_cf_list(&impl->body);
@@@ -392,21 -397,21 +397,21 @@@ voi
  fs_visitor::nir_emit_if(nir_if *if_stmt)
  {
     /* first, put the condition into f0 */
-    fs_inst *inst = emit(MOV(reg_null_d,
+    fs_inst *inst = bld.MOV(bld.null_reg_d(),
                              retype(get_nir_src(if_stmt->condition),
-                                    BRW_REGISTER_TYPE_D)));
+                                    BRW_REGISTER_TYPE_D));
     inst->conditional_mod = BRW_CONDITIONAL_NZ;
  
-    emit(IF(BRW_PREDICATE_NORMAL));
+    bld.IF(BRW_PREDICATE_NORMAL);
  
     nir_emit_cf_list(&if_stmt->then_list);
  
     /* note: if the else is empty, dead CF elimination will remove it */
-    emit(BRW_OPCODE_ELSE);
+    bld.emit(BRW_OPCODE_ELSE);
  
     nir_emit_cf_list(&if_stmt->else_list);
  
-    emit(BRW_OPCODE_ENDIF);
+    bld.emit(BRW_OPCODE_ENDIF);
  
     if (!try_replace_with_sel() && devinfo->gen < 6) {
        no16("Can't support (non-uniform) control flow on SIMD16\n");
@@@ -420,11 -425,11 +425,11 @@@ fs_visitor::nir_emit_loop(nir_loop *loo
        no16("Can't support (non-uniform) control flow on SIMD16\n");
     }
  
-    emit(BRW_OPCODE_DO);
+    bld.emit(BRW_OPCODE_DO);
  
     nir_emit_cf_list(&loop->body);
  
-    emit(BRW_OPCODE_WHILE);
+    bld.emit(BRW_OPCODE_WHILE);
  }
  
  void
@@@ -438,19 -443,19 +443,19 @@@ fs_visitor::nir_emit_block(nir_block *b
  void
  fs_visitor::nir_emit_instr(nir_instr *instr)
  {
-    this->base_ir = instr;
+    const fs_builder abld = bld.annotate(NULL, instr);
  
     switch (instr->type) {
     case nir_instr_type_alu:
-       nir_emit_alu(nir_instr_as_alu(instr));
+       nir_emit_alu(abld, nir_instr_as_alu(instr));
        break;
  
     case nir_instr_type_intrinsic:
-       nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
+       nir_emit_intrinsic(abld, nir_instr_as_intrinsic(instr));
        break;
  
     case nir_instr_type_tex:
-       nir_emit_texture(nir_instr_as_tex(instr));
+       nir_emit_texture(abld, nir_instr_as_tex(instr));
        break;
  
     case nir_instr_type_load_const:
        break;
  
     case nir_instr_type_jump:
-       nir_emit_jump(nir_instr_as_jump(instr));
+       nir_emit_jump(abld, nir_instr_as_jump(instr));
        break;
  
     default:
        unreachable("unknown instruction type");
     }
-    this->base_ir = NULL;
  }
  
  static brw_reg_type
@@@ -540,7 -543,7 +543,7 @@@ fs_visitor::optimize_frontfacing_ternar
        tmp.subreg_offset = 2;
        tmp.stride = 2;
  
-       fs_inst *or_inst = emit(OR(tmp, g0, fs_reg(0x3f80)));
+       fs_inst *or_inst = bld.OR(tmp, g0, fs_reg(0x3f80));
        or_inst->src[1].type = BRW_REGISTER_TYPE_UW;
  
        tmp.type = BRW_REGISTER_TYPE_D;
           g1_6.negate = true;
        }
  
-       emit(OR(tmp, g1_6, fs_reg(0x3f800000)));
+       bld.OR(tmp, g1_6, fs_reg(0x3f800000));
     }
-    emit(AND(retype(result, BRW_REGISTER_TYPE_D), tmp, fs_reg(0xbf800000)));
+    bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, fs_reg(0xbf800000));
  
     return true;
  }
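The trick above never compares anything: 0x3f800000 is the bit pattern of
+1.0f and 0xbf800000 that of -1.0f, so OR-ing 0x3f80 into the high 16-bit
word and masking the dword with 0xbf800000 manufactures +/-1.0f directly
from the payload's facing bit. A C sketch of just the bit arithmetic
(hypothetical name; which polarity means front-facing depends on the
payload register, so the sign here is illustrative):

   #include <stdint.h>
   #include <string.h>

   static float pm_one_from_facing_word(uint16_t payload_word)
   {
      /* OR the exponent bits of 1.0f into the high word; the low word is
       * don't-care because the mask below clears it anyway.
       */
      uint32_t dw = (uint32_t)(payload_word | 0x3f80) << 16;
      dw &= 0xbf800000u;       /* keep the sign bit plus the bits of 1.0f */
      float f;
      memcpy(&f, &dw, sizeof f);
      return f;                /* -1.0f if bit 15 was set, else +1.0f */
   }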
  
  void
- fs_visitor::nir_emit_alu(nir_alu_instr *instr)
+ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
  {
     struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
     fs_inst *inst;
           if (!instr->src[i].src.is_ssa &&
               instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
              need_extra_copy = true;
-             temp = retype(vgrf(4), result.type);
+             temp = bld.vgrf(result.type, 4);
              break;
           }
        }
              continue;
  
           if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
-             inst = emit(MOV(offset(temp, i),
-                         offset(op[0], instr->src[0].swizzle[i])));
+             inst = bld.MOV(offset(temp, i),
+                            offset(op[0], instr->src[0].swizzle[i]));
           } else {
-             inst = emit(MOV(offset(temp, i),
-                         offset(op[i], instr->src[i].swizzle[0])));
+             inst = bld.MOV(offset(temp, i),
+                            offset(op[i], instr->src[i].swizzle[0]));
           }
           inst->saturate = instr->dest.saturate;
        }
              if (!(instr->dest.write_mask & (1 << i)))
                 continue;
  
-             emit(MOV(offset(result, i), offset(temp, i)));
+             bld.MOV(offset(result, i), offset(temp, i));
           }
        }
        return;
     switch (instr->op) {
     case nir_op_i2f:
     case nir_op_u2f:
-       inst = emit(MOV(result, op[0]));
+       inst = bld.MOV(result, op[0]);
        inst->saturate = instr->dest.saturate;
        break;
  
     case nir_op_f2i:
     case nir_op_f2u:
-       emit(MOV(result, op[0]));
+       bld.MOV(result, op[0]);
        break;
  
     case nir_op_fsign: {
           * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
           * zero.
           */
-       emit(CMP(reg_null_f, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ));
+       bld.CMP(bld.null_reg_f(), op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);
  
        fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
        op[0].type = BRW_REGISTER_TYPE_UD;
        result.type = BRW_REGISTER_TYPE_UD;
-       emit(AND(result_int, op[0], fs_reg(0x80000000u)));
+       bld.AND(result_int, op[0], fs_reg(0x80000000u));
  
-       inst = emit(OR(result_int, result_int, fs_reg(0x3f800000u)));
+       inst = bld.OR(result_int, result_int, fs_reg(0x3f800000u));
        inst->predicate = BRW_PREDICATE_NORMAL;
        if (instr->dest.saturate) {
-          inst = emit(MOV(result, result));
+          inst = bld.MOV(result, result);
           inst->saturate = true;
        }
        break;
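      /* C model of the fsign sequence above (a sketch, not driver code):
       *
       *    uint32_t bits = float_bits(x);
       *    uint32_t r = bits & 0x80000000u;   // unconditional AND: sign only
       *    if (x != 0.0f)                     // predicate from the CMP NZ
       *       r |= 0x3f800000u;               // predicated OR of 1.0f's bits
       *    return bits_float(r);              // +/-1.0f, or +/-0.0f for zero
       *
       * where float_bits()/bits_float() stand for bitwise float<->int copies.
       */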
         *               -> non-negative val generates 0x00000000.
         *  Predicated OR sets 1 if val is positive.
         */
-       emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_G));
-       emit(ASR(result, op[0], fs_reg(31)));
-       inst = emit(OR(result, result, fs_reg(1)));
+       bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_G);
+       bld.ASR(result, op[0], fs_reg(31));
+       inst = bld.OR(result, result, fs_reg(1));
        inst->predicate = BRW_PREDICATE_NORMAL;
        break;
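      /* C model of the isign sequence above (sketch):
       *
       *    int r = v >> 31;   // arithmetic shift: -1 for negative, else 0
       *    if (v > 0)         // predicate from the CMP G
       *       r |= 1;         // predicated OR: 1 for positive
       *    return r;
       */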
  
     case nir_op_frcp:
-       inst = emit_math(SHADER_OPCODE_RCP, result, op[0]);
+       inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
        inst->saturate = instr->dest.saturate;
        break;
  
     case nir_op_fexp2:
-       inst = emit_math(SHADER_OPCODE_EXP2, result, op[0]);
+       inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
        inst->saturate = instr->dest.saturate;
        break;
  
     case nir_op_flog2:
-       inst = emit_math(SHADER_OPCODE_LOG2, result, op[0]);
+       inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
        inst->saturate = instr->dest.saturate;
        break;
  
     case nir_op_fsin:
-       inst = emit_math(SHADER_OPCODE_SIN, result, op[0]);
+       inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
        inst->saturate = instr->dest.saturate;
        break;
  
     case nir_op_fcos:
-       inst = emit_math(SHADER_OPCODE_COS, result, op[0]);
+       inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
        inst->saturate = instr->dest.saturate;
        break;
  
     case nir_op_fddx:
        if (fs_key->high_quality_derivatives) {
-          inst = emit(FS_OPCODE_DDX_FINE, result, op[0]);
+          inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
        } else {
-          inst = emit(FS_OPCODE_DDX_COARSE, result, op[0]);
+          inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
        }
        inst->saturate = instr->dest.saturate;
        break;
     case nir_op_fddx_fine:
-       inst = emit(FS_OPCODE_DDX_FINE, result, op[0]);
+       inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
        inst->saturate = instr->dest.saturate;
        break;
     case nir_op_fddx_coarse:
-       inst = emit(FS_OPCODE_DDX_COARSE, result, op[0]);
+       inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
        inst->saturate = instr->dest.saturate;
        break;
     case nir_op_fddy:
        if (fs_key->high_quality_derivatives) {
-          inst = emit(FS_OPCODE_DDY_FINE, result, op[0],
-                      fs_reg(fs_key->render_to_fbo));
+          inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
+                          fs_reg(fs_key->render_to_fbo));
        } else {
-          inst = emit(FS_OPCODE_DDY_COARSE, result, op[0],
-                      fs_reg(fs_key->render_to_fbo));
+          inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
+                          fs_reg(fs_key->render_to_fbo));
        }
        inst->saturate = instr->dest.saturate;
        break;
     case nir_op_fddy_fine:
-       inst = emit(FS_OPCODE_DDY_FINE, result, op[0],
-                   fs_reg(fs_key->render_to_fbo));
+       inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
+                       fs_reg(fs_key->render_to_fbo));
        inst->saturate = instr->dest.saturate;
        break;
     case nir_op_fddy_coarse:
-       inst = emit(FS_OPCODE_DDY_COARSE, result, op[0],
-                   fs_reg(fs_key->render_to_fbo));
+       inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
+                       fs_reg(fs_key->render_to_fbo));
        inst->saturate = instr->dest.saturate;
        break;
  
     case nir_op_fadd:
     case nir_op_iadd:
-       inst = emit(ADD(result, op[0], op[1]));
+       inst = bld.ADD(result, op[0], op[1]);
        inst->saturate = instr->dest.saturate;
        break;
  
     case nir_op_fmul:
-       inst = emit(MUL(result, op[0], op[1]));
+       inst = bld.MUL(result, op[0], op[1]);
        inst->saturate = instr->dest.saturate;
        break;
  
-    case nir_op_imul: {
-       if (devinfo->gen >= 8) {
-          emit(MUL(result, op[0], op[1]));
-          break;
-       } else {
-          nir_const_value *value0 = nir_src_as_const_value(instr->src[0].src);
-          nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
-          if (value0 && value0->u[0] < (1 << 16)) {
-             if (devinfo->gen < 7) {
-                emit(MUL(result, op[0], op[1]));
-             } else {
-                emit(MUL(result, op[1], op[0]));
-             }
-             break;
-          } else if (value1 && value1->u[0] < (1 << 16)) {
-             if (devinfo->gen < 7) {
-                emit(MUL(result, op[1], op[0]));
-             } else {
-                emit(MUL(result, op[0], op[1]));
-             }
-             break;
-          }
-       }
-       if (devinfo->gen >= 7)
-          no16("SIMD16 explicit accumulator operands unsupported\n");
-       struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);
-       emit(MUL(acc, op[0], op[1]));
-       emit(MACH(reg_null_d, op[0], op[1]));
-       emit(MOV(result, fs_reg(acc)));
+    case nir_op_imul:
+       bld.MUL(result, op[0], op[1]);
        break;
-    }
  
     case nir_op_imul_high:
     case nir_op_umul_high: {
  
        struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);
  
-       fs_inst *mul = emit(MUL(acc, op[0], op[1]));
-       emit(MACH(result, op[0], op[1]));
+       fs_inst *mul = bld.MUL(acc, op[0], op[1]);
+       bld.MACH(result, op[0], op[1]);
  
        /* Until Gen8, integer multiplies read 32-bits from one source, and
         * 16-bits from the other, and relying on the MACH instruction to
  
     case nir_op_idiv:
     case nir_op_udiv:
-       emit_math(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
+       bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
        break;
  
     case nir_op_uadd_carry: {
        struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
                                    BRW_REGISTER_TYPE_UD);
  
-       emit(ADDC(reg_null_ud, op[0], op[1]));
-       emit(MOV(result, fs_reg(acc)));
+       bld.ADDC(bld.null_reg_ud(), op[0], op[1]);
+       bld.MOV(result, fs_reg(acc));
        break;
     }
  
        struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
                                    BRW_REGISTER_TYPE_UD);
  
-       emit(SUBB(reg_null_ud, op[0], op[1]));
-       emit(MOV(result, fs_reg(acc)));
+       bld.SUBB(bld.null_reg_ud(), op[0], op[1]);
+       bld.MOV(result, fs_reg(acc));
        break;
     }
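      /* Model for the two cases above (sketch): ADDC and SUBB discard the
       * sum/difference (null destination) and leave the carry/borrow bit in
       * the accumulator, so uadd_carry = (a + b < a) ? 1 : 0 and
       * usub_borrow = (a < b) ? 1 : 0.
       */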
  
     case nir_op_umod:
-       emit_math(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
+       bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
        break;
  
     case nir_op_flt:
     case nir_op_ilt:
     case nir_op_ult:
-       emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_L));
+       bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_L);
        break;
  
     case nir_op_fge:
     case nir_op_ige:
     case nir_op_uge:
-       emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_GE));
+       bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_GE);
        break;
  
     case nir_op_feq:
     case nir_op_ieq:
-       emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_Z));
+       bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_Z);
        break;
  
     case nir_op_fne:
     case nir_op_ine:
-       emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ));
+       bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ);
        break;
  
     case nir_op_inot:
        if (devinfo->gen >= 8) {
           resolve_source_modifiers(&op[0]);
        }
-       emit(NOT(result, op[0]));
+       bld.NOT(result, op[0]);
        break;
     case nir_op_ixor:
        if (devinfo->gen >= 8) {
           resolve_source_modifiers(&op[0]);
           resolve_source_modifiers(&op[1]);
        }
-       emit(XOR(result, op[0], op[1]));
+       bld.XOR(result, op[0], op[1]);
        break;
     case nir_op_ior:
        if (devinfo->gen >= 8) {
           resolve_source_modifiers(&op[0]);
           resolve_source_modifiers(&op[1]);
        }
-       emit(OR(result, op[0], op[1]));
+       bld.OR(result, op[0], op[1]);
        break;
     case nir_op_iand:
        if (devinfo->gen >= 8) {
           resolve_source_modifiers(&op[0]);
           resolve_source_modifiers(&op[1]);
        }
-       emit(AND(result, op[0], op[1]));
+       bld.AND(result, op[0], op[1]);
        break;
  
     case nir_op_fdot2:
        unreachable("not reached: should be handled by ldexp_to_arith()");
  
     case nir_op_fsqrt:
-       inst = emit_math(SHADER_OPCODE_SQRT, result, op[0]);
+       inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
        inst->saturate = instr->dest.saturate;
        break;
  
     case nir_op_frsq:
-       inst = emit_math(SHADER_OPCODE_RSQ, result, op[0]);
+       inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
        inst->saturate = instr->dest.saturate;
        break;
  
     case nir_op_b2i:
-       emit(AND(result, op[0], fs_reg(1)));
+       bld.AND(result, op[0], fs_reg(1));
        break;
     case nir_op_b2f:
-       emit(AND(retype(result, BRW_REGISTER_TYPE_UD), op[0], fs_reg(0x3f800000u)));
+       bld.AND(retype(result, BRW_REGISTER_TYPE_UD), op[0], fs_reg(0x3f800000u));
        break;
  
     case nir_op_f2b:
-       emit(CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ));
+       bld.CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);
        break;
     case nir_op_i2b:
-       emit(CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
+       bld.CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
        break;
  
     case nir_op_ftrunc:
-       inst = emit(RNDZ(result, op[0]));
+       inst = bld.RNDZ(result, op[0]);
        inst->saturate = instr->dest.saturate;
        break;
  
     case nir_op_fceil: {
        op[0].negate = !op[0].negate;
        fs_reg temp = vgrf(glsl_type::float_type);
-       emit(RNDD(temp, op[0]));
+       bld.RNDD(temp, op[0]);
        temp.negate = true;
-       inst = emit(MOV(result, temp));
+       inst = bld.MOV(result, temp);
        inst->saturate = instr->dest.saturate;
        break;
     }
     case nir_op_ffloor:
-       inst = emit(RNDD(result, op[0]));
+       inst = bld.RNDD(result, op[0]);
        inst->saturate = instr->dest.saturate;
        break;
     case nir_op_ffract:
-       inst = emit(FRC(result, op[0]));
+       inst = bld.FRC(result, op[0]);
        inst->saturate = instr->dest.saturate;
        break;
     case nir_op_fround_even:
-       inst = emit(RNDE(result, op[0]));
+       inst = bld.RNDE(result, op[0]);
        inst->saturate = instr->dest.saturate;
        break;
  
     case nir_op_imin:
     case nir_op_umin:
        if (devinfo->gen >= 6) {
-          inst = emit(BRW_OPCODE_SEL, result, op[0], op[1]);
+          inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
           inst->conditional_mod = BRW_CONDITIONAL_L;
        } else {
-          emit(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_L));
-          inst = emit(SEL(result, op[0], op[1]));
+          bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_L);
+          inst = bld.SEL(result, op[0], op[1]);
           inst->predicate = BRW_PREDICATE_NORMAL;
        }
        inst->saturate = instr->dest.saturate;
     case nir_op_imax:
     case nir_op_umax:
        if (devinfo->gen >= 6) {
-          inst = emit(BRW_OPCODE_SEL, result, op[0], op[1]);
+          inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
           inst->conditional_mod = BRW_CONDITIONAL_GE;
        } else {
-          emit(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_GE));
-          inst = emit(SEL(result, op[0], op[1]));
+          bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_GE);
+          inst = bld.SEL(result, op[0], op[1]);
           inst->predicate = BRW_PREDICATE_NORMAL;
        }
        inst->saturate = instr->dest.saturate;
        unreachable("not reached: should be handled by lower_packing_builtins");
  
     case nir_op_unpack_half_2x16_split_x:
-       inst = emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
+       inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
        inst->saturate = instr->dest.saturate;
        break;
     case nir_op_unpack_half_2x16_split_y:
-       inst = emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
+       inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
        inst->saturate = instr->dest.saturate;
        break;
  
     case nir_op_fpow:
-       inst = emit_math(SHADER_OPCODE_POW, result, op[0], op[1]);
+       inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
        inst->saturate = instr->dest.saturate;
        break;
  
     case nir_op_bitfield_reverse:
-       emit(BFREV(result, op[0]));
+       bld.BFREV(result, op[0]);
        break;
  
     case nir_op_bit_count:
-       emit(CBIT(result, op[0]));
+       bld.CBIT(result, op[0]);
        break;
  
     case nir_op_ufind_msb:
     case nir_op_ifind_msb: {
-       emit(FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]));
+       bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);
  
        /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
         * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
         * subtract the result from 31 to convert the MSB count into an LSB count.
         */
  
-       emit(CMP(reg_null_d, result, fs_reg(-1), BRW_CONDITIONAL_NZ));
+       bld.CMP(bld.null_reg_d(), result, fs_reg(-1), BRW_CONDITIONAL_NZ);
        fs_reg neg_result(result);
        neg_result.negate = true;
-       inst = emit(ADD(result, neg_result, fs_reg(31)));
+       inst = bld.ADD(result, neg_result, fs_reg(31));
        inst->predicate = BRW_PREDICATE_NORMAL;
        break;
     }
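      /* Model of the fixup above (sketch): result = (fbh == -1) ? -1 :
       * 31 - fbh.  The error value 0xFFFFFFFF passes through unchanged
       * because the predicated ADD only executes where the CMP found
       * result != -1.
       */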
  
     case nir_op_find_lsb:
-       emit(FBL(result, op[0]));
+       bld.FBL(result, op[0]);
        break;
  
     case nir_op_ubitfield_extract:
     case nir_op_ibitfield_extract:
-       emit(BFE(result, op[2], op[1], op[0]));
+       bld.BFE(result, op[2], op[1], op[0]);
        break;
     case nir_op_bfm:
-       emit(BFI1(result, op[0], op[1]));
+       bld.BFI1(result, op[0], op[1]);
        break;
     case nir_op_bfi:
-       emit(BFI2(result, op[0], op[1], op[2]));
+       bld.BFI2(result, op[0], op[1], op[2]);
        break;
  
     case nir_op_bitfield_insert:
                    "lower_instructions::bitfield_insert_to_bfm_bfi");
  
     case nir_op_ishl:
-       emit(SHL(result, op[0], op[1]));
+       bld.SHL(result, op[0], op[1]);
        break;
     case nir_op_ishr:
-       emit(ASR(result, op[0], op[1]));
+       bld.ASR(result, op[0], op[1]);
        break;
     case nir_op_ushr:
-       emit(SHR(result, op[0], op[1]));
+       bld.SHR(result, op[0], op[1]);
        break;
  
     case nir_op_pack_half_2x16_split:
-       emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
+       bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
        break;
  
     case nir_op_ffma:
-       inst = emit(MAD(result, op[2], op[1], op[0]));
+       inst = bld.MAD(result, op[2], op[1], op[0]);
        inst->saturate = instr->dest.saturate;
        break;
  
     case nir_op_flrp:
-       inst = emit_lrp(result, op[0], op[1], op[2]);
+       inst = bld.LRP(result, op[0], op[1], op[2]);
        inst->saturate = instr->dest.saturate;
        break;
  
        if (optimize_frontfacing_ternary(instr, result))
           return;
  
-       emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
-       inst = emit(SEL(result, op[1], op[2]));
+       bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
+       inst = bld.SEL(result, op[1], op[2]);
        inst->predicate = BRW_PREDICATE_NORMAL;
        break;
  
     if (devinfo->gen <= 5 &&
         (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
        fs_reg masked = vgrf(glsl_type::int_type);
-       emit(AND(masked, result, fs_reg(1)));
+       bld.AND(masked, result, fs_reg(1));
        masked.negate = true;
-       emit(MOV(retype(result, BRW_REGISTER_TYPE_D), masked));
+       bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
     }
  }
  
@@@ -1190,8 -1161,8 +1161,8 @@@ fs_reg_for_nir_reg(fs_visitor *v, nir_r
        int multiplier = nir_reg->num_components * (v->dispatch_width / 8);
  
        reg.reladdr = new(v->mem_ctx) fs_reg(v->vgrf(glsl_type::int_type));
-       v->emit(v->MUL(*reg.reladdr, v->get_nir_src(*indirect),
-                      fs_reg(multiplier)));
+       v->bld.MUL(*reg.reladdr, v->get_nir_src(*indirect),
+                  fs_reg(multiplier));
     }
  
     return reg;
@@@ -1203,11 -1174,10 +1174,10 @@@ fs_visitor::get_nir_src(nir_src src
     if (src.is_ssa) {
        assert(src.ssa->parent_instr->type == nir_instr_type_load_const);
        nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
-       fs_reg reg = vgrf(src.ssa->num_components);
-       reg.type = BRW_REGISTER_TYPE_D;
+       fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_D, src.ssa->num_components);
  
        for (unsigned i = 0; i < src.ssa->num_components; ++i)
-          emit(MOV(offset(reg, i), fs_reg(load->value.i[i])));
+          bld.MOV(offset(reg, i), fs_reg(load->value.i[i]));
  
        return reg;
     } else {
@@@ -1230,24 -1200,25 +1200,25 @@@ fs_visitor::get_nir_dest(nir_dest dest
  }
  
  void
- fs_visitor::emit_percomp(fs_inst *inst, unsigned wr_mask)
+ fs_visitor::emit_percomp(const fs_builder &bld, const fs_inst &inst,
+                          unsigned wr_mask)
  {
     for (unsigned i = 0; i < 4; i++) {
        if (!((wr_mask >> i) & 1))
           continue;
  
-       fs_inst *new_inst = new(mem_ctx) fs_inst(*inst);
+       fs_inst *new_inst = new(mem_ctx) fs_inst(inst);
        new_inst->dst = offset(new_inst->dst, i);
        for (unsigned j = 0; j < new_inst->sources; j++)
-          if (inst->src[j].file == GRF)
+          if (new_inst->src[j].file == GRF)
              new_inst->src[j] = offset(new_inst->src[j], i);
  
-       emit(new_inst);
+       bld.emit(new_inst);
     }
  }
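A hypothetical call for illustration: with wr_mask 0x5 (0b0101), the helper
clones the template instruction for components 0 and 2 only, offsetting the
destination and every GRF source by the component index:

   emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, dest, src), 0x5);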
  
  void
- fs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
+ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
  {
     fs_reg dest;
     if (nir_intrinsic_infos[instr->intrinsic].has_dest)
         */
        fs_inst *cmp;
        if (instr->intrinsic == nir_intrinsic_discard_if) {
-          cmp = emit(CMP(reg_null_f, get_nir_src(instr->src[0]),
-                         fs_reg(0), BRW_CONDITIONAL_Z));
+          cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
+                        fs_reg(0), BRW_CONDITIONAL_Z);
        } else {
           fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
                                         BRW_REGISTER_TYPE_UW));
-          cmp = emit(CMP(reg_null_f, some_reg, some_reg, BRW_CONDITIONAL_NZ));
+          cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
        }
        cmp->predicate = BRW_PREDICATE_NORMAL;
        cmp->flag_subreg = 1;
     }
  
     case nir_intrinsic_load_front_face:
-       emit(MOV(retype(dest, BRW_REGISTER_TYPE_D),
-                *emit_frontfacing_interpolation()));
+       bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
+               *emit_frontfacing_interpolation());
        break;
  
     case nir_intrinsic_load_vertex_id:
        fs_reg vertex_id = nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
        assert(vertex_id.file != BAD_FILE);
        dest.type = vertex_id.type;
-       emit(MOV(dest, vertex_id));
+       bld.MOV(dest, vertex_id);
        break;
     }
  
        fs_reg base_vertex = nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
        assert(base_vertex.file != BAD_FILE);
        dest.type = base_vertex.type;
-       emit(MOV(dest, base_vertex));
+       bld.MOV(dest, base_vertex);
        break;
     }
  
        fs_reg instance_id = nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
        assert(instance_id.file != BAD_FILE);
        dest.type = instance_id.type;
-       emit(MOV(dest, instance_id));
+       bld.MOV(dest, instance_id);
        break;
     }
  
        fs_reg sample_mask_in = nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
        assert(sample_mask_in.file != BAD_FILE);
        dest.type = sample_mask_in.type;
-       emit(MOV(dest, sample_mask_in));
+       bld.MOV(dest, sample_mask_in);
        break;
     }
  
        fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
        assert(sample_pos.file != BAD_FILE);
        dest.type = sample_pos.type;
-       emit(MOV(dest, sample_pos));
-       emit(MOV(offset(dest, 1), offset(sample_pos, 1)));
+       bld.MOV(dest, sample_pos);
+       bld.MOV(offset(dest, 1), offset(sample_pos, 1));
        break;
     }
  
        fs_reg sample_id = nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
        assert(sample_id.file != BAD_FILE);
        dest.type = sample_id.type;
-       emit(MOV(dest, sample_id));
+       bld.MOV(dest, sample_id);
        break;
     }
  
           index -= num_direct_uniforms;
        }
  
-       for (int i = 0; i < instr->const_index[1]; i++) {
-          for (unsigned j = 0; j < instr->num_components; j++) {
-             fs_reg src = offset(retype(uniform_reg, dest.type), index);
-             if (has_indirect)
-                src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
-             index++;
+       for (unsigned j = 0; j < instr->num_components; j++) {
+          fs_reg src = offset(retype(uniform_reg, dest.type), index);
+          if (has_indirect)
+             src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
+          index++;
  
-             emit(MOV(dest, src));
-             dest = offset(dest, 1);
-          }
+          bld.MOV(dest, src);
+          dest = offset(dest, 1);
        }
        break;
     }
        fs_reg surf_index;
  
        if (const_index) {
 -         surf_index = fs_reg(stage_prog_data->binding_table.ubo_start +
 -                             const_index->u[0]);
 +         uint32_t index = const_index->u[0];
 +         uint32_t set = shader->base.UniformBlocks[index].Set;
 +         uint32_t binding = shader->base.UniformBlocks[index].Binding;
 +
 +         /* FIXME: We should probably assert here, but dota2 seems to hit
 +          * it and we'd like to keep going.
 +          */
 +         if (binding >= stage_prog_data->bind_map[set].index_count)
 +            binding = 0;
 +
 +         surf_index = fs_reg(stage_prog_data->bind_map[set].index[binding]);
        } else {
 +         assert(0 && "need more info from the ir for this.");
           /* The block index is not a constant. Evaluate the index expression
            * per-channel and add the base UBO index; we have to select a value
            * from any live channel.
            */
           surf_index = vgrf(glsl_type::uint_type);
-          emit(ADD(surf_index, get_nir_src(instr->src[0]),
-                   fs_reg(stage_prog_data->binding_table.ubo_start)));
-          emit_uniformize(surf_index, surf_index);
+          bld.ADD(surf_index, get_nir_src(instr->src[0]),
+                  fs_reg(stage_prog_data->binding_table.ubo_start));
+          bld.emit_uniformize(surf_index, surf_index);
  
           /* Assume this may touch any UBO. It would be nice to provide
            * a tighter bound, but the array information is already lowered away.
        if (has_indirect) {
           /* Turn the byte offset into a dword offset. */
           fs_reg base_offset = vgrf(glsl_type::int_type);
-          emit(SHR(base_offset, retype(get_nir_src(instr->src[1]),
-                                  BRW_REGISTER_TYPE_D),
-                   fs_reg(2)));
+          bld.SHR(base_offset, retype(get_nir_src(instr->src[1]),
+                                      BRW_REGISTER_TYPE_D),
+                  fs_reg(2));
  
           unsigned vec4_offset = instr->const_index[0] / 4;
           for (int i = 0; i < instr->num_components; i++)
-             emit(VARYING_PULL_CONSTANT_LOAD(offset(dest, i), surf_index,
-                                             base_offset, vec4_offset + i));
+             VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, i), surf_index,
+                                        base_offset, vec4_offset + i);
        } else {
           fs_reg packed_consts = vgrf(glsl_type::float_type);
           packed_consts.type = dest.type;
  
           fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
-          emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
-               surf_index, const_offset_reg);
+          bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
+                   surf_index, const_offset_reg);
  
           for (unsigned i = 0; i < instr->num_components; i++) {
              packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i);
               */
              assert(packed_consts.subreg_offset < 32);
  
-             emit(MOV(dest, packed_consts));
+             bld.MOV(dest, packed_consts);
              dest = offset(dest, 1);
           }
        }
        /* fallthrough */
     case nir_intrinsic_load_input: {
        unsigned index = 0;
-       for (int i = 0; i < instr->const_index[1]; i++) {
-          for (unsigned j = 0; j < instr->num_components; j++) {
-             fs_reg src = offset(retype(nir_inputs, dest.type),
-                                 instr->const_index[0] + index);
-             if (has_indirect)
-                src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
-             index++;
-             emit(MOV(dest, src));
-             dest = offset(dest, 1);
-          }
+       for (unsigned j = 0; j < instr->num_components; j++) {
+          fs_reg src = offset(retype(nir_inputs, dest.type),
+                              instr->const_index[0] + index);
+          if (has_indirect)
+             src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
+          index++;
+          bld.MOV(dest, src);
+          dest = offset(dest, 1);
        }
        break;
     }
         */
        no16("interpolate_at_* not yet supported in SIMD16 mode.");
  
-       fs_reg dst_xy = vgrf(2);
+       fs_reg dst_xy = bld.vgrf(BRW_REGISTER_TYPE_F, 2);
  
        /* For most messages, we need one reg of ignored data; the hardware
         * requires mlen==1 even when there is no payload. In the per-slot
  
        switch (instr->intrinsic) {
        case nir_intrinsic_interp_var_at_centroid:
-          inst = emit(FS_OPCODE_INTERPOLATE_AT_CENTROID, dst_xy, src, fs_reg(0u));
+          inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_CENTROID,
+                          dst_xy, src, fs_reg(0u));
           break;
  
        case nir_intrinsic_interp_var_at_sample: {
           nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
           assert(const_sample);
           unsigned msg_data = const_sample ? const_sample->i[0] << 4 : 0;
-          inst = emit(FS_OPCODE_INTERPOLATE_AT_SAMPLE, dst_xy, src,
-                      fs_reg(msg_data));
+          inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_SAMPLE, dst_xy, src,
+                          fs_reg(msg_data));
           break;
        }
  
              unsigned off_x = MIN2((int)(const_offset->f[0] * 16), 7) & 0xf;
              unsigned off_y = MIN2((int)(const_offset->f[1] * 16), 7) & 0xf;
  
-             inst = emit(FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, dst_xy, src,
-                         fs_reg(off_x | (off_y << 4)));
+             inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, dst_xy, src,
+                             fs_reg(off_x | (off_y << 4)));
           } else {
              src = vgrf(glsl_type::ivec2_type);
              fs_reg offset_src = retype(get_nir_src(instr->src[0]),
                                         BRW_REGISTER_TYPE_F);
              for (int i = 0; i < 2; i++) {
                 fs_reg temp = vgrf(glsl_type::float_type);
-                emit(MUL(temp, offset(offset_src, i), fs_reg(16.0f)));
+                bld.MUL(temp, offset(offset_src, i), fs_reg(16.0f));
                 fs_reg itemp = vgrf(glsl_type::int_type);
-                emit(MOV(itemp, temp));  /* float to int */
+                bld.MOV(itemp, temp);  /* float to int */
  
                 /* Clamp the upper end of the range to +7/16.
                  * ARB_gpu_shader5 requires that we support a maximum offset
                  * implementation-dependent constant
                  * FRAGMENT_INTERPOLATION_OFFSET_BITS"
                  */
-                emit(BRW_OPCODE_SEL, offset(src, i), itemp, fs_reg(7))
-                    ->conditional_mod = BRW_CONDITIONAL_L; /* min(src2, 7) */
+                set_condmod(BRW_CONDITIONAL_L,
+                            bld.SEL(offset(src, i), itemp, fs_reg(7)));
              }
  
              mlen = 2;
-             inst = emit(FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET, dst_xy, src,
-                         fs_reg(0u));
+             inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET, dst_xy, src,
+                             fs_reg(0u));
           }
           break;
        }
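         /* Encoding model for the constant-offset path above (sketch):
          *
          *    int v = (int)(f * 16.0f);   // 1/16-pixel units, truncated
          *    if (v > 7) v = 7;           // clamp the upper end to +7/16
          *    field = v & 0xf;            // 4-bit two's-complement field
          */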
           fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
           src.type = dest.type;
  
-          emit(FS_OPCODE_LINTERP, dest, dst_xy, src);
+          bld.emit(FS_OPCODE_LINTERP, dest, dst_xy, src);
           dest = offset(dest, 1);
        }
        break;
     case nir_intrinsic_store_output: {
        fs_reg src = get_nir_src(instr->src[0]);
        unsigned index = 0;
-       for (int i = 0; i < instr->const_index[1]; i++) {
-          for (unsigned j = 0; j < instr->num_components; j++) {
-             fs_reg new_dest = offset(retype(nir_outputs, src.type),
-                                      instr->const_index[0] + index);
-             if (has_indirect)
-                src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
-             index++;
-             emit(MOV(new_dest, src));
-             src = offset(src, 1);
-          }
+       for (unsigned j = 0; j < instr->num_components; j++) {
+          fs_reg new_dest = offset(retype(nir_outputs, src.type),
+                                   instr->const_index[0] + index);
+          if (has_indirect)
+             src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
+          index++;
+          bld.MOV(new_dest, src);
+          src = offset(src, 1);
        }
        break;
     }
  
+    case nir_intrinsic_barrier:
+       emit_barrier();
+       break;
+
     default:
        unreachable("unknown intrinsic");
     }
  }
  
  void
- fs_visitor::nir_emit_texture(nir_tex_instr *instr)
+ fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
  {
 -   unsigned sampler = instr->sampler_index;
 +   uint32_t set = instr->sampler_set;
 +   uint32_t binding = instr->sampler_index;
 +
 +   assert(binding < stage_prog_data->bind_map[set].index_count);
 +   assert(stage_prog_data->bind_map[set].index[binding] < 1000);
 +
 +   unsigned sampler = stage_prog_data->bind_map[set].index[binding];
     fs_reg sampler_reg(sampler);
  
   /* FINISHME: We're failing to recompile our programs when the sampler is
    * updated.  This only matters for the texture rectangle scale parameters
    * (pre-gen6, or gen6+ with GL_CLAMP).
    */
     bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
                          instr->is_array;
  
-    int lod_components = 0, offset_components = 0;
+    int lod_components = 0;
+    int UNUSED offset_components = 0;
  
     fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, tex_offset;
  
  
           /* Emit code to evaluate the actual indexing expression */
           sampler_reg = vgrf(glsl_type::uint_type);
-          emit(ADD(sampler_reg, src, fs_reg(sampler)));
-          emit_uniformize(sampler_reg, sampler_reg);
+          bld.ADD(sampler_reg, src, fs_reg(sampler));
+          bld.emit_uniformize(sampler_reg, sampler_reg);
           break;
        }
  
     fs_reg dest = get_nir_dest(instr->dest);
     dest.type = this->result.type;
     unsigned num_components = nir_tex_instr_dest_size(instr);
-    emit_percomp(MOV(dest, this->result), (1 << num_components) - 1);
+    emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, dest, this->result),
+                 (1 << num_components) - 1);
  }
  
  void
- fs_visitor::nir_emit_jump(nir_jump_instr *instr)
+ fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
  {
     switch (instr->type) {
     case nir_jump_break:
-       emit(BRW_OPCODE_BREAK);
+       bld.emit(BRW_OPCODE_BREAK);
        break;
     case nir_jump_continue:
-       emit(BRW_OPCODE_CONTINUE);
+       bld.emit(BRW_OPCODE_CONTINUE);
        break;
     case nir_jump_return:
     default:
index e4119b1aa3f475657d305728577270424f145d72,c13708a2f8a41b8c2cac3db0f2759c381c2d7bf3..b7bb2315b97d6ac979c63b1ff49c8dbdad315a59
@@@ -122,18 -122,9 +122,9 @@@ brw_create_nir(struct brw_context *brw
     /* Get rid of split copies */
     nir_optimize(nir);
  
-    if (shader_prog) {
-       nir_assign_var_locations_scalar_direct_first(nir, &nir->uniforms,
-                                                    &nir->num_direct_uniforms,
-                                                    &nir->num_uniforms);
-    } else {
-       /* ARB programs generally create a giant array of "uniform" data, and allow
-        * indirect addressing without any boundaries.  In the absence of bounds
-        * analysis, it's all or nothing.  num_direct_uniforms is only useful when
-        * we have some direct and some indirect access; it doesn't matter here.
-        */
-       nir->num_direct_uniforms = 0;
-    }
+    nir_assign_var_locations_scalar_direct_first(nir, &nir->uniforms,
+                                                 &nir->num_direct_uniforms,
+                                                 &nir->num_uniforms);
     nir_assign_var_locations_scalar(&nir->inputs, &nir->num_inputs);
     nir_assign_var_locations_scalar(&nir->outputs, &nir->num_outputs);
  
     nir_validate_shader(nir);
  
     if (shader_prog) {
 -      nir_lower_samplers(nir, shader_prog, stage);
 +      nir_lower_samplers_for_vk(nir);
        nir_validate_shader(nir);
     }
  
     nir_validate_shader(nir);
  
     if (unlikely(debug_enabled)) {
+       /* Re-index SSA defs so we print more sensible numbers. */
+       nir_foreach_overload(nir, overload) {
+          if (overload->impl)
+             nir_index_ssa_defs(overload->impl);
+       }
        fprintf(stderr, "NIR (SSA form) for %s shader:\n",
                _mesa_shader_stage_to_string(stage));
        nir_print_shader(nir, stderr);
index b056fbfc4275018b3f6e04894139de060302be9c,2327af77ad38b7ec8320289e2df4ee0fd979179c..ea128ccb670d511044e6fb06e201b126df6c427c
@@@ -88,7 -88,7 +88,7 @@@ static struct gl_program *brwNewProgram
         return NULL;
     }
  
-    case MESA_GEOMETRY_PROGRAM: {
+    case GL_GEOMETRY_PROGRAM_NV: {
        struct brw_geometry_program *prog = CALLOC_STRUCT(brw_geometry_program);
        if (prog) {
           prog->id = get_new_program_id(brw->intelScreen);
@@@ -275,7 -275,7 +275,7 @@@ brw_get_scratch_bo(struct brw_context *
  
  void brwInitFragProgFuncs( struct dd_function_table *functions )
  {
 -   assert(functions->ProgramStringNotify == _tnl_program_string);
 +   /* assert(functions->ProgramStringNotify == _tnl_program_string); */
  
     functions->NewProgram = brwNewProgram;
     functions->DeleteProgram = brwDeleteProgram;
     functions->MemoryBarrier = brw_memory_barrier;
  }
  
+ struct shader_times {
+    uint64_t time;
+    uint64_t written;
+    uint64_t reset;
+ };
  void
  brw_init_shader_time(struct brw_context *brw)
  {
-    const int max_entries = 4096;
-    brw->shader_time.bo = drm_intel_bo_alloc(brw->bufmgr, "shader time",
-                                             max_entries * SHADER_TIME_STRIDE,
-                                             4096);
+    const int max_entries = 2048;
+    brw->shader_time.bo =
+       drm_intel_bo_alloc(brw->bufmgr, "shader time",
+                          max_entries * SHADER_TIME_STRIDE * 3, 4096);
     brw->shader_time.names = rzalloc_array(brw, const char *, max_entries);
     brw->shader_time.ids = rzalloc_array(brw, int, max_entries);
     brw->shader_time.types = rzalloc_array(brw, enum shader_time_shader_type,
                                            max_entries);
-    brw->shader_time.cumulative = rzalloc_array(brw, uint64_t,
+    brw->shader_time.cumulative = rzalloc_array(brw, struct shader_times,
                                                 max_entries);
     brw->shader_time.max_entries = max_entries;
  }
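
With this change each shader-time entry occupies three consecutive
SHADER_TIME_STRIDE-sized slots in the BO (time, written, reset), which is why
the allocation above is max_entries * SHADER_TIME_STRIDE * 3. A sketch of the
layout arithmetic, assuming SHADER_TIME_STRIDE as defined by the driver (the
helper name is hypothetical):

   /* Byte offset of one field of one shader-time entry;
    * field is 0 for time, 1 for written, 2 for reset. */
   static size_t
   shader_time_offset(int entry, int field)
   {
      return (size_t)entry * 3 * SHADER_TIME_STRIDE +
             (size_t)field * SHADER_TIME_STRIDE;
   }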
@@@ -318,27 -324,6 +324,6 @@@ compare_time(const void *a, const void 
        return 1;
  }
  
- static void
- get_written_and_reset(struct brw_context *brw, int i,
-                       uint64_t *written, uint64_t *reset)
- {
-    enum shader_time_shader_type type = brw->shader_time.types[i];
-    assert(type == ST_VS || type == ST_GS || type == ST_FS8 ||
-           type == ST_FS16 || type == ST_CS);
-    /* Find where we recorded written and reset. */
-    int wi, ri;
-    for (wi = i; brw->shader_time.types[wi] != type + 1; wi++)
-       ;
-    for (ri = i; brw->shader_time.types[ri] != type + 2; ri++)
-       ;
-    *written = brw->shader_time.cumulative[wi];
-    *reset = brw->shader_time.cumulative[ri];
- }
  static void
  print_shader_time_line(const char *stage, const char *name,
                         int shader_num, uint64_t time, uint64_t total)
@@@ -374,26 -359,13 +359,13 @@@ brw_report_shader_time(struct brw_conte
        sorted[i] = &scaled[i];
  
        switch (type) {
-       case ST_VS_WRITTEN:
-       case ST_VS_RESET:
-       case ST_GS_WRITTEN:
-       case ST_GS_RESET:
-       case ST_FS8_WRITTEN:
-       case ST_FS8_RESET:
-       case ST_FS16_WRITTEN:
-       case ST_FS16_RESET:
-       case ST_CS_WRITTEN:
-       case ST_CS_RESET:
-          /* We'll handle these along with the time. */
-          scaled[i] = 0;
-          continue;
        case ST_VS:
        case ST_GS:
        case ST_FS8:
        case ST_FS16:
        case ST_CS:
-          get_written_and_reset(brw, i, &written, &reset);
+          written = brw->shader_time.cumulative[i].written;
+          reset = brw->shader_time.cumulative[i].reset;
           break;
  
        default:
           break;
        }
  
-       uint64_t time = brw->shader_time.cumulative[i];
+       uint64_t time = brw->shader_time.cumulative[i].time;
        if (written) {
           scaled[i] = time / written * (written + reset);
        } else {
@@@ -491,16 -463,19 +463,19 @@@ brw_collect_shader_time(struct brw_cont
      * overhead compared to the cost of tracking the time in the first place.
      */
     drm_intel_bo_map(brw->shader_time.bo, true);
-    uint32_t *times = brw->shader_time.bo->virtual;
+    void *bo_map = brw->shader_time.bo->virtual;
  
     for (int i = 0; i < brw->shader_time.num_entries; i++) {
-       brw->shader_time.cumulative[i] += times[i * SHADER_TIME_STRIDE / 4];
+       uint32_t *times = bo_map + i * 3 * SHADER_TIME_STRIDE;
+       brw->shader_time.cumulative[i].time += times[SHADER_TIME_STRIDE * 0 / 4];
+       brw->shader_time.cumulative[i].written += times[SHADER_TIME_STRIDE * 1 / 4];
+       brw->shader_time.cumulative[i].reset += times[SHADER_TIME_STRIDE * 2 / 4];
     }
  
     /* Zero the BO out to clear it out for our next collection.
      */
-    memset(times, 0, brw->shader_time.bo->size);
+    memset(bo_map, 0, brw->shader_time.bo->size);
     drm_intel_bo_unmap(brw->shader_time.bo);
  }
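
For reference, a worked example of the extrapolation in
brw_report_shader_time() above, with made-up numbers: if an entry accumulated
time = 900 cycles over written = 3 completed timestamp writes, and reset = 1
write was lost to a reset, the estimate is 900 / 3 * (3 + 1) = 1200 cycles,
i.e. the observed time scaled by (written + reset) / written.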
  
index ebfb49acf8d70747f230baf9dfcd933fc87cd07f,32c401314341b8cc468389748797a6aa124f30cd..06393c8ff2bb61343bcb56e77dfb3d8259fabec9
  #include "glsl/glsl_parser_extras.h"
  #include "main/shaderapi.h"
  
+ static void
+ shader_debug_log_mesa(void *data, const char *fmt, ...)
+ {
+    struct brw_context *brw = (struct brw_context *)data;
+    va_list args;
+    va_start(args, fmt);
+    GLuint msg_id = 0;
+    _mesa_gl_vdebug(&brw->ctx, &msg_id,
+                    MESA_DEBUG_SOURCE_SHADER_COMPILER,
+                    MESA_DEBUG_TYPE_OTHER,
+                    MESA_DEBUG_SEVERITY_NOTIFICATION, fmt, args);
+    va_end(args);
+ }
+ static void
+ shader_perf_log_mesa(void *data, const char *fmt, ...)
+ {
+    struct brw_context *brw = (struct brw_context *)data;
+    va_list args;
+    va_start(args, fmt);
+    if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
+       va_list args_copy;
+       va_copy(args_copy, args);
+       vfprintf(stderr, fmt, args_copy);
+       va_end(args_copy);
+    }
+    if (brw->perf_debug) {
+       GLuint msg_id = 0;
+       _mesa_gl_vdebug(&brw->ctx, &msg_id,
+                       MESA_DEBUG_SOURCE_SHADER_COMPILER,
+                       MESA_DEBUG_TYPE_PERFORMANCE,
+                       MESA_DEBUG_SEVERITY_MEDIUM, fmt, args);
+    }
+    va_end(args);
+ }
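
shader_perf_log_mesa above consumes its va_list twice, so it va_copy()s the
list before the first use; passing one va_list to two v*printf-style
consumers without a copy is undefined behavior. A minimal standalone sketch
of the pattern (names are hypothetical):

   #include <stdarg.h>
   #include <stdio.h>

   static void
   log_to_two_sinks(const char *fmt, ...)
   {
      va_list args, copy;
      va_start(args, fmt);
      va_copy(copy, args);          /* each consumer needs its own va_list */
      vfprintf(stderr, fmt, copy);
      va_end(copy);
      vfprintf(stdout, fmt, args);
      va_end(args);
   }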
  struct brw_compiler *
  brw_compiler_create(void *mem_ctx, const struct brw_device_info *devinfo)
  {
     struct brw_compiler *compiler = rzalloc(mem_ctx, struct brw_compiler);
  
     compiler->devinfo = devinfo;
+    compiler->shader_debug_log = shader_debug_log_mesa;
+    compiler->shader_perf_log = shader_perf_log_mesa;
  
     brw_fs_alloc_reg_sets(compiler);
     brw_vec4_alloc_reg_set(compiler);
  
+    if (devinfo->gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS))
+       compiler->scalar_vs = true;
+    nir_shader_compiler_options *nir_options =
+       rzalloc(compiler, nir_shader_compiler_options);
+    nir_options->native_integers = true;
+    /* To allow for better CSE at the NIR level, we tell NIR to split all
+     * ffma instructions during opt_algebraic and then re-combine them in
+     * a later step.
+     */
+    nir_options->lower_ffma = true;
+    nir_options->lower_sub = true;
+    /* We want the GLSL compiler to emit code that uses condition codes */
+    for (int i = 0; i < MESA_SHADER_STAGES; i++) {
+       compiler->glsl_compiler_options[i].MaxUnrollIterations = 32;
+       compiler->glsl_compiler_options[i].MaxIfDepth =
+          devinfo->gen < 6 ? 16 : UINT_MAX;
+       compiler->glsl_compiler_options[i].EmitCondCodes = true;
+       compiler->glsl_compiler_options[i].EmitNoNoise = true;
+       compiler->glsl_compiler_options[i].EmitNoMainReturn = true;
+       compiler->glsl_compiler_options[i].EmitNoIndirectInput = true;
+       compiler->glsl_compiler_options[i].EmitNoIndirectOutput =
+        (i == MESA_SHADER_FRAGMENT);
+       compiler->glsl_compiler_options[i].EmitNoIndirectTemp =
+        (i == MESA_SHADER_FRAGMENT);
+       compiler->glsl_compiler_options[i].EmitNoIndirectUniform = false;
+       compiler->glsl_compiler_options[i].LowerClipDistance = true;
+    }
+    compiler->glsl_compiler_options[MESA_SHADER_VERTEX].OptimizeForAOS = true;
+    compiler->glsl_compiler_options[MESA_SHADER_GEOMETRY].OptimizeForAOS = true;
+    if (compiler->scalar_vs) {
+       /* If we're using the scalar backend for vertex shaders, we need to
+        * configure these accordingly.
+        */
+       compiler->glsl_compiler_options[MESA_SHADER_VERTEX].EmitNoIndirectOutput = true;
+       compiler->glsl_compiler_options[MESA_SHADER_VERTEX].EmitNoIndirectTemp = true;
+       compiler->glsl_compiler_options[MESA_SHADER_VERTEX].OptimizeForAOS = false;
+       compiler->glsl_compiler_options[MESA_SHADER_VERTEX].NirOptions = nir_options;
+    }
+    compiler->glsl_compiler_options[MESA_SHADER_FRAGMENT].NirOptions = nir_options;
+    compiler->glsl_compiler_options[MESA_SHADER_COMPUTE].NirOptions = nir_options;
     return compiler;
  }
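
To illustrate the lower_ffma comment above, schematically (not actual NIR
syntax): splitting fused multiply-adds exposes the common multiply to CSE,
and a later pass re-fuses where it still wins:

   t0 = ffma(a, b, c)         m0 = a * b          m0 = a * b
   t1 = ffma(a, b, d)   ==>   t0 = m0 + c   ==>   t0 = m0 + c
                              m1 = a * b          t1 = m0 + d
                              t1 = m1 + d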
  
@@@ -97,7 -187,7 +187,7 @@@ is_scalar_shader_stage(struct brw_conte
     case MESA_SHADER_FRAGMENT:
        return true;
     case MESA_SHADER_VERTEX:
-       return brw->scalar_vs;
+       return brw->intelScreen->compiler->scalar_vs;
     default:
        return false;
     }
@@@ -351,7 -441,6 +441,7 @@@ brw_type_for_base_type(const struct gls
     case GLSL_TYPE_ERROR:
     case GLSL_TYPE_INTERFACE:
     case GLSL_TYPE_DOUBLE:
 +   case GLSL_TYPE_FUNCTION:
        unreachable("not reached");
     }
  
@@@ -632,6 -721,8 +722,8 @@@ brw_instruction_name(enum opcode op
        return "gs_ff_sync_set_primitives";
     case CS_OPCODE_CS_TERMINATE:
        return "cs_terminate";
+    case SHADER_OPCODE_BARRIER:
+       return "barrier";
     }
  
     unreachable("not reached");
@@@ -755,19 -846,22 +847,22 @@@ brw_abs_immediate(enum brw_reg_type typ
     return false;
  }
  
- backend_visitor::backend_visitor(struct brw_context *brw,
-                                  struct gl_shader_program *shader_prog,
-                                  struct gl_program *prog,
-                                  struct brw_stage_prog_data *stage_prog_data,
-                                  gl_shader_stage stage)
-    : brw(brw),
-      devinfo(brw->intelScreen->devinfo),
-      ctx(&brw->ctx),
+ backend_shader::backend_shader(const struct brw_compiler *compiler,
+                                void *log_data,
+                                void *mem_ctx,
+                                struct gl_shader_program *shader_prog,
+                                struct gl_program *prog,
+                                struct brw_stage_prog_data *stage_prog_data,
+                                gl_shader_stage stage)
+    : compiler(compiler),
+      log_data(log_data),
+      devinfo(compiler->devinfo),
       shader(shader_prog ?
          (struct brw_shader *)shader_prog->_LinkedShaders[stage] : NULL),
       shader_prog(shader_prog),
       prog(prog),
       stage_prog_data(stage_prog_data),
+      mem_ctx(mem_ctx),
       cfg(NULL),
       stage(stage)
  {
@@@ -950,7 -1044,6 +1045,6 @@@ backend_instruction::can_do_saturate() 
     case BRW_OPCODE_LINE:
     case BRW_OPCODE_LRP:
     case BRW_OPCODE_MAC:
-    case BRW_OPCODE_MACH:
     case BRW_OPCODE_MAD:
     case BRW_OPCODE_MATH:
     case BRW_OPCODE_MOV:
@@@ -1060,6 -1153,7 +1154,7 @@@ backend_instruction::has_side_effects(
     case SHADER_OPCODE_MEMORY_FENCE:
     case SHADER_OPCODE_URB_WRITE_SIMD8:
     case FS_OPCODE_FB_WRITE:
+    case SHADER_OPCODE_BARRIER:
        return true;
     default:
        return false;
@@@ -1148,13 -1242,13 +1243,13 @@@ backend_instruction::remove(bblock_t *b
  }
  
  void
- backend_visitor::dump_instructions()
+ backend_shader::dump_instructions()
  {
     dump_instructions(NULL);
  }
  
  void
- backend_visitor::dump_instructions(const char *name)
+ backend_shader::dump_instructions(const char *name)
  {
     FILE *file = stderr;
     if (name && geteuid() != 0) {
  }
  
  void
- backend_visitor::calculate_cfg()
+ backend_shader::calculate_cfg()
  {
     if (this->cfg)
        return;
  }
  
  void
- backend_visitor::invalidate_cfg()
+ backend_shader::invalidate_cfg()
  {
     ralloc_free(this->cfg);
     this->cfg = NULL;
   * trigger some of our asserts that surface indices are < BRW_MAX_SURFACES.
   */
  void
- backend_visitor::assign_common_binding_table_offsets(uint32_t next_binding_table_offset)
+ backend_shader::assign_common_binding_table_offsets(uint32_t next_binding_table_offset)
  {
     int num_textures = _mesa_fls(prog->SamplersUsed);
  
index e51c140c0f28924b906e5bd0988c7c7d9b495758,8d7a80b19eb43779b6e5aa194abf9b54d8ca8db3..236fa51f92c0f282fd0287ce318d40b479b7cea1
@@@ -615,7 -615,6 +615,7 @@@ type_size(const struct glsl_type *type
     case GLSL_TYPE_DOUBLE:
     case GLSL_TYPE_ERROR:
     case GLSL_TYPE_INTERFACE:
 +   case GLSL_TYPE_FUNCTION:
        unreachable("not reached");
     }
  
@@@ -684,9 -683,12 +684,12 @@@ vec4_visitor::setup_uniform_values(ir_v
      * order we'd walk the type, so walk the list of storage and find anything
      * with our name, or the prefix of a component that starts with our name.
      */
-    for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
+    for (unsigned u = 0; u < shader_prog->NumUniformStorage; u++) {
        struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];
  
+       if (storage->builtin)
+          continue;
        if (strncmp(ir->name, storage->name, namelen) != 0 ||
            (storage->name[namelen] != 0 &&
             storage->name[namelen] != '.' &&
  }
  
  void
- vec4_visitor::setup_uniform_clipplane_values()
+ vec4_visitor::setup_uniform_clipplane_values(gl_clip_plane *clip_planes)
  {
-    gl_clip_plane *clip_planes = brw_select_clip_planes(ctx);
     for (int i = 0; i < key->nr_userclip_plane_consts; ++i) {
        assert(this->uniforms < uniform_array_size);
        this->uniform_vector_size[this->uniforms] = 4;
@@@ -2461,11 -2461,27 +2462,27 @@@ vec4_visitor::emit_mcs_fetch(ir_textur
        new(mem_ctx) vec4_instruction(SHADER_OPCODE_TXF_MCS,
                                      dst_reg(this, glsl_type::uvec4_type));
     inst->base_mrf = 2;
-    inst->mlen = 1;
     inst->src[1] = sampler;
  
+    int param_base;
+    if (devinfo->gen >= 9) {
+       /* Gen9+ needs a message header in order to use SIMD4x2 mode */
+       vec4_instruction *header_inst = new(mem_ctx)
+          vec4_instruction(VS_OPCODE_SET_SIMD4X2_HEADER_GEN9,
+                           dst_reg(MRF, inst->base_mrf));
+       emit(header_inst);
+       inst->mlen = 2;
+       inst->header_size = 1;
+       param_base = inst->base_mrf + 1;
+    } else {
+       inst->mlen = 1;
+       param_base = inst->base_mrf;
+    }
     /* parameters are: u, v, r, lod; lod will always be zero due to api restrictions */
-    int param_base = inst->base_mrf;
     int coord_mask = (1 << ir->coordinate->type->vector_elements) - 1;
     int zero_mask = 0xf & ~coord_mask;
  
@@@ -2948,6 -2964,12 +2965,12 @@@ vec4_visitor::visit(ir_end_primitive *
     unreachable("not reached");
  }
  
+ void
+ vec4_visitor::visit(ir_barrier *)
+ {
+    unreachable("not reached");
+ }
  void
  vec4_visitor::emit_untyped_atomic(unsigned atomic_op, unsigned surf_index,
                                    dst_reg dst, src_reg offset,
@@@ -3655,7 -3677,7 +3678,7 @@@ vec4_visitor::resolve_bool_comparison(i
     *reg = neg_result;
  }
  
- vec4_visitor::vec4_visitor(struct brw_context *brw,
+ vec4_visitor::vec4_visitor(const struct brw_compiler *compiler,
                             struct brw_vec4_compile *c,
                             struct gl_program *prog,
                             const struct brw_vue_prog_key *key,
                             gl_shader_stage stage,
                           void *mem_ctx,
                             bool no_spills,
-                            shader_time_shader_type st_base,
-                            shader_time_shader_type st_written,
-                            shader_time_shader_type st_reset)
-    : backend_visitor(brw, shader_prog, prog, &prog_data->base, stage),
+                            int shader_time_index)
+    : backend_shader(compiler, NULL, mem_ctx,
+                     shader_prog, prog, &prog_data->base, stage),
       c(c),
       key(key),
       prog_data(prog_data),
       first_non_payload_grf(0),
       need_all_constants_in_pull_buffer(false),
       no_spills(no_spills),
-      st_base(st_base),
-      st_written(st_written),
-      st_reset(st_reset)
+      shader_time_index(shader_time_index)
  {
-    this->mem_ctx = mem_ctx;
     this->failed = false;
  
     this->base_ir = NULL;
index 5496225a6c70da8420787541278f9531291cbce7,592a72927c3e5d55376282d01c3f3c6658c443c8..4619ce1080d1eb059e5a4937536cfe8dfd2b2e84
@@@ -36,6 -36,7 +36,7 @@@
  #include "main/formats.h"
  #include "main/fbobject.h"
  #include "main/samplerobj.h"
+ #include "main/framebuffer.h"
  #include "program/prog_parameter.h"
  #include "program/program.h"
  #include "intel_mipmap_tree.h"
@@@ -46,7 -47,7 +47,7 @@@
   * Return a bitfield where bit n is set if barycentric interpolation mode n
   * (see enum brw_wm_barycentric_interp_mode) is needed by the fragment shader.
   */
 -static unsigned
 +unsigned
  brw_compute_barycentric_interp_modes(struct brw_context *brw,
                                       bool shade_model_flat,
                                       bool persample_shading,
@@@ -462,7 -463,7 +463,7 @@@ static void brw_wm_populate_key( struc
     GLuint lookup = 0;
     GLuint line_aa;
     bool program_uses_dfdy = fp->program.UsesDFdy;
-    bool multisample_fbo = ctx->DrawBuffer->Visual.samples > 1;
+    const bool multisample_fbo = _mesa_geometric_samples(ctx->DrawBuffer) > 1;
  
     memset(key, 0, sizeof(*key));
  
      * drawable height in order to invert the Y axis.
      */
     if (fp->program.Base.InputsRead & VARYING_BIT_POS) {
-       key->drawable_height = ctx->DrawBuffer->Height;
+       key->drawable_height = _mesa_geometric_height(ctx->DrawBuffer);
     }
  
     if ((fp->program.Base.InputsRead & VARYING_BIT_POS) || program_uses_dfdy) {
     key->persample_shading =
        _mesa_get_min_invocations_per_fragment(ctx, &fp->program, true) > 1;
     if (key->persample_shading)
-       key->persample_2x = ctx->DrawBuffer->Visual.samples == 2;
+       key->persample_2x = _mesa_geometric_samples(ctx->DrawBuffer) == 2;
  
     key->compute_pos_offset =
        _mesa_get_min_invocations_per_fragment(ctx, &fp->program, false) > 1 &&
index 33a0348486d05e1188d444e757021044a8e6c887,b68c2127f8db3142ff8735a880d2b21fafc45c0f..75cf7854effb78f139d209cea6711b6ffa1bfbd3
@@@ -60,7 -60,7 +60,7 @@@ static const struct dri_debug_control d
     { "urb",         DEBUG_URB },
     { "vs",          DEBUG_VS },
     { "clip",        DEBUG_CLIP },
 -   { "aub",         DEBUG_AUB },
 +   { "foob",        DEBUG_AUB }, /* disable aub dumping in the dri driver */
     { "shader_time", DEBUG_SHADER_TIME },
     { "no16",        DEBUG_NO16 },
     { "blorp",       DEBUG_BLORP },
@@@ -88,25 -88,22 +88,22 @@@ intel_debug_flag_for_shader_stage(gl_sh
  }
  
  void
- brw_process_intel_debug_variable(struct brw_context *brw)
+ brw_process_intel_debug_variable(struct intel_screen *screen)
  {
     uint64_t intel_debug = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
     (void) p_atomic_cmpxchg(&INTEL_DEBUG, 0, intel_debug);
  
     if (INTEL_DEBUG & DEBUG_BUFMGR)
-       dri_bufmgr_set_debug(brw->bufmgr, true);
+       dri_bufmgr_set_debug(screen->bufmgr, true);
  
-    if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && brw->gen < 7) {
+    if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && screen->devinfo->gen < 7) {
        fprintf(stderr,
                "shader_time debugging requires gen7 (Ivybridge) or better.\n");
        INTEL_DEBUG &= ~DEBUG_SHADER_TIME;
     }
  
-    if (INTEL_DEBUG & DEBUG_PERF)
-       brw->perf_debug = true;
     if (INTEL_DEBUG & DEBUG_AUB)
-       drm_intel_bufmgr_gem_set_aub_dump(brw->bufmgr, true);
+       drm_intel_bufmgr_gem_set_aub_dump(screen->bufmgr, true);
  }
  
  /**
index d6da34c7065acf675717e1f47f97db2194664969,365b4b8f718ab56ae6de58d84e41a9398ccec914..c99677c7197806027002925ec453a5dcf907c0ec
@@@ -275,11 -275,9 +275,11 @@@ intelInitExtensions(struct gl_context *
        ctx->Extensions.EXT_shader_integer_mix = ctx->Const.GLSLVersion >= 130;
        ctx->Extensions.EXT_timer_query = true;
  
 -      if (brw->gen == 5 || can_write_oacontrol(brw)) {
 -         ctx->Extensions.AMD_performance_monitor = true;
 -         ctx->Extensions.INTEL_performance_query = true;
 +      if (brw->bufmgr) {
 +         if (brw->gen == 5 || can_write_oacontrol(brw)) {
 +            ctx->Extensions.AMD_performance_monitor = true;
 +            ctx->Extensions.INTEL_performance_query = true;
 +         }
        }
     }
  
        uint64_t dummy;
  
        ctx->Extensions.ARB_blend_func_extended =
 +         brw->optionCache.info == NULL ||
           !driQueryOptionb(&brw->optionCache, "disable_blend_func_extended");
        ctx->Extensions.ARB_conditional_render_inverted = true;
        ctx->Extensions.ARB_draw_buffers_blend = true;
        ctx->Extensions.OES_depth_texture_cube_map = true;
  
        /* Test if the kernel has the ioctl. */
 -      if (drm_intel_reg_read(brw->bufmgr, TIMESTAMP, &dummy) == 0)
 +      if (brw->bufmgr && drm_intel_reg_read(brw->bufmgr, TIMESTAMP, &dummy) == 0)
           ctx->Extensions.ARB_timer_query = true;
  
        /* Only enable this in core profile because other parts of Mesa behave
        }
     }
  
+    brw->predicate.supported = false;
     if (brw->gen >= 7) {
        ctx->Extensions.ARB_conservative_depth = true;
        ctx->Extensions.ARB_derivative_control = true;
+       ctx->Extensions.ARB_framebuffer_no_attachments = true;
        ctx->Extensions.ARB_gpu_shader5 = true;
        ctx->Extensions.ARB_shader_atomic_counters = true;
        ctx->Extensions.ARB_texture_compression_bptc = true;
        ctx->Extensions.ARB_texture_view = true;
  
 -      if (can_do_pipelined_register_writes(brw)) {
 +      if (brw->bufmgr &&
 +          can_do_pipelined_register_writes(brw)) {
           ctx->Extensions.ARB_draw_indirect = true;
           ctx->Extensions.ARB_transform_feedback2 = true;
           ctx->Extensions.ARB_transform_feedback3 = true;
           ctx->Extensions.ARB_transform_feedback_instanced = true;
+          if (brw->intelScreen->cmd_parser_version >= 2)
+             brw->predicate.supported = true;
        }
  
        /* Only enable this in core profile because other parts of Mesa behave
     if (ctx->API != API_OPENGL_CORE)
        ctx->Extensions.ARB_color_buffer_float = true;
  
 -   if (ctx->Mesa_DXTn || driQueryOptionb(&brw->optionCache, "force_s3tc_enable"))
 +   if (ctx->Mesa_DXTn ||
 +       (brw->optionCache.info != NULL &&
 +        driQueryOptionb(&brw->optionCache, "force_s3tc_enable")))
        ctx->Extensions.EXT_texture_compression_s3tc = true;
  
     ctx->Extensions.ANGLE_texture_compression_dxt = true;
index 4860a160ee9e01fef37fb0536b3d238ca2baa5c2,f9398d7859e936ced92733312dd905782bb21182..de14696bd76f2fbc8577a9682f020777794a691d
@@@ -39,6 -39,7 +39,7 @@@
  #include "swrast/s_renderbuffer.h"
  #include "util/ralloc.h"
  #include "brw_shader.h"
+ #include "glsl/nir/nir.h"
  
  #include "utils.h"
  #include "xmlpool.h"
@@@ -1372,6 -1373,8 +1373,8 @@@ __DRIconfig **intelInitScreen2(__DRIscr
     if (!intelScreen->devinfo)
        return false;
  
+    brw_process_intel_debug_variable(intelScreen);
     intelScreen->hw_must_use_separate_stencil = intelScreen->devinfo->gen >= 7;
  
     intelScreen->hw_has_swizzling = intel_detect_swizzling(intelScreen);
           (ret != -1 || errno != EINVAL);
     }
  
+    struct drm_i915_getparam getparam;
+    getparam.param = I915_PARAM_CMD_PARSER_VERSION;
+    getparam.value = &intelScreen->cmd_parser_version;
+    const int ret = drmIoctl(psp->fd, DRM_IOCTL_I915_GETPARAM, &getparam);
+    if (ret == -1)
+       intelScreen->cmd_parser_version = 0;
     psp->extensions = !intelScreen->has_context_reset_notification
        ? intelScreenExtensions : intelRobustScreenExtensions;
  
     return (const __DRIconfig**) intel_screen_make_configs(psp);
  }
  
 +struct intel_screen *
 +intel_screen_create(int fd)
 +{
 +   __DRIscreen *psp;
 +   __DRIconfig **configs;
 +   int i;
 +
 +   psp = malloc(sizeof(*psp));
 +   if (psp == NULL)
 +      return NULL;
 +
 +   psp->image.loader = (void *) 1; /* Don't complain about this being NULL */
 +   psp->fd = fd;
 +   psp->dri2.useInvalidate = (void *) 1;
 +
 +   configs = (__DRIconfig **) intelInitScreen2(psp);
 +   for (i = 0; configs[i]; i++)
 +      free(configs[i]);
 +   free(configs);
 +
 +   return psp->driverPrivate;
 +}
 +
 +void
 +intel_screen_destroy(struct intel_screen *screen)
 +{
 +   __DRIscreen *psp;
 +
 +   psp = screen->driScrnPriv;
 +   intelDestroyScreen(screen->driScrnPriv);
 +   free(psp);
 +}
 +
 +
 +struct brw_context *
 +intel_context_create(struct intel_screen *screen)
 +{
 +   __DRIcontext *driContextPriv;
 +   struct brw_context *brw;
 +   unsigned error;
 +
 +   driContextPriv = malloc(sizeof(*driContextPriv));
 +   if (driContextPriv == NULL)
 +      return NULL;
 +
 +   driContextPriv->driScreenPriv = screen->driScrnPriv;
 +
 +   brwCreateContext(API_OPENGL_CORE,
 +                    NULL, /* visual */
 +                    driContextPriv,
 +                    3, 0,
 +                    0, /* flags */
 +                    false, /* notify_reset */
 +                    &error,
 +                    NULL);
 +
 +   brw = driContextPriv->driverPrivate;
 +   brw->ctx.FirstTimeCurrent = false;
 +
 +   return driContextPriv->driverPrivate;
 +}
 +
 +void
 +intel_context_destroy(struct brw_context *brw)
 +{
 +   __DRIcontext *driContextPriv;
 +
 +   driContextPriv = brw->driContext;
 +   intelDestroyContext(driContextPriv);
 +   free(driContextPriv);
 +}
 +
  struct intel_buffer {
     __DRIbuffer base;
     drm_intel_bo *bo;
diff --combined src/mesa/main/mtypes.h
index bd84113ea919e5ee16a215c5f2aecd862288ad0a,983b9dc307b87c075a0679fe10b7545764b5f1d6..481fd5e7fdf87545e0428f6e108354fa0c89944a
@@@ -43,7 -43,6 +43,6 @@@
  #include "glapi/glapi.h"
  #include "math/m_matrix.h"    /* GLmatrix */
  #include "glsl/shader_enums.h"
- #include "util/simple_list.h" /* struct simple_node */
  #include "main/formats.h"       /* MESA_FORMAT_COUNT */
  
  
@@@ -398,7 -397,6 +397,6 @@@ struct gl_confi
  {
     GLboolean rgbMode;
     GLboolean floatMode;
-    GLboolean colorIndexMode;  /* XXX is this used anywhere? */
     GLuint doubleBufferMode;
     GLuint stereoMode;
  
@@@ -2099,8 -2097,6 +2097,6 @@@ struct gl_progra
     GLbitfield64 DoubleInputsRead;     /**< Bitmask of which input regs are read  and are doubles */
     GLbitfield64 OutputsWritten; /**< Bitmask of which output regs are written */
     GLbitfield SystemValuesRead;   /**< Bitmask of SYSTEM_VALUE_x inputs used */
-    GLbitfield InputFlags[MAX_PROGRAM_INPUTS];   /**< PROG_PARAM_BIT_x flags */
-    GLbitfield OutputFlags[MAX_PROGRAM_OUTPUTS]; /**< PROG_PARAM_BIT_x flags */
     GLbitfield TexturesUsed[MAX_COMBINED_TEXTURE_IMAGE_UNITS];  /**< TEXTURE_x_BIT bitmask */
     GLbitfield SamplersUsed;   /**< Bitfield of which samplers are used */
     GLbitfield ShadowSamplers; /**< Texture units used for shadow sampling. */
@@@ -2275,16 -2271,10 +2271,10 @@@ struct gl_vertex_program_stat
   */
  struct gl_geometry_program_state
  {
-    GLboolean Enabled;               /**< GL_ARB_GEOMETRY_SHADER4 */
-    GLboolean _Enabled;              /**< Enabled and valid program? */
-    struct gl_geometry_program *Current;  /**< user-bound geometry program */
     /** Currently enabled and valid program (including internal programs
      * and compiled shader programs).
      */
     struct gl_geometry_program *_Current;
-    GLfloat Parameters[MAX_PROGRAM_ENV_PARAMS][4]; /**< Env params */
  };
  
  /**
@@@ -2320,8 -2310,6 +2310,6 @@@ struct gl_fragment_program_stat
   */
  struct gl_compute_program_state
  {
-    struct gl_compute_program *Current;  /**< user-bound compute program */
     /** Currently enabled and valid program (including internal programs
      * and compiled shader programs).
      */
@@@ -2570,11 -2558,6 +2558,11 @@@ struct gl_uniform_bloc
      */
     GLuint Binding;
  
 +   /**
 +    * Vulkan descriptor set qualifier for this block.
 +    */
 +   GLuint Set;
 +
     /**
      * Minimum size (in bytes) of a buffer object to back this uniform buffer
      * (GL_UNIFORM_BLOCK_DATA_SIZE).
@@@ -2733,7 -2716,7 +2721,7 @@@ struct gl_shader_progra
     } Comp;
  
     /* post-link info: */
-    unsigned NumUserUniformStorage;
+    unsigned NumUniformStorage;
     unsigned NumHiddenUniforms;
     struct gl_uniform_storage *UniformStorage;
  
@@@ -2832,6 -2815,8 +2820,8 @@@ struct gl_pipeline_objec
  
     mtx_t Mutex;
  
+    GLchar *Label;   /**< GL_KHR_debug */
     /**
      * Programs used for rendering
      *
@@@ -3009,7 -2994,6 +2999,6 @@@ struct gl_shared_stat
     struct _mesa_HashTable *Programs; /**< All vertex/fragment programs */
     struct gl_vertex_program *DefaultVertexProgram;
     struct gl_fragment_program *DefaultFragmentProgram;
-    struct gl_geometry_program *DefaultGeometryProgram;
     /*@}*/
  
     /* GL_ATI_fragment_shader */
@@@ -3151,12 -3135,29 +3140,29 @@@ struct gl_framebuffe
      */
     struct gl_config Visual;
  
-    GLuint Width, Height;      /**< size of frame buffer in pixels */
+    /**
+     * Size of frame buffer in pixels. If there are no attachments, then both
+     * of these are 0.
+     */
+    GLuint Width, Height;
  
-    /** \name  Drawing bounds (Intersection of buffer size and scissor box) */
+    /**
+     * In the case that the framebuffer has no attachment (i.e.
+     * GL_ARB_framebuffer_no_attachments) then the geometry of
+     * the framebuffer is specified by the default values.
+     */
+    struct {
+      GLuint Width, Height, Layers, NumSamples;
+      GLboolean FixedSampleLocations;
+    } DefaultGeometry;
+    /** \name  Drawing bounds (Intersection of buffer size and scissor box)
+     * The drawing region is given by [_Xmin, _Xmax) x [_Ymin, _Ymax)
+     * (inclusive for _Xmin and _Ymin, exclusive for _Xmax and _Ymax).
+     */
     /*@{*/
-    GLint _Xmin, _Xmax;  /**< inclusive */
-    GLint _Ymin, _Ymax;  /**< exclusive */
+    GLint _Xmin, _Xmax;
+    GLint _Ymin, _Ymax;
     /*@}*/
  
     /** \name  Derived Z buffer stuff */
     /** One of the GL_FRAMEBUFFER_(IN)COMPLETE_* tokens */
     GLenum _Status;
  
+    /** Whether at least one attachment has Type != GL_NONE.
+     * NOTE: Width and Height are set to 0 when there are no attachments.
+     * A backend driver supporting GL_ARB_framebuffer_no_attachments must
+     * check the flag _HasAttachments and, if it is GL_FALSE, use the
+     * values in DefaultGeometry to initialize its viewport, scissor and
+     * so on (in particular, _Xmin, _Xmax, _Ymin and _Ymax do NOT take
+     * _HasAttachments being false into account). To get the geometry of
+     * the framebuffer, the helper functions
+     *   _mesa_geometric_width(),
+     *   _mesa_geometric_height(),
+     *   _mesa_geometric_samples() and
+     *   _mesa_geometric_layers()
+     * are available; they check _HasAttachments.
+     */
+    bool _HasAttachments;
     /** Integer color values */
     GLboolean _IntegerColor;
  
     /**
      * The maximum number of layers in the framebuffer, or 0 if the framebuffer
      * is not layered.  For cube maps and cube map arrays, each cube face
-     * counts as a layer.
+     * counts as a layer. As is the case for Width and Height, a backend
+     * driver supporting GL_ARB_framebuffer_no_attachments must use
+     * DefaultGeometry when _HasAttachments is false.
      */
     GLuint MaxNumLayers;
  
@@@ -3358,6 -3377,14 +3382,14 @@@ struct gl_constant
     GLuint MaxRenderbufferSize;   /**< GL_EXT_framebuffer_object */
     GLuint MaxSamples;            /**< GL_ARB_framebuffer_object */
  
+    /**
+     * GL_ARB_framebuffer_no_attachments
+     */
+    GLuint MaxFramebufferWidth;
+    GLuint MaxFramebufferHeight;
+    GLuint MaxFramebufferLayers;
+    GLuint MaxFramebufferSamples;
     /** Number of varying vectors between any two shader stages. */
     GLuint MaxVarying;
  
@@@ -3635,6 -3662,7 +3667,7 @@@ struct gl_extension
     GLboolean ARB_fragment_program;
     GLboolean ARB_fragment_program_shadow;
     GLboolean ARB_fragment_shader;
+    GLboolean ARB_framebuffer_no_attachments;
     GLboolean ARB_framebuffer_object;
     GLboolean ARB_explicit_attrib_location;
     GLboolean ARB_explicit_uniform_location;
@@@ -4422,7 -4450,12 +4455,12 @@@ enum _debu
     DEBUG_INCOMPLETE_FBO         = (1 << 3)
  };
  
+ static inline bool
+ _mesa_active_fragment_shader_has_atomic_ops(const struct gl_context *ctx)
+ {
+    return ctx->Shader._CurrentFragmentProgram != NULL &&
+       ctx->Shader._CurrentFragmentProgram->NumAtomicBuffers > 0;
+ }
  
  #ifdef __cplusplus
  }
index fceed712bdb47d75d97005e84eaa77c883877fe2,18e3bc5d5cc1ed5a74da7dd25cfdce42450f229d..3bffe90ff1f190a96bfe61a4823aa42cd8c91326
@@@ -262,6 -262,7 +262,7 @@@ public
     virtual void visit(ir_if *);
     virtual void visit(ir_emit_vertex *);
     virtual void visit(ir_end_primitive *);
+    virtual void visit(ir_barrier *);
     /*@}*/
  
     src_reg result;
@@@ -405,7 -406,7 +406,7 @@@ ir_to_mesa_visitor::emit_dp(ir_instruct
                            dst_reg dst, src_reg src0, src_reg src1,
                            unsigned elements)
  {
-    static const gl_inst_opcode dot_opcodes[] = {
+    static const enum prog_opcode dot_opcodes[] = {
        OPCODE_DP2, OPCODE_DP3, OPCODE_DP4
     };
  
@@@ -541,7 -542,6 +542,7 @@@ type_size(const struct glsl_type *type
     case GLSL_TYPE_VOID:
     case GLSL_TYPE_ERROR:
     case GLSL_TYPE_INTERFACE:
 +   case GLSL_TYPE_FUNCTION:
        assert(!"Invalid type in type_size");
        break;
     }
@@@ -2118,6 -2118,12 +2119,12 @@@ ir_to_mesa_visitor::visit(ir_end_primit
     assert(!"Geometry shaders not supported.");
  }
  
+ void
+ ir_to_mesa_visitor::visit(ir_barrier *)
+ {
+    unreachable("GLSL barrier() not supported.");
+ }
  ir_to_mesa_visitor::ir_to_mesa_visitor()
  {
     result.file = PROGRAM_UNDEFINED;
@@@ -2407,9 -2413,14 +2414,14 @@@ _mesa_associate_uniform_storage(struct 
        if (!found)
         continue;
  
+       struct gl_uniform_storage *storage =
+          &shader_program->UniformStorage[location];
+       /* Do not associate any uniform storage to built-in uniforms */
+       if (storage->builtin)
+          continue;
        if (location != last_location) {
-        struct gl_uniform_storage *storage =
-           &shader_program->UniformStorage[location];
         enum gl_uniform_driver_format format = uniform_native;
  
         unsigned columns = 0;
           case GLSL_TYPE_STRUCT:
           case GLSL_TYPE_ERROR:
           case GLSL_TYPE_INTERFACE:
 +         case GLSL_TYPE_FUNCTION:
            assert(!"Should not get here.");
            break;
         }
@@@ -2722,7 -2732,7 +2734,7 @@@ get_mesa_program(struct gl_context *ctx
        mesa_inst->Opcode = inst->op;
        mesa_inst->CondUpdate = inst->cond_update;
        if (inst->saturate)
-        mesa_inst->SaturateMode = SATURATE_ZERO_ONE;
+        mesa_inst->Saturate = GL_TRUE;
        mesa_inst->DstReg.File = inst->dst.file;
        mesa_inst->DstReg.Index = inst->dst.index;
        mesa_inst->DstReg.CondMask = inst->dst.cond_mask;
diff --combined src/vulkan/compiler.cpp
index 19a403aa1c1ab97232b81865395b6a569390bb21,0000000000000000000000000000000000000000..0ea44ac6ce567a677496543118bb18e5f3b8e4d3
mode 100644,000000..100644
--- /dev/null
@@@ -1,1123 -1,0 +1,1121 @@@
-    brw_process_intel_debug_variable(compiler->brw);
 +/*
 + * Copyright Â© 2015 Intel Corporation
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice (including the next
 + * paragraph) shall be included in all copies or substantial portions of the
 + * Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + */
 +
 +#include <sys/stat.h>
 +#include <unistd.h>
 +#include <fcntl.h>
 +
 +#include "private.h"
 +
 +#include <brw_context.h>
 +#include <brw_wm.h> /* brw_new_shader_program is here */
 +
 +#include <brw_vs.h>
 +#include <brw_gs.h>
 +#include <brw_cs.h>
 +
 +#include <mesa/main/shaderobj.h>
 +#include <mesa/main/fbobject.h>
 +#include <mesa/main/context.h>
 +#include <mesa/program/program.h>
 +#include <glsl/program.h>
 +
 +#define SPIR_V_MAGIC_NUMBER 0x07230203
 +
 +static void
 +fail_if(int cond, const char *format, ...)
 +{
 +   va_list args;
 +
 +   if (!cond)
 +      return;
 +
 +   va_start(args, format);
 +   vfprintf(stderr, format, args);
 +   va_end(args);
 +
 +   exit(1);
 +}
 +
 +static VkResult
 +set_binding_table_layout(struct brw_stage_prog_data *prog_data,
 +                         struct anv_pipeline *pipeline, uint32_t stage)
 +{
 +   uint32_t bias, count, k, *map;
 +   struct anv_pipeline_layout *layout = pipeline->layout;
 +
 +   /* No layout is valid for shaders that don't bind any resources. */
 +   if (pipeline->layout == NULL)
 +      return VK_SUCCESS;
 +
 +   if (stage == VK_SHADER_STAGE_FRAGMENT)
 +      bias = MAX_RTS;
 +   else
 +      bias = 0;
 +
 +   count = layout->stage[stage].surface_count;
 +   prog_data->map_entries =
 +      (uint32_t *) malloc(count * sizeof(prog_data->map_entries[0]));
 +   if (prog_data->map_entries == NULL)
 +      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 +
 +   k = bias;
 +   map = prog_data->map_entries;
 +   for (uint32_t i = 0; i < layout->num_sets; i++) {
 +      prog_data->bind_map[i].index = map;
 +      for (uint32_t j = 0; j < layout->set[i].layout->stage[stage].surface_count; j++)
 +         *map++ = k++;
 +
 +      prog_data->bind_map[i].index_count =
 +         layout->set[i].layout->stage[stage].surface_count;
 +   }
 +
 +   return VK_SUCCESS;
 +}
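
A worked example of the map built above, assuming the fragment stage
(bias = MAX_RTS = 8) and two descriptor sets whose surface counts are 3 and 2
(illustrative numbers):

   bind_map[0].index -> { 8, 9, 10 }    /* set 0, bindings 0..2 */
   bind_map[1].index -> { 11, 12 }      /* set 1, bindings 0..1 */

Binding-table slots 0..7 stay reserved for the render targets (the bias),
and each set/binding pair is flattened onto the next consecutive slot.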
 +
 +static void
 +brw_vs_populate_key(struct brw_context *brw,
 +                    struct brw_vertex_program *vp,
 +                    struct brw_vs_prog_key *key)
 +{
 +   struct gl_context *ctx = &brw->ctx;
 +   /* BRW_NEW_VERTEX_PROGRAM */
 +   struct gl_program *prog = (struct gl_program *) vp;
 +
 +   memset(key, 0, sizeof(*key));
 +
 +   /* Just upload the program verbatim for now.  Always send it all
 +    * the inputs it asks for, whether they are varying or not.
 +    */
 +   key->base.program_string_id = vp->id;
 +   brw_setup_vue_key_clip_info(brw, &key->base,
 +                               vp->program.Base.UsesClipDistanceOut);
 +
 +   /* _NEW_POLYGON */
 +   if (brw->gen < 6) {
 +      key->copy_edgeflag = (ctx->Polygon.FrontMode != GL_FILL ||
 +                           ctx->Polygon.BackMode != GL_FILL);
 +   }
 +
 +   if (prog->OutputsWritten & (VARYING_BIT_COL0 | VARYING_BIT_COL1 |
 +                               VARYING_BIT_BFC0 | VARYING_BIT_BFC1)) {
 +      /* _NEW_LIGHT | _NEW_BUFFERS */
 +      key->clamp_vertex_color = ctx->Light._ClampVertexColor;
 +   }
 +
 +   /* _NEW_POINT */
 +   if (brw->gen < 6 && ctx->Point.PointSprite) {
 +      for (int i = 0; i < 8; i++) {
 +         if (ctx->Point.CoordReplace[i])
 +            key->point_coord_replace |= (1 << i);
 +      }
 +   }
 +
 +   /* _NEW_TEXTURE */
 +   brw_populate_sampler_prog_key_data(ctx, prog, brw->vs.base.sampler_count,
 +                                      &key->base.tex);
 +}
 +
 +static bool
 +really_do_vs_prog(struct brw_context *brw,
 +                  struct gl_shader_program *prog,
 +                  struct brw_vertex_program *vp,
 +                  struct brw_vs_prog_key *key, struct anv_pipeline *pipeline)
 +{
 +   GLuint program_size;
 +   const GLuint *program;
 +   struct brw_vs_compile c;
 +   struct brw_vs_prog_data *prog_data = &pipeline->vs_prog_data;
 +   struct brw_stage_prog_data *stage_prog_data = &prog_data->base.base;
 +   void *mem_ctx;
 +   struct gl_shader *vs = NULL;
 +
 +   if (prog)
 +      vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];
 +
 +   memset(&c, 0, sizeof(c));
 +   memcpy(&c.key, key, sizeof(*key));
 +   memset(prog_data, 0, sizeof(*prog_data));
 +
 +   mem_ctx = ralloc_context(NULL);
 +
 +   c.vp = vp;
 +
 +   /* Allocate the references to the uniforms that will end up in the
 +    * prog_data associated with the compiled program, and which will be freed
 +    * by the state cache.
 +    */
 +   int param_count;
 +   if (vs) {
 +      /* We add padding around uniform values below vec4 size, with the worst
 +       * case being a float value that gets blown up to a vec4, so be
 +       * conservative here.
 +       */
 +      param_count = vs->num_uniform_components * 4;
 +
 +   } else {
 +      param_count = vp->program.Base.Parameters->NumParameters * 4;
 +   }
 +   /* vec4_visitor::setup_uniform_clipplane_values() also uploads user clip
 +    * planes as uniforms.
 +    */
 +   param_count += c.key.base.nr_userclip_plane_consts * 4;
 +
 +   /* Setting nr_params here NOT to the size of the param and pull_param
 +    * arrays, but to the number of uniform components vec4_visitor
 +    * needs. vec4_visitor::setup_uniforms() will set it back to a proper value.
 +    */
 +   stage_prog_data->nr_params = ALIGN(param_count, 4) / 4;
 +   if (vs) {
 +      stage_prog_data->nr_params += vs->num_samplers;
 +   }
 +
 +   GLbitfield64 outputs_written = vp->program.Base.OutputsWritten;
 +   prog_data->inputs_read = vp->program.Base.InputsRead;
 +
 +   if (c.key.copy_edgeflag) {
 +      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
 +      prog_data->inputs_read |= VERT_BIT_EDGEFLAG;
 +   }
 +
 +   if (brw->gen < 6) {
 +      /* Put dummy slots into the VUE for the SF to put the replaced
 +       * point sprite coords in.  We shouldn't need these dummy slots,
 +       * which take up precious URB space, but it would mean that the SF
 +       * doesn't get nice aligned pairs of input coords into output
 +       * coords, which would be a pain to handle.
 +       */
 +      for (int i = 0; i < 8; i++) {
 +         if (c.key.point_coord_replace & (1 << i))
 +            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
 +      }
 +
 +      /* if back colors are written, allocate slots for front colors too */
 +      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
 +         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
 +      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
 +         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
 +   }
 +
 +   /* In order for legacy clipping to work, we need to populate the clip
 +    * distance varying slots whenever clipping is enabled, even if the vertex
 +    * shader doesn't write to gl_ClipDistance.
 +    */
 +   if (c.key.base.userclip_active) {
 +      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
 +      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
 +   }
 +
 +   brw_compute_vue_map(brw->intelScreen->devinfo,
 +                       &prog_data->base.vue_map, outputs_written);
 +
 +   set_binding_table_layout(&prog_data->base.base, pipeline,
 +                            VK_SHADER_STAGE_VERTEX);
 +
 +   /* Emit GEN4 code.
 +    */
 +   program = brw_vs_emit(brw, prog, &c, prog_data, mem_ctx, &program_size);
 +   if (program == NULL) {
 +      ralloc_free(mem_ctx);
 +      return false;
 +   }
 +
 +   struct anv_state vs_state = anv_state_stream_alloc(&pipeline->program_stream,
 +                                                      program_size, 64);
 +   memcpy(vs_state.map, program, program_size);
 +
 +   pipeline->vs_simd8 = vs_state.offset;
 +
 +   ralloc_free(mem_ctx);
 +
 +   return true;
 +}
 +
 +void brw_wm_populate_key(struct brw_context *brw,
 +                         struct brw_fragment_program *fp,
 +                         struct brw_wm_prog_key *key)
 +{
 +   struct gl_context *ctx = &brw->ctx;
 +   struct gl_program *prog = (struct gl_program *) brw->fragment_program;
 +   GLuint lookup = 0;
 +   GLuint line_aa;
 +   bool program_uses_dfdy = fp->program.UsesDFdy;
 +   struct gl_framebuffer draw_buffer;
 +   bool multisample_fbo;
 +
 +   memset(key, 0, sizeof(*key));
 +
 +   for (int i = 0; i < MAX_SAMPLERS; i++) {
 +      /* Assume color sampler, no swizzling. */
 +      key->tex.swizzles[i] = SWIZZLE_XYZW;
 +   }
 +
 +   /* A non-zero framebuffer name indicates that the framebuffer was created by
 +    * the user rather than the window system. */
 +   draw_buffer.Name = 1;
 +   draw_buffer.Visual.samples = 1;
 +   draw_buffer._NumColorDrawBuffers = 1;
 +   draw_buffer.Width = 400;
 +   draw_buffer.Height = 400;
 +   ctx->DrawBuffer = &draw_buffer;
 +
 +   multisample_fbo = ctx->DrawBuffer->Visual.samples > 1;
 +
 +   /* Build the index for table lookup
 +    */
 +   if (brw->gen < 6) {
 +      /* _NEW_COLOR */
 +      if (fp->program.UsesKill || ctx->Color.AlphaEnabled)
 +         lookup |= IZ_PS_KILL_ALPHATEST_BIT;
 +
 +      if (fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
 +         lookup |= IZ_PS_COMPUTES_DEPTH_BIT;
 +
 +      /* _NEW_DEPTH */
 +      if (ctx->Depth.Test)
 +         lookup |= IZ_DEPTH_TEST_ENABLE_BIT;
 +
 +      if (ctx->Depth.Test && ctx->Depth.Mask) /* ?? */
 +         lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;
 +
 +      /* _NEW_STENCIL | _NEW_BUFFERS */
 +      if (ctx->Stencil._Enabled) {
 +         lookup |= IZ_STENCIL_TEST_ENABLE_BIT;
 +
 +         if (ctx->Stencil.WriteMask[0] ||
 +             ctx->Stencil.WriteMask[ctx->Stencil._BackFace])
 +            lookup |= IZ_STENCIL_WRITE_ENABLE_BIT;
 +      }
 +      key->iz_lookup = lookup;
 +   }
 +
 +   line_aa = AA_NEVER;
 +
 +   /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
 +   if (ctx->Line.SmoothFlag) {
 +      if (brw->reduced_primitive == GL_LINES) {
 +         line_aa = AA_ALWAYS;
 +      }
 +      else if (brw->reduced_primitive == GL_TRIANGLES) {
 +         if (ctx->Polygon.FrontMode == GL_LINE) {
 +            line_aa = AA_SOMETIMES;
 +
 +            if (ctx->Polygon.BackMode == GL_LINE ||
 +                (ctx->Polygon.CullFlag &&
 +                 ctx->Polygon.CullFaceMode == GL_BACK))
 +               line_aa = AA_ALWAYS;
 +         }
 +         else if (ctx->Polygon.BackMode == GL_LINE) {
 +            line_aa = AA_SOMETIMES;
 +
 +            if ((ctx->Polygon.CullFlag &&
 +                 ctx->Polygon.CullFaceMode == GL_FRONT))
 +               line_aa = AA_ALWAYS;
 +         }
 +      }
 +   }
 +
 +   key->line_aa = line_aa;
 +
 +   /* _NEW_HINT */
 +   key->high_quality_derivatives =
 +      ctx->Hint.FragmentShaderDerivative == GL_NICEST;
 +
 +   if (brw->gen < 6)
 +      key->stats_wm = brw->stats_wm;
 +
 +   /* _NEW_LIGHT */
 +   key->flat_shade = (ctx->Light.ShadeModel == GL_FLAT);
 +
 +   /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
 +   key->clamp_fragment_color = ctx->Color._ClampFragmentColor;
 +
 +   /* _NEW_TEXTURE */
 +   brw_populate_sampler_prog_key_data(ctx, prog, brw->wm.base.sampler_count,
 +                                      &key->tex);
 +
 +   /* _NEW_BUFFERS */
 +   /*
 +    * Include the draw buffer origin and height so that we can calculate
 +    * fragment position values relative to the bottom left of the drawable,
 +    * from the incoming screen origin relative position we get as part of our
 +    * payload.
 +    *
 +    * This is only needed for the WM_WPOSXY opcode when the fragment program
 +    * uses the gl_FragCoord input.
 +    *
 +    * We could avoid recompiling by including this as a constant referenced by
 +    * our program, but if we were to do that it would also be nice to handle
 +    * getting that constant updated at batchbuffer submit time (when we
 +    * hold the lock and know where the buffer really is) rather than at emit
 +    * time when we don't hold the lock and are just guessing.  We could also
 +    * just avoid using this as key data if the program doesn't use
 +    * fragment.position.
 +    *
 +    * For DRI2 the origin_x/y will always be (0,0) but we still need the
 +    * drawable height in order to invert the Y axis.
 +    */
 +   if (fp->program.Base.InputsRead & VARYING_BIT_POS) {
 +      key->drawable_height = ctx->DrawBuffer->Height;
 +   }
 +
 +   if ((fp->program.Base.InputsRead & VARYING_BIT_POS) || program_uses_dfdy) {
 +      key->render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
 +   }
 +
 +   /* _NEW_BUFFERS */
 +   key->nr_color_regions = ctx->DrawBuffer->_NumColorDrawBuffers;
 +
 +   /* _NEW_MULTISAMPLE, _NEW_COLOR, _NEW_BUFFERS */
 +   key->replicate_alpha = ctx->DrawBuffer->_NumColorDrawBuffers > 1 &&
 +      (ctx->Multisample.SampleAlphaToCoverage || ctx->Color.AlphaEnabled);
 +
 +   /* _NEW_BUFFERS _NEW_MULTISAMPLE */
 +   /* Ignore sample qualifier while computing this flag. */
 +   key->persample_shading =
 +      _mesa_get_min_invocations_per_fragment(ctx, &fp->program, true) > 1;
 +   if (key->persample_shading)
 +      key->persample_2x = ctx->DrawBuffer->Visual.samples == 2;
 +
 +   key->compute_pos_offset =
 +      _mesa_get_min_invocations_per_fragment(ctx, &fp->program, false) > 1 &&
 +      fp->program.Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_POS;
 +
 +   key->compute_sample_id =
 +      multisample_fbo &&
 +      ctx->Multisample.Enabled &&
 +      (fp->program.Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_ID);
 +
 +   /* BRW_NEW_VUE_MAP_GEOM_OUT */
 +   if (brw->gen < 6 || _mesa_bitcount_64(fp->program.Base.InputsRead &
 +                                         BRW_FS_VARYING_INPUT_MASK) > 16)
 +      key->input_slots_valid = brw->vue_map_geom_out.slots_valid;
 +
 +
 +   /* _NEW_COLOR | _NEW_BUFFERS */
 +   /* Pre-gen6, the hardware alpha test always used each render
 +    * target's alpha to do alpha test, as opposed to render target 0's alpha
 +    * like GL requires.  Fix that by building the alpha test into the
 +    * shader, and we'll skip enabling the fixed function alpha test.
 +    */
 +   if (brw->gen < 6 && ctx->DrawBuffer->_NumColorDrawBuffers > 1 && ctx->Color.AlphaEnabled) {
 +      key->alpha_test_func = ctx->Color.AlphaFunc;
 +      key->alpha_test_ref = ctx->Color.AlphaRef;
 +   }
 +
 +   /* The unique fragment program ID */
 +   key->program_string_id = fp->id;
 +
 +   ctx->DrawBuffer = NULL;
 +}
 +
 +static uint8_t
 +computed_depth_mode(struct gl_fragment_program *fp)
 +{
 +   if (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
 +      switch (fp->FragDepthLayout) {
 +      case FRAG_DEPTH_LAYOUT_NONE:
 +      case FRAG_DEPTH_LAYOUT_ANY:
 +         return BRW_PSCDEPTH_ON;
 +      case FRAG_DEPTH_LAYOUT_GREATER:
 +         return BRW_PSCDEPTH_ON_GE;
 +      case FRAG_DEPTH_LAYOUT_LESS:
 +         return BRW_PSCDEPTH_ON_LE;
 +      case FRAG_DEPTH_LAYOUT_UNCHANGED:
 +         return BRW_PSCDEPTH_OFF;
 +      }
 +   }
 +   return BRW_PSCDEPTH_OFF;
 +}
 +
 +static bool
 +really_do_wm_prog(struct brw_context *brw,
 +                  struct gl_shader_program *prog,
 +                  struct brw_fragment_program *fp,
 +                  struct brw_wm_prog_key *key, struct anv_pipeline *pipeline)
 +{
 +   struct gl_context *ctx = &brw->ctx;
 +   void *mem_ctx = ralloc_context(NULL);
 +   struct brw_wm_prog_data *prog_data = &pipeline->wm_prog_data;
 +   struct gl_shader *fs = NULL;
 +   unsigned int program_size;
 +   const uint32_t *program;
 +
 +   if (prog)
 +      fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
 +
 +   memset(prog_data, 0, sizeof(*prog_data));
 +
 +   /* key->alpha_test_func means simulating alpha testing via discards,
 +    * so the shader definitely kills pixels.
 +    */
 +   prog_data->uses_kill = fp->program.UsesKill || key->alpha_test_func;
 +
 +   prog_data->computed_depth_mode = computed_depth_mode(&fp->program);
 +
 +   /* Allocate the references to the uniforms that will end up in the
 +    * prog_data associated with the compiled program, and which will be freed
 +    * by the state cache.
 +    */
 +   int param_count;
 +   if (fs) {
 +      param_count = fs->num_uniform_components;
 +   } else {
 +      param_count = fp->program.Base.Parameters->NumParameters * 4;
 +   }
 +   /* The backend also sometimes adds params for texture size. */
 +   param_count += 2 * ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits;
 +   prog_data->base.param =
 +      rzalloc_array(NULL, const gl_constant_value *, param_count);
 +   prog_data->base.pull_param =
 +      rzalloc_array(NULL, const gl_constant_value *, param_count);
 +   prog_data->base.nr_params = param_count;
 +
 +   prog_data->barycentric_interp_modes =
 +      brw_compute_barycentric_interp_modes(brw, key->flat_shade,
 +                                           key->persample_shading,
 +                                           &fp->program);
 +
 +   set_binding_table_layout(&prog_data->base, pipeline,
 +                            VK_SHADER_STAGE_FRAGMENT);
 +   /* This needs to come after shader time and pull constant entries, but we
 +    * don't have those set up now, so just put it after the layout entries.
 +    */
 +   prog_data->binding_table.render_target_start = 0;
 +
 +   program = brw_wm_fs_emit(brw, mem_ctx, key, prog_data,
 +                            &fp->program, prog, &program_size);
 +   if (program == NULL) {
 +      ralloc_free(mem_ctx);
 +      return false;
 +   }
 +
 +   struct anv_state ps_state = anv_state_stream_alloc(&pipeline->program_stream,
 +                                                      program_size, 64);
 +   memcpy(ps_state.map, program, program_size);
 +
 +   if (prog_data->no_8)
 +      pipeline->ps_simd8 = NO_KERNEL;
 +   else
 +      pipeline->ps_simd8 = ps_state.offset;
 +
 +   if (prog_data->no_8 || prog_data->prog_offset_16) {
 +      pipeline->ps_simd16 = ps_state.offset + prog_data->prog_offset_16;
 +   } else {
 +      pipeline->ps_simd16 = NO_KERNEL;
 +   }
 +
 +   ralloc_free(mem_ctx);
 +
 +   return true;
 +}
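 +
 +/* Illustrative sketch, not part of this change: a consumer of the kernel
 + * offsets recorded above might select between them like this, where
 + * "wants_simd16" stands in for whatever dispatch heuristic applies.
 + */
 +#if 0
 +static uint32_t
 +choose_ps_kernel(const struct anv_pipeline *pipeline, bool wants_simd16)
 +{
 +   if (pipeline->ps_simd8 == NO_KERNEL ||
 +       (wants_simd16 && pipeline->ps_simd16 != NO_KERNEL))
 +      return pipeline->ps_simd16;
 +
 +   return pipeline->ps_simd8;
 +}
 +#endif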
 +
 +static void
 +brw_gs_populate_key(struct brw_context *brw,
 +                    struct anv_pipeline *pipeline,
 +                    struct brw_geometry_program *gp,
 +                    struct brw_gs_prog_key *key)
 +{
 +   struct gl_context *ctx = &brw->ctx;
 +   struct brw_stage_state *stage_state = &brw->gs.base;
 +   struct gl_program *prog = &gp->program.Base;
 +
 +   memset(key, 0, sizeof(*key));
 +
 +   key->base.program_string_id = gp->id;
 +   brw_setup_vue_key_clip_info(brw, &key->base,
 +                               gp->program.Base.UsesClipDistanceOut);
 +
 +   /* _NEW_TEXTURE */
 +   brw_populate_sampler_prog_key_data(ctx, prog, stage_state->sampler_count,
 +                                      &key->base.tex);
 +
 +   struct brw_vs_prog_data *prog_data = &pipeline->vs_prog_data;
 +
 +   /* BRW_NEW_VUE_MAP_VS */
 +   key->input_varyings = prog_data->base.vue_map.slots_valid;
 +}
 +
 +static bool
 +really_do_gs_prog(struct brw_context *brw,
 +                  struct gl_shader_program *prog,
 +                  struct brw_geometry_program *gp,
 +                  struct brw_gs_prog_key *key, struct anv_pipeline *pipeline)
 +{
 +   struct brw_gs_compile_output output;
 +
 +   /* FIXME: We pass the bind map to the compile in the output struct. Need
 +    * something better. */
 +   set_binding_table_layout(&output.prog_data.base.base,
 +                            pipeline, VK_SHADER_STAGE_GEOMETRY);
 +
 +   brw_compile_gs_prog(brw, prog, gp, key, &output);
 +
 +   struct anv_state gs_state = anv_state_stream_alloc(&pipeline->program_stream,
 +                                                      output.program_size, 64);
 +   memcpy(gs_state.map, output.program, output.program_size);
 +
 +   pipeline->gs_vec4 = gs_state.offset;
 +   pipeline->gs_vertex_count = gp->program.VerticesIn;
 +
 +   ralloc_free(output.mem_ctx);
 +
 +   return true;
 +}
 +
 +static bool
 +brw_codegen_cs_prog(struct brw_context *brw,
 +                    struct gl_shader_program *prog,
 +                    struct brw_compute_program *cp,
 +                    struct brw_cs_prog_key *key, struct anv_pipeline *pipeline)
 +{
 +   struct gl_context *ctx = &brw->ctx;
 +   const GLuint *program;
 +   void *mem_ctx = ralloc_context(NULL);
 +   GLuint program_size;
 +   struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
 +
 +   struct gl_shader *cs = prog->_LinkedShaders[MESA_SHADER_COMPUTE];
 +   assert(cs);
 +
 +   memset(prog_data, 0, sizeof(*prog_data));
 +
 +   set_binding_table_layout(&prog_data->base, pipeline, VK_SHADER_STAGE_COMPUTE);
 +
 +   /* Allocate the references to the uniforms that will end up in the
 +    * prog_data associated with the compiled program, and which will be freed
 +    * by the state cache.
 +    */
 +   int param_count = cs->num_uniform_components;
 +
 +   /* The backend also sometimes adds params for texture size. */
 +   param_count += 2 * ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits;
 +   prog_data->base.param =
 +      rzalloc_array(NULL, const gl_constant_value *, param_count);
 +   prog_data->base.pull_param =
 +      rzalloc_array(NULL, const gl_constant_value *, param_count);
 +   prog_data->base.nr_params = param_count;
 +
 +   program = brw_cs_emit(brw, mem_ctx, key, prog_data,
 +                         &cp->program, prog, &program_size);
 +   if (program == NULL) {
 +      ralloc_free(mem_ctx);
 +      return false;
 +   }
 +
 +   if (unlikely(INTEL_DEBUG & DEBUG_CS))
 +      fprintf(stderr, "\n");
 +
 +   struct anv_state cs_state = anv_state_stream_alloc(&pipeline->program_stream,
 +                                                      program_size, 64);
 +   memcpy(cs_state.map, program, program_size);
 +
 +   pipeline->cs_simd = cs_state.offset;
 +
 +   ralloc_free(mem_ctx);
 +
 +   return true;
 +}
 +
 +static void
 +brw_cs_populate_key(struct brw_context *brw,
 +                    struct brw_compute_program *bcp, struct brw_cs_prog_key *key)
 +{
 +   memset(key, 0, sizeof(*key));
 +
 +   /* The unique compute program ID */
 +   key->program_string_id = bcp->id;
 +}
 +
 +static void
 +fail_on_compile_error(int status, const char *msg)
 +{
 +   int source, line, column;
 +   char error[256];
 +
 +   if (status)
 +      return;
 +
 +   if (sscanf(msg, "%d:%d(%d): error: %255[^\n]", &source, &line, &column, error) == 4)
 +      fail_if(!status, "%d:%s\n", line, error);
 +   else
 +      fail_if(!status, "%s\n", msg);
 +}
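 +
 +/* The sscanf pattern above matches the GLSL compiler's error format.  A
 + * typical (illustrative) message,
 + *
 + *    0:12(7): error: `foo' undeclared
 + *
 + * parses as source = 0, line = 12, column = 7 and
 + * error = "`foo' undeclared", so the failure is reported as
 + * "12:`foo' undeclared" instead of the whole info log.
 + */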
 +
 +struct anv_compiler {
 +   struct anv_device *device;
 +   struct intel_screen *screen;
 +   struct brw_context *brw;
 +   struct gl_pipeline_object pipeline;
 +};
 +
 +extern "C" {
 +
 +struct anv_compiler *
 +anv_compiler_create(struct anv_device *device)
 +{
 +   const struct brw_device_info *devinfo = &device->info;
 +   struct anv_compiler *compiler;
 +   struct gl_context *ctx;
 +
 +   compiler = rzalloc(NULL, struct anv_compiler);
 +   if (compiler == NULL)
 +      return NULL;
 +
 +   compiler->screen = rzalloc(compiler, struct intel_screen);
 +   if (compiler->screen == NULL)
 +      goto fail;
 +
 +   compiler->brw = rzalloc(compiler, struct brw_context);
 +   if (compiler->brw == NULL)
 +      goto fail;
 +
 +   compiler->device = device;
 +
 +   compiler->brw->optionCache.info = NULL;
 +   compiler->brw->bufmgr = NULL;
 +   compiler->brw->gen = devinfo->gen;
 +   compiler->brw->is_g4x = devinfo->is_g4x;
 +   compiler->brw->is_baytrail = devinfo->is_baytrail;
 +   compiler->brw->is_haswell = devinfo->is_haswell;
 +   compiler->brw->is_cherryview = devinfo->is_cherryview;
 +
 +   /* We need this at least for CS, which will check brw->max_cs_threads
 +    * against the work group size. */
 +   compiler->brw->max_vs_threads = devinfo->max_vs_threads;
 +   compiler->brw->max_hs_threads = devinfo->max_hs_threads;
 +   compiler->brw->max_ds_threads = devinfo->max_ds_threads;
 +   compiler->brw->max_gs_threads = devinfo->max_gs_threads;
 +   compiler->brw->max_wm_threads = devinfo->max_wm_threads;
 +   compiler->brw->max_cs_threads = devinfo->max_cs_threads;
 +   compiler->brw->urb.size = devinfo->urb.size;
 +   compiler->brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
 +   compiler->brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
 +   compiler->brw->urb.max_hs_entries = devinfo->urb.max_hs_entries;
 +   compiler->brw->urb.max_ds_entries = devinfo->urb.max_ds_entries;
 +   compiler->brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;
 +
 +   compiler->brw->intelScreen = compiler->screen;
 +   compiler->screen->devinfo = &device->info;
 +
-    if (device->info.gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS))
-       compiler->brw->scalar_vs = true;
++   brw_process_intel_debug_variable(compiler->screen);
 +
-    compiler->screen->compiler = brw_compiler_create(compiler, &device->info);
++   compiler->screen->compiler = brw_compiler_create(compiler, &device->info);
 +
 +   ctx = &compiler->brw->ctx;
 +   _mesa_init_shader_object_functions(&ctx->Driver);
 +
 +   _mesa_init_constants(&ctx->Const, API_OPENGL_CORE);
 +
 +   brw_initialize_context_constants(compiler->brw);
 +
 +   intelInitExtensions(ctx);
 +
 +   /* Set dd::NewShader */
 +   brwInitFragProgFuncs(&ctx->Driver);
 +
 +   ctx->_Shader = &compiler->pipeline;
 +
 +   compiler->brw->precompile = false;
 +
 +   return compiler;
 +
 + fail:
 +   ralloc_free(compiler);
 +   return NULL;
 +}
 +
 +void
 +anv_compiler_destroy(struct anv_compiler *compiler)
 +{
 +   _mesa_free_errors_data(&compiler->brw->ctx);
 +   ralloc_free(compiler);
 +}
 +
 +/* From gen7_urb.c */
 +
 +/* FIXME: Add to struct intel_device_info */
 +
 +static const int gen8_push_size = 32 * 1024;
 +
 +static void
 +gen7_compute_urb_partition(struct anv_pipeline *pipeline)
 +{
 +   const struct brw_device_info *devinfo = &pipeline->device->info;
 +   bool vs_present = pipeline->vs_simd8 != NO_KERNEL;
 +   unsigned vs_size = vs_present ? pipeline->vs_prog_data.base.urb_entry_size : 1;
 +   unsigned vs_entry_size_bytes = vs_size * 64;
 +   bool gs_present = pipeline->gs_vec4 != NO_KERNEL;
 +   unsigned gs_size = gs_present ? pipeline->gs_prog_data.base.urb_entry_size : 1;
 +   unsigned gs_entry_size_bytes = gs_size * 64;
 +
 +   /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
 +    *
 +    *     VS Number of URB Entries must be divisible by 8 if the VS URB Entry
 +    *     Allocation Size is less than 9 512-bit URB entries.
 +    *
 +    * Similar text exists for GS.
 +    */
 +   unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
 +   unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
 +
 +   /* URB allocations must be done in 8k chunks. */
 +   unsigned chunk_size_bytes = 8192;
 +
 +   /* Determine the size of the URB in chunks. */
 +   unsigned urb_chunks = devinfo->urb.size * 1024 / chunk_size_bytes;
 +
 +   /* Reserve space for push constants */
 +   unsigned push_constant_bytes = gen8_push_size;
 +   unsigned push_constant_chunks =
 +      push_constant_bytes / chunk_size_bytes;
 +
 +   /* Initially, assign each stage the minimum amount of URB space it needs,
 +    * and make a note of how much additional space it "wants" (the amount of
 +    * additional space it could actually make use of).
 +    */
 +
 +   /* VS has a lower limit on the number of URB entries */
 +   unsigned vs_chunks =
 +      ALIGN(devinfo->urb.min_vs_entries * vs_entry_size_bytes,
 +            chunk_size_bytes) / chunk_size_bytes;
 +   unsigned vs_wants =
 +      ALIGN(devinfo->urb.max_vs_entries * vs_entry_size_bytes,
 +            chunk_size_bytes) / chunk_size_bytes - vs_chunks;
 +
 +   unsigned gs_chunks = 0;
 +   unsigned gs_wants = 0;
 +   if (gs_present) {
 +      /* There are two constraints on the minimum amount of URB space we can
 +       * allocate:
 +       *
 +       * (1) We need room for at least 2 URB entries, since we always operate
 +       * the GS in DUAL_OBJECT mode.
 +       *
 +       * (2) We can't allocate fewer than gs_granularity entries.
 +       */
 +      gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
 +                        chunk_size_bytes) / chunk_size_bytes;
 +      gs_wants =
 +         ALIGN(devinfo->urb.max_gs_entries * gs_entry_size_bytes,
 +               chunk_size_bytes) / chunk_size_bytes - gs_chunks;
 +   }
 +
 +   /* There should always be enough URB space to satisfy the minimum
 +    * requirements of each stage.
 +    */
 +   unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
 +   assert(total_needs <= urb_chunks);
 +
 +   /* Mete out remaining space (if any) in proportion to "wants". */
 +   unsigned total_wants = vs_wants + gs_wants;
 +   unsigned remaining_space = urb_chunks - total_needs;
 +   if (remaining_space > total_wants)
 +      remaining_space = total_wants;
 +   if (remaining_space > 0) {
 +      unsigned vs_additional = (unsigned)
 +         round(vs_wants * (((double) remaining_space) / total_wants));
 +      vs_chunks += vs_additional;
 +      remaining_space -= vs_additional;
 +      gs_chunks += remaining_space;
 +   }
 +
 +   /* Sanity check that we haven't over-allocated. */
 +   assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
 +
 +   /* Finally, compute the number of entries that can fit in the space
 +    * allocated to each stage.
 +    */
 +   unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
 +   unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;
 +
 +   /* Since we rounded up when computing *_wants, this may be slightly more
 +    * than the maximum allowed amount, so correct for that.
 +    */
 +   nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries);
 +   nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries);
 +
 +   /* Ensure that we program a multiple of the granularity. */
 +   nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
 +   nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
 +
 +   /* Finally, sanity check to make sure we have at least the minimum number
 +    * of entries needed for each stage.
 +    */
 +   assert(nr_vs_entries >= devinfo->urb.min_vs_entries);
 +   if (gs_present)
 +      assert(nr_gs_entries >= 2);
 +
 +   /* Lay out the URB in the following order:
 +    * - push constants
 +    * - VS
 +    * - GS
 +    */
 +   pipeline->urb.vs_start = push_constant_chunks;
 +   pipeline->urb.vs_size = vs_size;
 +   pipeline->urb.nr_vs_entries = nr_vs_entries;
 +
 +   pipeline->urb.gs_start = push_constant_chunks + vs_chunks;
 +   pipeline->urb.gs_size = gs_size;
 +   pipeline->urb.nr_gs_entries = nr_gs_entries;
 +}
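 +
 +/* A worked example with representative (made-up) numbers: a 256kB URB
 + * gives urb_chunks = 32, and the 32kB of push constants reserved above
 + * take 4 chunks.  With vs_size = 2 (vs_entry_size_bytes = 128),
 + * min_vs_entries = 64, max_vs_entries = 2560 and no GS:
 + * vs_chunks = ALIGN(64 * 128, 8192) / 8192 = 1 and vs_wants = 40 - 1 = 39,
 + * so total_needs = 5 and the remaining 27 chunks all go to the VS.  That
 + * yields vs_chunks = 28 and nr_vs_entries = 28 * 8192 / 128 = 1792,
 + * already a multiple of the granularity of 8, so the VS is programmed
 + * with 1792 entries starting at chunk 4.
 + */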
 +
 +static const struct {
 +   uint32_t token;
 +   const char *name;
 +} stage_info[] = {
 +   { GL_VERTEX_SHADER, "vertex" },
 +   { GL_TESS_CONTROL_SHADER, "tess control" },
 +   { GL_TESS_EVALUATION_SHADER, "tess evaluation" },
 +   { GL_GEOMETRY_SHADER, "geometry" },
 +   { GL_FRAGMENT_SHADER, "fragment" },
 +   { GL_COMPUTE_SHADER, "compute" },
 +};
 +
 +struct spirv_header {
 +   uint32_t magic;
 +   uint32_t version;
 +   uint32_t gen_magic;
 +};
 +
 +static const char *
 +src_as_glsl(const char *data)
 +{
 +   const struct spirv_header *as_spirv = (const struct spirv_header *)data;
 +
 +   /* A SPIR-V module is a stream of 32-bit words; if the data is not even
 +    * 4-byte aligned it cannot be SPIR-V, so treat it as GLSL source.
 +    */
 +   if ((intptr_t)data & 0x3) {
 +      return data;
 +   }
 +
 +   if (as_spirv->magic == SPIR_V_MAGIC_NUMBER) {
 +      /* LunarG back-door */
 +      if (as_spirv->version == 0)
 +         return data + 12;
 +      else
 +         return NULL;
 +   } else {
 +      return data;
 +   }
 +}
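 +
 +/* Hypothetical illustration of the back-door path above: GLSL source
 + * wrapped in the 12-byte spirv_header (magic, version == 0, gen_magic),
 + * as a test harness might construct it.  Not used by this file.
 + */
 +#if 0
 +static void *
 +wrap_glsl_for_backdoor(const char *glsl, size_t len)
 +{
 +   char *blob = (char *) malloc(sizeof(struct spirv_header) + len + 1);
 +   struct spirv_header *header = (struct spirv_header *) blob;
 +
 +   header->magic = SPIR_V_MAGIC_NUMBER;
 +   header->version = 0; /* version 0 selects the GLSL back-door */
 +   header->gen_magic = 0;
 +   memcpy(blob + sizeof(*header), glsl, len + 1);
 +
 +   return blob;
 +}
 +#endif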
 +
 +static void
 +anv_compile_shader_glsl(struct anv_compiler *compiler,
 +                        struct gl_shader_program *program,
 +                        struct anv_pipeline *pipeline, uint32_t stage)
 +{
 +   struct brw_context *brw = compiler->brw;
 +   struct gl_shader *shader;
 +   int name = 0;
 +
 +   shader = brw_new_shader(&brw->ctx, name, stage_info[stage].token);
 +   fail_if(shader == NULL, "failed to create %s shader\n", stage_info[stage].name);
 +
 +   shader->Source = strdup(src_as_glsl(pipeline->shaders[stage]->data));
 +   _mesa_glsl_compile_shader(&brw->ctx, shader, false, false);
 +   fail_on_compile_error(shader->CompileStatus, shader->InfoLog);
 +
 +   program->Shaders[program->NumShaders] = shader;
 +   program->NumShaders++;
 +}
 +
 +static void
 +anv_compile_shader_spirv(struct anv_compiler *compiler,
 +                         struct gl_shader_program *program,
 +                         struct anv_pipeline *pipeline, uint32_t stage)
 +{
 +   unreachable("SPIR-V is not supported yet!");
 +}
 +
 +static void
 +add_compiled_stage(struct anv_pipeline *pipeline, uint32_t stage,
 +                   struct brw_stage_prog_data *prog_data)
 +{
 +   struct brw_device_info *devinfo = &pipeline->device->info;
 +   uint32_t max_threads[] = {
 +      [VK_SHADER_STAGE_VERTEX]                  = devinfo->max_vs_threads,
 +      [VK_SHADER_STAGE_TESS_CONTROL]            = 0,
 +      [VK_SHADER_STAGE_TESS_EVALUATION]         = 0,
 +      [VK_SHADER_STAGE_GEOMETRY]                = devinfo->max_gs_threads,
 +      [VK_SHADER_STAGE_FRAGMENT]                = devinfo->max_wm_threads,
 +      [VK_SHADER_STAGE_COMPUTE]                 = devinfo->max_cs_threads,
 +   };
 +
 +   pipeline->prog_data[stage] = prog_data;
 +   pipeline->active_stages |= 1 << stage;
 +   pipeline->scratch_start[stage] = pipeline->total_scratch;
 +   pipeline->total_scratch =
 +      ALIGN_U32(pipeline->total_scratch, 1024) +
 +      prog_data->total_scratch * max_threads[stage];
 +}
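 +
 +/* Illustrative numbers for the scratch layout above: starting from
 + * total_scratch = 0, a VS needing 1kB of scratch per thread with, say,
 + * max_vs_threads = 504 gets scratch_start[VS] = 0 and bumps total_scratch
 + * to ALIGN_U32(0, 1024) + 1024 * 504 = 516096.  A FS needing 2kB per
 + * thread with max_wm_threads = 64 then gets scratch_start[FS] = 516096
 + * and total_scratch becomes ALIGN_U32(516096, 1024) + 2048 * 64 = 647168.
 + */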
 +
 +int
 +anv_compiler_run(struct anv_compiler *compiler, struct anv_pipeline *pipeline)
 +{
 +   struct gl_shader_program *program;
 +   int name = 0;
 +   struct brw_context *brw = compiler->brw;
 +
 +   /* When we free the pipeline, we detect stages based on the NULL status
 +    * of various prog_data pointers.  Make them NULL by default.
 +    */
 +   memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data));
 +   memset(pipeline->scratch_start, 0, sizeof(pipeline->scratch_start));
 +
 +   brw->use_rep_send = pipeline->use_repclear;
 +   brw->no_simd8 = pipeline->use_repclear;
 +
 +   program = brw->ctx.Driver.NewShaderProgram(name);
 +   program->Shaders = (struct gl_shader **)
 +      calloc(VK_NUM_SHADER_STAGE, sizeof(struct gl_shader *));
 +   fail_if(program == NULL || program->Shaders == NULL,
 +           "failed to create program\n");
 +
 +   bool all_spirv = true;
 +   for (unsigned i = 0; i < VK_NUM_SHADER_STAGE; i++) {
 +      if (pipeline->shaders[i] == NULL)
 +         continue;
 +
 +      /* You need at least this much for "void main() { }" anyway */
 +      assert(pipeline->shaders[i]->size >= 12);
 +
 +      if (src_as_glsl(pipeline->shaders[i]->data)) {
 +         all_spirv = false;
 +         break;
 +      }
 +
 +      assert(pipeline->shaders[i]->size % 4 == 0);
 +   }
 +
 +   if (all_spirv) {
 +      for (unsigned i = 0; i < VK_NUM_SHADER_STAGE; i++) {
 +         if (pipeline->shaders[i])
 +            anv_compile_shader_spirv(compiler, program, pipeline, i);
 +      }
 +
 +      /* TODO: nir_link_shader? */
 +   } else {
 +      for (unsigned i = 0; i < VK_NUM_SHADER_STAGE; i++) {
 +         if (pipeline->shaders[i])
 +            anv_compile_shader_glsl(compiler, program, pipeline, i);
 +      }
 +
 +      _mesa_glsl_link_shader(&brw->ctx, program);
 +      fail_on_compile_error(program->LinkStatus,
 +                            program->InfoLog);
 +   }
 +
 +   bool success;
 +   pipeline->active_stages = 0;
 +   pipeline->total_scratch = 0;
 +
 +   if (pipeline->shaders[VK_SHADER_STAGE_VERTEX]) {
 +      struct brw_vs_prog_key vs_key;
 +      struct gl_vertex_program *vp = (struct gl_vertex_program *)
 +         program->_LinkedShaders[MESA_SHADER_VERTEX]->Program;
 +      struct brw_vertex_program *bvp = brw_vertex_program(vp);
 +
 +      brw_vs_populate_key(brw, bvp, &vs_key);
 +
 +      success = really_do_vs_prog(brw, program, bvp, &vs_key, pipeline);
 +      fail_if(!success, "do_wm_prog failed\n");
 +      add_compiled_stage(pipeline, VK_SHADER_STAGE_VERTEX,
 +                         &pipeline->vs_prog_data.base.base);
 +   } else {
 +      memset(&pipeline->vs_prog_data, 0, sizeof(pipeline->vs_prog_data));
 +      pipeline->vs_simd8 = NO_KERNEL;
 +   }
 +
 +   if (pipeline->shaders[VK_SHADER_STAGE_GEOMETRY]) {
 +      struct brw_gs_prog_key gs_key;
 +      struct gl_geometry_program *gp = (struct gl_geometry_program *)
 +         program->_LinkedShaders[MESA_SHADER_GEOMETRY]->Program;
 +      struct brw_geometry_program *bgp = brw_geometry_program(gp);
 +
 +      brw_gs_populate_key(brw, pipeline, bgp, &gs_key);
 +
 +      success = really_do_gs_prog(brw, program, bgp, &gs_key, pipeline);
 +      fail_if(!success, "do_gs_prog failed\n");
 +      add_compiled_stage(pipeline, VK_SHADER_STAGE_GEOMETRY,
 +                         &pipeline->gs_prog_data.base.base);
 +   } else {
 +      pipeline->gs_vec4 = NO_KERNEL;
 +   }
 +
 +   if (pipeline->shaders[VK_SHADER_STAGE_FRAGMENT]) {
 +      struct brw_wm_prog_key wm_key;
 +      struct gl_fragment_program *fp = (struct gl_fragment_program *)
 +         program->_LinkedShaders[MESA_SHADER_FRAGMENT]->Program;
 +      struct brw_fragment_program *bfp = brw_fragment_program(fp);
 +
 +      brw_wm_populate_key(brw, bfp, &wm_key);
 +
 +      success = really_do_wm_prog(brw, program, bfp, &wm_key, pipeline);
 +      fail_if(!success, "do_wm_prog failed\n");
 +      add_compiled_stage(pipeline, VK_SHADER_STAGE_FRAGMENT,
 +                         &pipeline->wm_prog_data.base);
 +   }
 +
 +   if (pipeline->shaders[VK_SHADER_STAGE_COMPUTE]) {
 +      struct brw_cs_prog_key cs_key;
 +      struct gl_compute_program *cp = (struct gl_compute_program *)
 +         program->_LinkedShaders[MESA_SHADER_COMPUTE]->Program;
 +      struct brw_compute_program *bcp = brw_compute_program(cp);
 +
 +      brw_cs_populate_key(brw, bcp, &cs_key);
 +
 +      success = brw_codegen_cs_prog(brw, program, bcp, &cs_key, pipeline);
 +      fail_if(!success, "brw_codegen_cs_prog failed\n");
 +      add_compiled_stage(pipeline, VK_SHADER_STAGE_COMPUTE,
 +                         &pipeline->cs_prog_data.base);
 +   }
 +
 +   brw->ctx.Driver.DeleteShaderProgram(&brw->ctx, program);
 +
 +   struct anv_device *device = compiler->device;
 +   while (device->scratch_block_pool.bo.size < pipeline->total_scratch)
 +      anv_block_pool_alloc(&device->scratch_block_pool);
 +
 +   gen7_compute_urb_partition(pipeline);
 +
 +   return 0;
 +}
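 +
 +/* Sketch of the expected call sequence for the entry points in this file
 + * (error handling omitted; the pipeline's shaders[] array is assumed to
 + * be filled in beforehand by the pipeline-creation code):
 + */
 +#if 0
 +static void
 +example_compile(struct anv_device *device, struct anv_pipeline *pipeline)
 +{
 +   struct anv_compiler *compiler = anv_compiler_create(device);
 +
 +   anv_compiler_run(compiler, pipeline);
 +   /* ... read pipeline->prog_data[], kernel offsets and URB layout ... */
 +
 +   /* At pipeline destruction time: */
 +   anv_compiler_free(pipeline);
 +   anv_compiler_destroy(compiler);
 +}
 +#endif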
 +
 +/* This badly named function frees the struct anv_pipeline data that the
 + * compiler allocates.  Currently just the prog_data structs.
 + */
 +void
 +anv_compiler_free(struct anv_pipeline *pipeline)
 +{
 +   for (uint32_t stage = 0; stage < VK_NUM_SHADER_STAGE; stage++) {
 +      if (pipeline->prog_data[stage]) {
 +         free(pipeline->prog_data[stage]->map_entries);
 +         ralloc_free(pipeline->prog_data[stage]->param);
 +         ralloc_free(pipeline->prog_data[stage]->pull_param);
 +      }
 +   }
 +}
 +
 +}