glsl: join calculate_array_size() and calculate_array_stride()
[mesa.git] / src / glsl / linker.cpp
index 3ef5940f892f2baf47b7ac387037f79212a40d59..c35d87acea6795fa1274aa58bc4bdeac2081f62d 100644 (file)
  * \author Ian Romanick <ian.d.romanick@intel.com>
  */
 
+#include <ctype.h>
+#include "util/strndup.h"
 #include "main/core.h"
 #include "glsl_symbol_table.h"
+#include "glsl_parser_extras.h"
 #include "ir.h"
 #include "program.h"
 #include "program/hash_table.h"
 #include "linker.h"
 #include "link_varyings.h"
 #include "ir_optimization.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_uniform.h"
 
-extern "C" {
 #include "main/shaderobj.h"
-}
+#include "main/enums.h"
+
+
+void linker_error(gl_shader_program *, const char *, ...);
+
+namespace {
 
 /**
  * Visitor that determines whether or not a variable is ever written.
@@ -102,20 +111,19 @@ public:
 
    virtual ir_visitor_status visit_enter(ir_call *ir)
    {
-      exec_list_iterator sig_iter = ir->callee->parameters.iterator();
-      foreach_iter(exec_list_iterator, iter, *ir) {
-        ir_rvalue *param_rval = (ir_rvalue *)iter.get();
-        ir_variable *sig_param = (ir_variable *)sig_iter.get();
+      foreach_two_lists(formal_node, &ir->callee->parameters,
+                        actual_node, &ir->actual_parameters) {
+        ir_rvalue *param_rval = (ir_rvalue *) actual_node;
+        ir_variable *sig_param = (ir_variable *) formal_node;
 
-        if (sig_param->mode == ir_var_function_out ||
-            sig_param->mode == ir_var_function_inout) {
+        if (sig_param->data.mode == ir_var_function_out ||
+            sig_param->data.mode == ir_var_function_inout) {
            ir_variable *var = param_rval->variable_referenced();
            if (var && strcmp(name, var->name) == 0) {
               found = true;
               return visit_stop;
            }
         }
-        sig_iter.next();
       }
 
       if (ir->return_deref != NULL) {
@@ -173,6 +181,345 @@ private:
 };
 
 
+class geom_array_resize_visitor : public ir_hierarchical_visitor {
+public:
+   unsigned num_vertices;
+   gl_shader_program *prog;
+
+   geom_array_resize_visitor(unsigned num_vertices, gl_shader_program *prog)
+   {
+      this->num_vertices = num_vertices;
+      this->prog = prog;
+   }
+
+   virtual ~geom_array_resize_visitor()
+   {
+      /* empty */
+   }
+
+   virtual ir_visitor_status visit(ir_variable *var)
+   {
+      if (!var->type->is_array() || var->data.mode != ir_var_shader_in)
+         return visit_continue;
+
+      unsigned size = var->type->length;
+
+      /* Generate a link error if the shader has declared this array with an
+       * incorrect size.
+       */
+      if (size && size != this->num_vertices) {
+         linker_error(this->prog, "size of array %s declared as %u, "
+                      "but number of input vertices is %u\n",
+                      var->name, size, this->num_vertices);
+         return visit_continue;
+      }
+
+      /* Generate a link error if the shader attempts to access an input
+       * array using an index too large for its actual size assigned at link
+       * time.
+       */
+      if (var->data.max_array_access >= this->num_vertices) {
+         linker_error(this->prog, "geometry shader accesses element %i of "
+                      "%s, but only %i input vertices\n",
+                      var->data.max_array_access, var->name, this->num_vertices);
+         return visit_continue;
+      }
+
+      var->type = glsl_type::get_array_instance(var->type->fields.array,
+                                                this->num_vertices);
+      var->data.max_array_access = this->num_vertices - 1;
+
+      return visit_continue;
+   }
+
+   /* Dereferences of input variables need to be updated so that their type
+    * matches the newly assigned type of the variable they are accessing. */
+   virtual ir_visitor_status visit(ir_dereference_variable *ir)
+   {
+      ir->type = ir->var->type;
+      return visit_continue;
+   }
+
+   /* Dereferences of 2D input arrays need to be updated so that their type
+    * matches the newly assigned type of the array they are accessing. */
+   virtual ir_visitor_status visit_leave(ir_dereference_array *ir)
+   {
+      const glsl_type *const vt = ir->array->type;
+      if (vt->is_array())
+         ir->type = vt->fields.array;
+      return visit_continue;
+   }
+};
+
+class tess_eval_array_resize_visitor : public ir_hierarchical_visitor {
+public:
+   unsigned num_vertices;
+   gl_shader_program *prog;
+
+   tess_eval_array_resize_visitor(unsigned num_vertices, gl_shader_program *prog)
+   {
+      this->num_vertices = num_vertices;
+      this->prog = prog;
+   }
+
+   virtual ~tess_eval_array_resize_visitor()
+   {
+      /* empty */
+   }
+
+   virtual ir_visitor_status visit(ir_variable *var)
+   {
+      if (!var->type->is_array() || var->data.mode != ir_var_shader_in || var->data.patch)
+         return visit_continue;
+
+      var->type = glsl_type::get_array_instance(var->type->fields.array,
+                                                this->num_vertices);
+      var->data.max_array_access = this->num_vertices - 1;
+
+      return visit_continue;
+   }
+
+   /* Dereferences of input variables need to be updated so that their type
+    * matches the newly assigned type of the variable they are accessing. */
+   virtual ir_visitor_status visit(ir_dereference_variable *ir)
+   {
+      ir->type = ir->var->type;
+      return visit_continue;
+   }
+
+   /* Dereferences of 2D input arrays need to be updated so that their type
+    * matches the newly assigned type of the array they are accessing. */
+   virtual ir_visitor_status visit_leave(ir_dereference_array *ir)
+   {
+      const glsl_type *const vt = ir->array->type;
+      if (vt->is_array())
+         ir->type = vt->fields.array;
+      return visit_continue;
+   }
+};
+
+class barrier_use_visitor : public ir_hierarchical_visitor {
+public:
+   barrier_use_visitor(gl_shader_program *prog)
+      : prog(prog), in_main(false), after_return(false), control_flow(0)
+   {
+   }
+
+   virtual ~barrier_use_visitor()
+   {
+      /* empty */
+   }
+
+   virtual ir_visitor_status visit_enter(ir_function *ir)
+   {
+      if (strcmp(ir->name, "main") == 0)
+         in_main = true;
+
+      return visit_continue;
+   }
+
+   virtual ir_visitor_status visit_leave(ir_function *)
+   {
+      in_main = false;
+      after_return = false;
+      return visit_continue;
+   }
+
+   virtual ir_visitor_status visit_leave(ir_return *)
+   {
+      after_return = true;
+      return visit_continue;
+   }
+
+   virtual ir_visitor_status visit_enter(ir_if *)
+   {
+      ++control_flow;
+      return visit_continue;
+   }
+
+   virtual ir_visitor_status visit_leave(ir_if *)
+   {
+      --control_flow;
+      return visit_continue;
+   }
+
+   virtual ir_visitor_status visit_enter(ir_loop *)
+   {
+      ++control_flow;
+      return visit_continue;
+   }
+
+   virtual ir_visitor_status visit_leave(ir_loop *)
+   {
+      --control_flow;
+      return visit_continue;
+   }
+
+   /* FINISHME: `switch` is not expressed at the IR level -- it's already
+    * been lowered to a mess of `if`s. We'll correctly disallow any use of
+    * barrier() in a conditional path within the switch, but not in a path
+    * which is always hit.
+    */
+
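+   /* Illustrative example (not from the spec): the checks in visit_enter()
+    * below reject a tessellation control shader such as
+    *
+    *    void main() {
+    *       if (gl_InvocationID == 0)
+    *          barrier();     // inside control flow: link error
+    *    }
+    *
+    * while an unconditional barrier() at the top level of main() is
+    * accepted.
+    */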
+   virtual ir_visitor_status visit_enter(ir_call *ir)
+   {
+      if (ir->use_builtin && strcmp(ir->callee_name(), "barrier") == 0) {
+         /* Use of barrier(); determine if it is legal: */
+         if (!in_main) {
+            linker_error(prog, "Builtin barrier() may only be used in main");
+            return visit_stop;
+         }
+
+         if (after_return) {
+            linker_error(prog, "Builtin barrier() may not be used after return");
+            return visit_stop;
+         }
+
+         if (control_flow != 0) {
+            linker_error(prog, "Builtin barrier() may not be used inside control flow");
+            return visit_stop;
+         }
+      }
+      return visit_continue;
+   }
+
+private:
+   gl_shader_program *prog;
+   bool in_main, after_return;
+   int control_flow;
+};
+
+/**
+ * Visitor that determines the highest stream id to which a (geometry) shader
+ * emits vertices. It also checks whether End{Stream}Primitive is ever called.
+ */
+class find_emit_vertex_visitor : public ir_hierarchical_visitor {
+public:
+   find_emit_vertex_visitor(int max_allowed)
+      : max_stream_allowed(max_allowed),
+        invalid_stream_id(0),
+        invalid_stream_id_from_emit_vertex(false),
+        end_primitive_found(false),
+        uses_non_zero_stream(false)
+   {
+      /* empty */
+   }
+
+   virtual ir_visitor_status visit_leave(ir_emit_vertex *ir)
+   {
+      int stream_id = ir->stream_id();
+
+      if (stream_id < 0) {
+         invalid_stream_id = stream_id;
+         invalid_stream_id_from_emit_vertex = true;
+         return visit_stop;
+      }
+
+      if (stream_id > max_stream_allowed) {
+         invalid_stream_id = stream_id;
+         invalid_stream_id_from_emit_vertex = true;
+         return visit_stop;
+      }
+
+      if (stream_id != 0)
+         uses_non_zero_stream = true;
+
+      return visit_continue;
+   }
+
+   virtual ir_visitor_status visit_leave(ir_end_primitive *ir)
+   {
+      end_primitive_found = true;
+
+      int stream_id = ir->stream_id();
+
+      if (stream_id < 0) {
+         invalid_stream_id = stream_id;
+         invalid_stream_id_from_emit_vertex = false;
+         return visit_stop;
+      }
+
+      if (stream_id > max_stream_allowed) {
+         invalid_stream_id = stream_id;
+         invalid_stream_id_from_emit_vertex = false;
+         return visit_stop;
+      }
+
+      if (stream_id != 0)
+         uses_non_zero_stream = true;
+
+      return visit_continue;
+   }
+
+   bool error()
+   {
+      return invalid_stream_id != 0;
+   }
+
+   const char *error_func()
+   {
+      return invalid_stream_id_from_emit_vertex ?
+         "EmitStreamVertex" : "EndStreamPrimitive";
+   }
+
+   int error_stream()
+   {
+      return invalid_stream_id;
+   }
+
+   bool uses_streams()
+   {
+      return uses_non_zero_stream;
+   }
+
+   bool uses_end_primitive()
+   {
+      return end_primitive_found;
+   }
+
+private:
+   int max_stream_allowed;
+   int invalid_stream_id;
+   bool invalid_stream_id_from_emit_vertex;
+   bool end_primitive_found;
+   bool uses_non_zero_stream;
+};
+
+/* Class that finds array derefs and checks whether the indices are dynamic. */
+class dynamic_sampler_array_indexing_visitor : public ir_hierarchical_visitor
+{
+public:
+   dynamic_sampler_array_indexing_visitor() :
+      dynamic_sampler_array_indexing(false)
+   {
+   }
+
+   ir_visitor_status visit_enter(ir_dereference_array *ir)
+   {
+      if (!ir->variable_referenced())
+         return visit_continue;
+
+      if (!ir->variable_referenced()->type->contains_sampler())
+         return visit_continue;
+
+      if (!ir->array_index->constant_expression_value()) {
+         dynamic_sampler_array_indexing = true;
+         return visit_stop;
+      }
+      return visit_continue;
+   }
+
+   bool uses_dynamic_sampler_array_indexing()
+   {
+      return dynamic_sampler_array_indexing;
+   }
+
+private:
+   bool dynamic_sampler_array_indexing;
+};
+
+} /* anonymous namespace */
+
 void
 linker_error(gl_shader_program *prog, const char *fmt, ...)
 {
@@ -192,7 +539,7 @@ linker_warning(gl_shader_program *prog, const char *fmt, ...)
 {
    va_list ap;
 
-   ralloc_strcat(&prog->InfoLog, "error: ");
+   ralloc_strcat(&prog->InfoLog, "warning: ");
    va_start(ap, fmt);
    ralloc_vasprintf_append(&prog->InfoLog, fmt, ap);
    va_end(ap);
@@ -254,97 +601,116 @@ parse_program_resource_name(const GLchar *name,
    if (array_index < 0)
       return -1;
 
+   /* Check for leading zero */
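+   /* For example, "foo[01]" is rejected by this check, while "foo[0]" and
+    * "foo[10]" are accepted.
+    */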
+   if (name[i] == '0' && name[i+1] != ']')
+      return -1;
+
    *out_base_name_end = name + (i - 1);
    return array_index;
 }
 
 
 void
-link_invalidate_variable_locations(gl_shader *sh, int input_base,
-                                   int output_base)
+link_invalidate_variable_locations(exec_list *ir)
 {
-   foreach_list(node, sh->ir) {
-      ir_variable *const var = ((ir_instruction *) node)->as_variable();
+   foreach_in_list(ir_instruction, node, ir) {
+      ir_variable *const var = node->as_variable();
 
       if (var == NULL)
          continue;
 
-      int base;
-      switch (var->mode) {
-      case ir_var_shader_in:
-         base = input_base;
-         break;
-      case ir_var_shader_out:
-         base = output_base;
-         break;
-      default:
-         continue;
+      /* Only assign locations for variables that lack an explicit location.
+       * Explicit locations are set for all built-in variables, generic vertex
+       * shader inputs (via layout(location=...)), and generic fragment shader
+       * outputs (also via layout(location=...)).
+       */
+      if (!var->data.explicit_location) {
+         var->data.location = -1;
+         var->data.location_frac = 0;
       }
 
-      /* Only assign locations for generic attributes / varyings / etc.
+      /* ir_variable::is_unmatched_generic_inout is used by the linker while
+       * connecting outputs from one stage to inputs of the next stage.
+       *
+       * There are two implicit assumptions here.  First, we assume that any
+       * built-in variable (i.e., non-generic in or out) will have
+       * explicit_location set.  Second, we assume that any generic in or out
+       * will not have explicit_location set.
+       *
+       * This second assumption will only be valid until
+       * GL_ARB_separate_shader_objects is supported.  When that extension is
+       * implemented, this function will need some modifications.
        */
-      if ((var->location >= base) && !var->explicit_location)
-         var->location = -1;
-
-      if ((var->location == -1) && !var->explicit_location) {
-         var->is_unmatched_generic_inout = 1;
-         var->location_frac = 0;
+      if (!var->data.explicit_location) {
+         var->data.is_unmatched_generic_inout = 1;
       } else {
-         var->is_unmatched_generic_inout = 0;
+         var->data.is_unmatched_generic_inout = 0;
       }
    }
 }
 
 
 /**
- * Determine the number of attribute slots required for a particular type
+ * Set clip_distance_array_size based on the given shader.
  *
- * This code is here because it implements the language rules of a specific
- * GLSL version.  Since it's a property of the language and not a property of
- * types in general, it doesn't really belong in glsl_type.
+ * Also check for errors based on incorrect usage of gl_ClipVertex and
+ * gl_ClipDistance.
+ *
+ * Reports a linker error if usage is incorrect.
  */
-unsigned
-count_attribute_slots(const glsl_type *t)
+static void
+analyze_clip_usage(struct gl_shader_program *prog,
+                   struct gl_shader *shader,
+                   GLuint *clip_distance_array_size)
 {
-   /* From page 31 (page 37 of the PDF) of the GLSL 1.50 spec:
-    *
-    *     "A scalar input counts the same amount against this limit as a vec4,
-    *     so applications may want to consider packing groups of four
-    *     unrelated float inputs together into a vector to better utilize the
-    *     capabilities of the underlying hardware. A matrix input will use up
-    *     multiple locations.  The number of locations used will equal the
-    *     number of columns in the matrix."
-    *
-    * The spec does not explicitly say how arrays are counted.  However, it
-    * should be safe to assume the total number of slots consumed by an array
-    * is the number of entries in the array multiplied by the number of slots
-    * consumed by a single element of the array.
-    */
+   *clip_distance_array_size = 0;
+
+   if (!prog->IsES && prog->Version >= 130) {
+      /* From section 7.1 (Vertex Shader Special Variables) of the
+       * GLSL 1.30 spec:
+       *
+       *   "It is an error for a shader to statically write both
+       *   gl_ClipVertex and gl_ClipDistance."
+       *
+       * This does not apply to GLSL ES shaders, since GLSL ES defines neither
+       * gl_ClipVertex nor gl_ClipDistance.
+       */
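+      /* For example (illustrative), a shader that contains both
+       *
+       *    gl_ClipVertex = pos;
+       *    gl_ClipDistance[0] = dot(pos, plane);
+       *
+       * is rejected by the check below.
+       */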
+      find_assignment_visitor clip_vertex("gl_ClipVertex");
+      find_assignment_visitor clip_distance("gl_ClipDistance");
 
-   if (t->is_array())
-      return t->array_size() * count_attribute_slots(t->element_type());
+      clip_vertex.run(shader->ir);
+      clip_distance.run(shader->ir);
+      if (clip_vertex.variable_found() && clip_distance.variable_found()) {
+         linker_error(prog, "%s shader writes to both `gl_ClipVertex' "
+                      "and `gl_ClipDistance'\n",
+                      _mesa_shader_stage_to_string(shader->Stage));
+         return;
+      }
 
-   if (t->is_matrix())
-      return t->matrix_columns;
+      if (clip_distance.variable_found()) {
+         ir_variable *clip_distance_var =
+               shader->symbols->get_variable("gl_ClipDistance");
 
-   return 1;
+         assert(clip_distance_var);
+         *clip_distance_array_size = clip_distance_var->type->length;
+      }
+   }
 }
 
 
 /**
  * Verify that a vertex shader executable meets all semantic requirements.
  *
- * Also sets prog->Vert.UsesClipDistance and prog->Vert.ClipDistanceArraySize
- * as a side effect.
+ * Also sets prog->Vert.ClipDistanceArraySize as a side effect.
  *
  * \param shader  Vertex shader executable to be verified
  */
-bool
+void
 validate_vertex_shader_executable(struct gl_shader_program *prog,
                                  struct gl_shader *shader)
 {
    if (shader == NULL)
-      return true;
+      return;
 
    /* From the GLSL 1.10 spec, page 48:
     *
@@ -369,48 +735,36 @@ validate_vertex_shader_executable(struct gl_shader_program *prog,
     *      vertex processing has occurred. Its value is undefined if
     *      the vertex shader executable does not write gl_Position."
     *
-    * GLSL ES 3.00 is similar to GLSL 1.40--failing to write to gl_Position is
-    * not an error.
+    * All GLSL ES Versions are similar to GLSL 1.40--failing to write to
+    * gl_Position is not an error.
     */
    if (prog->Version < (prog->IsES ? 300 : 140)) {
       find_assignment_visitor find("gl_Position");
       find.run(shader->ir);
       if (!find.variable_found()) {
-        linker_error(prog, "vertex shader does not write to `gl_Position'\n");
-        return false;
+        if (prog->IsES) {
+          linker_warning(prog,
+                         "vertex shader does not write to `gl_Position'. "
+                         "Its value is undefined.\n");
+        } else {
+          linker_error(prog,
+                       "vertex shader does not write to `gl_Position'.\n");
+        }
+        return;
       }
    }
 
-   prog->Vert.ClipDistanceArraySize = 0;
-
-   if (!prog->IsES && prog->Version >= 130) {
-      /* From section 7.1 (Vertex Shader Special Variables) of the
-       * GLSL 1.30 spec:
-       *
-       *   "It is an error for a shader to statically write both
-       *   gl_ClipVertex and gl_ClipDistance."
-       *
-       * This does not apply to GLSL ES shaders, since GLSL ES defines neither
-       * gl_ClipVertex nor gl_ClipDistance.
-       */
-      find_assignment_visitor clip_vertex("gl_ClipVertex");
-      find_assignment_visitor clip_distance("gl_ClipDistance");
+   analyze_clip_usage(prog, shader, &prog->Vert.ClipDistanceArraySize);
+}
 
-      clip_vertex.run(shader->ir);
-      clip_distance.run(shader->ir);
-      if (clip_vertex.variable_found() && clip_distance.variable_found()) {
-         linker_error(prog, "vertex shader writes to both `gl_ClipVertex' "
-                      "and `gl_ClipDistance'\n");
-         return false;
-      }
-      prog->Vert.UsesClipDistance = clip_distance.variable_found();
-      ir_variable *clip_distance_var =
-         shader->symbols->get_variable("gl_ClipDistance");
-      if (clip_distance_var)
-         prog->Vert.ClipDistanceArraySize = clip_distance_var->type->length;
-   }
+void
+validate_tess_eval_shader_executable(struct gl_shader_program *prog,
+                                     struct gl_shader *shader)
+{
+   if (shader == NULL)
+      return;
 
-   return true;
+   analyze_clip_usage(prog, shader, &prog->TessEval.ClipDistanceArraySize);
 }
 
 
@@ -419,12 +773,12 @@ validate_vertex_shader_executable(struct gl_shader_program *prog,
  *
  * \param shader  Fragment shader executable to be verified
  */
-bool
+void
 validate_fragment_shader_executable(struct gl_shader_program *prog,
                                    struct gl_shader *shader)
 {
    if (shader == NULL)
-      return true;
+      return;
 
    find_assignment_visitor frag_color("gl_FragColor");
    find_assignment_visitor frag_data("gl_FragData");
@@ -435,40 +789,136 @@ validate_fragment_shader_executable(struct gl_shader_program *prog,
    if (frag_color.variable_found() && frag_data.variable_found()) {
       linker_error(prog,  "fragment shader writes to both "
                   "`gl_FragColor' and `gl_FragData'\n");
-      return false;
    }
-
-   return true;
 }
 
+/**
+ * Verify that a geometry shader executable meets all semantic requirements
+ *
+ * Also sets prog->Geom.VerticesIn and prog->Geom.ClipDistanceArraySize as
+ * a side effect.
+ *
+ * \param shader Geometry shader executable to be verified
+ */
+void
+validate_geometry_shader_executable(struct gl_shader_program *prog,
+                                   struct gl_shader *shader)
+{
+   if (shader == NULL)
+      return;
+
+   unsigned num_vertices = vertices_per_prim(prog->Geom.InputType);
+   prog->Geom.VerticesIn = num_vertices;
+
+   analyze_clip_usage(prog, shader, &prog->Geom.ClipDistanceArraySize);
+}
 
 /**
- * Generate a string describing the mode of a variable
+ * Check whether the geometry shader emits to non-zero streams and perform
+ * the corresponding validations.
  */
-static const char *
-mode_string(const ir_variable *var)
+static void
+validate_geometry_shader_emissions(struct gl_context *ctx,
+                                   struct gl_shader_program *prog)
 {
-   switch (var->mode) {
-   case ir_var_auto:
-      return (var->read_only) ? "global constant" : "global variable";
+   if (prog->_LinkedShaders[MESA_SHADER_GEOMETRY] != NULL) {
+      find_emit_vertex_visitor emit_vertex(ctx->Const.MaxVertexStreams - 1);
+      emit_vertex.run(prog->_LinkedShaders[MESA_SHADER_GEOMETRY]->ir);
+      if (emit_vertex.error()) {
+         linker_error(prog, "Invalid call %s(%d). Accepted values for the "
+                      "stream parameter are in the range [0, %d].\n",
+                      emit_vertex.error_func(),
+                      emit_vertex.error_stream(),
+                      ctx->Const.MaxVertexStreams - 1);
+      }
+      prog->Geom.UsesStreams = emit_vertex.uses_streams();
+      prog->Geom.UsesEndPrimitive = emit_vertex.uses_end_primitive();
 
-   case ir_var_uniform:    return "uniform";
-   case ir_var_shader_in:  return "shader input";
-   case ir_var_shader_out: return "shader output";
+      /* From the ARB_gpu_shader5 spec:
+       *
+       *   "Multiple vertex streams are supported only if the output primitive
+       *    type is declared to be "points".  A program will fail to link if it
+       *    contains a geometry shader calling EmitStreamVertex() or
+       *    EndStreamPrimitive() if its output primitive type is not "points".
+       *
+       * However, in the same spec:
+       *
+       *   "The function EmitVertex() is equivalent to calling EmitStreamVertex()
+       *    with <stream> set to zero."
+       *
+       * And:
+       *
+       *   "The function EndPrimitive() is equivalent to calling
+       *    EndStreamPrimitive() with <stream> set to zero."
+       *
+       * Since we can call EmitVertex() and EndPrimitive() when we output
+       * primitives other than points, calling EmitStreamVertex(0) or
+       * EndStreamPrimitive(0) should not produce errors. This is also what
+       * Nvidia does. Currently we only set prog->Geom.UsesStreams to TRUE when
+       * EmitStreamVertex() or EndStreamPrimitive() are called with a non-zero
+       * stream.
+       */
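+      /* For example (illustrative GLSL), with
+       *
+       *    layout(triangle_strip, max_vertices = 3) out;
+       *
+       * a call to EmitStreamVertex(1) fails to link, while EmitStreamVertex(0)
+       * is treated like EmitVertex() and is accepted.
+       */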
+      if (prog->Geom.UsesStreams && prog->Geom.OutputType != GL_POINTS) {
+         linker_error(prog, "EmitStreamVertex(n) and EndStreamPrimitive(n) "
+                      "with n>0 requires point output\n");
+      }
+   }
+}
 
-   case ir_var_const_in:
-   case ir_var_temporary:
-   default:
-      assert(!"Should not get here.");
-      return "invalid variable";
+bool
+validate_intrastage_arrays(struct gl_shader_program *prog,
+                           ir_variable *const var,
+                          ir_variable *const existing)
+{
+   /* Consider the types to be "the same" if both types are arrays
+    * of the same type and one of the arrays is implicitly sized.
+    * In addition, set the type of the linked variable to the
+    * explicitly sized array.
+    */
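+   /* For example (illustrative GLSL), one compilation unit may declare
+    *
+    *    vec4 colors[];
+    *
+    * and another
+    *
+    *    vec4 colors[4];
+    *
+    * in which case the linked variable gets the explicitly sized type
+    * vec4[4].
+    */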
+   if (var->type->is_array() && existing->type->is_array()) {
+      if ((var->type->fields.array == existing->type->fields.array) &&
+          ((var->type->length == 0) || (existing->type->length == 0))) {
+         if (var->type->length != 0) {
+            if (var->type->length <= existing->data.max_array_access) {
+               linker_error(prog, "%s `%s' declared as type "
+                           "`%s' but outermost dimension has an index"
+                           " of `%i'\n",
+                           mode_string(var),
+                           var->name, var->type->name,
+                           existing->data.max_array_access);
+            }
+            existing->type = var->type;
+            return true;
+         } else if (existing->type->length != 0) {
+            if (existing->type->length <= var->data.max_array_access &&
+                !existing->data.from_ssbo_unsized_array) {
+               linker_error(prog, "%s `%s' declared as type "
+                           "`%s' but outermost dimension has an index"
+                           " of `%i'\n",
+                           mode_string(var),
+                           var->name, existing->type->name,
+                           var->data.max_array_access);
+            }
+            return true;
+         }
+      } else {
+         /* The arrays of structs could have different glsl_type pointers but
+          * they are actually the same type. Use record_compare() to check that.
+          */
+         if (existing->type->fields.array->is_record() &&
+             var->type->fields.array->is_record() &&
+             existing->type->fields.array->record_compare(var->type->fields.array))
+            return true;
+      }
    }
+   return false;
 }
 
 
 /**
  * Perform validation of global variables used across multiple shaders
  */
-bool
+void
 cross_validate_globals(struct gl_shader_program *prog,
                       struct gl_shader **shader_list,
                       unsigned num_shaders,
@@ -482,19 +932,23 @@ cross_validate_globals(struct gl_shader_program *prog,
       if (shader_list[i] == NULL)
         continue;
 
-      foreach_list(node, shader_list[i]->ir) {
-        ir_variable *const var = ((ir_instruction *) node)->as_variable();
+      foreach_in_list(ir_instruction, node, shader_list[i]->ir) {
+        ir_variable *const var = node->as_variable();
 
         if (var == NULL)
            continue;
 
-        if (uniforms_only && (var->mode != ir_var_uniform))
+        if (uniforms_only && (var->data.mode != ir_var_uniform && var->data.mode != ir_var_shader_storage))
            continue;
 
+         /* don't cross validate subroutine uniforms */
+         if (var->type->contains_subroutine())
+            continue;
+
         /* Don't cross validate temporaries that are at global scope.  These
          * will eventually get pulled into the shaders 'main'.
          */
-        if (var->mode == ir_var_temporary)
+        if (var->data.mode == ir_var_temporary)
            continue;
 
         /* If a global with this name has already been seen, verify that the
@@ -503,43 +957,78 @@ cross_validate_globals(struct gl_shader_program *prog,
          */
         ir_variable *const existing = variables.get_variable(var->name);
         if (existing != NULL) {
-           if (var->type != existing->type) {
-              /* Consider the types to be "the same" if both types are arrays
-               * of the same type and one of the arrays is implicitly sized.
-               * In addition, set the type of the linked variable to the
-               * explicitly sized array.
-               */
-              if (var->type->is_array()
-                  && existing->type->is_array()
-                  && (var->type->fields.array == existing->type->fields.array)
-                  && ((var->type->length == 0)
-                      || (existing->type->length == 0))) {
-                 if (var->type->length != 0) {
-                    existing->type = var->type;
-                 }
-              } else {
-                 linker_error(prog, "%s `%s' declared as type "
-                              "`%s' and type `%s'\n",
-                              mode_string(var),
-                              var->name, var->type->name,
-                              existing->type->name);
-                 return false;
+            /* Check if types match. Interface blocks have some special
+             * rules so we handle those elsewhere.
+             */
+           if (var->type != existing->type &&
+                !var->is_interface_instance()) {
+              if (!validate_intrastage_arrays(prog, var, existing)) {
+                  if (var->type->is_record() && existing->type->is_record()
+                      && existing->type->record_compare(var->type)) {
+                     existing->type = var->type;
+                  } else {
+                     /* If it is an unsized array in a Shader Storage Block,
+                      * two different shaders can access different elements.
+                      * Because of that, they might be converted to arrays of
+                      * different sizes, so check that they are compatible but
+                      * ignore the array size.
+                      */
+                     if (!(var->data.mode == ir_var_shader_storage &&
+                           var->data.from_ssbo_unsized_array &&
+                           existing->data.mode == ir_var_shader_storage &&
+                           existing->data.from_ssbo_unsized_array &&
+                           var->type->gl_type == existing->type->gl_type)) {
+                        linker_error(prog, "%s `%s' declared as type "
+                                    "`%s' and type `%s'\n",
+                                    mode_string(var),
+                                    var->name, var->type->name,
+                                    existing->type->name);
+                        return;
+                     }
+                  }
               }
            }
 
-           if (var->explicit_location) {
-              if (existing->explicit_location
-                  && (var->location != existing->location)) {
+           if (var->data.explicit_location) {
+              if (existing->data.explicit_location
+                  && (var->data.location != existing->data.location)) {
                     linker_error(prog, "explicit locations for %s "
                                  "`%s' have differing values\n",
                                  mode_string(var), var->name);
-                    return false;
+                    return;
               }
 
-              existing->location = var->location;
-              existing->explicit_location = true;
+              existing->data.location = var->data.location;
+              existing->data.explicit_location = true;
            }
 
+            /* From the GLSL 4.20 specification:
+             * "A link error will result if two compilation units in a program
+             *  specify different integer-constant bindings for the same
+             *  opaque-uniform name.  However, it is not an error to specify a
+             *  binding on some but not all declarations for the same name"
+             */
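+            /* For example (illustrative), declaring
+             *
+             *    layout(binding = 2) uniform sampler2D tex;
+             *
+             * in one shader and
+             *
+             *    layout(binding = 3) uniform sampler2D tex;
+             *
+             * in another must fail to link, while omitting the binding from
+             * one of the two declarations is allowed.
+             */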
+            if (var->data.explicit_binding) {
+               if (existing->data.explicit_binding &&
+                   var->data.binding != existing->data.binding) {
+                  linker_error(prog, "explicit bindings for %s "
+                               "`%s' have differing values\n",
+                               mode_string(var), var->name);
+                  return;
+               }
+
+               existing->data.binding = var->data.binding;
+               existing->data.explicit_binding = true;
+            }
+
+            if (var->type->contains_atomic() &&
+                var->data.atomic.offset != existing->data.atomic.offset) {
+               linker_error(prog, "offset specifications for %s "
+                            "`%s' have differing values\n",
+                            mode_string(var), var->name);
+               return;
+            }
+
            /* Validate layout qualifiers for gl_FragDepth.
             *
             * From the AMD/ARB_conservative_depth specs:
@@ -552,24 +1041,24 @@ cross_validate_globals(struct gl_shader_program *prog,
             *    of qualifiers."
             */
            if (strcmp(var->name, "gl_FragDepth") == 0) {
-              bool layout_declared = var->depth_layout != ir_depth_layout_none;
+              bool layout_declared = var->data.depth_layout != ir_depth_layout_none;
               bool layout_differs =
-                 var->depth_layout != existing->depth_layout;
+                 var->data.depth_layout != existing->data.depth_layout;
 
               if (layout_declared && layout_differs) {
                  linker_error(prog,
                               "All redeclarations of gl_FragDepth in all "
                               "fragment shaders in a single program must have "
-                              "the same set of qualifiers.");
+                              "the same set of qualifiers.\n");
               }
 
-              if (var->used && layout_differs) {
+              if (var->data.used && layout_differs) {
                  linker_error(prog,
                               "If gl_FragDepth is redeclared with a layout "
                               "qualifier in any fragment shader, it must be "
                               "redeclared with the same layout qualifier in "
                               "all fragment shaders that have assignments to "
-                              "gl_FragDepth");
+                              "gl_FragDepth\n");
               }
            }
 
@@ -594,7 +1083,7 @@ cross_validate_globals(struct gl_shader_program *prog,
                     linker_error(prog, "initializers for %s "
                                  "`%s' have differing values\n",
                                  mode_string(var), var->name);
-                    return false;
+                    return;
                  }
               } else {
                  /* If the first-seen instance of a particular uniform did not
@@ -615,15 +1104,15 @@ cross_validate_globals(struct gl_shader_program *prog,
               }
            }
 
-           if (var->has_initializer) {
-              if (existing->has_initializer
+           if (var->data.has_initializer) {
+              if (existing->data.has_initializer
                   && (var->constant_initializer == NULL
                       || existing->constant_initializer == NULL)) {
                  linker_error(prog,
                               "shared global variable `%s' has multiple "
                               "non-constant initializers.\n",
                               var->name);
-                 return false;
+                 return;
               }
 
               /* Some instance had an initializer, so keep track of that.  In
@@ -631,54 +1120,58 @@ cross_validate_globals(struct gl_shader_program *prog,
                * otherwise) will propagate the existence to the variable
                * stored in the symbol table.
                */
-              existing->has_initializer = true;
+              existing->data.has_initializer = true;
            }
 
-           if (existing->invariant != var->invariant) {
+           if (existing->data.invariant != var->data.invariant) {
               linker_error(prog, "declarations for %s `%s' have "
                            "mismatching invariant qualifiers\n",
                            mode_string(var), var->name);
-              return false;
+              return;
            }
-            if (existing->centroid != var->centroid) {
+            if (existing->data.centroid != var->data.centroid) {
                linker_error(prog, "declarations for %s `%s' have "
                            "mismatching centroid qualifiers\n",
                            mode_string(var), var->name);
-               return false;
+               return;
+            }
+            if (existing->data.sample != var->data.sample) {
+               linker_error(prog, "declarations for %s `%s' have "
+                            "mismatching sample qualifiers\n",
+                            mode_string(var), var->name);
+               return;
             }
         } else
            variables.add_variable(var);
       }
    }
-
-   return true;
 }
 
 
 /**
  * Perform validation of uniforms used across multiple shader stages
  */
-bool
+void
 cross_validate_uniforms(struct gl_shader_program *prog)
 {
-   return cross_validate_globals(prog, prog->_LinkedShaders,
-                                MESA_SHADER_TYPES, true);
+   cross_validate_globals(prog, prog->_LinkedShaders,
+                          MESA_SHADER_STAGES, true);
 }
 
 /**
- * Accumulates the array of prog->UniformBlocks and checks that all
+ * Accumulates the array of prog->BufferInterfaceBlocks and checks that all
  * definitions of blocks agree on their contents.
  */
 static bool
 interstage_cross_validate_uniform_blocks(struct gl_shader_program *prog)
 {
    unsigned max_num_uniform_blocks = 0;
-   for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
       if (prog->_LinkedShaders[i])
-        max_num_uniform_blocks += prog->_LinkedShaders[i]->NumUniformBlocks;
+        max_num_uniform_blocks += prog->_LinkedShaders[i]->NumBufferInterfaceBlocks;
    }
 
-   for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
       struct gl_shader *sh = prog->_LinkedShaders[i];
 
       prog->UniformBlockStageIndex[i] = ralloc_array(prog, int,
@@ -689,15 +1182,15 @@ interstage_cross_validate_uniform_blocks(struct gl_shader_program *prog)
       if (sh == NULL)
         continue;
 
-      for (unsigned int j = 0; j < sh->NumUniformBlocks; j++) {
+      for (unsigned int j = 0; j < sh->NumBufferInterfaceBlocks; j++) {
         int index = link_cross_validate_uniform_block(prog,
-                                                      &prog->UniformBlocks,
-                                                      &prog->NumUniformBlocks,
-                                                      &sh->UniformBlocks[j]);
+                                                      &prog->BufferInterfaceBlocks,
+                                                      &prog->NumBufferInterfaceBlocks,
+                                                      &sh->BufferInterfaceBlocks[j]);
 
         if (index == -1) {
-           linker_error(prog, "uniform block `%s' has mismatching definitions",
-                        sh->UniformBlocks[j].Name);
+           linker_error(prog, "uniform block `%s' has mismatching definitions\n",
+                        sh->BufferInterfaceBlocks[j].Name);
            return false;
         }
 
@@ -717,15 +1210,15 @@ populate_symbol_table(gl_shader *sh)
 {
    sh->symbols = new(sh) glsl_symbol_table;
 
-   foreach_list(node, sh->ir) {
-      ir_instruction *const inst = (ir_instruction *) node;
+   foreach_in_list(ir_instruction, inst, sh->ir) {
       ir_variable *var;
       ir_function *func;
 
       if ((func = inst->as_function()) != NULL) {
         sh->symbols->add_function(func);
       } else if ((var = inst->as_variable()) != NULL) {
-        sh->symbols->add_variable(var);
+         if (var->data.mode != ir_var_temporary)
+            sh->symbols->add_variable(var);
       }
    }
 }
@@ -767,7 +1260,7 @@ remap_variables(ir_instruction *inst, struct gl_shader *target,
 
       virtual ir_visitor_status visit(ir_dereference_variable *ir)
       {
-        if (ir->var->mode == ir_var_temporary) {
+        if (ir->var->data.mode == ir_var_temporary) {
            ir_variable *var = (ir_variable *) hash_table_find(temps, ir->var);
 
            assert(var != NULL);
@@ -834,20 +1327,18 @@ move_non_declarations(exec_list *instructions, exec_node *last,
       temps = hash_table_ctor(0, hash_table_pointer_hash,
                              hash_table_pointer_compare);
 
-   foreach_list_safe(node, instructions) {
-      ir_instruction *inst = (ir_instruction *) node;
-
+   foreach_in_list_safe(ir_instruction, inst, instructions) {
       if (inst->as_function())
         continue;
 
       ir_variable *var = inst->as_variable();
-      if ((var != NULL) && (var->mode != ir_var_temporary))
+      if ((var != NULL) && (var->data.mode != ir_var_temporary))
         continue;
 
       assert(inst->as_assignment()
              || inst->as_call()
              || inst->as_if() /* for initializers with the ?: operator */
-            || ((var != NULL) && (var->mode == ir_var_temporary)));
+            || ((var != NULL) && (var->data.mode == ir_var_temporary)));
 
       if (make_copies) {
         inst = inst->clone(target, NULL);
@@ -870,52 +1361,608 @@ move_non_declarations(exec_list *instructions, exec_node *last,
    return last;
 }
 
+
+/**
+ * This class is only used in link_intrastage_shaders() below but declaring
+ * it inside that function leads to compiler warnings with some versions of
+ * gcc.
+ */
+class array_sizing_visitor : public ir_hierarchical_visitor {
+public:
+   array_sizing_visitor()
+      : mem_ctx(ralloc_context(NULL)),
+        unnamed_interfaces(hash_table_ctor(0, hash_table_pointer_hash,
+                                           hash_table_pointer_compare))
+   {
+   }
+
+   ~array_sizing_visitor()
+   {
+      hash_table_dtor(this->unnamed_interfaces);
+      ralloc_free(this->mem_ctx);
+   }
+
+   virtual ir_visitor_status visit(ir_variable *var)
+   {
+      const glsl_type *type_without_array;
+      fixup_type(&var->type, var->data.max_array_access,
+                 var->data.from_ssbo_unsized_array);
+      type_without_array = var->type->without_array();
+      if (var->type->is_interface()) {
+         if (interface_contains_unsized_arrays(var->type)) {
+            const glsl_type *new_type =
+               resize_interface_members(var->type,
+                                        var->get_max_ifc_array_access(),
+                                        var->is_in_shader_storage_block());
+            var->type = new_type;
+            var->change_interface_type(new_type);
+         }
+      } else if (type_without_array->is_interface()) {
+         if (interface_contains_unsized_arrays(type_without_array)) {
+            const glsl_type *new_type =
+               resize_interface_members(type_without_array,
+                                        var->get_max_ifc_array_access(),
+                                        var->is_in_shader_storage_block());
+            var->change_interface_type(new_type);
+            var->type = update_interface_members_array(var->type, new_type);
+         }
+      } else if (const glsl_type *ifc_type = var->get_interface_type()) {
+         /* Store a pointer to the variable in the unnamed_interfaces
+          * hashtable.
+          */
+         ir_variable **interface_vars = (ir_variable **)
+            hash_table_find(this->unnamed_interfaces, ifc_type);
+         if (interface_vars == NULL) {
+            interface_vars = rzalloc_array(mem_ctx, ir_variable *,
+                                           ifc_type->length);
+            hash_table_insert(this->unnamed_interfaces, interface_vars,
+                              ifc_type);
+         }
+         unsigned index = ifc_type->field_index(var->name);
+         assert(index < ifc_type->length);
+         assert(interface_vars[index] == NULL);
+         interface_vars[index] = var;
+      }
+      return visit_continue;
+   }
+
+   /**
+    * For each unnamed interface block that was discovered while running the
+    * visitor, adjust the interface type to reflect the newly assigned array
+    * sizes, and fix up the ir_variable nodes to point to the new interface
+    * type.
+    */
+   void fixup_unnamed_interface_types()
+   {
+      hash_table_call_foreach(this->unnamed_interfaces,
+                              fixup_unnamed_interface_type, NULL);
+   }
+
+private:
+   /**
+    * If the type pointed to by \c type represents an unsized array, replace
+    * it with a sized array whose size is determined by max_array_access.
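+    *
+    * For example (illustrative), a declaration "float data[];" whose highest
+    * recorded access is data[7] is given the type float[8].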
+    */
+   static void fixup_type(const glsl_type **type, unsigned max_array_access,
+                          bool from_ssbo_unsized_array)
+   {
+      if (!from_ssbo_unsized_array && (*type)->is_unsized_array()) {
+         *type = glsl_type::get_array_instance((*type)->fields.array,
+                                               max_array_access + 1);
+         assert(*type != NULL);
+      }
+   }
+
+   static const glsl_type *
+   update_interface_members_array(const glsl_type *type,
+                                  const glsl_type *new_interface_type)
+   {
+      const glsl_type *element_type = type->fields.array;
+      if (element_type->is_array()) {
+         const glsl_type *new_array_type =
+            update_interface_members_array(element_type, new_interface_type);
+         return glsl_type::get_array_instance(new_array_type, type->length);
+      } else {
+         return glsl_type::get_array_instance(new_interface_type,
+                                              type->length);
+      }
+   }
+
+   /**
+    * Determine whether the given interface type contains unsized arrays (if
+    * it doesn't, array_sizing_visitor doesn't need to process it).
+    */
+   static bool interface_contains_unsized_arrays(const glsl_type *type)
+   {
+      for (unsigned i = 0; i < type->length; i++) {
+         const glsl_type *elem_type = type->fields.structure[i].type;
+         if (elem_type->is_unsized_array())
+            return true;
+      }
+      return false;
+   }
+
+   /**
+    * Create a new interface type based on the given type, with unsized arrays
+    * replaced by sized arrays whose size is determined by
+    * max_ifc_array_access.
+    */
+   static const glsl_type *
+   resize_interface_members(const glsl_type *type,
+                            const unsigned *max_ifc_array_access,
+                            bool is_ssbo)
+   {
+      unsigned num_fields = type->length;
+      glsl_struct_field *fields = new glsl_struct_field[num_fields];
+      memcpy(fields, type->fields.structure,
+             num_fields * sizeof(*fields));
+      for (unsigned i = 0; i < num_fields; i++) {
+         /* If the last member of an SSBO is an unsized array, we don't
+          * replace it with a sized array.
+          */
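+         /* For example (illustrative), in
+          *
+          *    buffer Particles { uint count; vec4 positions[]; };
+          *
+          * the trailing positions[] member keeps its unsized type.
+          */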
+         if (is_ssbo && i == (num_fields - 1))
+            fixup_type(&fields[i].type, max_ifc_array_access[i],
+                       true);
+         else
+            fixup_type(&fields[i].type, max_ifc_array_access[i],
+                       false);
+      }
+      glsl_interface_packing packing =
+         (glsl_interface_packing) type->interface_packing;
+      const glsl_type *new_ifc_type =
+         glsl_type::get_interface_instance(fields, num_fields,
+                                           packing, type->name);
+      delete [] fields;
+      return new_ifc_type;
+   }
+
+   static void fixup_unnamed_interface_type(const void *key, void *data,
+                                            void *)
+   {
+      const glsl_type *ifc_type = (const glsl_type *) key;
+      ir_variable **interface_vars = (ir_variable **) data;
+      unsigned num_fields = ifc_type->length;
+      glsl_struct_field *fields = new glsl_struct_field[num_fields];
+      memcpy(fields, ifc_type->fields.structure,
+             num_fields * sizeof(*fields));
+      bool interface_type_changed = false;
+      for (unsigned i = 0; i < num_fields; i++) {
+         if (interface_vars[i] != NULL &&
+             fields[i].type != interface_vars[i]->type) {
+            fields[i].type = interface_vars[i]->type;
+            interface_type_changed = true;
+         }
+      }
+      if (!interface_type_changed) {
+         delete [] fields;
+         return;
+      }
+      glsl_interface_packing packing =
+         (glsl_interface_packing) ifc_type->interface_packing;
+      const glsl_type *new_ifc_type =
+         glsl_type::get_interface_instance(fields, num_fields, packing,
+                                           ifc_type->name);
+      delete [] fields;
+      for (unsigned i = 0; i < num_fields; i++) {
+         if (interface_vars[i] != NULL)
+            interface_vars[i]->change_interface_type(new_ifc_type);
+      }
+   }
+
+   /**
+    * Memory context used to allocate the data in \c unnamed_interfaces.
+    */
+   void *mem_ctx;
+
+   /**
+    * Hash table from const glsl_type * to an array of ir_variable *'s
+    * pointing to the ir_variables constituting each unnamed interface block.
+    */
+   hash_table *unnamed_interfaces;
+};
+
+
+/**
+ * Performs the cross-validation of tessellation control shader vertices and
+ * layout qualifiers for the attached tessellation control shaders,
+ * and propagates them to the linked TCS and linked shader program.
+ */
+static void
+link_tcs_out_layout_qualifiers(struct gl_shader_program *prog,
+                             struct gl_shader *linked_shader,
+                             struct gl_shader **shader_list,
+                             unsigned num_shaders)
+{
+   linked_shader->TessCtrl.VerticesOut = 0;
+
+   if (linked_shader->Stage != MESA_SHADER_TESS_CTRL)
+      return;
+
+   /* From the GLSL 4.0 spec (chapter 4.3.8.2):
+    *
+    *     "All tessellation control shader layout declarations in a program
+    *      must specify the same output patch vertex count.  There must be at
+    *      least one layout qualifier specifying an output patch vertex count
+    *      in any program containing tessellation control shaders; however,
+    *      such a declaration is not required in all tessellation control
+    *      shaders."
+    */
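+   /* An example of such a layout declaration (illustrative GLSL):
+    *
+    *    layout(vertices = 3) out;
+    */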
+
+   for (unsigned i = 0; i < num_shaders; i++) {
+      struct gl_shader *shader = shader_list[i];
+
+      if (shader->TessCtrl.VerticesOut != 0) {
+        if (linked_shader->TessCtrl.VerticesOut != 0 &&
+            linked_shader->TessCtrl.VerticesOut != shader->TessCtrl.VerticesOut) {
+           linker_error(prog, "tessellation control shader defined with "
+                        "conflicting output vertex count (%d and %d)\n",
+                        linked_shader->TessCtrl.VerticesOut,
+                        shader->TessCtrl.VerticesOut);
+           return;
+        }
+        linked_shader->TessCtrl.VerticesOut = shader->TessCtrl.VerticesOut;
+      }
+   }
+
+   /* Just do the intrastage -> interstage propagation right now,
+    * since we already know we're in the right type of shader program
+    * for doing it.
+    */
+   if (linked_shader->TessCtrl.VerticesOut == 0) {
+      linker_error(prog, "tessellation control shader didn't declare "
+                  "vertices out layout qualifier\n");
+      return;
+   }
+   prog->TessCtrl.VerticesOut = linked_shader->TessCtrl.VerticesOut;
+}
+
+
+/**
+ * Performs the cross-validation of tessellation evaluation shader
+ * primitive type, vertex spacing, ordering and point_mode layout qualifiers
+ * for the attached tessellation evaluation shaders, and propagates them
+ * to the linked TES and linked shader program.
+ */
+static void
+link_tes_in_layout_qualifiers(struct gl_shader_program *prog,
+                               struct gl_shader *linked_shader,
+                               struct gl_shader **shader_list,
+                               unsigned num_shaders)
+{
+   linked_shader->TessEval.PrimitiveMode = PRIM_UNKNOWN;
+   linked_shader->TessEval.Spacing = 0;
+   linked_shader->TessEval.VertexOrder = 0;
+   linked_shader->TessEval.PointMode = -1;
+
+   if (linked_shader->Stage != MESA_SHADER_TESS_EVAL)
+      return;
+
+   /* From the GLSL 4.0 spec (chapter 4.3.8.1):
+    *
+    *     "At least one tessellation evaluation shader (compilation unit) in
+    *      a program must declare a primitive mode in its input layout.
+    *      Declaration vertex spacing, ordering, and point mode identifiers is
+    *      optional.  It is not required that all tessellation evaluation
+    *      shaders in a program declare a primitive mode.  If spacing or
+    *      vertex ordering declarations are omitted, the tessellation
+    *      primitive generator will use equal spacing or counter-clockwise
+    *      vertex ordering, respectively.  If a point mode declaration is
+    *      omitted, the tessellation primitive generator will produce lines or
+    *      triangles according to the primitive mode."
+    */
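+   /* An example of such an input layout declaration (illustrative GLSL):
+    *
+    *    layout(triangles, equal_spacing, ccw) in;
+    */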
+
+   for (unsigned i = 0; i < num_shaders; i++) {
+      struct gl_shader *shader = shader_list[i];
+
+      if (shader->TessEval.PrimitiveMode != PRIM_UNKNOWN) {
+        if (linked_shader->TessEval.PrimitiveMode != PRIM_UNKNOWN &&
+            linked_shader->TessEval.PrimitiveMode != shader->TessEval.PrimitiveMode) {
+           linker_error(prog, "tessellation evaluation shader defined with "
+                        "conflicting input primitive modes.\n");
+           return;
+        }
+        linked_shader->TessEval.PrimitiveMode = shader->TessEval.PrimitiveMode;
+      }
+
+      if (shader->TessEval.Spacing != 0) {
+        if (linked_shader->TessEval.Spacing != 0 &&
+            linked_shader->TessEval.Spacing != shader->TessEval.Spacing) {
+           linker_error(prog, "tessellation evaluation shader defined with "
+                        "conflicting vertex spacing.\n");
+           return;
+        }
+        linked_shader->TessEval.Spacing = shader->TessEval.Spacing;
+      }
+
+      if (shader->TessEval.VertexOrder != 0) {
+        if (linked_shader->TessEval.VertexOrder != 0 &&
+            linked_shader->TessEval.VertexOrder != shader->TessEval.VertexOrder) {
+           linker_error(prog, "tessellation evaluation shader defined with "
+                        "conflicting ordering.\n");
+           return;
+        }
+        linked_shader->TessEval.VertexOrder = shader->TessEval.VertexOrder;
+      }
+
+      if (shader->TessEval.PointMode != -1) {
+        if (linked_shader->TessEval.PointMode != -1 &&
+            linked_shader->TessEval.PointMode != shader->TessEval.PointMode) {
+           linker_error(prog, "tessellation evaluation shader defined with "
+                        "conflicting point modes.\n");
+           return;
+        }
+        linked_shader->TessEval.PointMode = shader->TessEval.PointMode;
+      }
+
+   }
+
+   /* Just do the intrastage -> interstage propagation right now,
+    * since we already know we're in the right type of shader program
+    * for doing it.
+    */
+   if (linked_shader->TessEval.PrimitiveMode == PRIM_UNKNOWN) {
+      linker_error(prog,
+                  "tessellation evaluation shader didn't declare input "
+                  "primitive modes.\n");
+      return;
+   }
+   prog->TessEval.PrimitiveMode = linked_shader->TessEval.PrimitiveMode;
+
+   if (linked_shader->TessEval.Spacing == 0)
+      linked_shader->TessEval.Spacing = GL_EQUAL;
+   prog->TessEval.Spacing = linked_shader->TessEval.Spacing;
+
+   if (linked_shader->TessEval.VertexOrder == 0)
+      linked_shader->TessEval.VertexOrder = GL_CCW;
+   prog->TessEval.VertexOrder = linked_shader->TessEval.VertexOrder;
+
+   if (linked_shader->TessEval.PointMode == -1)
+      linked_shader->TessEval.PointMode = GL_FALSE;
+   prog->TessEval.PointMode = linked_shader->TessEval.PointMode;
+}
+
+
 /**
- * Get the function signature for main from a shader
+ * Performs the cross-validation of layout qualifiers specified in
+ * redeclaration of gl_FragCoord for the attached fragment shaders,
+ * and propagates them to the linked FS and linked shader program.
  */
-static ir_function_signature *
-get_main_function_signature(gl_shader *sh)
+static void
+link_fs_input_layout_qualifiers(struct gl_shader_program *prog,
+                               struct gl_shader *linked_shader,
+                               struct gl_shader **shader_list,
+                               unsigned num_shaders)
 {
-   ir_function *const f = sh->symbols->get_function("main");
-   if (f != NULL) {
-      exec_list void_parameters;
+   linked_shader->redeclares_gl_fragcoord = false;
+   linked_shader->uses_gl_fragcoord = false;
+   linked_shader->origin_upper_left = false;
+   linked_shader->pixel_center_integer = false;
 
-      /* Look for the 'void main()' signature and ensure that it's defined.
-       * This keeps the linker from accidentally pick a shader that just
-       * contains a prototype for main.
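+   /* gl_FragCoord layout qualifiers only exist in GLSL 1.50+ or with
+    * ARB_fragment_coord_conventions, so there is nothing to link otherwise.
+    */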
+   if (linked_shader->Stage != MESA_SHADER_FRAGMENT ||
+       (prog->Version < 150 && !prog->ARB_fragment_coord_conventions_enable))
+      return;
+
+   for (unsigned i = 0; i < num_shaders; i++) {
+      struct gl_shader *shader = shader_list[i];
+      /* From the GLSL 1.50 spec, page 39:
+       *
+       *   "If gl_FragCoord is redeclared in any fragment shader in a program,
+       *    it must be redeclared in all the fragment shaders in that program
+       *    that have a static use gl_FragCoord."
+       */
+      if ((linked_shader->redeclares_gl_fragcoord
+           && !shader->redeclares_gl_fragcoord
+           && shader->uses_gl_fragcoord)
+          || (shader->redeclares_gl_fragcoord
+              && !linked_shader->redeclares_gl_fragcoord
+              && linked_shader->uses_gl_fragcoord)) {
+             linker_error(prog, "fragment shader defined with conflicting "
+                         "layout qualifiers for gl_FragCoord\n");
+      }
+
+      /* From the GLSL 1.50 spec, page 39:
        *
-       * We don't have to check for multiple definitions of main (in multiple
-       * shaders) because that would have already been caught above.
+       *   "All redeclarations of gl_FragCoord in all fragment shaders in a
+       *    single program must have the same set of qualifiers."
+       */
+      if (linked_shader->redeclares_gl_fragcoord && shader->redeclares_gl_fragcoord
+          && (shader->origin_upper_left != linked_shader->origin_upper_left
+          || shader->pixel_center_integer != linked_shader->pixel_center_integer)) {
+         linker_error(prog, "fragment shader defined with conflicting "
+                      "layout qualifiers for gl_FragCoord\n");
+      }
+
+      /* Update the linked shader state.  Note that uses_gl_fragcoord should
+       * accumulate the results.  The other values should replace.  If there
+       * are multiple redeclarations, all the fields except uses_gl_fragcoord
+       * are already known to be the same.
        */
-      ir_function_signature *sig = f->matching_signature(&void_parameters);
-      if ((sig != NULL) && sig->is_defined) {
-        return sig;
+      if (shader->redeclares_gl_fragcoord || shader->uses_gl_fragcoord) {
+         linked_shader->redeclares_gl_fragcoord =
+            shader->redeclares_gl_fragcoord;
+         linked_shader->uses_gl_fragcoord = linked_shader->uses_gl_fragcoord
+            || shader->uses_gl_fragcoord;
+         linked_shader->origin_upper_left = shader->origin_upper_left;
+         linked_shader->pixel_center_integer = shader->pixel_center_integer;
+      }
+
+      linked_shader->EarlyFragmentTests |= shader->EarlyFragmentTests;
+   }
+}
+
+/**
+ * Performs the cross-validation of geometry shader max_vertices and
+ * primitive type layout qualifiers for the attached geometry shaders,
+ * and propagates them to the linked GS and linked shader program.
+ */
+static void
+link_gs_inout_layout_qualifiers(struct gl_shader_program *prog,
+                               struct gl_shader *linked_shader,
+                               struct gl_shader **shader_list,
+                               unsigned num_shaders)
+{
+   linked_shader->Geom.VerticesOut = 0;
+   linked_shader->Geom.Invocations = 0;
+   linked_shader->Geom.InputType = PRIM_UNKNOWN;
+   linked_shader->Geom.OutputType = PRIM_UNKNOWN;
+
+   /* No in/out qualifiers defined for anything but GLSL 1.50+
+    * geometry shaders so far.
+    */
+   if (linked_shader->Stage != MESA_SHADER_GEOMETRY || prog->Version < 150)
+      return;
+
+   /* From the GLSL 1.50 spec, page 46:
+    *
+    *     "All geometry shader output layout declarations in a program
+    *      must declare the same layout and same value for
+    *      max_vertices. There must be at least one geometry output
+    *      layout declaration somewhere in a program, but not all
+    *      geometry shaders (compilation units) are required to
+    *      declare it."
+    */
+
+   for (unsigned i = 0; i < num_shaders; i++) {
+      struct gl_shader *shader = shader_list[i];
+
+      if (shader->Geom.InputType != PRIM_UNKNOWN) {
+        if (linked_shader->Geom.InputType != PRIM_UNKNOWN &&
+            linked_shader->Geom.InputType != shader->Geom.InputType) {
+           linker_error(prog, "geometry shader defined with conflicting "
+                        "input types\n");
+           return;
+        }
+        linked_shader->Geom.InputType = shader->Geom.InputType;
+      }
+
+      if (shader->Geom.OutputType != PRIM_UNKNOWN) {
+        if (linked_shader->Geom.OutputType != PRIM_UNKNOWN &&
+            linked_shader->Geom.OutputType != shader->Geom.OutputType) {
+           linker_error(prog, "geometry shader defined with conflicting "
+                        "output types\n");
+           return;
+        }
+        linked_shader->Geom.OutputType = shader->Geom.OutputType;
+      }
+
+      if (shader->Geom.VerticesOut != 0) {
+        if (linked_shader->Geom.VerticesOut != 0 &&
+            linked_shader->Geom.VerticesOut != shader->Geom.VerticesOut) {
+           linker_error(prog, "geometry shader defined with conflicting "
+                        "output vertex count (%d and %d)\n",
+                        linked_shader->Geom.VerticesOut,
+                        shader->Geom.VerticesOut);
+           return;
+        }
+        linked_shader->Geom.VerticesOut = shader->Geom.VerticesOut;
+      }
+
+      if (shader->Geom.Invocations != 0) {
+        if (linked_shader->Geom.Invocations != 0 &&
+            linked_shader->Geom.Invocations != shader->Geom.Invocations) {
+           linker_error(prog, "geometry shader defined with conflicting "
+                        "invocation count (%d and %d)\n",
+                        linked_shader->Geom.Invocations,
+                        shader->Geom.Invocations);
+           return;
+        }
+        linked_shader->Geom.Invocations = shader->Geom.Invocations;
       }
    }
 
-   return NULL;
+   /* Just do the intrastage -> interstage propagation right now,
+    * since we already know we're in the right type of shader program
+    * for doing it.
+    */
+   if (linked_shader->Geom.InputType == PRIM_UNKNOWN) {
+      linker_error(prog,
+                  "geometry shader didn't declare primitive input type\n");
+      return;
+   }
+   prog->Geom.InputType = linked_shader->Geom.InputType;
+
+   if (linked_shader->Geom.OutputType == PRIM_UNKNOWN) {
+      linker_error(prog,
+                  "geometry shader didn't declare primitive output type\n");
+      return;
+   }
+   prog->Geom.OutputType = linked_shader->Geom.OutputType;
+
+   if (linked_shader->Geom.VerticesOut == 0) {
+      linker_error(prog,
+                  "geometry shader didn't declare max_vertices\n");
+      return;
+   }
+   prog->Geom.VerticesOut = linked_shader->Geom.VerticesOut;
+
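+   /* Per ARB_gpu_shader5, the invocation count defaults to 1 when no
+    * geometry shader declares it.
+    */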
+   if (linked_shader->Geom.Invocations == 0)
+      linked_shader->Geom.Invocations = 1;
+
+   prog->Geom.Invocations = linked_shader->Geom.Invocations;
 }
 
 
 /**
- * This class is only used in link_intrastage_shaders() below but declaring
- * it inside that function leads to compiler warnings with some versions of
- * gcc.
+ * Perform cross-validation of compute shader local_size_{x,y,z} layout
+ * qualifiers for the attached compute shaders, and propagate them to the
+ * linked CS and linked shader program.
  */
-class array_sizing_visitor : public ir_hierarchical_visitor {
-public:
-   virtual ir_visitor_status visit(ir_variable *var)
-   {
-      if (var->type->is_array() && (var->type->length == 0)) {
-         const glsl_type *type =
-            glsl_type::get_array_instance(var->type->fields.array,
-                                          var->max_array_access + 1);
-         assert(type != NULL);
-         var->type = type;
+static void
+link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
+                                struct gl_shader *linked_shader,
+                                struct gl_shader **shader_list,
+                                unsigned num_shaders)
+{
+   for (int i = 0; i < 3; i++)
+      linked_shader->Comp.LocalSize[i] = 0;
+
+   /* This function is called for all shader stages, but it only has an effect
+    * for compute shaders.
+    */
+   if (linked_shader->Stage != MESA_SHADER_COMPUTE)
+      return;
+
+   /* From the ARB_compute_shader spec, in the section describing local size
+    * declarations:
+    *
+    *     If multiple compute shaders attached to a single program object
+    *     declare local work-group size, the declarations must be identical;
+    *     otherwise a link-time error results. Furthermore, if a program
+    *     object contains any compute shaders, at least one must contain an
+    *     input layout qualifier specifying the local work sizes of the
+    *     program, or a link-time error will occur.
+    */
+   for (unsigned sh = 0; sh < num_shaders; sh++) {
+      struct gl_shader *shader = shader_list[sh];
+
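+      /* A LocalSize[0] of zero means this shader did not declare a local
+       * work-group size.
+       */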
+      if (shader->Comp.LocalSize[0] != 0) {
+         if (linked_shader->Comp.LocalSize[0] != 0) {
+            for (int i = 0; i < 3; i++) {
+               if (linked_shader->Comp.LocalSize[i] !=
+                   shader->Comp.LocalSize[i]) {
+                  linker_error(prog, "compute shader defined with conflicting "
+                               "local sizes\n");
+                  return;
+               }
+            }
+         }
+         for (int i = 0; i < 3; i++)
+            linked_shader->Comp.LocalSize[i] = shader->Comp.LocalSize[i];
       }
-      return visit_continue;
    }
-};
+
+   /* Just do the intrastage -> interstage propagation right now,
+    * since we already know we're in the right type of shader program
+    * for doing it.
+    */
+   if (linked_shader->Comp.LocalSize[0] == 0) {
+      linker_error(prog, "compute shader didn't declare local size\n");
+      return;
+   }
+   for (int i = 0; i < 3; i++)
+      prog->Comp.LocalSize[i] = linked_shader->Comp.LocalSize[i];
+}
+
 
 /**
  * Combine a group of shaders for a single stage to generate a linked shader
@@ -935,22 +1982,30 @@ link_intrastage_shaders(void *mem_ctx,
 
    /* Check that global variables defined in multiple shaders are consistent.
     */
-   if (!cross_validate_globals(prog, shader_list, num_shaders, false))
+   cross_validate_globals(prog, shader_list, num_shaders, false);
+   if (!prog->LinkStatus)
+      return NULL;
+
+   /* Check that interface blocks defined in multiple shaders are consistent.
+    */
+   validate_intrastage_interface_blocks(prog, (const gl_shader **)shader_list,
+                                        num_shaders);
+   if (!prog->LinkStatus)
       return NULL;
 
-   /* Check that uniform blocks between shaders for a stage agree. */
-   const int num_uniform_blocks =
-      link_uniform_blocks(mem_ctx, prog, shader_list, num_shaders,
+   /* Link up uniform blocks defined within this stage. */
+   const unsigned num_uniform_blocks =
+      link_uniform_blocks(mem_ctx, ctx, prog, shader_list, num_shaders,
                           &uniform_blocks);
-   if (num_uniform_blocks < 0)
+   if (!prog->LinkStatus)
       return NULL;
 
    /* Check that there is only a single definition of each function signature
     * across all shaders.
     */
    for (unsigned i = 0; i < (num_shaders - 1); i++) {
-      foreach_list(node, shader_list[i]->ir) {
-        ir_function *const f = ((ir_instruction *) node)->as_function();
+      foreach_in_list(ir_instruction, node, shader_list[i]->ir) {
+        ir_function *const f = node->as_function();
 
         if (f == NULL)
            continue;
@@ -965,19 +2020,16 @@ link_intrastage_shaders(void *mem_ctx,
            if (other == NULL)
               continue;
 
-           foreach_iter (exec_list_iterator, iter, *f) {
-              ir_function_signature *sig =
-                 (ir_function_signature *) iter.get();
-
-              if (!sig->is_defined || sig->is_builtin)
+           foreach_in_list(ir_function_signature, sig, &f->signatures) {
+              if (!sig->is_defined || sig->is_builtin())
                  continue;
 
               ir_function_signature *other_sig =
-                 other->exact_matching_signature(sig->parameters);
+                 other->exact_matching_signature(NULL, &sig->parameters);
 
               if ((other_sig != NULL) && other_sig->is_defined
-                  && !other_sig->is_builtin) {
-                 linker_error(prog, "function `%s' is multiply defined",
+                  && !other_sig->is_builtin()) {
+                 linker_error(prog, "function `%s' is multiply defined\n",
                               f->name);
                  return NULL;
               }
@@ -995,7 +2047,7 @@ link_intrastage_shaders(void *mem_ctx,
     */
    gl_shader *main = NULL;
    for (unsigned i = 0; i < num_shaders; i++) {
-      if (get_main_function_signature(shader_list[i]) != NULL) {
+      if (_mesa_get_main_function_signature(shader_list[i]) != NULL) {
         main = shader_list[i];
         break;
       }
@@ -1003,8 +2055,7 @@ link_intrastage_shaders(void *mem_ctx,
 
    if (main == NULL) {
       linker_error(prog, "%s shader lacks `main'\n",
-                  (shader_list[0]->Type == GL_VERTEX_SHADER)
-                  ? "vertex" : "fragment");
+                  _mesa_shader_stage_to_string(shader_list[0]->Stage));
       return NULL;
    }
 
@@ -1012,16 +2063,23 @@ link_intrastage_shaders(void *mem_ctx,
    linked->ir = new(linked) exec_list;
    clone_ir_list(mem_ctx, linked->ir, main->ir);
 
-   linked->UniformBlocks = uniform_blocks;
-   linked->NumUniformBlocks = num_uniform_blocks;
-   ralloc_steal(linked, linked->UniformBlocks);
+   linked->BufferInterfaceBlocks = uniform_blocks;
+   linked->NumBufferInterfaceBlocks = num_uniform_blocks;
+   ralloc_steal(linked, linked->BufferInterfaceBlocks);
+
+   link_fs_input_layout_qualifiers(prog, linked, shader_list, num_shaders);
+   link_tcs_out_layout_qualifiers(prog, linked, shader_list, num_shaders);
+   link_tes_in_layout_qualifiers(prog, linked, shader_list, num_shaders);
+   link_gs_inout_layout_qualifiers(prog, linked, shader_list, num_shaders);
+   link_cs_input_layout_qualifiers(prog, linked, shader_list, num_shaders);
 
    populate_symbol_table(linked);
 
-   /* The pointer to the main function in the final linked shader (i.e., the
+   /* The pointer to the main function in the final linked shader (i.e., the
     * copy of the original shader that contained the main function).
     */
-   ir_function_signature *const main_sig = get_main_function_signature(linked);
+   ir_function_signature *const main_sig =
+      _mesa_get_main_function_signature(linked);
 
    /* Move any instructions other than variable declarations or function
     * declarations into main.
@@ -1038,50 +2096,77 @@ link_intrastage_shaders(void *mem_ctx,
                                              insertion_point, true, linked);
    }
 
-   /* Resolve initializers for global variables in the linked shader.
-    */
-   unsigned num_linking_shaders = num_shaders;
-   for (unsigned i = 0; i < num_shaders; i++)
-      num_linking_shaders += shader_list[i]->num_builtins_to_link;
+   /* Check if any shader needs built-in functions. */
+   bool need_builtins = false;
+   for (unsigned i = 0; i < num_shaders; i++) {
+      if (shader_list[i]->uses_builtin_functions) {
+         need_builtins = true;
+         break;
+      }
+   }
 
-   gl_shader **linking_shaders =
-      (gl_shader **) calloc(num_linking_shaders, sizeof(gl_shader *));
+   bool ok;
+   if (need_builtins) {
+      /* Make a temporary array one larger than shader_list, which will hold
+       * the built-in function shader as well.
+       */
+      gl_shader **linking_shaders = (gl_shader **)
+         calloc(num_shaders + 1, sizeof(gl_shader *));
 
-   memcpy(linking_shaders, shader_list,
-         sizeof(linking_shaders[0]) * num_shaders);
+      ok = linking_shaders != NULL;
 
-   unsigned idx = num_shaders;
-   for (unsigned i = 0; i < num_shaders; i++) {
-      memcpy(&linking_shaders[idx], shader_list[i]->builtins_to_link,
-            sizeof(linking_shaders[0]) * shader_list[i]->num_builtins_to_link);
-      idx += shader_list[i]->num_builtins_to_link;
-   }
+      if (ok) {
+         memcpy(linking_shaders, shader_list, num_shaders * sizeof(gl_shader *));
+         linking_shaders[num_shaders] = _mesa_glsl_get_builtin_function_shader();
 
-   assert(idx == num_linking_shaders);
+         ok = link_function_calls(prog, linked, linking_shaders, num_shaders + 1);
 
-   if (!link_function_calls(prog, linked, linking_shaders,
-                           num_linking_shaders)) {
-      ctx->Driver.DeleteShader(ctx, linked);
-      linked = NULL;
+         free(linking_shaders);
+      } else {
+         _mesa_error_no_memory(__func__);
+      }
+   } else {
+      ok = link_function_calls(prog, linked, shader_list, num_shaders);
    }
 
-   free(linking_shaders);
+
+   if (!ok) {
+      _mesa_delete_shader(ctx, linked);
+      return NULL;
+   }
 
    /* At this point linked should contain all of the linked IR, so
     * validate it to make sure nothing went wrong.
     */
-   if (linked)
-      validate_ir_tree(linked->ir);
+   validate_ir_tree(linked->ir);
+
+   /* Set the size of geometry shader input arrays */
+   if (linked->Stage == MESA_SHADER_GEOMETRY) {
+      unsigned num_vertices = vertices_per_prim(prog->Geom.InputType);
+      geom_array_resize_visitor input_resize_visitor(num_vertices, prog);
+      foreach_in_list(ir_instruction, ir, linked->ir) {
+         ir->accept(&input_resize_visitor);
+      }
+   }
+
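+   /* Rewrite gl_VertexID for drivers whose native vertex ID does not
+    * include the base vertex.
+    */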
+   if (ctx->Const.VertexID_is_zero_based)
+      lower_vertex_id(linked);
+
+   /* Validate correct usage of barrier() in the tess control shader */
+   if (linked->Stage == MESA_SHADER_TESS_CTRL) {
+      barrier_use_visitor visitor(prog);
+      foreach_in_list(ir_instruction, ir, linked->ir) {
+         ir->accept(&visitor);
+      }
+   }
 
    /* Make a pass over all variable declarations to ensure that arrays with
     * unspecified sizes have a size specified.  The size is inferred from the
     * max_array_access field.
     */
-   if (linked != NULL) {
-      array_sizing_visitor v;
-
-      v.run(linked->ir);
-   }
+   array_sizing_visitor v;
+   v.run(linked->ir);
+   v.fixup_unnamed_interface_types();
 
    return linked;
 }
@@ -1104,44 +2189,49 @@ link_intrastage_shaders(void *mem_ctx,
 static void
 update_array_sizes(struct gl_shader_program *prog)
 {
-   for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
         if (prog->_LinkedShaders[i] == NULL)
            continue;
 
-      foreach_list(node, prog->_LinkedShaders[i]->ir) {
-        ir_variable *const var = ((ir_instruction *) node)->as_variable();
+      foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
+        ir_variable *const var = node->as_variable();
 
-        if ((var == NULL) || (var->mode != ir_var_uniform &&
-                              var->mode != ir_var_shader_in &&
-                              var->mode != ir_var_shader_out) ||
+        if ((var == NULL) || (var->data.mode != ir_var_uniform) ||
             !var->type->is_array())
            continue;
 
         /* GL_ARB_uniform_buffer_object says that std140 uniforms
          * will not be eliminated.  Since we always do std140, just
          * don't resize arrays in UBOs.
+          *
+          * Atomic counters are supposed to get deterministic
+          * locations assigned based on the declaration ordering and
+          * sizes; array compaction would mess that up.
+          *
+          * Subroutine uniforms are not removed.
          */
-        if (var->is_in_uniform_block())
+        if (var->is_in_buffer_block() || var->type->contains_atomic() ||
+            var->type->contains_subroutine())
            continue;
 
-        unsigned int size = var->max_array_access;
-        for (unsigned j = 0; j < MESA_SHADER_TYPES; j++) {
+        unsigned int size = var->data.max_array_access;
+        for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
               if (prog->_LinkedShaders[j] == NULL)
                  continue;
 
-           foreach_list(node2, prog->_LinkedShaders[j]->ir) {
-              ir_variable *other_var = ((ir_instruction *) node2)->as_variable();
+           foreach_in_list(ir_instruction, node2, prog->_LinkedShaders[j]->ir) {
+              ir_variable *other_var = node2->as_variable();
               if (!other_var)
                  continue;
 
               if (strcmp(var->name, other_var->name) == 0 &&
-                  other_var->max_array_access > size) {
-                 size = other_var->max_array_access;
+                  other_var->data.max_array_access > size) {
+                 size = other_var->data.max_array_access;
               }
            }
         }
 
-        if (size + 1 != var->type->fields.array->length) {
+        if (size + 1 != var->type->length) {
            /* If this is a built-in uniform (i.e., it's backed by some
             * fixed-function state), adjust the number of state slots to
             * match the new array size.  The number of slots per array entry
@@ -1150,9 +2240,10 @@ update_array_sizes(struct gl_shader_program *prog)
             * Determine the number of slots per array element by dividing by
             * the old (total) size.
             */
-           if (var->num_state_slots > 0) {
-              var->num_state_slots = (size + 1)
-                 * (var->num_state_slots / var->type->length);
+            const unsigned num_slots = var->get_num_state_slots();
+           if (num_slots > 0) {
+              var->set_num_state_slots((size + 1)
+                                        * (num_slots / var->type->length));
            }
 
            var->type = glsl_type::get_array_instance(var->type->fields.array,
@@ -1165,6 +2256,50 @@ update_array_sizes(struct gl_shader_program *prog)
    }
 }
 
+/**
+ * Resize tessellation evaluation per-vertex inputs to the size of
+ * tessellation control per-vertex outputs.
+ */
+static void
+resize_tes_inputs(struct gl_context *ctx,
+                  struct gl_shader_program *prog)
+{
+   if (prog->_LinkedShaders[MESA_SHADER_TESS_EVAL] == NULL)
+      return;
+
+   gl_shader *const tcs = prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
+   gl_shader *const tes = prog->_LinkedShaders[MESA_SHADER_TESS_EVAL];
+
+   /* If no control shader is present, then the TES inputs are statically
+    * sized to MaxPatchVertices; the actual size of the arrays won't be
+    * known until draw time.
+    */
+   const int num_vertices = tcs
+      ? tcs->TessCtrl.VerticesOut
+      : ctx->Const.MaxPatchVertices;
+
+   tess_eval_array_resize_visitor input_resize_visitor(num_vertices, prog);
+   foreach_in_list(ir_instruction, ir, tes->ir) {
+      ir->accept(&input_resize_visitor);
+   }
+
+   if (tcs) {
+      /* Convert the gl_PatchVerticesIn system value into a constant, since
+       * the value is known at this point.
+       */
+      foreach_in_list(ir_instruction, ir, tes->ir) {
+         ir_variable *var = ir->as_variable();
+         if (var && var->data.mode == ir_var_system_value &&
+             var->data.location == SYSTEM_VALUE_VERTICES_IN) {
+            void *mem_ctx = ralloc_parent(var);
+            var->data.mode = ir_var_auto;
+            var->data.location = 0;
+            var->constant_value = new(mem_ctx) ir_constant(num_vertices);
+         }
+      }
+   }
+}
+
 /**
  * Find a contiguous set of available bits in a bitmask.
  *
@@ -1198,15 +2333,13 @@ find_available_slots(unsigned used_mask, unsigned needed_count)
 
 
 /**
- * Assign locations for either VS inputs for FS outputs
+ * Assign locations for either VS inputs or FS outputs
  *
  * \param prog          Shader program whose variables need locations assigned
+ * \param constants     Driver specific constant values for the program.
  * \param target_index  Selector for the program target to receive location
 *                      assignments.  Must be either \c MESA_SHADER_VERTEX or
  *                      \c MESA_SHADER_FRAGMENT.
- * \param max_index     Maximum number of generic locations.  This corresponds
- *                      to either the maximum number of draw buffers or the
- *                      maximum number of generic attributes.
  *
  * \return
  * If locations are successfully assigned, true is returned.  Otherwise an
@@ -1214,13 +2347,22 @@ find_available_slots(unsigned used_mask, unsigned needed_count)
  */
 bool
 assign_attribute_or_color_locations(gl_shader_program *prog,
-                                   unsigned target_index,
-                                   unsigned max_index)
+                                    struct gl_constants *constants,
+                                    unsigned target_index)
 {
+   /* Maximum number of generic locations.  This corresponds to either the
+    * maximum number of draw buffers or the maximum number of generic
+    * attributes.
+    */
+   unsigned max_index = (target_index == MESA_SHADER_VERTEX) ?
+      constants->Program[target_index].MaxAttribs :
+      MAX2(constants->MaxDrawBuffers, constants->MaxDualSourceDrawBuffers);
+
    /* Mark invalid locations as being used.
     */
    unsigned used_locations = (max_index >= 32)
       ? ~0 : ~((1 << max_index) - 1);
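+   /* Locations occupied by double-precision attributes, each of which may
+    * consume two single-precision vectors of internal storage (see the
+    * dvec3/dvec4 handling below).
+    */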
+   unsigned double_storage_locations = 0;
 
    assert((target_index == MESA_SHADER_VERTEX)
          || (target_index == MESA_SHADER_FRAGMENT));
@@ -1272,19 +2414,20 @@ assign_attribute_or_color_locations(gl_shader_program *prog,
 
    unsigned num_attr = 0;
 
-   foreach_list(node, sh->ir) {
-      ir_variable *const var = ((ir_instruction *) node)->as_variable();
+   foreach_in_list(ir_instruction, node, sh->ir) {
+      ir_variable *const var = node->as_variable();
 
-      if ((var == NULL) || (var->mode != (unsigned) direction))
+      if ((var == NULL) || (var->data.mode != (unsigned) direction))
         continue;
 
-      if (var->explicit_location) {
-        if ((var->location >= (int)(max_index + generic_base))
-            || (var->location < 0)) {
+      if (var->data.explicit_location) {
+        if ((var->data.location >= (int)(max_index + generic_base))
+            || (var->data.location < 0)) {
            linker_error(prog,
                         "invalid explicit location %d specified for `%s'\n",
-                        (var->location < 0)
-                        ? var->location : var->location - generic_base,
+                        (var->data.location < 0)
+                        ? var->data.location
+                         : var->data.location - generic_base,
                         var->name);
            return false;
         }
@@ -1293,8 +2436,8 @@ assign_attribute_or_color_locations(gl_shader_program *prog,
 
         if (prog->AttributeBindings->get(binding, var->name)) {
            assert(binding >= VERT_ATTRIB_GENERIC0);
-           var->location = binding;
-            var->is_unmatched_generic_inout = 0;
+           var->data.location = binding;
+            var->data.is_unmatched_generic_inout = 0;
         }
       } else if (target_index == MESA_SHADER_FRAGMENT) {
         unsigned binding;
@@ -1302,23 +2445,43 @@ assign_attribute_or_color_locations(gl_shader_program *prog,
 
         if (prog->FragDataBindings->get(binding, var->name)) {
            assert(binding >= FRAG_RESULT_DATA0);
-           var->location = binding;
-            var->is_unmatched_generic_inout = 0;
+           var->data.location = binding;
+            var->data.is_unmatched_generic_inout = 0;
 
            if (prog->FragDataIndexBindings->get(index, var->name)) {
-              var->index = index;
+              var->data.index = index;
            }
         }
       }
 
+      /* From GL4.5 core spec, section 15.2 (Shader Execution):
+       *
+       *     "Output binding assignments will cause LinkProgram to fail:
+       *     ...
+       *     If the program has an active output assigned to a location greater
+       *     than or equal to the value of MAX_DUAL_SOURCE_DRAW_BUFFERS and has
+       *     an active output assigned an index greater than or equal to one;"
+       */
+      if (target_index == MESA_SHADER_FRAGMENT && var->data.index >= 1 &&
+          var->data.location - generic_base >=
+          (int) constants->MaxDualSourceDrawBuffers) {
+         linker_error(prog,
+                      "output location %d >= GL_MAX_DUAL_SOURCE_DRAW_BUFFERS "
+                      "with index %u for %s\n",
+                      var->data.location - generic_base, var->data.index,
+                      var->name);
+         return false;
+      }
+
+      const unsigned slots = var->type->count_attribute_slots();
+
       /* If the variable is not a built-in and has a location statically
        * assigned in the shader (presumably via a layout qualifier), make sure
        * that it doesn't collide with other assigned locations.  Otherwise,
        * add it to the list of variables that need linker-assigned locations.
        */
-      const unsigned slots = count_attribute_slots(var->type);
-      if (var->location != -1) {
-        if (var->location >= generic_base && var->index < 1) {
+      if (var->data.location != -1) {
+        if (var->data.location >= generic_base && var->data.index < 1) {
            /* From page 61 of the OpenGL 4.0 spec:
             *
             *     "LinkProgram will fail if the attribute bindings assigned
@@ -1327,10 +2490,12 @@ assign_attribute_or_color_locations(gl_shader_program *prog,
             *     active attribute array, both of which require multiple
             *     contiguous generic attributes."
             *
-            * Previous versions of the spec contain similar language but omit
-            * the bit about attribute arrays.
+            * I think the above text prohibits aliasing between explicit and
+            * automatic assignments. However, aliasing is allowed in manual
+            * assignments of attribute locations; see the comments below for
+            * the details.
             *
-            * Page 61 of the OpenGL 4.0 spec also says:
+            * From OpenGL 4.0 spec, page 61:
             *
             *     "It is possible for an application to bind more than one
             *     attribute name to the same location. This is referred to as
@@ -1343,32 +2508,119 @@ assign_attribute_or_color_locations(gl_shader_program *prog,
             *     but implementations are not required to generate an error
             *     in this case."
             *
-            * These two paragraphs are either somewhat contradictory, or I
-            * don't fully understand one or both of them.
-            */
-           /* FINISHME: The code as currently written does not support
-            * FINISHME: attribute location aliasing (see comment above).
+            * From GLSL 4.30 spec, page 54:
+            *
+            *    "A program will fail to link if any two non-vertex shader
+            *     input variables are assigned to the same location. For
+            *     vertex shaders, multiple input variables may be assigned
+            *     to the same location using either layout qualifiers or via
+            *     the OpenGL API. However, such aliasing is intended only to
+            *     support vertex shaders where each execution path accesses
+            *     at most one input per each location. Implementations are
+            *     permitted, but not required, to generate link-time errors
+            *     if they detect that every path through the vertex shader
+            *     executable accesses multiple inputs assigned to any single
+            *     location. For all shader types, a program will fail to link
+            *     if explicit location assignments leave the linker unable
+            *     to find space for other variables without explicit
+            *     assignments."
+            *
+            * From OpenGL ES 3.0 spec, page 56:
+            *
+            *    "Binding more than one attribute name to the same location
+            *     is referred to as aliasing, and is not permitted in OpenGL
+            *     ES Shading Language 3.00 vertex shaders. LinkProgram will
+            *     fail when this condition exists. However, aliasing is
+            *     possible in OpenGL ES Shading Language 1.00 vertex shaders.
+            *     This will only work if only one of the aliased attributes
+            *     is active in the executable program, or if no path through
+            *     the shader consumes more than one attribute of a set of
+            *     attributes aliased to the same location. A link error can
+            *     occur if the linker determines that every path through the
+            *     shader consumes multiple aliased attributes, but implemen-
+            *     tations are not required to generate an error in this case."
+            *
+            * After looking at above references from OpenGL, OpenGL ES and
+            * GLSL specifications, we allow aliasing of vertex input variables
+            * in: OpenGL 2.0 (and above) and OpenGL ES 2.0.
+            *
+            * NOTE: This is not required by the spec, but it's worth mentioning
+            * here that we're not doing anything to make sure that no path
+            * through the vertex shader executable accesses multiple inputs
+            * assigned to any single location.
             */
+
            /* Mask representing the contiguous slots that will be used by
             * this attribute.
             */
-           const unsigned attr = var->location - generic_base;
+           const unsigned attr = var->data.location - generic_base;
            const unsigned use_mask = (1 << slots) - 1;
+            const char *const string = (target_index == MESA_SHADER_VERTEX)
+               ? "vertex shader input" : "fragment shader output";
+
+            /* Generate a link error if the requested locations for this
+             * attribute exceed the maximum allowed attribute location.
+             */
+            if (attr + slots > max_index) {
+               linker_error(prog,
+                           "insufficient contiguous locations "
+                           "available for %s `%s' %d %d %d\n", string,
+                           var->name, used_locations, use_mask, attr);
+               return false;
+            }
 
            /* Generate a link error if the set of bits requested for this
             * attribute overlaps any previously allocated bits.
             */
            if ((~(use_mask << attr) & used_locations) != used_locations) {
-              const char *const string = (target_index == MESA_SHADER_VERTEX)
-                 ? "vertex shader input" : "fragment shader output";
-              linker_error(prog,
-                           "insufficient contiguous locations "
-                           "available for %s `%s' %d %d %d", string,
-                           var->name, used_locations, use_mask, attr);
-              return false;
+               if (target_index == MESA_SHADER_FRAGMENT ||
+                   (prog->IsES && prog->Version >= 300)) {
+                  linker_error(prog,
+                               "overlapping location is assigned "
+                               "to %s `%s' %d %d %d\n", string,
+                               var->name, used_locations, use_mask, attr);
+                  return false;
+               } else {
+                  linker_warning(prog,
+                                 "overlapping location is assigned "
+                                 "to %s `%s' %d %d %d\n", string,
+                                 var->name, used_locations, use_mask, attr);
+               }
            }
 
            used_locations |= (use_mask << attr);
+
+            /* From the GL 4.5 core spec, section 11.1.1 (Vertex Attributes):
+             *
+             * "A program with more than the value of MAX_VERTEX_ATTRIBS
+             *  active attribute variables may fail to link, unless
+             *  device-dependent optimizations are able to make the program
+             *  fit within available hardware resources. For the purposes
+             *  of this test, attribute variables of the type dvec3, dvec4,
+             *  dmat2x3, dmat2x4, dmat3, dmat3x4, dmat4x3, and dmat4 may
+             *  count as consuming twice as many attributes as equivalent
+             *  single-precision types. While these types use the same number
+             *  of generic attributes as their single-precision equivalents,
+             *  implementations are permitted to consume two single-precision
+             *  vectors of internal storage for each three- or four-component
+             *  double-precision vector."
+             *
+             * Mark this attribute slot as taking up twice as much space
+             * so we can count it properly against limits.  According to
+             * issue (3) of the GL_ARB_vertex_attrib_64bit behavior, this
+             * is optional behavior, but it seems preferable.
+             */
+            const glsl_type *type = var->type->without_array();
+            if (type == glsl_type::dvec3_type ||
+                type == glsl_type::dvec4_type ||
+                type == glsl_type::dmat2x3_type ||
+                type == glsl_type::dmat2x4_type ||
+                type == glsl_type::dmat3_type ||
+                type == glsl_type::dmat3x4_type ||
+                type == glsl_type::dmat4x3_type ||
+                type == glsl_type::dmat4_type) {
+               double_storage_locations |= (use_mask << attr);
+            }
         }
 
         continue;
@@ -1379,6 +2631,18 @@ assign_attribute_or_color_locations(gl_shader_program *prog,
       num_attr++;
    }
 
+   if (target_index == MESA_SHADER_VERTEX) {
+      unsigned total_attribs_size =
+         _mesa_bitcount(used_locations & ((1 << max_index) - 1)) +
+         _mesa_bitcount(double_storage_locations);
+      if (total_attribs_size > max_index) {
+        linker_error(prog,
+                     "attempt to use %d vertex attribute slots, only %d available\n",
+                     total_attribs_size, max_index);
+        return false;
+      }
+   }
+
    /* If all of the attributes were assigned locations by the application (or
     * are built-in attributes with fixed locations), return early.  This should
     * be the common case.
@@ -1413,13 +2677,13 @@ assign_attribute_or_color_locations(gl_shader_program *prog,
 
         linker_error(prog,
                      "insufficient contiguous locations "
-                     "available for %s `%s'",
+                     "available for %s `%s'\n",
                      string, to_assign[i].var->name);
         return false;
       }
 
-      to_assign[i].var->location = generic_base + location;
-      to_assign[i].var->is_unmatched_generic_inout = 0;
+      to_assign[i].var->data.location = generic_base + location;
+      to_assign[i].var->data.is_unmatched_generic_inout = 0;
       used_locations |= (use_mask << location);
    }
 
@@ -1433,18 +2697,19 @@ assign_attribute_or_color_locations(gl_shader_program *prog,
 void
 demote_shader_inputs_and_outputs(gl_shader *sh, enum ir_variable_mode mode)
 {
-   foreach_list(node, sh->ir) {
-      ir_variable *const var = ((ir_instruction *) node)->as_variable();
+   foreach_in_list(ir_instruction, node, sh->ir) {
+      ir_variable *const var = node->as_variable();
 
-      if ((var == NULL) || (var->mode != int(mode)))
+      if ((var == NULL) || (var->data.mode != int(mode)))
         continue;
 
       /* A shader 'in' or 'out' variable is only really an input or output if
        * its value is used by other shader stages.  This will cause the variable
        * to have a location assigned.
        */
-      if (var->is_unmatched_generic_inout) {
-        var->mode = ir_var_auto;
+      if (var->data.is_unmatched_generic_inout) {
+         assert(var->data.mode != ir_var_temporary);
+        var->data.mode = ir_var_auto;
       }
    }
 }
@@ -1469,15 +2734,15 @@ store_fragdepth_layout(struct gl_shader_program *prog)
     * We're only interested in the cases where the variable is NOT removed
     * from the IR.
     */
-   foreach_list(node, ir) {
-      ir_variable *const var = ((ir_instruction *) node)->as_variable();
+   foreach_in_list(ir_instruction, node, ir) {
+      ir_variable *const var = node->as_variable();
 
-      if (var == NULL || var->mode != ir_var_shader_out) {
+      if (var == NULL || var->data.mode != ir_var_shader_out) {
          continue;
       }
 
       if (strcmp(var->name, "gl_FragDepth") == 0) {
-         switch (var->depth_layout) {
+         switch (var->data.depth_layout) {
          case ir_depth_layout_none:
             prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_NONE;
             return;
@@ -1504,419 +2769,1697 @@ store_fragdepth_layout(struct gl_shader_program *prog)
 /**
  * Validate the resources used by a program versus the implementation limits
  */
-static bool
+static void
 check_resources(struct gl_context *ctx, struct gl_shader_program *prog)
 {
-   static const char *const shader_names[MESA_SHADER_TYPES] = {
-      "vertex", "fragment", "geometry"
-   };
-
-   const unsigned max_samplers[MESA_SHADER_TYPES] = {
-      ctx->Const.VertexProgram.MaxTextureImageUnits,
-      ctx->Const.FragmentProgram.MaxTextureImageUnits,
-      ctx->Const.GeometryProgram.MaxTextureImageUnits
-   };
-
-   const unsigned max_uniform_components[MESA_SHADER_TYPES] = {
-      ctx->Const.VertexProgram.MaxUniformComponents,
-      ctx->Const.FragmentProgram.MaxUniformComponents,
-      ctx->Const.GeometryProgram.MaxUniformComponents
-   };
-
-   const unsigned max_uniform_blocks[MESA_SHADER_TYPES] = {
-      ctx->Const.VertexProgram.MaxUniformBlocks,
-      ctx->Const.FragmentProgram.MaxUniformBlocks,
-      ctx->Const.GeometryProgram.MaxUniformBlocks,
-   };
-
-   for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
       struct gl_shader *sh = prog->_LinkedShaders[i];
 
       if (sh == NULL)
         continue;
 
-      if (sh->num_samplers > max_samplers[i]) {
-        linker_error(prog, "Too many %s shader texture samplers",
-                     shader_names[i]);
+      if (sh->num_samplers > ctx->Const.Program[i].MaxTextureImageUnits) {
+        linker_error(prog, "Too many %s shader texture samplers\n",
+                     _mesa_shader_stage_to_string(i));
+      }
+
+      if (sh->num_uniform_components >
+          ctx->Const.Program[i].MaxUniformComponents) {
+         if (ctx->Const.GLSLSkipStrictMaxUniformLimitCheck) {
+            linker_warning(prog, "Too many %s shader default uniform block "
+                           "components, but the driver will try to optimize "
+                           "them out; this is non-portable out-of-spec "
+                          "behavior\n",
+                           _mesa_shader_stage_to_string(i));
+         } else {
+            linker_error(prog, "Too many %s shader default uniform block "
+                        "components\n",
+                         _mesa_shader_stage_to_string(i));
+         }
       }
 
-      if (sh->num_uniform_components > max_uniform_components[i]) {
+      if (sh->num_combined_uniform_components >
+         ctx->Const.Program[i].MaxCombinedUniformComponents) {
          if (ctx->Const.GLSLSkipStrictMaxUniformLimitCheck) {
             linker_warning(prog, "Too many %s shader uniform components, "
                            "but the driver will try to optimize them out; "
                            "this is non-portable out-of-spec behavior\n",
-                           shader_names[i]);
+                           _mesa_shader_stage_to_string(i));
          } else {
-            linker_error(prog, "Too many %s shader uniform components",
-                         shader_names[i]);
+            linker_error(prog, "Too many %s shader uniform components\n",
+                         _mesa_shader_stage_to_string(i));
          }
       }
    }
 
-   unsigned blocks[MESA_SHADER_TYPES] = {0};
+   unsigned blocks[MESA_SHADER_STAGES] = {0};
    unsigned total_uniform_blocks = 0;
+   unsigned shader_blocks[MESA_SHADER_STAGES] = {0};
+   unsigned total_shader_storage_blocks = 0;
+
+   for (unsigned i = 0; i < prog->NumBufferInterfaceBlocks; i++) {
+      /* Don't check SSBOs for Uniform Block Size */
+      if (!prog->BufferInterfaceBlocks[i].IsShaderStorage &&
+          prog->BufferInterfaceBlocks[i].UniformBufferSize > ctx->Const.MaxUniformBlockSize) {
+         linker_error(prog, "Uniform block %s too big (%d/%d)\n",
+                      prog->BufferInterfaceBlocks[i].Name,
+                      prog->BufferInterfaceBlocks[i].UniformBufferSize,
+                      ctx->Const.MaxUniformBlockSize);
+      }
+
+      if (prog->BufferInterfaceBlocks[i].IsShaderStorage &&
+          prog->BufferInterfaceBlocks[i].UniformBufferSize > ctx->Const.MaxShaderStorageBlockSize) {
+         linker_error(prog, "Shader storage block %s too big (%d/%d)\n",
+                      prog->BufferInterfaceBlocks[i].Name,
+                      prog->BufferInterfaceBlocks[i].UniformBufferSize,
+                      ctx->Const.MaxShaderStorageBlockSize);
+      }
 
-   for (unsigned i = 0; i < prog->NumUniformBlocks; i++) {
-      for (unsigned j = 0; j < MESA_SHADER_TYPES; j++) {
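+      /* Count the block against the UBO or SSBO limits of each stage that
+       * references it.
+       */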
+      for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
         if (prog->UniformBlockStageIndex[j][i] != -1) {
-           blocks[j]++;
-           total_uniform_blocks++;
+            struct gl_shader *sh = prog->_LinkedShaders[j];
+            int stage_index = prog->UniformBlockStageIndex[j][i];
+            if (sh && sh->BufferInterfaceBlocks[stage_index].IsShaderStorage) {
+               shader_blocks[j]++;
+               total_shader_storage_blocks++;
+            } else {
+               blocks[j]++;
+               total_uniform_blocks++;
+            }
         }
       }
 
       if (total_uniform_blocks > ctx->Const.MaxCombinedUniformBlocks) {
-        linker_error(prog, "Too many combined uniform blocks (%d/%d)",
-                     prog->NumUniformBlocks,
+        linker_error(prog, "Too many combined uniform blocks (%d/%d)\n",
+                     total_uniform_blocks,
                      ctx->Const.MaxCombinedUniformBlocks);
       } else {
-        for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
-           if (blocks[i] > max_uniform_blocks[i]) {
-              linker_error(prog, "Too many %s uniform blocks (%d/%d)",
-                           shader_names[i],
+        for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+            const unsigned max_uniform_blocks =
+               ctx->Const.Program[i].MaxUniformBlocks;
+           if (blocks[i] > max_uniform_blocks) {
+              linker_error(prog, "Too many %s uniform blocks (%d/%d)\n",
+                           _mesa_shader_stage_to_string(i),
                            blocks[i],
-                           max_uniform_blocks[i]);
+                           max_uniform_blocks);
               break;
            }
         }
       }
-   }
 
-   return prog->LinkStatus;
+      if (total_shader_storage_blocks > ctx->Const.MaxCombinedShaderStorageBlocks) {
+         linker_error(prog, "Too many combined shader storage blocks (%d/%d)\n",
+                      total_shader_storage_blocks,
+                      ctx->Const.MaxCombinedShaderStorageBlocks);
+      } else {
+         for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+            const unsigned max_shader_storage_blocks =
+               ctx->Const.Program[i].MaxShaderStorageBlocks;
+            if (shader_blocks[i] > max_shader_storage_blocks) {
+               linker_error(prog, "Too many %s shader storage blocks (%d/%d)\n",
+                            _mesa_shader_stage_to_string(i),
+                            shader_blocks[i],
+                            max_shader_storage_blocks);
+               break;
+            }
+         }
+      }
+   }
 }
 
-void
-link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
+static void
+link_calculate_subroutine_compat(struct gl_shader_program *prog)
 {
-   tfeedback_decl *tfeedback_decls = NULL;
-   unsigned num_tfeedback_decls = prog->TransformFeedback.NumVarying;
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      struct gl_shader *sh = prog->_LinkedShaders[i];
+      int count;
+      if (!sh)
+         continue;
 
-   void *mem_ctx = ralloc_context(NULL); // temporary linker context
+      for (unsigned j = 0; j < sh->NumSubroutineUniformRemapTable; j++) {
+         struct gl_uniform_storage *uni = sh->SubroutineUniformRemapTable[j];
 
-   prog->LinkStatus = false;
-   prog->Validated = false;
-   prog->_Used = false;
+         if (!uni)
+            continue;
+
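+         /* Count the subroutine functions whose compatible-type list
+          * includes this uniform's subroutine type.
+          */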
+         count = 0;
+         for (unsigned f = 0; f < sh->NumSubroutineFunctions; f++) {
+            struct gl_subroutine_function *fn = &sh->SubroutineFunctions[f];
+            for (int k = 0; k < fn->num_compat_types; k++) {
+               if (fn->types[k] == uni->type) {
+                  count++;
+                  break;
+               }
+            }
+         }
+         uni->num_compatible_subroutines = count;
+      }
+   }
+}
 
-   ralloc_free(prog->InfoLog);
-   prog->InfoLog = ralloc_strdup(NULL, "");
+static void
+check_subroutine_resources(struct gl_shader_program *prog)
+{
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      struct gl_shader *sh = prog->_LinkedShaders[i];
 
-   ralloc_free(prog->UniformBlocks);
-   prog->UniformBlocks = NULL;
-   prog->NumUniformBlocks = 0;
-   for (int i = 0; i < MESA_SHADER_TYPES; i++) {
-      ralloc_free(prog->UniformBlockStageIndex[i]);
-      prog->UniformBlockStageIndex[i] = NULL;
+      if (sh) {
+         if (sh->NumSubroutineUniformRemapTable > MAX_SUBROUTINE_UNIFORM_LOCATIONS)
+            linker_error(prog, "Too many %s shader subroutine uniforms\n",
+                         _mesa_shader_stage_to_string(i));
+      }
    }
+}
+
+/**
+ * Validate shader image resources.
+ */
+static void
+check_image_resources(struct gl_context *ctx, struct gl_shader_program *prog)
+{
+   unsigned total_image_units = 0;
+   unsigned fragment_outputs = 0;
+   unsigned total_shader_storage_blocks = 0;
 
-   /* Separate the shaders into groups based on their type.
-    */
-   struct gl_shader **vert_shader_list;
-   unsigned num_vert_shaders = 0;
-   struct gl_shader **frag_shader_list;
-   unsigned num_frag_shaders = 0;
+   if (!ctx->Extensions.ARB_shader_image_load_store)
+      return;
 
-   vert_shader_list = (struct gl_shader **)
-      calloc(2 * prog->NumShaders, sizeof(struct gl_shader *));
-   frag_shader_list =  &vert_shader_list[prog->NumShaders];
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      struct gl_shader *sh = prog->_LinkedShaders[i];
 
-   unsigned min_version = UINT_MAX;
-   unsigned max_version = 0;
-   const bool is_es_prog =
-      (prog->NumShaders > 0 && prog->Shaders[0]->IsES) ? true : false;
-   for (unsigned i = 0; i < prog->NumShaders; i++) {
-      min_version = MIN2(min_version, prog->Shaders[i]->Version);
-      max_version = MAX2(max_version, prog->Shaders[i]->Version);
+      if (sh) {
+         if (sh->NumImages > ctx->Const.Program[i].MaxImageUniforms)
+            linker_error(prog, "Too many %s shader image uniforms (%u > %u)\n",
+                         _mesa_shader_stage_to_string(i), sh->NumImages,
+                         ctx->Const.Program[i].MaxImageUniforms);
 
-      if (prog->Shaders[i]->IsES != is_es_prog) {
-        linker_error(prog, "all shaders must use same shading "
-                     "language version\n");
-        goto done;
-      }
+         total_image_units += sh->NumImages;
 
-      switch (prog->Shaders[i]->Type) {
-      case GL_VERTEX_SHADER:
-        vert_shader_list[num_vert_shaders] = prog->Shaders[i];
-        num_vert_shaders++;
-        break;
-      case GL_FRAGMENT_SHADER:
-        frag_shader_list[num_frag_shaders] = prog->Shaders[i];
-        num_frag_shaders++;
-        break;
-      case GL_GEOMETRY_SHADER:
-        /* FINISHME: Support geometry shaders. */
-        assert(prog->Shaders[i]->Type != GL_GEOMETRY_SHADER);
-        break;
+         for (unsigned j = 0; j < prog->NumBufferInterfaceBlocks; j++) {
+            int stage_index = prog->UniformBlockStageIndex[i][j];
+            if (stage_index != -1 && sh->BufferInterfaceBlocks[stage_index].IsShaderStorage)
+               total_shader_storage_blocks++;
+         }
+
+         if (i == MESA_SHADER_FRAGMENT) {
+            foreach_in_list(ir_instruction, node, sh->ir) {
+               ir_variable *var = node->as_variable();
+               if (var && var->data.mode == ir_var_shader_out)
+                  fragment_outputs += var->type->count_attribute_slots();
+            }
+         }
       }
    }
 
-   /* Previous to GLSL version 1.30, different compilation units could mix and
-    * match shading language versions.  With GLSL 1.30 and later, the versions
-    * of all shaders must match.
-    *
-    * GLSL ES has never allowed mixing of shading language versions.
-    */
-   if ((is_es_prog || max_version >= 130)
-       && min_version != max_version) {
-      linker_error(prog, "all shaders must use same shading "
-                  "language version\n");
-      goto done;
-   }
+   if (total_image_units > ctx->Const.MaxCombinedImageUniforms)
+      linker_error(prog, "Too many combined image uniforms\n");
 
-   prog->Version = max_version;
-   prog->IsES = is_es_prog;
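+   /* Image uniforms, shader storage blocks and fragment outputs share a
+    * single combined limit (MAX_COMBINED_SHADER_OUTPUT_RESOURCES).
+    */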
+   if (total_image_units + fragment_outputs + total_shader_storage_blocks >
+       ctx->Const.MaxCombinedShaderOutputResources)
+      linker_error(prog, "Too many combined image uniforms, shader storage "
+                         "buffers and fragment outputs\n");
+}
 
-   for (unsigned int i = 0; i < MESA_SHADER_TYPES; i++) {
-      if (prog->_LinkedShaders[i] != NULL)
-        ctx->Driver.DeleteShader(ctx, prog->_LinkedShaders[i]);
 
-      prog->_LinkedShaders[i] = NULL;
-   }
+/**
+ * Initializes explicit location slots to INACTIVE_UNIFORM_EXPLICIT_LOCATION
+ * for a variable and checks for overlaps with other uniforms that use
+ * explicit locations.
+ */
+static bool
+reserve_explicit_locations(struct gl_shader_program *prog,
+                           string_to_uint_map *map, ir_variable *var)
+{
+   unsigned slots = var->type->uniform_locations();
+   unsigned max_loc = var->data.location + slots - 1;
+
+   /* Resize remap table if locations do not fit in the current one. */
+   if (max_loc + 1 > prog->NumUniformRemapTable) {
+      prog->UniformRemapTable =
+         reralloc(prog, prog->UniformRemapTable,
+                  gl_uniform_storage *,
+                  max_loc + 1);
+
+      if (!prog->UniformRemapTable) {
+         linker_error(prog, "Out of memory during linking.\n");
+         return false;
+      }
 
-   /* Link all shaders for a particular stage and validate the result.
-    */
-   if (num_vert_shaders > 0) {
-      gl_shader *const sh =
-        link_intrastage_shaders(mem_ctx, ctx, prog, vert_shader_list,
-                                num_vert_shaders);
+      /* Initialize allocated space. */
+      for (unsigned i = prog->NumUniformRemapTable; i < max_loc + 1; i++)
+         prog->UniformRemapTable[i] = NULL;
 
-      if (sh == NULL)
-        goto done;
+      prog->NumUniformRemapTable = max_loc + 1;
+   }
 
-      if (!validate_vertex_shader_executable(prog, sh))
-        goto done;
+   for (unsigned i = 0; i < slots; i++) {
+      unsigned loc = var->data.location + i;
+
+      /* Check if location is already used. */
+      if (prog->UniformRemapTable[loc] == INACTIVE_UNIFORM_EXPLICIT_LOCATION) {
+
+         /* Possibly the same uniform from a different stage; this is OK. */
+         unsigned hash_loc;
+         if (map->get(hash_loc, var->name) && hash_loc == loc - i)
+            continue;
+
+         /* ARB_explicit_uniform_location specification states:
+          *
+          *     "No two default-block uniform variables in the program can have
+          *     the same location, even if they are unused, otherwise a compiler
+          *     or linker error will be generated."
+          */
+         linker_error(prog,
+                      "location qualifier for uniform %s overlaps "
+                      "previously used location\n",
+                      var->name);
+         return false;
+      }
 
-      _mesa_reference_shader(ctx, &prog->_LinkedShaders[MESA_SHADER_VERTEX],
-                            sh);
+      /* Initialize location as inactive before optimization
+       * rounds and location assignment.
+       */
+      prog->UniformRemapTable[loc] = INACTIVE_UNIFORM_EXPLICIT_LOCATION;
    }
 
-   if (num_frag_shaders > 0) {
-      gl_shader *const sh =
-        link_intrastage_shaders(mem_ctx, ctx, prog, frag_shader_list,
-                                num_frag_shaders);
+   /* Note that for arrays only the base location is stored. */
+   map->put(var->data.location, var->name);
 
-      if (sh == NULL)
-        goto done;
+   return true;
+}
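To make the reservation rule above concrete, here is a minimal standalone sketch (not part of this patch; the uniform names, locations and array sizes are made up, and the cross-stage "same uniform" exception is left out for brevity) of how consecutive explicit-location slots are claimed and how an overlap is detected:

    #include <cassert>
    #include <cstdio>
    #include <vector>

    /* Sentinel playing the role of INACTIVE_UNIFORM_EXPLICIT_LOCATION. */
    static const int INACTIVE = -1;

    /* Reserve 'slots' consecutive locations starting at 'base'.  Returns
     * false on overlap, i.e. the situation that triggers linker_error()
     * in reserve_explicit_locations() above.
     */
    static bool reserve(std::vector<int> &table, unsigned base, unsigned slots)
    {
       if (table.size() < base + slots)
          table.resize(base + slots, 0);      /* 0 means "free" in this sketch */

       for (unsigned i = 0; i < slots; i++) {
          if (table[base + i] == INACTIVE)
             return false;                    /* already reserved */
          table[base + i] = INACTIVE;         /* mark as reserved */
       }
       return true;
    }

    int main()
    {
       std::vector<int> table;
       /* layout(location = 2) uniform float u[3];  -> occupies 2, 3, 4 */
       assert(reserve(table, 2, 3));
       /* layout(location = 4) uniform float v;     -> overlaps slot 4  */
       assert(!reserve(table, 4, 1));
       printf("overlap detected as expected\n");
       return 0;
    }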
 
-      if (!validate_fragment_shader_executable(prog, sh))
-        goto done;
+static bool
+reserve_subroutine_explicit_locations(struct gl_shader_program *prog,
+                                      struct gl_shader *sh,
+                                      ir_variable *var)
+{
+   unsigned slots = var->type->uniform_locations();
+   unsigned max_loc = var->data.location + slots - 1;
+
+   /* Resize remap table if locations do not fit in the current one. */
+   if (max_loc + 1 > sh->NumSubroutineUniformRemapTable) {
+      sh->SubroutineUniformRemapTable =
+         reralloc(sh, sh->SubroutineUniformRemapTable,
+                  gl_uniform_storage *,
+                  max_loc + 1);
+
+      if (!sh->SubroutineUniformRemapTable) {
+         linker_error(prog, "Out of memory during linking.\n");
+         return false;
+      }
 
-      _mesa_reference_shader(ctx, &prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
-                            sh);
-   }
+      /* Initialize allocated space. */
+      for (unsigned i = sh->NumSubroutineUniformRemapTable; i < max_loc + 1; i++)
+         sh->SubroutineUniformRemapTable[i] = NULL;
 
-   /* Here begins the inter-stage linking phase.  Some initial validation is
-    * performed, then locations are assigned for uniforms, attributes, and
-    * varyings.
-    */
-   if (cross_validate_uniforms(prog)) {
-      unsigned prev;
+      sh->NumSubroutineUniformRemapTable = max_loc + 1;
+   }
 
-      for (prev = 0; prev < MESA_SHADER_TYPES; prev++) {
-        if (prog->_LinkedShaders[prev] != NULL)
-           break;
+   for (unsigned i = 0; i < slots; i++) {
+      unsigned loc = var->data.location + i;
+
+      /* Check if location is already used. */
+      if (sh->SubroutineUniformRemapTable[loc] == INACTIVE_UNIFORM_EXPLICIT_LOCATION) {
+
+         /* ARB_explicit_uniform_location specification states:
+          *     "No two subroutine uniform variables can have the same location
+          *     in the same shader stage, otherwise a compiler or linker error
+          *     will be generated."
+          */
+         linker_error(prog,
+                      "location qualifier for uniform %s overlaps "
+                      "previously used location\n",
+                      var->name);
+         return false;
       }
 
-      /* Validate the inputs of each stage with the output of the preceding
-       * stage.
+      /* Initialize location as inactive before optimization
+       * rounds and location assignment.
        */
-      for (unsigned i = prev + 1; i < MESA_SHADER_TYPES; i++) {
-        if (prog->_LinkedShaders[i] == NULL)
-           continue;
+      sh->SubroutineUniformRemapTable[loc] = INACTIVE_UNIFORM_EXPLICIT_LOCATION;
+   }
 
-        if (!cross_validate_outputs_to_inputs(prog,
-                                              prog->_LinkedShaders[prev],
-                                              prog->_LinkedShaders[i]))
-           goto done;
+   return true;
+}
+
+/**
+ * Check and reserve all explicit uniform locations.  This is called before
+ * any optimizations happen so that inactive uniforms and inactive array
+ * elements that may later get trimmed away are also handled.
+ */
+static void
+check_explicit_uniform_locations(struct gl_context *ctx,
+                                 struct gl_shader_program *prog)
+{
+   if (!ctx->Extensions.ARB_explicit_uniform_location)
+      return;
 
-        prev = i;
-      }
+   /* This map is used to detect whether overlapping explicit locations
+    * come from the same uniform (used in different stages) or from
+    * different uniforms.
+    */
+   string_to_uint_map *uniform_map = new string_to_uint_map;
 
-      prog->LinkStatus = true;
+   if (!uniform_map) {
+      linker_error(prog, "Out of memory during linking.\n");
+      return;
    }
 
-   /* Implement the GLSL 1.30+ rule for discard vs infinite loops Do
-    * it before optimization because we want most of the checks to get
-    * dropped thanks to constant propagation.
-    *
-    * This rule also applies to GLSL ES 3.00.
-    */
-   if (max_version >= (is_es_prog ? 300 : 130)) {
-      struct gl_shader *sh = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
-      if (sh) {
-        lower_discard_flow(sh->ir);
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      struct gl_shader *sh = prog->_LinkedShaders[i];
+
+      if (!sh)
+         continue;
+
+      foreach_in_list(ir_instruction, node, sh->ir) {
+         ir_variable *var = node->as_variable();
+         if (var && (var->data.mode == ir_var_uniform &&
+                     var->data.explicit_location)) {
+            bool ret;
+            if (var->type->is_subroutine())
+               ret = reserve_subroutine_explicit_locations(prog, sh, var);
+            else
+               ret = reserve_explicit_locations(prog, uniform_map, var);
+            if (!ret) {
+               delete uniform_map;
+               return;
+            }
+         }
       }
    }
 
-   if (!interstage_cross_validate_uniform_blocks(prog))
-      goto done;
+   delete uniform_map;
+}
 
-   /* Do common optimization before assigning storage for attributes,
-    * uniforms, and varyings.  Later optimization could possibly make
-    * some of that unused.
+static bool
+should_add_buffer_variable(struct gl_shader_program *shProg,
+                           GLenum type, const char *name)
+{
+   bool found_interface = false;
+   unsigned block_name_len = 0;
+   const char *block_name_dot = strchr(name, '.');
+
+   /* These rules only apply to buffer variables, so we return
+    * true for all other types.
     */
-   for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
-      if (prog->_LinkedShaders[i] == NULL)
-        continue;
+   if (type != GL_BUFFER_VARIABLE)
+      return true;
 
-      detect_recursion_linked(prog, prog->_LinkedShaders[i]->ir);
-      if (!prog->LinkStatus)
-        goto done;
+   for (unsigned i = 0; i < shProg->NumBufferInterfaceBlocks; i++) {
+      const char *block_name = shProg->BufferInterfaceBlocks[i].Name;
+      block_name_len = strlen(block_name);
 
-      if (ctx->ShaderCompilerOptions[i].LowerClipDistance) {
-         lower_clip_distance(prog->_LinkedShaders[i]);
+      const char *block_square_bracket = strchr(block_name, '[');
+      if (block_square_bracket) {
+         /* The block is part of an array of named interfaces, so
+          * for the name comparison we ignore the "[x]" part.
+          */
+         block_name_len -= strlen(block_square_bracket);
       }
 
-      unsigned max_unroll = ctx->ShaderCompilerOptions[i].MaxUnrollIterations;
+      if (block_name_dot) {
+         /* Check if the variable name starts with the interface
+          * name. The interface name (if present) should have the
+          * same length as the interface block name we are comparing to.
+          */
+         unsigned len = strlen(name) - strlen(block_name_dot);
+         if (len != block_name_len)
+            continue;
+      }
 
-      while (do_common_optimization(prog->_LinkedShaders[i]->ir, true, false, max_unroll))
-        ;
+      if (strncmp(block_name, name, block_name_len) == 0) {
+         found_interface = true;
+         break;
+      }
    }
 
-   /* Mark all generic shader inputs and outputs as unpaired. */
-   if (prog->_LinkedShaders[MESA_SHADER_VERTEX] != NULL) {
-      link_invalidate_variable_locations(
-            prog->_LinkedShaders[MESA_SHADER_VERTEX],
-            VERT_ATTRIB_GENERIC0, VARYING_SLOT_VAR0);
-   }
-   /* FINISHME: Geometry shaders not implemented yet */
-   if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] != NULL) {
-      link_invalidate_variable_locations(
-            prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
-            VARYING_SLOT_VAR0, FRAG_RESULT_DATA0);
-   }
+   /* We remove the interface name from the buffer variable name,
+    * including the dot that follows it.
+    */
+   if (found_interface)
+      name = name + block_name_len + 1;
 
-   /* FINISHME: The value of the max_attribute_index parameter is
-    * FINISHME: implementation dependent based on the value of
-    * FINISHME: GL_MAX_VERTEX_ATTRIBS.  GL_MAX_VERTEX_ATTRIBS must be
-    * FINISHME: at least 16, so hardcode 16 for now.
+   /* From: ARB_program_interface_query extension:
+    *
+    *  "For an active shader storage block member declared as an array, an
+    *   entry will be generated only for the first array element, regardless
+    *   of its type.  For arrays of aggregate types, the enumeration rules are
+    *   applied recursively for the single enumerated array element."
     */
-   if (!assign_attribute_or_color_locations(prog, MESA_SHADER_VERTEX, 16)) {
-      goto done;
-   }
+   const char *struct_first_dot = strchr(name, '.');
+   const char *first_square_bracket = strchr(name, '[');
 
-   if (!assign_attribute_or_color_locations(prog, MESA_SHADER_FRAGMENT, MAX2(ctx->Const.MaxDrawBuffers, ctx->Const.MaxDualSourceDrawBuffers))) {
-      goto done;
+   /* The buffer variable is at the top level and it is not an array. */
+   if (!first_square_bracket) {
+      return true;
+   /* The shader storage block member is a struct, so generate the entry. */
+   } else if (struct_first_dot && struct_first_dot < first_square_bracket) {
+      return true;
+   } else {
+      /* Shader storage block member is an array, only generate an entry for the
+       * first array element.
+       */
+      if (strncmp(first_square_bracket, "[0]", 3) == 0)
+         return true;
    }
 
-   unsigned prev;
-   for (prev = 0; prev < MESA_SHADER_TYPES; prev++) {
-      if (prog->_LinkedShaders[prev] != NULL)
-        break;
+   return false;
+}
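For a quick feel of the rule above, a simplified standalone sketch (hypothetical variable names; the interface-name stripping is left out for brevity): struct members always produce an entry, while array members only do so for element [0].

    #include <cstdio>
    #include <cstring>

    /* Simplified version of the array-element rule in
     * should_add_buffer_variable().
     */
    static bool adds_entry(const char *name)
    {
       const char *dot = strchr(name, '.');
       const char *bracket = strchr(name, '[');

       if (!bracket)
          return true;                          /* not an array       */
       if (dot && dot < bracket)
          return true;                          /* struct member      */
       return strncmp(bracket, "[0]", 3) == 0;  /* first element only */
    }

    int main()
    {
       printf("%d\n", adds_entry("data[0]"));   /* 1 */
       printf("%d\n", adds_entry("data[2]"));   /* 0 */
       printf("%d\n", adds_entry("s.member"));  /* 1 */
       return 0;
    }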
+
+static bool
+add_program_resource(struct gl_shader_program *prog, GLenum type,
+                     const void *data, uint8_t stages)
+{
+   assert(data);
+
+   /* If resource already exists, do not add it again. */
+   for (unsigned i = 0; i < prog->NumProgramResourceList; i++)
+      if (prog->ProgramResourceList[i].Data == data)
+         return true;
+
+   prog->ProgramResourceList =
+      reralloc(prog,
+               prog->ProgramResourceList,
+               gl_program_resource,
+               prog->NumProgramResourceList + 1);
+
+   if (!prog->ProgramResourceList) {
+      linker_error(prog, "Out of memory during linking.\n");
+      return false;
    }
 
-   if (num_tfeedback_decls != 0) {
-      /* From GL_EXT_transform_feedback:
-       *   A program will fail to link if:
-       *
-       *   * the <count> specified by TransformFeedbackVaryingsEXT is
-       *     non-zero, but the program object has no vertex or geometry
-       *     shader;
-       */
-      if (prev >= MESA_SHADER_FRAGMENT) {
-         linker_error(prog, "Transform feedback varyings specified, but "
-                      "no vertex or geometry shader is present.");
-         goto done;
-      }
+   struct gl_program_resource *res =
+      &prog->ProgramResourceList[prog->NumProgramResourceList];
 
-      tfeedback_decls = ralloc_array(mem_ctx, tfeedback_decl,
-                                     prog->TransformFeedback.NumVarying);
-      if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
-                                 prog->TransformFeedback.VaryingNames,
-                                 tfeedback_decls))
-         goto done;
+   res->Type = type;
+   res->Data = data;
+   res->StageReferences = stages;
+
+   prog->NumProgramResourceList++;
+
+   return true;
+}
+
+/* Check whether the variable var is a packed varying and whether the
+ * given name is part of the packed varying's list.
+ *
+ * If a variable is a packed varying, it has a name like
+ * 'packed:a,b,c' where a, b and c are separate variables.
+ */
+static bool
+included_in_packed_varying(ir_variable *var, const char *name)
+{
+   if (strncmp(var->name, "packed:", 7) != 0)
+      return false;
+
+   char *list = strdup(var->name + 7);
+   assert(list);
+
+   bool found = false;
+   char *saveptr;
+   char *token = strtok_r(list, ",", &saveptr);
+   while (token) {
+      if (strcmp(token, name) == 0) {
+         found = true;
+         break;
+      }
+      token = strtok_r(NULL, ",", &saveptr);
    }
+   free(list);
+   return found;
+}
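A minimal standalone sketch of the "packed:" naming convention described above (the variable names are made up; this is an illustration, not part of the patch; strdup() and strtok_r() are the POSIX functions):

    #include <cstdio>
    #include <cstring>
    #include <cstdlib>

    /* Same membership test as included_in_packed_varying(), but on plain
     * strings so it can run on its own.
     */
    static bool in_packed_list(const char *packed_name, const char *name)
    {
       if (strncmp(packed_name, "packed:", 7) != 0)
          return false;

       char *list = strdup(packed_name + 7);
       bool found = false;
       char *saveptr;
       for (char *tok = strtok_r(list, ",", &saveptr); tok;
            tok = strtok_r(NULL, ",", &saveptr)) {
          if (strcmp(tok, name) == 0) {
             found = true;
             break;
          }
       }
       free(list);
       return found;
    }

    int main()
    {
       printf("%d\n", in_packed_list("packed:color,normal", "normal"));   /* 1 */
       printf("%d\n", in_packed_list("packed:color,normal", "position")); /* 0 */
       return 0;
    }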
+
+/**
+ * Build a stage reference bitmask from a variable name.
+ */
+static uint8_t
+build_stageref(struct gl_shader_program *shProg, const char *name,
+               unsigned mode)
+{
+   uint8_t stages = 0;
+
+   /* Note that we assume at most 8 stages; if more stages are added, the
+    * type used for the reference mask in gl_program_resource will need to
+    * be changed.
+    */
+   assert(MESA_SHADER_STAGES < 8);
 
-   for (unsigned i = prev + 1; i < MESA_SHADER_TYPES; i++) {
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      struct gl_shader *sh = shProg->_LinkedShaders[i];
+      if (!sh)
+         continue;
+
+      /* Shader symbol table may contain variables that have
+       * been optimized away. Search IR for the variable instead.
+       */
+      foreach_in_list(ir_instruction, node, sh->ir) {
+         ir_variable *var = node->as_variable();
+         if (var) {
+            unsigned baselen = strlen(var->name);
+
+            if (included_in_packed_varying(var, name)) {
+                  stages |= (1 << i);
+                  break;
+            }
+
+            /* The mode needs to match if specified; otherwise we might
+             * pick a variable with the same name but a different interface.
+             */
+            if (var->data.mode != mode)
+               continue;
+
+            if (strncmp(var->name, name, baselen) == 0) {
+               /* Check for exact name matches but also check for arrays and
+                * structs.
+                */
+               if (name[baselen] == '\0' ||
+                   name[baselen] == '[' ||
+                   name[baselen] == '.') {
+                  stages |= (1 << i);
+                  break;
+               }
+            }
+         }
+      }
+   }
+   return stages;
+}
+
+static bool
+add_interface_variables(struct gl_shader_program *shProg,
+                        exec_list *ir, GLenum programInterface)
+{
+   foreach_in_list(ir_instruction, node, ir) {
+      ir_variable *var = node->as_variable();
+      uint8_t mask = 0;
+
+      if (!var)
+         continue;
+
+      switch (var->data.mode) {
+      /* From GL 4.3 core spec, section 11.1.1 (Vertex Attributes):
+       * "For GetActiveAttrib, all active vertex shader input variables
+       * are enumerated, including the special built-in inputs gl_VertexID
+       * and gl_InstanceID."
+       */
+      case ir_var_system_value:
+         if (var->data.location != SYSTEM_VALUE_VERTEX_ID &&
+             var->data.location != SYSTEM_VALUE_VERTEX_ID_ZERO_BASE &&
+             var->data.location != SYSTEM_VALUE_INSTANCE_ID)
+            continue;
+         /* Mark special built-in inputs referenced by the vertex stage so
+          * that they are considered active by the shader queries.
+          */
+         mask = (1 << (MESA_SHADER_VERTEX));
+         /* FALLTHROUGH */
+      case ir_var_shader_in:
+         if (programInterface != GL_PROGRAM_INPUT)
+            continue;
+         break;
+      case ir_var_shader_out:
+         if (programInterface != GL_PROGRAM_OUTPUT)
+            continue;
+         break;
+      default:
+         continue;
+      }
+
+      /* Skip packed varyings, packed varyings are handled separately
+       * by add_packed_varyings.
+       */
+      if (strncmp(var->name, "packed:", 7) == 0)
+         continue;
+
+      /* Skip fragdata arrays, these are handled separately
+       * by add_fragdata_arrays.
+       */
+      if (strncmp(var->name, "gl_out_FragData", 15) == 0)
+         continue;
+
+      if (!add_program_resource(shProg, programInterface, var,
+                                build_stageref(shProg, var->name,
+                                               var->data.mode) | mask))
+         return false;
+   }
+   return true;
+}
+
+static bool
+add_packed_varyings(struct gl_shader_program *shProg, int stage)
+{
+   struct gl_shader *sh = shProg->_LinkedShaders[stage];
+   GLenum iface;
+
+   if (!sh || !sh->packed_varyings)
+      return true;
+
+   foreach_in_list(ir_instruction, node, sh->packed_varyings) {
+      ir_variable *var = node->as_variable();
+      if (var) {
+         switch (var->data.mode) {
+         case ir_var_shader_in:
+            iface = GL_PROGRAM_INPUT;
+            break;
+         case ir_var_shader_out:
+            iface = GL_PROGRAM_OUTPUT;
+            break;
+         default:
+            unreachable("unexpected type");
+         }
+         if (!add_program_resource(shProg, iface, var,
+                                   build_stageref(shProg, var->name,
+                                                  var->data.mode)))
+            return false;
+      }
+   }
+   return true;
+}
+
+static bool
+add_fragdata_arrays(struct gl_shader_program *shProg)
+{
+   struct gl_shader *sh = shProg->_LinkedShaders[MESA_SHADER_FRAGMENT];
+
+   if (!sh || !sh->fragdata_arrays)
+      return true;
+
+   foreach_in_list(ir_instruction, node, sh->fragdata_arrays) {
+      ir_variable *var = node->as_variable();
+      if (var) {
+         assert(var->data.mode == ir_var_shader_out);
+         if (!add_program_resource(shProg, GL_PROGRAM_OUTPUT, var,
+                                   1 << MESA_SHADER_FRAGMENT))
+            return false;
+      }
+   }
+   return true;
+}
+
+static char*
+get_top_level_name(const char *name)
+{
+   const char *first_dot = strchr(name, '.');
+   const char *first_square_bracket = strchr(name, '[');
+   int name_size = 0;
+   /* From ARB_program_interface_query spec:
+    *
+    * "For the property TOP_LEVEL_ARRAY_SIZE, a single integer identifying the
+    *  number of active array elements of the top-level shader storage block
+    *  member containing the active variable is written to <params>.  If the
+    *  top-level block member is not declared as an array, the value one is
+    *  written to <params>.  If the top-level block member is an array with no
+    *  declared size, the value zero is written to <params>."
+    */
+
+   /* The buffer variable is at the top level. */
+   if (!first_square_bracket && !first_dot)
+      name_size = strlen(name);
+   else if ((!first_square_bracket ||
+            (first_dot && first_dot < first_square_bracket)))
+      name_size = first_dot - name;
+   else
+      name_size = first_square_bracket - name;
+
+   return strndup(name, name_size);
+}
+
+static char*
+get_var_name(const char *name)
+{
+   const char *first_dot = strchr(name, '.');
+
+   if (!first_dot)
+      return strdup(name);
+
+   return strndup(first_dot+1, strlen(first_dot) - 1);
+}
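For illustration only (the member names are hypothetical), here is a standalone sketch of the name splitting performed by get_top_level_name() and get_var_name() above: for a buffer variable such as "Blk.data[0].x" the top-level name is "Blk" and the remaining member name is "data[0].x".

    #include <cstdio>
    #include <cstring>

    /* Simplified stand-in for get_top_level_name(); it prints into a caller
     * supplied buffer instead of returning a strndup()'d string.
     */
    static const char *top_level_name(const char *name, char *buf, size_t len)
    {
       const char *dot = strchr(name, '.');
       const char *bracket = strchr(name, '[');
       size_t n;

       if (!dot && !bracket)
          n = strlen(name);
       else if (!bracket || (dot && dot < bracket))
          n = (size_t) (dot - name);
       else
          n = (size_t) (bracket - name);

       snprintf(buf, len, "%.*s", (int) n, name);
       return buf;
    }

    int main()
    {
       char buf[64];
       printf("%s\n", top_level_name("Blk.data[0].x", buf, sizeof(buf))); /* Blk  */
       printf("%s\n", top_level_name("data[0]", buf, sizeof(buf)));       /* data */
       return 0;
    }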
+
+static bool
+is_top_level_shader_storage_block_member(const char* name,
+                                         const char* interface_name,
+                                         const char* field_name)
+{
+   bool result = false;
+
+   /* If the given variable is already a top-level shader storage
+    * block member, then array_size will be reported as 1.
+    * There are two possibilities: the shader storage block is either
+    * instanced or not instanced.
+    *
+    * For the instanced case, we build the name the variable would have
+    * at the top level and compare it with the real name. If they are
+    * the same, the variable is already at the top level.
+    *
+    * The full instanced name is: interface name + '.' + var name +
+    *    NULL character
+    */
+   int name_length = strlen(interface_name) + 1 + strlen(field_name) + 1;
+   char *full_instanced_name = (char *) calloc(name_length, sizeof(char));
+   if (!full_instanced_name) {
+      fprintf(stderr, "%s: Cannot allocate space for name\n", __func__);
+      return false;
+   }
+
+   snprintf(full_instanced_name, name_length, "%s.%s",
+            interface_name, field_name);
+
+   /* Check if it is a top-level shader storage block member of an
+    * instanced interface block, or of an unnamed interface block.
+    */
+   if (strcmp(name, full_instanced_name) == 0 ||
+       strcmp(name, field_name) == 0)
+      result = true;
+
+   free(full_instanced_name);
+   return result;
+}
+
+static int
+get_array_size(struct gl_uniform_storage *uni, const glsl_struct_field *field,
+               char *interface_name, char *var_name)
+{
+   /* From GL_ARB_program_interface_query spec:
+    *
+    * "For the property TOP_LEVEL_ARRAY_SIZE, a single integer
+    * identifying the number of active array elements of the top-level
+    * shader storage block member containing the active variable is
+    * written to <params>.  If the top-level block member is not
+    * declared as an array, the value one is written to <params>.  If
+    * the top-level block member is an array with no declared size,
+    * the value zero is written to <params>.
+    */
+   if (is_top_level_shader_storage_block_member(uni->name,
+                                                interface_name,
+                                                var_name))
+      return 1;
+   else if (field->type->is_unsized_array())
+      return 0;
+   else if (field->type->is_array())
+      return field->type->length;
+
+   return 1;
+}
+
+static int
+get_array_stride(struct gl_uniform_storage *uni, const glsl_type *interface,
+                 const glsl_struct_field *field, char *interface_name,
+                 char *var_name)
+{
+   /* From GL_ARB_program_interface_query:
+    *
+    * "For the property TOP_LEVEL_ARRAY_STRIDE, a single integer
+    *  identifying the stride between array elements of the top-level
+    *  shader storage block member containing the active variable is
+    *  written to <params>.  For top-level block members declared as
+    *  arrays, the value written is the difference, in basic machine
+    *  units, between the offsets of the active variable for
+    *  consecutive elements in the top-level array.  For top-level
+    *  block members not declared as an array, zero is written to
+    *  <params>."
+    */
+   if (field->type->is_array()) {
+      const enum glsl_matrix_layout matrix_layout =
+         glsl_matrix_layout(field->matrix_layout);
+      bool row_major = matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
+      const glsl_type *array_type = field->type->fields.array;
+
+      if (is_top_level_shader_storage_block_member(uni->name,
+                                                   interface_name,
+                                                   var_name))
+         return 0;
+
+      if (interface->interface_packing != GLSL_INTERFACE_PACKING_STD430) {
+         if (array_type->is_record() || array_type->is_array())
+            return glsl_align(array_type->std140_size(row_major), 16);
+         else
+            return MAX2(array_type->std140_base_alignment(row_major), 16);
+      } else {
+         return array_type->std430_array_stride(row_major);
+      }
+   }
+   return 0;
+}
+
+static void
+calculate_array_size_and_stride(struct gl_shader_program *shProg,
+                                struct gl_uniform_storage *uni)
+{
+   int block_index = uni->block_index;
+   int array_size = -1;
+   int array_stride = -1;
+   char *var_name = get_top_level_name(uni->name);
+   char *interface_name =
+      get_top_level_name(shProg->BufferInterfaceBlocks[block_index].Name);
+
+   if (strcmp(var_name, interface_name) == 0) {
+      /* Deal with instanced array of SSBOs */
+      char *temp_name = get_var_name(uni->name);
+      if (!temp_name) {
+         linker_error(shProg, "Out of memory during linking.\n");
+         goto write_top_level_array_size_and_stride;
+      }
+      free(var_name);
+      var_name = get_top_level_name(temp_name);
+      free(temp_name);
+      if (!var_name) {
+         linker_error(shProg, "Out of memory during linking.\n");
+         goto write_top_level_array_size_and_stride;
+      }
+   }
+
+   for (unsigned i = 0; i < shProg->NumShaders; i++) {
+      if (shProg->Shaders[i] == NULL)
+         continue;
+
+      const gl_shader *stage = shProg->Shaders[i];
+      foreach_in_list(ir_instruction, node, stage->ir) {
+         ir_variable *var = node->as_variable();
+         if (!var || !var->get_interface_type() ||
+             var->data.mode != ir_var_shader_storage)
+            continue;
+
+         const glsl_type *interface = var->get_interface_type();
+
+         if (strcmp(interface_name, interface->name) != 0)
+            continue;
+
+         for (unsigned i = 0; i < interface->length; i++) {
+            const glsl_struct_field *field = &interface->fields.structure[i];
+            if (strcmp(field->name, var_name) != 0)
+               continue;
+
+            array_stride = get_array_stride(uni, interface, field,
+                                            interface_name, var_name);
+            array_size = get_array_size(uni, field, interface_name, var_name);
+            goto write_top_level_array_size_and_stride;
+         }
+      }
+   }
+write_top_level_array_size_and_stride:
+   free(interface_name);
+   free(var_name);
+   uni->top_level_array_stride = array_stride;
+   uni->top_level_array_size = array_size;
+}
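As a concrete, hypothetical illustration of the values computed above: for an SSBO declared as "layout(std430) buffer Blk { vec4 data[4]; };", the buffer variable Blk.data would get top_level_array_size = 4 and, under std430 rules, top_level_array_stride = 16, because a vec4 array element occupies 16 bytes with no extra padding. A C struct with the equivalent layout makes those numbers visible (std430 happens to match the natural C layout in this particular case):

    #include <cstdio>

    /* C mirror of "layout(std430) buffer Blk { vec4 data[4]; };". */
    struct vec4 { float x, y, z, w; };
    struct Blk  { vec4 data[4]; };

    int main()
    {
       printf("top_level_array_size   = %zu\n",
              sizeof(Blk::data) / sizeof(vec4));       /* 4  */
       printf("top_level_array_stride = %zu\n",
              sizeof(vec4));                           /* 16 */
       return 0;
    }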
+
+/**
+ * Builds up a list of program resources that point to existing
+ * resource data.
+ */
+void
+build_program_resource_list(struct gl_shader_program *shProg)
+{
+   /* Rebuild resource list. */
+   if (shProg->ProgramResourceList) {
+      ralloc_free(shProg->ProgramResourceList);
+      shProg->ProgramResourceList = NULL;
+      shProg->NumProgramResourceList = 0;
+   }
+
+   int input_stage = MESA_SHADER_STAGES, output_stage = 0;
+
+   /* Determine first input and final output stage. These are used to
+    * detect which variables should be enumerated in the resource list
+    * for GL_PROGRAM_INPUT and GL_PROGRAM_OUTPUT.
+    */
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      if (!shProg->_LinkedShaders[i])
+         continue;
+      if (input_stage == MESA_SHADER_STAGES)
+         input_stage = i;
+      output_stage = i;
+   }
+
+   /* Empty shader, no resources. */
+   if (input_stage == MESA_SHADER_STAGES && output_stage == 0)
+      return;
+
+   /* Program interface needs to expose varyings in case of SSO. */
+   if (shProg->SeparateShader) {
+      if (!add_packed_varyings(shProg, input_stage))
+         return;
+      if (!add_packed_varyings(shProg, output_stage))
+         return;
+   }
+
+   if (!add_fragdata_arrays(shProg))
+      return;
+
+   /* Add inputs and outputs to the resource list. */
+   if (!add_interface_variables(shProg, shProg->_LinkedShaders[input_stage]->ir,
+                                GL_PROGRAM_INPUT))
+      return;
+
+   if (!add_interface_variables(shProg, shProg->_LinkedShaders[output_stage]->ir,
+                                GL_PROGRAM_OUTPUT))
+      return;
+
+   /* Add transform feedback varyings. */
+   if (shProg->LinkedTransformFeedback.NumVarying > 0) {
+      for (int i = 0; i < shProg->LinkedTransformFeedback.NumVarying; i++) {
+         if (!add_program_resource(shProg, GL_TRANSFORM_FEEDBACK_VARYING,
+                                   &shProg->LinkedTransformFeedback.Varyings[i],
+                                   0))
+            return;
+      }
+   }
+
+   /* Add uniforms from uniform storage. */
+   for (unsigned i = 0; i < shProg->NumUniformStorage; i++) {
+      /* Do not add uniforms internally used by Mesa. */
+      if (shProg->UniformStorage[i].hidden)
+         continue;
+
+      uint8_t stageref =
+         build_stageref(shProg, shProg->UniformStorage[i].name,
+                        ir_var_uniform);
+
+      /* Add stage references for uniforms in a uniform block. */
+      int block_index = shProg->UniformStorage[i].block_index;
+      if (block_index != -1) {
+         for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
+             if (shProg->UniformBlockStageIndex[j][block_index] != -1)
+                stageref |= (1 << j);
+         }
+      }
+
+      bool is_shader_storage = shProg->UniformStorage[i].is_shader_storage;
+      GLenum type = is_shader_storage ? GL_BUFFER_VARIABLE : GL_UNIFORM;
+      if (!should_add_buffer_variable(shProg, type,
+                                      shProg->UniformStorage[i].name))
+         continue;
+
+      if (is_shader_storage) {
+         calculate_array_size_and_stride(shProg, &shProg->UniformStorage[i]);
+      }
+
+      if (!add_program_resource(shProg, type,
+                                &shProg->UniformStorage[i], stageref))
+         return;
+   }
+
+   /* Add program uniform blocks and shader storage blocks. */
+   for (unsigned i = 0; i < shProg->NumBufferInterfaceBlocks; i++) {
+      bool is_shader_storage = shProg->BufferInterfaceBlocks[i].IsShaderStorage;
+      GLenum type = is_shader_storage ? GL_SHADER_STORAGE_BLOCK : GL_UNIFORM_BLOCK;
+      if (!add_program_resource(shProg, type,
+          &shProg->BufferInterfaceBlocks[i], 0))
+         return;
+   }
+
+   /* Add atomic counter buffers. */
+   for (unsigned i = 0; i < shProg->NumAtomicBuffers; i++) {
+      if (!add_program_resource(shProg, GL_ATOMIC_COUNTER_BUFFER,
+                                &shProg->AtomicBuffers[i], 0))
+         return;
+   }
+
+   for (unsigned i = 0; i < shProg->NumUniformStorage; i++) {
+      GLenum type;
+      if (!shProg->UniformStorage[i].hidden)
+         continue;
+
+      for (int j = MESA_SHADER_VERTEX; j < MESA_SHADER_STAGES; j++) {
+         if (!shProg->UniformStorage[i].opaque[j].active)
+            continue;
+
+         type = _mesa_shader_stage_to_subroutine_uniform((gl_shader_stage)j);
+         /* add shader subroutines */
+         if (!add_program_resource(shProg, type, &shProg->UniformStorage[i], 0))
+            return;
+      }
+   }
+
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      struct gl_shader *sh = shProg->_LinkedShaders[i];
+      GLuint type;
+
+      if (!sh)
+         continue;
+
+      type = _mesa_shader_stage_to_subroutine((gl_shader_stage)i);
+      for (unsigned j = 0; j < sh->NumSubroutineFunctions; j++) {
+         if (!add_program_resource(shProg, type, &sh->SubroutineFunctions[j], 0))
+            return;
+      }
+   }
+
+   /* TODO - following extensions will require more resource types:
+    *
+    *    GL_ARB_shader_storage_buffer_object
+    */
+}
+
+/**
+ * This check is done to make sure we allow only constant-expression
+ * indexing and "constant-index-expression" (indexing with an expression
+ * that includes a loop induction variable).
+ */
+static bool
+validate_sampler_array_indexing(struct gl_context *ctx,
+                                struct gl_shader_program *prog)
+{
+   dynamic_sampler_array_indexing_visitor v;
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
       if (prog->_LinkedShaders[i] == NULL)
         continue;
 
-      if (!assign_varying_locations(
-                                   ctx, mem_ctx, prog, prog->_LinkedShaders[prev], prog->_LinkedShaders[i],
-             i == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
-             tfeedback_decls))
+      bool no_dynamic_indexing =
+         ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectSampler;
+
+      /* Search for array derefs in shader. */
+      v.run(prog->_LinkedShaders[i]->ir);
+      if (v.uses_dynamic_sampler_array_indexing()) {
+         const char *msg = "sampler arrays indexed with non-constant "
+                           "expressions is forbidden in GLSL %s %u";
+         /* Backend has indicated that it has no dynamic indexing support. */
+         if (no_dynamic_indexing) {
+            linker_error(prog, msg, prog->IsES ? "ES" : "", prog->Version);
+            return false;
+         } else {
+            linker_warning(prog, msg, prog->IsES ? "ES" : "", prog->Version);
+         }
+      }
+   }
+   return true;
+}
+
+static void
+link_assign_subroutine_types(struct gl_shader_program *prog)
+{
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      gl_shader *sh = prog->_LinkedShaders[i];
+
+      if (sh == NULL)
+         continue;
+
+      foreach_in_list(ir_instruction, node, sh->ir) {
+         ir_function *fn = node->as_function();
+         if (!fn)
+            continue;
+
+         if (fn->is_subroutine)
+            sh->NumSubroutineUniformTypes++;
+
+         if (!fn->num_subroutine_types)
+            continue;
+
+         sh->SubroutineFunctions = reralloc(sh, sh->SubroutineFunctions,
+                                            struct gl_subroutine_function,
+                                            sh->NumSubroutineFunctions + 1);
+         sh->SubroutineFunctions[sh->NumSubroutineFunctions].name = ralloc_strdup(sh, fn->name);
+         sh->SubroutineFunctions[sh->NumSubroutineFunctions].num_compat_types = fn->num_subroutine_types;
+         sh->SubroutineFunctions[sh->NumSubroutineFunctions].types =
+            ralloc_array(sh, const struct glsl_type *,
+                         fn->num_subroutine_types);
+         for (int j = 0; j < fn->num_subroutine_types; j++)
+            sh->SubroutineFunctions[sh->NumSubroutineFunctions].types[j] = fn->subroutine_types[j];
+         sh->NumSubroutineFunctions++;
+      }
+   }
+}
+
+static void
+split_ubos_and_ssbos(void *mem_ctx,
+                     struct gl_uniform_block *blocks,
+                     unsigned num_blocks,
+                     struct gl_uniform_block ***ubos,
+                     unsigned *num_ubos,
+                     struct gl_uniform_block ***ssbos,
+                     unsigned *num_ssbos)
+{
+   unsigned num_ubo_blocks = 0;
+   unsigned num_ssbo_blocks = 0;
+
+   for (unsigned i = 0; i < num_blocks; i++) {
+      if (blocks[i].IsShaderStorage)
+         num_ssbo_blocks++;
+      else
+         num_ubo_blocks++;
+   }
+
+   *ubos = ralloc_array(mem_ctx, gl_uniform_block *, num_ubo_blocks);
+   *num_ubos = 0;
+
+   *ssbos = ralloc_array(mem_ctx, gl_uniform_block *, num_ssbo_blocks);
+   *num_ssbos = 0;
+
+   for (unsigned i = 0; i < num_blocks; i++) {
+      if (blocks[i].IsShaderStorage) {
+         (*ssbos)[(*num_ssbos)++] = &blocks[i];
+      } else {
+         (*ubos)[(*num_ubos)++] = &blocks[i];
+      }
+   }
+
+   assert(*num_ubos + *num_ssbos == num_blocks);
+}
+
+void
+link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
+{
+   tfeedback_decl *tfeedback_decls = NULL;
+   unsigned num_tfeedback_decls = prog->TransformFeedback.NumVarying;
+
+   void *mem_ctx = ralloc_context(NULL); // temporary linker context
+
+   prog->LinkStatus = true; /* All error paths will set this to false */
+   prog->Validated = false;
+   prog->_Used = false;
+
+   prog->ARB_fragment_coord_conventions_enable = false;
+
+   /* Separate the shaders into groups based on their type.
+    */
+   struct gl_shader **shader_list[MESA_SHADER_STAGES];
+   unsigned num_shaders[MESA_SHADER_STAGES];
+
+   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
+      shader_list[i] = (struct gl_shader **)
+         calloc(prog->NumShaders, sizeof(struct gl_shader *));
+      num_shaders[i] = 0;
+   }
+
+   unsigned min_version = UINT_MAX;
+   unsigned max_version = 0;
+   const bool is_es_prog =
+      (prog->NumShaders > 0 && prog->Shaders[0]->IsES) ? true : false;
+   for (unsigned i = 0; i < prog->NumShaders; i++) {
+      min_version = MIN2(min_version, prog->Shaders[i]->Version);
+      max_version = MAX2(max_version, prog->Shaders[i]->Version);
+
+      if (prog->Shaders[i]->IsES != is_es_prog) {
+        linker_error(prog, "all shaders must use same shading "
+                     "language version\n");
         goto done;
+      }
+
+      if (prog->Shaders[i]->ARB_fragment_coord_conventions_enable) {
+         prog->ARB_fragment_coord_conventions_enable = true;
+      }
+
+      gl_shader_stage shader_type = prog->Shaders[i]->Stage;
+      shader_list[shader_type][num_shaders[shader_type]] = prog->Shaders[i];
+      num_shaders[shader_type]++;
+   }
+
+   /* In desktop GLSL, different shader versions may be linked together.  In
+    * GLSL ES, all shader versions must be the same.
+    */
+   if (is_es_prog && min_version != max_version) {
+      linker_error(prog, "all shaders must use same shading "
+                  "language version\n");
+      goto done;
+   }
+
+   prog->Version = max_version;
+   prog->IsES = is_es_prog;
+
+   /* From OpenGL 4.5 Core specification (7.3 Program Objects):
+    *     "Linking can fail for a variety of reasons as specified in the OpenGL
+    *     Shading Language Specification, as well as any of the following
+    *     reasons:
+    *
+    *     * No shader objects are attached to program.
+    *
+    *     ..."
+    *
+    *     Same rule applies for OpenGL ES >= 3.1.
+    */
+
+   if (prog->NumShaders == 0 &&
+       ((ctx->API == API_OPENGL_CORE && ctx->Version >= 45) ||
+        (ctx->API == API_OPENGLES2 && ctx->Version >= 31))) {
+      linker_error(prog, "No shader objects are attached to program.\n");
+      goto done;
+   }
+
+   /* Some shaders have to be linked with some other shaders present.
+    */
+   if (num_shaders[MESA_SHADER_GEOMETRY] > 0 &&
+       num_shaders[MESA_SHADER_VERTEX] == 0 &&
+       !prog->SeparateShader) {
+      linker_error(prog, "Geometry shader must be linked with "
+                  "vertex shader\n");
+      goto done;
+   }
+   if (num_shaders[MESA_SHADER_TESS_EVAL] > 0 &&
+       num_shaders[MESA_SHADER_VERTEX] == 0 &&
+       !prog->SeparateShader) {
+      linker_error(prog, "Tessellation evaluation shader must be linked with "
+                  "vertex shader\n");
+      goto done;
+   }
+   if (num_shaders[MESA_SHADER_TESS_CTRL] > 0 &&
+       num_shaders[MESA_SHADER_VERTEX] == 0 &&
+       !prog->SeparateShader) {
+      linker_error(prog, "Tessellation control shader must be linked with "
+                  "vertex shader\n");
+      goto done;
+   }
+
+   /* The spec is self-contradictory here. It allows linking without a tess
+    * eval shader, but that can only be used with transform feedback and
+    * rasterization disabled. However, transform feedback isn't allowed
+    * with GL_PATCHES, so it can't be used.
+    *
+    * More investigation showed that the idea of transform feedback after
+    * a tess control shader was dropped, because some hw vendors couldn't
+    * support tessellation without a tess eval shader, but the linker section
+    * wasn't updated to reflect that.
+    *
+    * All specifications (ARB_tessellation_shader, GL 4.0-4.5) have this
+    * spec bug.
+    *
+    * Do what's reasonable and always require a tess eval shader if a tess
+    * control shader is present.
+    */
+   if (num_shaders[MESA_SHADER_TESS_CTRL] > 0 &&
+       num_shaders[MESA_SHADER_TESS_EVAL] == 0 &&
+       !prog->SeparateShader) {
+      linker_error(prog, "Tessellation control shader must be linked with "
+                  "tessellation evaluation shader\n");
+      goto done;
+   }
+
+   /* Compute shaders have additional restrictions. */
+   if (num_shaders[MESA_SHADER_COMPUTE] > 0 &&
+       num_shaders[MESA_SHADER_COMPUTE] != prog->NumShaders) {
+      linker_error(prog, "Compute shaders may not be linked with any other "
+                   "type of shader\n");
+   }
+
+   for (unsigned int i = 0; i < MESA_SHADER_STAGES; i++) {
+      if (prog->_LinkedShaders[i] != NULL)
+        _mesa_delete_shader(ctx, prog->_LinkedShaders[i]);
+
+      prog->_LinkedShaders[i] = NULL;
+   }
+
+   /* Link all shaders for a particular stage and validate the result.
+    */
+   for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
+      if (num_shaders[stage] > 0) {
+         gl_shader *const sh =
+            link_intrastage_shaders(mem_ctx, ctx, prog, shader_list[stage],
+                                    num_shaders[stage]);
+
+         if (!prog->LinkStatus) {
+            if (sh)
+               _mesa_delete_shader(ctx, sh);
+            goto done;
+         }
+
+         switch (stage) {
+         case MESA_SHADER_VERTEX:
+            validate_vertex_shader_executable(prog, sh);
+            break;
+         case MESA_SHADER_TESS_CTRL:
+            /* nothing to be done */
+            break;
+         case MESA_SHADER_TESS_EVAL:
+            validate_tess_eval_shader_executable(prog, sh);
+            break;
+         case MESA_SHADER_GEOMETRY:
+            validate_geometry_shader_executable(prog, sh);
+            break;
+         case MESA_SHADER_FRAGMENT:
+            validate_fragment_shader_executable(prog, sh);
+            break;
+         }
+         if (!prog->LinkStatus) {
+            if (sh)
+               _mesa_delete_shader(ctx, sh);
+            goto done;
+         }
+
+         _mesa_reference_shader(ctx, &prog->_LinkedShaders[stage], sh);
+      }
+   }
+
+   if (num_shaders[MESA_SHADER_GEOMETRY] > 0)
+      prog->LastClipDistanceArraySize = prog->Geom.ClipDistanceArraySize;
+   else if (num_shaders[MESA_SHADER_TESS_EVAL] > 0)
+      prog->LastClipDistanceArraySize = prog->TessEval.ClipDistanceArraySize;
+   else if (num_shaders[MESA_SHADER_VERTEX] > 0)
+      prog->LastClipDistanceArraySize = prog->Vert.ClipDistanceArraySize;
+   else
+      prog->LastClipDistanceArraySize = 0; /* Not used */
+
+   /* Here begins the inter-stage linking phase.  Some initial validation is
+    * performed, then locations are assigned for uniforms, attributes, and
+    * varyings.
+    */
+   cross_validate_uniforms(prog);
+   if (!prog->LinkStatus)
+      goto done;
+
+   unsigned prev;
+
+   for (prev = 0; prev <= MESA_SHADER_FRAGMENT; prev++) {
+      if (prog->_LinkedShaders[prev] != NULL)
+         break;
+   }
+
+   check_explicit_uniform_locations(ctx, prog);
+   link_assign_subroutine_types(prog);
+
+   if (!prog->LinkStatus)
+      goto done;
+
+   resize_tes_inputs(ctx, prog);
+
+   /* Validate the inputs of each stage with the output of the preceding
+    * stage.
+    */
+   for (unsigned i = prev + 1; i <= MESA_SHADER_FRAGMENT; i++) {
+      if (prog->_LinkedShaders[i] == NULL)
+         continue;
+
+      validate_interstage_inout_blocks(prog, prog->_LinkedShaders[prev],
+                                       prog->_LinkedShaders[i]);
+      if (!prog->LinkStatus)
+         goto done;
+
+      cross_validate_outputs_to_inputs(prog,
+                                       prog->_LinkedShaders[prev],
+                                       prog->_LinkedShaders[i]);
+      if (!prog->LinkStatus)
+         goto done;
 
       prev = i;
    }
 
-   if (prev != MESA_SHADER_FRAGMENT && num_tfeedback_decls != 0) {
-      /* There was no fragment shader, but we still have to assign varying
-       * locations for use by transform feedback.
-       */
-      if (!assign_varying_locations(
-                                   ctx, mem_ctx, prog, prog->_LinkedShaders[prev], NULL, num_tfeedback_decls,
-             tfeedback_decls))
+   /* Cross-validate uniform blocks between shader stages */
+   validate_interstage_uniform_blocks(prog, prog->_LinkedShaders,
+                                      MESA_SHADER_STAGES);
+   if (!prog->LinkStatus)
+      goto done;
+
+   for (unsigned int i = 0; i < MESA_SHADER_STAGES; i++) {
+      if (prog->_LinkedShaders[i] != NULL)
+         lower_named_interface_blocks(mem_ctx, prog->_LinkedShaders[i]);
+   }
+
+   /* Implement the GLSL 1.30+ rule for discard vs infinite loops.  Do
+    * it before optimization because we want most of the checks to get
+    * dropped thanks to constant propagation.
+    *
+    * This rule also applies to GLSL ES 3.00.
+    */
+   if (max_version >= (is_es_prog ? 300 : 130)) {
+      struct gl_shader *sh = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
+      if (sh) {
+        lower_discard_flow(sh->ir);
+      }
+   }
+
+   if (!interstage_cross_validate_uniform_blocks(prog))
+      goto done;
+
+   /* Do common optimization before assigning storage for attributes,
+    * uniforms, and varyings.  Later optimization could possibly make
+    * some of that unused.
+    */
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      if (prog->_LinkedShaders[i] == NULL)
+        continue;
+
+      detect_recursion_linked(prog, prog->_LinkedShaders[i]->ir);
+      if (!prog->LinkStatus)
+        goto done;
+
+      if (ctx->Const.ShaderCompilerOptions[i].LowerClipDistance) {
+         lower_clip_distance(prog->_LinkedShaders[i]);
+      }
+
+      if (ctx->Const.LowerTessLevel) {
+         lower_tess_level(prog->_LinkedShaders[i]);
+      }
+
+      while (do_common_optimization(prog->_LinkedShaders[i]->ir, true, false,
+                                    &ctx->Const.ShaderCompilerOptions[i],
+                                    ctx->Const.NativeIntegers))
+        ;
+
+      lower_const_arrays_to_uniforms(prog->_LinkedShaders[i]->ir);
+   }
+
+   /* Validation for special cases where we allow sampler array indexing
+    * with loop induction variable. This check emits a warning or error
+    * depending if backend can handle dynamic indexing.
+    */
+   if ((!prog->IsES && prog->Version < 130) ||
+       (prog->IsES && prog->Version < 300)) {
+      if (!validate_sampler_array_indexing(ctx, prog))
          goto done;
    }
 
-   if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls))
+   /* Check and validate stream emissions in geometry shaders */
+   validate_geometry_shader_emissions(ctx, prog);
+
+   /* Mark all generic shader inputs and outputs as unpaired. */
+   for (unsigned i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
+      if (prog->_LinkedShaders[i] != NULL) {
+         link_invalidate_variable_locations(prog->_LinkedShaders[i]->ir);
+      }
+   }
+
+   if (!assign_attribute_or_color_locations(prog, &ctx->Const,
+                                            MESA_SHADER_VERTEX)) {
       goto done;
+   }
+
+   if (!assign_attribute_or_color_locations(prog, &ctx->Const,
+                                            MESA_SHADER_FRAGMENT)) {
+      goto done;
+   }
+
+   unsigned first, last;
 
-   if (prog->_LinkedShaders[MESA_SHADER_VERTEX] != NULL) {
-      demote_shader_inputs_and_outputs(prog->_LinkedShaders[MESA_SHADER_VERTEX],
-                                      ir_var_shader_out);
+   first = MESA_SHADER_STAGES;
+   last = 0;
 
-      /* Eliminate code that is now dead due to unused vertex outputs being
-       * demoted.
+   /* Determine first and last stage. */
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      if (!prog->_LinkedShaders[i])
+         continue;
+      if (first == MESA_SHADER_STAGES)
+         first = i;
+      last = i;
+   }
+
+   if (num_tfeedback_decls != 0) {
+      /* From GL_EXT_transform_feedback:
+       *   A program will fail to link if:
+       *
+       *   * the <count> specified by TransformFeedbackVaryingsEXT is
+       *     non-zero, but the program object has no vertex or geometry
+       *     shader;
        */
-      while (do_dead_code(prog->_LinkedShaders[MESA_SHADER_VERTEX]->ir, false))
-        ;
+      if (first == MESA_SHADER_FRAGMENT) {
+         linker_error(prog, "Transform feedback varyings specified, but "
+                      "no vertex or geometry shader is present.\n");
+         goto done;
+      }
+
+      tfeedback_decls = ralloc_array(mem_ctx, tfeedback_decl,
+                                     prog->TransformFeedback.NumVarying);
+      if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
+                                 prog->TransformFeedback.VaryingNames,
+                                 tfeedback_decls))
+         goto done;
    }
 
-   if (prog->_LinkedShaders[MESA_SHADER_GEOMETRY] != NULL) {
-      gl_shader *const sh = prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
+   /* Linking the stages in the opposite order (from fragment to vertex)
+    * ensures that inter-shader outputs written to in an earlier stage are
+    * eliminated if they are (transitively) not used in a later stage.
+    */
+   int next;
+
+   if (first < MESA_SHADER_FRAGMENT) {
+      gl_shader *const sh = prog->_LinkedShaders[last];
+
+      if (first == MESA_SHADER_GEOMETRY) {
+         /* There was no vertex shader, but we still have to assign varying
+          * locations for use by geometry shader inputs in SSO.
+          *
+          * If the shader is not separable (i.e., prog->SeparateShader is
+          * false), linking will have already failed when first is
+          * MESA_SHADER_GEOMETRY.
+          */
+         if (!assign_varying_locations(ctx, mem_ctx, prog,
+                                       NULL, prog->_LinkedShaders[first],
+                                       num_tfeedback_decls, tfeedback_decls))
+            goto done;
+      }
 
-      demote_shader_inputs_and_outputs(sh, ir_var_shader_in);
-      demote_shader_inputs_and_outputs(sh, ir_var_shader_out);
+      if (last != MESA_SHADER_FRAGMENT &&
+         (num_tfeedback_decls != 0 || prog->SeparateShader)) {
+         /* There was no fragment shader, but we still have to assign varying
+          * locations for use by transform feedback.
+          */
+         if (!assign_varying_locations(ctx, mem_ctx, prog,
+                                       sh, NULL,
+                                       num_tfeedback_decls, tfeedback_decls))
+            goto done;
+      }
 
-      /* Eliminate code that is now dead due to unused geometry outputs being
-       * demoted.
+      do_dead_builtin_varyings(ctx, sh, NULL,
+                               num_tfeedback_decls, tfeedback_decls);
+
+      if (!prog->SeparateShader)
+         demote_shader_inputs_and_outputs(sh, ir_var_shader_out);
+
+      /* Eliminate code that is now dead due to unused outputs being demoted.
        */
-      while (do_dead_code(prog->_LinkedShaders[MESA_SHADER_GEOMETRY]->ir, false))
-        ;
+      while (do_dead_code(sh->ir, false))
+         ;
    }
+   else if (first == MESA_SHADER_FRAGMENT) {
+      /* If the program only contains a fragment shader...
+       */
+      gl_shader *const sh = prog->_LinkedShaders[first];
+
+      do_dead_builtin_varyings(ctx, NULL, sh,
+                               num_tfeedback_decls, tfeedback_decls);
+
+      if (prog->SeparateShader) {
+         if (!assign_varying_locations(ctx, mem_ctx, prog,
+                                       NULL /* producer */,
+                                       sh /* consumer */,
+                                       0 /* num_tfeedback_decls */,
+                                       NULL /* tfeedback_decls */))
+            goto done;
+      } else
+         demote_shader_inputs_and_outputs(sh, ir_var_shader_in);
+
+      while (do_dead_code(sh->ir, false))
+         ;
+   }
+
+   next = last;
+   for (int i = next - 1; i >= 0; i--) {
+      if (prog->_LinkedShaders[i] == NULL)
+         continue;
+
+      gl_shader *const sh_i = prog->_LinkedShaders[i];
+      gl_shader *const sh_next = prog->_LinkedShaders[next];
+
+      if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
+                next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
+                tfeedback_decls))
+         goto done;
 
-   if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] != NULL) {
-      gl_shader *const sh = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
+      do_dead_builtin_varyings(ctx, sh_i, sh_next,
+                next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
+                tfeedback_decls);
 
-      demote_shader_inputs_and_outputs(sh, ir_var_shader_in);
+      demote_shader_inputs_and_outputs(sh_i, ir_var_shader_out);
+      demote_shader_inputs_and_outputs(sh_next, ir_var_shader_in);
 
-      /* Eliminate code that is now dead due to unused fragment inputs being
-       * demoted.  This shouldn't actually do anything other than remove
-       * declarations of the (now unused) global variables.
+      /* Eliminate code that is now dead due to unused outputs being demoted.
        */
-      while (do_dead_code(prog->_LinkedShaders[MESA_SHADER_FRAGMENT]->ir, false))
-        ;
+      while (do_dead_code(sh_i->ir, false))
+         ;
+      while (do_dead_code(sh_next->ir, false))
+         ;
+
+      /* This must be done after all dead varyings are eliminated. */
+      if (!check_against_output_limit(ctx, prog, sh_i))
+         goto done;
+      if (!check_against_input_limit(ctx, prog, sh_next))
+         goto done;
+
+      next = i;
    }
 
+   if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls))
+      goto done;
+
    update_array_sizes(prog);
-   link_assign_uniform_locations(prog);
+   link_assign_uniform_locations(prog, ctx->Const.UniformBooleanTrue);
+   link_assign_atomic_counter_resources(ctx, prog);
    store_fragdepth_layout(prog);
 
-   if (!check_resources(ctx, prog))
+   link_calculate_subroutine_compat(prog);
+   check_resources(ctx, prog);
+   check_subroutine_resources(prog);
+   check_image_resources(ctx, prog);
+   link_check_atomic_counter_resources(ctx, prog);
+
+   if (!prog->LinkStatus)
       goto done;
 
    /* OpenGL ES requires that a vertex shader and a fragment shader both be
-    * present in a linked program.  By checking prog->IsES, we also
-    * catch the GL_ARB_ES2_compatibility case.
+    * present in a linked program. GL_ARB_ES2_compatibility doesn't say
+    * anything about shader linking when one of the shaders (vertex or
+    * fragment shader) is absent. So, the extension shouldn't change the
+    * behavior specified in the GLSL specification.
+    */
+   if (!prog->SeparateShader && ctx->API == API_OPENGLES2) {
+      /* With ES < 3.1 one needs to have always vertex + fragment shader. */
+      if (ctx->Version < 31) {
+         if (prog->_LinkedShaders[MESA_SHADER_VERTEX] == NULL) {
+           linker_error(prog, "program lacks a vertex shader\n");
+         } else if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
+           linker_error(prog, "program lacks a fragment shader\n");
+         }
+      } else {
+         /* From OpenGL ES 3.1 specification (7.3 Program Objects):
+          *     "Linking can fail for a variety of reasons as specified in the
+          *     OpenGL ES Shading Language Specification, as well as any of the
+          *     following reasons:
+          *
+          *     ...
+          *
+          *     * program contains objects to form either a vertex shader or
+          *       fragment shader, and program is not separable, and does not
+          *       contain objects to form both a vertex shader and fragment
+          *       shader."
+          */
+         if (!!prog->_LinkedShaders[MESA_SHADER_VERTEX] ^
+             !!prog->_LinkedShaders[MESA_SHADER_FRAGMENT]) {
+            linker_error(prog, "Program needs to contain both vertex and "
+                         "fragment shaders.\n");
+         }
+      }
+   }
+
+   /* Split BufferInterfaceBlocks into UniformBlocks and ShaderStorageBlocks
+    * for gl_shader_program and gl_shader, so that drivers that need separate
+    * index spaces for each set can have that.
     */
-   if (!prog->InternalSeparateShader &&
-       (ctx->API == API_OPENGLES2 || prog->IsES)) {
-      if (prog->_LinkedShaders[MESA_SHADER_VERTEX] == NULL) {
-        linker_error(prog, "program lacks a vertex shader\n");
-      } else if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
-        linker_error(prog, "program lacks a fragment shader\n");
+   for (unsigned i = MESA_SHADER_VERTEX; i < MESA_SHADER_STAGES; i++) {
+      if (prog->_LinkedShaders[i] != NULL) {
+         gl_shader *sh = prog->_LinkedShaders[i];
+         split_ubos_and_ssbos(sh,
+                              sh->BufferInterfaceBlocks,
+                              sh->NumBufferInterfaceBlocks,
+                              &sh->UniformBlocks,
+                              &sh->NumUniformBlocks,
+                              &sh->ShaderStorageBlocks,
+                              &sh->NumShaderStorageBlocks);
       }
    }
 
+   split_ubos_and_ssbos(prog,
+                        prog->BufferInterfaceBlocks,
+                        prog->NumBufferInterfaceBlocks,
+                        &prog->UniformBlocks,
+                        &prog->NumUniformBlocks,
+                        &prog->ShaderStorageBlocks,
+                        &prog->NumShaderStorageBlocks);
+
    /* FINISHME: Assign fragment shader output locations. */
 
 done:
-   free(vert_shader_list);
-
-   for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      free(shader_list[i]);
       if (prog->_LinkedShaders[i] == NULL)
         continue;
 
+      /* Do a final validation step to make sure that the IR wasn't
+       * invalidated by any modifications performed after intrastage linking.
+       */
+      validate_ir_tree(prog->_LinkedShaders[i]->ir);
+
       /* Retain any live IR, but trash the rest. */
       reparent_ir(prog->_LinkedShaders[i]->ir, prog->_LinkedShaders[i]->ir);