spirv: make use of the loop control support in nir
[mesa.git] / src / compiler / glsl / glsl_to_nir.cpp
index 062434d066bb28bd7a373bd1a053224d08385f02..6a1a0b5f113e2d36555a809b0e8381ee306b105b 100644 (file)
@@ -30,6 +30,7 @@
 #include "ir_visitor.h"
 #include "ir_hierarchical_visitor.h"
 #include "ir.h"
+#include "ir_optimization.h"
 #include "program.h"
 #include "compiler/nir/nir_control_flow.h"
 #include "compiler/nir/nir_builder.h"
@@ -52,7 +53,7 @@ namespace {
 class nir_visitor : public ir_visitor
 {
 public:
-   nir_visitor(nir_shader *shader);
+   nir_visitor(gl_context *ctx, nir_shader *shader);
    ~nir_visitor();
 
    virtual void visit(ir_variable *);
@@ -90,6 +91,7 @@ private:
                        nir_ssa_def *src2, nir_ssa_def *src3);
 
    bool supports_ints;
+   bool supports_std430;
 
    nir_shader *shader;
    nir_function_impl *impl;
@@ -133,24 +135,103 @@ private:
    nir_visitor *visitor;
 };
 
+/* glsl_to_nir can only handle converting certain function parameters
+ * to NIR. This visitor checks for parameters it can't currently handle.
+ */
+class ir_function_param_visitor : public ir_hierarchical_visitor
+{
+public:
+   ir_function_param_visitor()
+      : unsupported(false)
+   {
+   }
+
+   virtual ir_visitor_status visit_enter(ir_function_signature *ir)
+   {
+      if (ir->is_intrinsic())
+         return visit_continue;
+
+      foreach_in_list(ir_variable, param, &ir->parameters) {
+         if (!param->type->is_vector() && !param->type->is_scalar()) {
+            unsupported = true;
+            return visit_stop;
+         }
+
+         if (param->data.mode == ir_var_function_inout) {
+            unsupported = true;
+            return visit_stop;
+         }
+      }
+
+      return visit_continue;
+   }
+
+   bool unsupported;
+};
+
 } /* end of anonymous namespace */
 
+static bool
+has_unsupported_function_param(exec_list *ir)
+{
+   ir_function_param_visitor visitor;
+   visit_list_elements(&visitor, ir);
+   return visitor.unsupported;
+}
+
 nir_shader *
-glsl_to_nir(const struct gl_shader_program *shader_prog,
+glsl_to_nir(struct gl_context *ctx,
+            const struct gl_shader_program *shader_prog,
             gl_shader_stage stage,
             const nir_shader_compiler_options *options)
 {
    struct gl_linked_shader *sh = shader_prog->_LinkedShaders[stage];
 
+   const struct gl_shader_compiler_options *gl_options =
+      &ctx->Const.ShaderCompilerOptions[stage];
+
+   /* glsl_to_nir can only handle converting certain function parameters
+    * to NIR. If we find something we can't handle, run the GLSL IR
+    * optimization passes until the offending parameters have been
+    * inlined away.
+    *
+    * TODO: add missing GLSL IR to NIR support and remove this loop.
+    */
+   while (has_unsupported_function_param(sh->ir)) {
+      do_common_optimization(sh->ir, true, true, gl_options,
+                             ctx->Const.NativeIntegers);
+   }
+
    nir_shader *shader = nir_shader_create(NULL, stage, options,
                                           &sh->Program->info);
 
-   nir_visitor v1(shader);
+   nir_visitor v1(ctx, shader);
    nir_function_visitor v2(&v1);
    v2.run(sh->ir);
    visit_exec_list(sh->ir, &v1);
 
+   nir_validate_shader(shader, "after glsl to nir, before function inline");
+
+   /* We have to lower away local constant initializers right before we
+    * inline functions.  That way they get properly initialized at the top
+    * of the function and not at the top of its caller.
+    */
    nir_lower_constant_initializers(shader, (nir_variable_mode)~0);
+   nir_lower_returns(shader);
+   nir_inline_functions(shader);
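+   /* Inlining copies deref chains from the callee into the caller; run
+    * nir_opt_deref to clean them up before validating.
+    */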
+   nir_opt_deref(shader);
+
+   nir_validate_shader(shader, "after function inlining and return lowering");
+
+   /* Now that we have inlined everything, remove all of the functions
+    * except main().
+    */
+   foreach_list_typed_safe(nir_function, function, node, &shader->functions) {
+      if (strcmp("main", function->name) != 0) {
+         exec_node_remove(&function->node);
+      }
+   }
 
    /* Remap the locations to slots so those requiring two slots will occupy
     * two locations. For instance, if we have in the IR code a dvec3 attr0 in
@@ -180,9 +261,10 @@ glsl_to_nir(const struct gl_shader_program *shader_prog,
    return shader;
 }
 
-nir_visitor::nir_visitor(nir_shader *shader)
+nir_visitor::nir_visitor(gl_context *ctx, nir_shader *shader)
 {
    this->supports_ints = shader->options->native_integers;
+   this->supports_std430 = ctx->Const.UseSTD430AsDefaultPacking;
    this->shader = shader;
    this->is_global = true;
    this->var_table = _mesa_pointer_hash_table_create(NULL);
@@ -300,6 +382,17 @@ nir_visitor::constant_copy(ir_constant *ir, void *mem_ctx)
    return ret;
 }
 
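+/* Recursively wrap elem_type in arrays matching the dimensions of
+ * array_type: wrapping float in the shape of int[3][2] yields float[3][2].
+ * Used below to re-apply array dimensions to an explicit interface type.
+ */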
+static const glsl_type *
+wrap_type_in_array(const glsl_type *elem_type, const glsl_type *array_type)
+{
+   if (!array_type->is_array())
+      return elem_type;
+
+   elem_type = wrap_type_in_array(elem_type, array_type->fields.array);
+
+   return glsl_type::get_array_instance(elem_type, array_type->length);
+}
+
 void
 nir_visitor::visit(ir_variable *ir)
 {
@@ -406,6 +499,56 @@ nir_visitor::visit(ir_variable *ir)
       unreachable("not reached");
    }
 
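+   /* Gather the GLSL memory qualifiers into NIR access flags. Note the
+    * inversion: readonly means the variable is never written, hence
+    * ACCESS_NON_WRITEABLE, and writeonly maps to ACCESS_NON_READABLE.
+    */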
+   unsigned image_access = 0;
+   if (ir->data.memory_read_only)
+      image_access |= ACCESS_NON_WRITEABLE;
+   if (ir->data.memory_write_only)
+      image_access |= ACCESS_NON_READABLE;
+   if (ir->data.memory_coherent)
+      image_access |= ACCESS_COHERENT;
+   if (ir->data.memory_volatile)
+      image_access |= ACCESS_VOLATILE;
+   if (ir->data.memory_restrict)
+      image_access |= ACCESS_RESTRICT;
+
+   /* For UBO and SSBO variables, we need explicit types */
+   if (var->data.mode & (nir_var_mem_ubo | nir_var_mem_ssbo)) {
+      const glsl_type *explicit_ifc_type =
+         ir->get_interface_type()->get_explicit_interface_type(supports_std430);
+
+      if (ir->type->without_array()->is_interface()) {
+         /* The variable is the interface block itself (or an array of
+          * blocks); wrap the explicit type in the same number of array
+          * dimensions.
+          */
+         var->type = wrap_type_in_array(explicit_ifc_type, ir->type);
+      } else {
+         /* Otherwise, this variable is one entry in the interface */
+         UNUSED bool found = false;
+         for (unsigned i = 0; i < explicit_ifc_type->length; i++) {
+            const glsl_struct_field *field =
+               &explicit_ifc_type->fields.structure[i];
+            if (strcmp(ir->name, field->name) != 0)
+               continue;
+
+            var->type = field->type;
+            if (field->memory_read_only)
+               image_access |= ACCESS_NON_WRITEABLE;
+            if (field->memory_write_only)
+               image_access |= ACCESS_NON_READABLE;
+            if (field->memory_coherent)
+               image_access |= ACCESS_COHERENT;
+            if (field->memory_volatile)
+               image_access |= ACCESS_VOLATILE;
+            if (field->memory_restrict)
+               image_access |= ACCESS_RESTRICT;
+
+            found = true;
+            break;
+         }
+         assert(found);
+      }
+   }
+
    var->data.interpolation = ir->data.interpolation;
    var->data.location_frac = ir->data.location_frac;
 
@@ -436,17 +579,6 @@ nir_visitor::visit(ir_variable *ir)
    var->data.bindless = ir->data.bindless;
    var->data.offset = ir->data.offset;
 
-   unsigned image_access = 0;
-   if (ir->data.memory_read_only)
-      image_access |= ACCESS_NON_WRITEABLE;
-   if (ir->data.memory_write_only)
-      image_access |= ACCESS_NON_READABLE;
-   if (ir->data.memory_coherent)
-      image_access |= ACCESS_COHERENT;
-   if (ir->data.memory_volatile)
-      image_access |= ACCESS_VOLATILE;
-   if (ir->data.memory_restrict)
-      image_access |= ACCESS_RESTRICT;
    var->data.image.access = (gl_access_qualifier)image_access;
    var->data.image.format = ir->data.image_format;
 
@@ -694,6 +826,49 @@ nir_visitor::visit(ir_call *ir)
       nir_intrinsic_op op;
 
       switch (ir->callee->intrinsic_id) {
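+      /* The generic atomics operate through a deref on buffer or shared
+       * variables; for add, min, max and comp_swap the return type picks
+       * between the integer and float variants of the NIR intrinsic.
+       */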
+      case ir_intrinsic_generic_atomic_add:
+         op = ir->return_deref->type->is_integer_32_64()
+            ? nir_intrinsic_deref_atomic_add
+            : nir_intrinsic_deref_atomic_fadd;
+         break;
+      case ir_intrinsic_generic_atomic_and:
+         op = nir_intrinsic_deref_atomic_and;
+         break;
+      case ir_intrinsic_generic_atomic_or:
+         op = nir_intrinsic_deref_atomic_or;
+         break;
+      case ir_intrinsic_generic_atomic_xor:
+         op = nir_intrinsic_deref_atomic_xor;
+         break;
+      case ir_intrinsic_generic_atomic_min:
+         assert(ir->return_deref);
+         if (ir->return_deref->type == glsl_type::int_type)
+            op = nir_intrinsic_deref_atomic_imin;
+         else if (ir->return_deref->type == glsl_type::uint_type)
+            op = nir_intrinsic_deref_atomic_umin;
+         else if (ir->return_deref->type == glsl_type::float_type)
+            op = nir_intrinsic_deref_atomic_fmin;
+         else
+            unreachable("Invalid type");
+         break;
+      case ir_intrinsic_generic_atomic_max:
+         assert(ir->return_deref);
+         if (ir->return_deref->type == glsl_type::int_type)
+            op = nir_intrinsic_deref_atomic_imax;
+         else if (ir->return_deref->type == glsl_type::uint_type)
+            op = nir_intrinsic_deref_atomic_umax;
+         else if (ir->return_deref->type == glsl_type::float_type)
+            op = nir_intrinsic_deref_atomic_fmax;
+         else
+            unreachable("Invalid type");
+         break;
+      case ir_intrinsic_generic_atomic_exchange:
+         op = nir_intrinsic_deref_atomic_exchange;
+         break;
+      case ir_intrinsic_generic_atomic_comp_swap:
+         op = ir->return_deref->type->is_integer_32_64()
+            ? nir_intrinsic_deref_atomic_comp_swap
+            : nir_intrinsic_deref_atomic_fcomp_swap;
+         break;
       case ir_intrinsic_atomic_counter_read:
          op = nir_intrinsic_atomic_counter_read_deref;
          break;
@@ -917,6 +1092,63 @@ nir_visitor::visit(ir_call *ir)
       nir_ssa_def *ret = &instr->dest.ssa;
 
       switch (op) {
+      case nir_intrinsic_deref_atomic_add:
+      case nir_intrinsic_deref_atomic_imin:
+      case nir_intrinsic_deref_atomic_umin:
+      case nir_intrinsic_deref_atomic_imax:
+      case nir_intrinsic_deref_atomic_umax:
+      case nir_intrinsic_deref_atomic_and:
+      case nir_intrinsic_deref_atomic_or:
+      case nir_intrinsic_deref_atomic_xor:
+      case nir_intrinsic_deref_atomic_exchange:
+      case nir_intrinsic_deref_atomic_comp_swap:
+      case nir_intrinsic_deref_atomic_fadd:
+      case nir_intrinsic_deref_atomic_fmin:
+      case nir_intrinsic_deref_atomic_fmax:
+      case nir_intrinsic_deref_atomic_fcomp_swap: {
+         int param_count = ir->actual_parameters.length();
+         assert(param_count == 2 || param_count == 3);
+
+         /* Deref */
+         exec_node *param = ir->actual_parameters.get_head();
+         ir_rvalue *rvalue = (ir_rvalue *) param;
+         ir_dereference *deref = rvalue->as_dereference();
+         ir_swizzle *swizzle = NULL;
+         if (!deref) {
+            /* We may have a swizzle to pick off a single vec4 component */
+            swizzle = rvalue->as_swizzle();
+            assert(swizzle && swizzle->type->vector_elements == 1);
+            deref = swizzle->val->as_dereference();
+            assert(deref);
+         }
+         nir_deref_instr *nir_deref = evaluate_deref(deref);
+         if (swizzle) {
+            nir_deref = nir_build_deref_array_imm(&b, nir_deref,
+                                                  swizzle->mask.x);
+         }
+         instr->src[0] = nir_src_for_ssa(&nir_deref->dest.ssa);
+
+         /* data1 parameter (this is always present) */
+         param = param->get_next();
+         ir_instruction *inst = (ir_instruction *) param;
+         instr->src[1] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));
+
+         /* data2 parameter (only with atomic_comp_swap) */
+         if (param_count == 3) {
+            assert(op == nir_intrinsic_deref_atomic_comp_swap ||
+                   op == nir_intrinsic_deref_atomic_fcomp_swap);
+            param = param->get_next();
+            inst = (ir_instruction *) param;
+            instr->src[2] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));
+         }
+
+         /* Atomic result */
+         assert(ir->return_deref);
+         nir_ssa_dest_init(&instr->instr, &instr->dest,
+                           ir->return_deref->type->vector_elements, 32, NULL);
+         nir_builder_instr_insert(&b, &instr->instr);
+         break;
+      }
       case nir_intrinsic_atomic_counter_read_deref:
       case nir_intrinsic_atomic_counter_inc_deref:
       case nir_intrinsic_atomic_counter_pre_dec_deref:
@@ -1606,6 +1838,18 @@ nir_visitor::visit(ir_expression *ir)
       return;
    }
 
+   case ir_unop_ssbo_unsized_array_length: {
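+      /* The length of an unsized SSBO array is only known at draw time,
+       * so emit an intrinsic for the driver to lower using the bound
+       * buffer's size.
+       */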
+      nir_intrinsic_instr *intrin =
+         nir_intrinsic_instr_create(b.shader,
+                                    nir_intrinsic_deref_buffer_array_length);
+
+      ir_dereference *deref = ir->operands[0]->as_dereference();
+      intrin->src[0] = nir_src_for_ssa(&evaluate_deref(deref)->dest.ssa);
+
+      add_instr(&intrin->instr, 1, 32);
+      return;
+   }
+
    default:
       break;
    }
@@ -2134,7 +2378,8 @@ nir_visitor::visit(ir_texture *ir)
       num_srcs++;
    if (ir->shadow_comparator != NULL)
       num_srcs++;
-   if (ir->offset != NULL)
+   /* constant offset arrays are stored in nir_tex_instr::tg4_offsets */
+   if (ir->offset != NULL && !ir->offset->type->is_array())
       num_srcs++;
 
    /* Add one for the texture deref */
@@ -2195,13 +2440,25 @@ nir_visitor::visit(ir_texture *ir)
    }
 
    if (ir->offset != NULL) {
-      /* we don't support multiple offsets yet */
-      assert(ir->offset->type->is_vector() || ir->offset->type->is_scalar());
+      if (ir->offset->type->is_array()) {
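+         /* textureGatherOffsets: GLSL requires the offsets array to be a
+          * constant expression, so copy the values straight into the
+          * instruction.
+          */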
+         for (int i = 0; i < ir->offset->type->array_size(); i++) {
+            const ir_constant *c =
+               ir->offset->as_constant()->get_array_element(i);
+
+            for (unsigned j = 0; j < 2; ++j) {
+               int val = c->get_int_component(j);
+               assert(val <= 31 && val >= -32);
+               instr->tg4_offsets[i][j] = val;
+            }
+         }
+      } else {
+         assert(ir->offset->type->is_vector() || ir->offset->type->is_scalar());
 
-      instr->src[src_number].src =
-         nir_src_for_ssa(evaluate_rvalue(ir->offset));
-      instr->src[src_number].src_type = nir_tex_src_offset;
-      src_number++;
+         instr->src[src_number].src =
+            nir_src_for_ssa(evaluate_rvalue(ir->offset));
+         instr->src[src_number].src_type = nir_tex_src_offset;
+         src_number++;
+      }
    }
 
    switch (ir->op) {
@@ -2352,7 +2609,7 @@ glsl_float64_funcs_to_nir(struct gl_context *ctx,
 
    nir_shader *nir = nir_shader_create(NULL, MESA_SHADER_VERTEX, options, NULL);
 
-   nir_visitor v1(nir);
+   nir_visitor v1(ctx, nir);
    nir_function_visitor v2(&v1);
    v2.run(sh->ir);
    visit_exec_list(sh->ir, &v1);