etnaviv: get rid of etna_compile dependency
diff --git a/src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c b/src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c
index 4d0466a1f4e4f16c65ec635adc5b9c6a0d35a626..9a4b461d3b039ddf69278f1b256c9e6d65e1d2c6 100644
@@ -31,6 +31,7 @@
 #include "etnaviv_context.h"
 #include "etnaviv_debug.h"
 #include "etnaviv_disasm.h"
+#include "etnaviv_nir.h"
 #include "etnaviv_uniforms.h"
 #include "etnaviv_util.h"
 
@@ -63,147 +64,7 @@ struct etna_compile {
    bool error;
 };
 
-#define compile_error(ctx, args...) ({ \
-   printf(args); \
-   ctx->error = true; \
-   assert(0); \
-})
 
-/* io related lowering
- * run after lower_int_to_float because it adds i2f/f2i ops
- */
-static void
-etna_lower_io(nir_shader *shader, struct etna_shader_variant *v)
-{
-   nir_foreach_function(function, shader) {
-      nir_builder b;
-      nir_builder_init(&b, function->impl);
-
-      nir_foreach_block(block, function->impl) {
-         nir_foreach_instr_safe(instr, block) {
-            if (instr->type == nir_instr_type_intrinsic) {
-               nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
-
-               switch (intr->intrinsic) {
-               case nir_intrinsic_load_front_face: {
-                  /* HW front_face is 0.0/1.0, not 0/~0u for bool
-                   * lower with a comparison with 0
-                   */
-                  intr->dest.ssa.bit_size = 32;
-
-                  b.cursor = nir_after_instr(instr);
-
-                  nir_ssa_def *ssa = nir_ine(&b, &intr->dest.ssa, nir_imm_int(&b, 0));
-                  if (v->key.front_ccw)
-                     nir_instr_as_alu(ssa->parent_instr)->op = nir_op_ieq;
-
-                  nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
-                                                 nir_src_for_ssa(ssa),
-                                                 ssa->parent_instr);
-               } break;
-               case nir_intrinsic_store_deref: {
-                  if (shader->info.stage != MESA_SHADER_FRAGMENT || !v->key.frag_rb_swap)
-                     break;
-
-                  nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
-                  assert(deref->deref_type == nir_deref_type_var);
-
-                  if (deref->var->data.location != FRAG_RESULT_COLOR &&
-                      deref->var->data.location != FRAG_RESULT_DATA0)
-                      break;
-
-                  b.cursor = nir_before_instr(instr);
-
-                  nir_ssa_def *ssa = nir_mov(&b, intr->src[1].ssa);
-                  nir_alu_instr *alu = nir_instr_as_alu(ssa->parent_instr);
-                  alu->src[0].swizzle[0] = 2;
-                  alu->src[0].swizzle[2] = 0;
-                  nir_instr_rewrite_src(instr, &intr->src[1], nir_src_for_ssa(ssa));
-               } break;
-               case nir_intrinsic_load_uniform: {
-                  /* multiply by 16 and convert to int */
-                  b.cursor = nir_before_instr(instr);
-                  nir_ssa_def *ssa = nir_imul(&b, intr->src[0].ssa, nir_imm_int(&b, 16));
-                  nir_instr_rewrite_src(instr, &intr->src[0], nir_src_for_ssa(ssa));
-               } break;
-               default:
-                  break;
-               }
-            }
-
-            if (instr->type != nir_instr_type_tex)
-               continue;
-
-            nir_tex_instr *tex = nir_instr_as_tex(instr);
-            nir_src *coord = NULL;
-            nir_src *lod_bias = NULL;
-            unsigned lod_bias_idx;
-
-            assert(tex->sampler_index == tex->texture_index);
-
-            for (unsigned i = 0; i < tex->num_srcs; i++) {
-               switch (tex->src[i].src_type) {
-               case nir_tex_src_coord:
-                  coord = &tex->src[i].src;
-                  break;
-               case nir_tex_src_bias:
-               case nir_tex_src_lod:
-                  assert(!lod_bias);
-                  lod_bias = &tex->src[i].src;
-                  lod_bias_idx = i;
-                  break;
-               case nir_tex_src_comparator:
-                  break;
-               default:
-                  assert(0);
-                  break;
-               }
-            }
-
-            if (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
-               /* use a dummy load_uniform here to represent texcoord scale */
-               b.cursor = nir_before_instr(instr);
-               nir_intrinsic_instr *load =
-                  nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_uniform);
-               nir_intrinsic_set_base(load, ~tex->sampler_index);
-               load->num_components = 2;
-               load->src[0] = nir_src_for_ssa(nir_imm_float(&b, 0.0f));
-               nir_ssa_dest_init(&load->instr, &load->dest, 2, 32, NULL);
-               nir_intrinsic_set_type(load, nir_type_float);
-
-               nir_builder_instr_insert(&b, &load->instr);
-
-               nir_ssa_def *new_coord = nir_fmul(&b, coord->ssa, &load->dest.ssa);
-               nir_instr_rewrite_src(&tex->instr, coord, nir_src_for_ssa(new_coord));
-            }
-
-            /* pre HALTI5 needs texture sources in a single source */
-
-            if (!lod_bias || v->shader->specs->halti >= 5)
-               continue;
-
-            assert(coord && lod_bias && tex->coord_components < 4);
-
-            nir_alu_instr *vec = nir_alu_instr_create(shader, nir_op_vec4);
-            for (unsigned i = 0; i < tex->coord_components; i++) {
-               vec->src[i].src = nir_src_for_ssa(coord->ssa);
-               vec->src[i].swizzle[0] = i;
-            }
-            for (unsigned i = tex->coord_components; i < 4; i++)
-               vec->src[i].src = nir_src_for_ssa(lod_bias->ssa);
-
-            vec->dest.write_mask = 0xf;
-            nir_ssa_dest_init(&vec->instr, &vec->dest.dest, 4, 32, NULL);
-
-            nir_tex_instr_remove_src(tex, lod_bias_idx);
-            nir_instr_rewrite_src(&tex->instr, coord, nir_src_for_ssa(&vec->dest.dest.ssa));
-            tex->coord_components = 4;
-
-            nir_instr_insert_before(&tex->instr, &vec->instr);
-         }
-      }
-   }
-}
 
 static bool
 etna_alu_to_scalar_filter_cb(const nir_instr *instr, const void *data)
@@ -251,7 +112,7 @@ etna_alu_to_scalar_filter_cb(const nir_instr *instr, const void *data)
 }
 
 static void
-etna_lower_alu_impl(nir_function_impl *impl, struct etna_compile *c)
+etna_lower_alu_impl(nir_function_impl *impl, bool has_new_transcendentals)
 {
    nir_shader *shader = impl->function->shader;
 
@@ -271,7 +132,7 @@ etna_lower_alu_impl(nir_function_impl *impl, struct etna_compile *c)
          if (alu->op == nir_op_fsin || alu->op == nir_op_fcos) {
             b.cursor = nir_before_instr(instr);
 
-            nir_ssa_def *imm = c->specs->has_new_transcendentals ?
+            nir_ssa_def *imm = has_new_transcendentals ?
                nir_imm_float(&b, 1.0 / M_PI) :
                nir_imm_float(&b, 2.0 / M_PI);
 
@@ -282,7 +143,7 @@ etna_lower_alu_impl(nir_function_impl *impl, struct etna_compile *c)
          /* change transcendental ops to vec2 and insert vec1 mul for the result
           * TODO: do this earlier (but it breaks with optimizations)
           */
-         if (c->specs->has_new_transcendentals && (
+         if (has_new_transcendentals && (
              alu->op == nir_op_fdiv || alu->op == nir_op_flog2 ||
              alu->op == nir_op_fsin || alu->op == nir_op_fcos)) {
             nir_ssa_def *ssa = &alu->dest.dest.ssa;
@@ -309,11 +170,11 @@ etna_lower_alu_impl(nir_function_impl *impl, struct etna_compile *c)
    }
 }
 
-static void etna_lower_alu(nir_shader *shader, struct etna_compile *c)
+static void etna_lower_alu(nir_shader *shader, bool has_new_transcendentals)
 {
    nir_foreach_function(function, shader) {
       if (function->impl)
-         etna_lower_alu_impl(function->impl, c);
+         etna_lower_alu_impl(function->impl, has_new_transcendentals);
    }
 }
 
@@ -594,44 +455,6 @@ etna_emit_output(struct etna_compile *c, nir_variable *var, struct etna_inst_src
    }
 }
 
-static void
-etna_emit_load_ubo(struct etna_compile *c, struct etna_inst_dst dst,
-                   struct etna_inst_src src, struct etna_inst_src base)
-{
-   /* convert float offset back to integer */
-   if (c->specs->halti < 2) {
-      emit_inst(c, &(struct etna_inst) {
-         .opcode = INST_OPCODE_F2I,
-         .type = INST_TYPE_U32,
-         .dst = dst,
-         .src[0] = src,
-      });
-
-      emit_inst(c, &(struct etna_inst) {
-         .opcode = INST_OPCODE_LOAD,
-         .type = INST_TYPE_U32,
-         .dst = dst,
-         .src[0] = {
-            .use = 1,
-            .rgroup = INST_RGROUP_TEMP,
-            .reg = dst.reg,
-            .swiz = INST_SWIZ_BROADCAST(ffs(dst.write_mask) - 1)
-         },
-         .src[1] = base,
-      });
-
-      return;
-   }
-
-   emit_inst(c, &(struct etna_inst) {
-      .opcode = INST_OPCODE_LOAD,
-      .type = INST_TYPE_U32,
-      .dst = dst,
-      .src[0] = src,
-      .src[1] = base,
-   });
-}
-
 #define OPT(nir, pass, ...) ({                             \
    bool this_progress = false;                             \
    NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
@@ -698,6 +521,75 @@ copy_uniform_state_to_shader(struct etna_shader_variant *sobj, uint64_t *consts,
 
 #include "etnaviv_compiler_nir_emit.h"
 
+static bool
+etna_compile_check_limits(struct etna_shader_variant *v)
+{
+   const struct etna_specs *specs = v->shader->specs;
+   int max_uniforms = (v->stage == MESA_SHADER_VERTEX)
+                         ? specs->max_vs_uniforms
+                         : specs->max_ps_uniforms;
+
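+   /* needs_icache is set when the program does not fit the on-chip
+    * instruction memory; without an instruction cache to stream code from
+    * memory the shader cannot run at all. code_size counts 32-bit words,
+    * and one native instruction is four words, hence the /4 below.
+    */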
+   if (!specs->has_icache && v->needs_icache) {
+      DBG("Number of instructions (%d) exceeds maximum %d", v->code_size / 4,
+          specs->max_instructions);
+      return false;
+   }
+
+   if (v->num_temps > specs->max_registers) {
+      DBG("Number of registers (%d) exceeds maximum %d", v->num_temps,
+          specs->max_registers);
+      return false;
+   }
+
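+   /* imm_count is in scalar (32-bit) slots; the hardware uniform file is
+    * made of vec4 registers, hence the division by 4
+    */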
+   if (v->uniforms.imm_count / 4 > max_uniforms) {
+      DBG("Number of uniforms (%d) exceeds maximum %d",
+          v->uniforms.imm_count / 4, max_uniforms);
+      return false;
+   }
+
+   return true;
+}
+
+static void
+fill_vs_mystery(struct etna_shader_variant *v)
+{
+   const struct etna_specs *specs = v->shader->specs;
+
+   v->input_count_unk8 = DIV_ROUND_UP(v->infile.num_reg + 4, 16); /* XXX what is this */
+
+   /* Fill in the "mystery meat" load balancing value. This value determines
+    * how work is scheduled between VS and PS in the unified shader
+    * architecture. More precisely, it is derived from the number of VS
+    * outputs and the chip-specific vertex output buffer size, vertex cache
+    * size, and number of shader cores.
+    *
+    * XXX this is a conservative estimate; the "optimal" value is only known
+    * for sure at link time because some outputs may be unused and thus
+    * unmapped. Then again, in the common GLSL case the vertex and fragment
+    * shaders are linked before being submitted to Gallium, so all outputs
+    * are used.
+    *
+    * Note: the TGSI compiler counts all outputs (including position and
+    * point size), while v->outfile.num_reg only counts varyings; the +1
+    * compensates for the position output.
+    * TODO: we may have a problem in that point size is not counted when it
+    * is used.
+    */
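+   /* Worked example with made-up specs values (not taken from any real
+    * chip), using integer division throughout: vertex_output_buffer_size =
+    * 512, vertex_cache_size = 16, shader_core_count = 4 and 3 varyings,
+    * so half_out = 3 / 2 + 1 = 2:
+    *   b = ((20480 / (512 - 2 * 2 * 16)) + 9) / 10 = (45 + 9) / 10 = 5
+    *   a = (5 + 256 / (4 * 2)) / 2 = (5 + 32) / 2 = 18
+    */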
+
+   int half_out = v->outfile.num_reg / 2 + 1;
+   assert(half_out);
+
+   uint32_t b = ((20480 / (specs->vertex_output_buffer_size -
+                           2 * half_out * specs->vertex_cache_size)) + 9) / 10;
+   uint32_t a = (b + 256 / (specs->shader_core_count * half_out)) / 2;
+   v->vs_load_balancing = VIVS_VS_LOAD_BALANCING_A(MIN2(a, 255)) |
+                          VIVS_VS_LOAD_BALANCING_B(MIN2(b, 255)) |
+                          VIVS_VS_LOAD_BALANCING_C(0x3f) |
+                          VIVS_VS_LOAD_BALANCING_D(0x0f);
+}
+
 bool
 etna_compile_shader_nir(struct etna_shader_variant *v)
 {
@@ -759,6 +651,9 @@ etna_compile_shader_nir(struct etna_shader_variant *v)
 
    OPT_V(s, etna_lower_io, v);
 
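+   /* presumably needed because the hardware Z division expects a [0, 1]
+    * depth range: nir_lower_clip_halfz rewrites gl_Position.z from
+    * [-w, w] to [0, w] (z = (z + w) / 2)
+    */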
+   if (v->shader->specs->vs_need_z_div)
+      NIR_PASS_V(s, nir_lower_clip_halfz);
+
    /* lower pre-halti2 to float (halti0 has integers, but only scalar..) */
    if (c->specs->halti < 2) {
      /* use opt_algebraic between int_to_float and bool_to_float because
       * int_to_float emits ftrunc, and ftrunc lowering generates bool ops
       */
      OPT_V(s, nir_lower_int_to_float);
@@ -768,7 +663,7 @@ etna_compile_shader_nir(struct etna_shader_variant *v)
       OPT_V(s, nir_opt_algebraic);
       OPT_V(s, nir_lower_bool_to_float);
    } else {
-      OPT_V(s, nir_lower_idiv);
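+      /* the fast lowering uses float math and can be inexact for large
+       * operands, but needs far fewer instructions than the precise path
+       */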
+      OPT_V(s, nir_lower_idiv, nir_lower_idiv_fast);
       OPT_V(s, nir_lower_bool_to_int32);
    }
 
@@ -780,7 +675,7 @@ etna_compile_shader_nir(struct etna_shader_variant *v)
    while( OPT(s, nir_opt_vectorize) );
    OPT_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);
 
-   NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp);
+   NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp, NULL);
    NIR_PASS_V(s, nir_opt_algebraic_late);
 
    NIR_PASS_V(s, nir_move_vec_src_uses_to_dest);
@@ -793,7 +688,7 @@ etna_compile_shader_nir(struct etna_shader_variant *v)
 
    NIR_PASS_V(s, nir_opt_dce);
 
-   NIR_PASS_V(s, etna_lower_alu, c);
+   NIR_PASS_V(s, etna_lower_alu, c->specs->has_new_transcendentals);
 
    if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
       nir_print_shader(s, stdout);
@@ -829,48 +724,14 @@ etna_compile_shader_nir(struct etna_shader_variant *v)
    if (s->info.stage == MESA_SHADER_FRAGMENT) {
       v->input_count_unk8 = 31; /* XXX what is this */
       assert(v->ps_depth_out_reg <= 0);
-      ralloc_free(c->nir);
-      FREE(c);
-      return true;
+   } else {
+      fill_vs_mystery(v);
    }
 
-   v->input_count_unk8 = DIV_ROUND_UP(v->infile.num_reg + 4, 16); /* XXX what is this */
-
-   /* fill in "mystery meat" load balancing value. This value determines how
-    * work is scheduled between VS and PS
-    * in the unified shader architecture. More precisely, it is determined from
-    * the number of VS outputs, as well as chip-specific
-    * vertex output buffer size, vertex cache size, and the number of shader
-    * cores.
-    *
-    * XXX this is a conservative estimate, the "optimal" value is only known for
-    * sure at link time because some
-    * outputs may be unused and thus unmapped. Then again, in the general use
-    * case with GLSL the vertex and fragment
-    * shaders are linked already before submitting to Gallium, thus all outputs
-    * are used.
-    *
-    * note: TGSI compiler counts all outputs (including position and pointsize), here
-    * v->outfile.num_reg only counts varyings, +1 to compensate for the position output
-    * TODO: might have a problem that we don't count pointsize when it is used
-    */
-
-   int half_out = v->outfile.num_reg / 2 + 1;
-   assert(half_out);
-
-   uint32_t b = ((20480 / (specs->vertex_output_buffer_size -
-                           2 * half_out * specs->vertex_cache_size)) +
-                 9) /
-                10;
-   uint32_t a = (b + 256 / (specs->shader_core_count * half_out)) / 2;
-   v->vs_load_balancing = VIVS_VS_LOAD_BALANCING_A(MIN2(a, 255)) |
-                             VIVS_VS_LOAD_BALANCING_B(MIN2(b, 255)) |
-                             VIVS_VS_LOAD_BALANCING_C(0x3f) |
-                             VIVS_VS_LOAD_BALANCING_D(0x0f);
-
+   bool result = etna_compile_check_limits(v);
    ralloc_free(c->nir);
    FREE(c);
-   return true;
+   return result;
 }
 
 void