Merge remote-tracking branch 'public/master' into vulkan
[mesa.git] src/mesa/drivers/dri/i965/brw_vec4.cpp
index 5b1c045ca70e1c845ef7edb0b9d65ea0595765ec..65e57ba5e624fde1383f63f9daf1b482fd6a4e70 100644
 #include "brw_cfg.h"
 #include "brw_vs.h"
 #include "brw_nir.h"
+#include "brw_vec4_builder.h"
 #include "brw_vec4_live_variables.h"
 #include "brw_dead_control_flow.h"
-
-extern "C" {
-#include "main/macros.h"
-#include "main/shaderobj.h"
-#include "program/prog_print.h"
 #include "program/prog_parameter.h"
-}
-#include "main/context.h"
 
 #define MAX_INSTRUCTION (1 << 30)
 
@@ -162,6 +156,42 @@ vec4_instruction::is_send_from_grf()
    case SHADER_OPCODE_TYPED_ATOMIC:
    case SHADER_OPCODE_TYPED_SURFACE_READ:
    case SHADER_OPCODE_TYPED_SURFACE_WRITE:
+   case VEC4_OPCODE_URB_READ:
+   case TCS_OPCODE_URB_WRITE:
+   case TCS_OPCODE_RELEASE_INPUT:
+   case SHADER_OPCODE_BARRIER:
+      return true;
+   default:
+      return false;
+   }
+}
+
+/**
+ * Returns true if this instruction's sources and destinations cannot
+ * safely be the same register.
+ *
+ * In most cases, a register can safely be overwritten by the same
+ * instruction that is its last use: within a single instruction, all of
+ * the sources are read before the destination write begins (naturally).
+ *
+ * However, there are a few cases where this can be problematic:
+ *
+ * - Virtual opcodes that translate to multiple instructions in the
+ *   code generator: if src == dst and one instruction writes the
+ *   destination before a later instruction reads the source, then
+ *   src will have been clobbered.
+ *
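+ *   As an illustration (a made-up expansion, not one that any particular
+ *   opcode is guaranteed to use), a virtual instruction that the code
+ *   generator emits as
+ *
+ *      mov dst, <some header immediate>
+ *      add dst, src, dst
+ *
+ *   would clobber src with the MOV before the ADD reads it whenever src
+ *   and dst were assigned the same GRF.
+ *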
+ * The register allocator uses this information to set up conflicts between
+ * GRF sources and the destination.
+ */
+bool
+vec4_instruction::has_source_and_destination_hazard() const
+{
+   switch (opcode) {
+   case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
+   case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
+   case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
       return true;
    default:
       return false;
@@ -182,6 +212,7 @@ vec4_instruction::regs_read(unsigned arg) const
    case SHADER_OPCODE_TYPED_ATOMIC:
    case SHADER_OPCODE_TYPED_SURFACE_READ:
    case SHADER_OPCODE_TYPED_SURFACE_WRITE:
+   case TCS_OPCODE_URB_WRITE:
       return arg == 0 ? mlen : 1;
 
    case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
@@ -244,6 +275,7 @@ vec4_visitor::implied_mrf_writes(vec4_instruction *inst)
    case SHADER_OPCODE_INT_QUOTIENT:
    case SHADER_OPCODE_INT_REMAINDER:
    case SHADER_OPCODE_POW:
+   case TCS_OPCODE_THREAD_END:
       return 2;
    case VS_OPCODE_URB_WRITE:
       return 1;
@@ -259,6 +291,8 @@ vec4_visitor::implied_mrf_writes(vec4_instruction *inst)
       return 0;
    case GS_OPCODE_FF_SYNC:
       return 1;
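+   /* TCS_OPCODE_URB_WRITE sends its payload from GRFs (it is listed in
+    * is_send_from_grf() above), so it implicitly writes no MRFs.
+    */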
+   case TCS_OPCODE_URB_WRITE:
+      return 0;
    case SHADER_OPCODE_SHADER_TIME_ADD:
       return 0;
    case SHADER_OPCODE_TEX:
@@ -286,6 +320,28 @@ src_reg::equals(const src_reg &r) const
           !reladdr && !r.reladdr);
 }
 
+bool
+vec4_visitor::vectorize_mov(bblock_t *block, vec4_instruction *inst,
+                            uint8_t imm[4], vec4_instruction *imm_inst[4],
+                            int inst_count, unsigned writemask)
+{
+   if (inst_count < 2)
+      return false;
+
+   unsigned vf;
+   memcpy(&vf, imm, sizeof(vf));
+   vec4_instruction *mov = MOV(imm_inst[0]->dst, brw_imm_vf(vf));
+   mov->dst.type = BRW_REGISTER_TYPE_F;
+   mov->dst.writemask = writemask;
+   inst->insert_before(block, mov);
+
+   for (int i = 0; i < inst_count; i++) {
+      imm_inst[i]->remove(block);
+   }
+
+   return true;
+}
+
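+/**
+ * Replace runs of two or more scalar MOVs of float immediates into channels
+ * of the same register with a single MOV of a packed vector-float (VF)
+ * immediate.  An illustrative before/after (register numbers made up):
+ *
+ *    mov vgrf4.0.x:F, 0.0F
+ *    mov vgrf4.0.y:F, 1.0F
+ *    mov vgrf4.0.z:F, 2.0F
+ *    mov vgrf4.0.w:F, 3.0F
+ * ->
+ *    mov vgrf4.0.xyzw:F, [0F, 1F, 2F, 3F]VF
+ *
+ * Only immediates representable in the restricted 8-bit VF encoding qualify
+ * (brw_float_to_vf() returns -1 otherwise); each accumulated run is flushed
+ * through vectorize_mov() above with the union of the original writemasks.
+ */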
 bool
 vec4_visitor::opt_vector_float()
 {
@@ -294,27 +350,38 @@ vec4_visitor::opt_vector_float()
    int last_reg = -1, last_reg_offset = -1;
    enum brw_reg_file last_reg_file = BAD_FILE;
 
-   int remaining_channels = 0;
-   uint8_t imm[4];
+   uint8_t imm[4] = { 0 };
    int inst_count = 0;
    vec4_instruction *imm_inst[4];
+   unsigned writemask = 0;
 
    foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
       if (last_reg != inst->dst.nr ||
           last_reg_offset != inst->dst.reg_offset ||
           last_reg_file != inst->dst.file) {
+         progress |= vectorize_mov(block, inst, imm, imm_inst, inst_count,
+                                   writemask);
+         inst_count = 0;
+         writemask = 0;
          last_reg = inst->dst.nr;
          last_reg_offset = inst->dst.reg_offset;
          last_reg_file = inst->dst.file;
-         remaining_channels = WRITEMASK_XYZW;
 
-         inst_count = 0;
+         for (int i = 0; i < 4; i++) {
+            imm[i] = 0;
+         }
       }
 
       if (inst->opcode != BRW_OPCODE_MOV ||
           inst->dst.writemask == WRITEMASK_XYZW ||
-          inst->src[0].file != IMM)
+          inst->src[0].file != IMM ||
+          inst->predicate != BRW_PREDICATE_NONE) {
+         progress |= vectorize_mov(block, inst, imm, imm_inst, inst_count,
+                                   writemask);
+         inst_count = 0;
+         last_reg = -1;
          continue;
+      }
 
       int vf = brw_float_to_vf(inst->src[0].f);
       if (vf == -1)
@@ -329,23 +396,8 @@ vec4_visitor::opt_vector_float()
       if ((inst->dst.writemask & WRITEMASK_W) != 0)
          imm[3] = vf;
 
+      writemask |= inst->dst.writemask;
       imm_inst[inst_count++] = inst;
-
-      remaining_channels &= ~inst->dst.writemask;
-      if (remaining_channels == 0) {
-         unsigned vf;
-         memcpy(&vf, imm, sizeof(vf));
-         vec4_instruction *mov = MOV(inst->dst, brw_imm_vf(vf));
-         mov->dst.type = BRW_REGISTER_TYPE_F;
-         mov->dst.writemask = WRITEMASK_XYZW;
-         inst->insert_after(block, mov);
-         last_reg = -1;
-
-         for (int i = 0; i < inst_count; i++) {
-            imm_inst[i]->remove(block);
-         }
-         progress = true;
-      }
    }
 
    if (progress)
@@ -444,11 +496,6 @@ vec4_visitor::split_uniform_registers()
         inst->src[i].reg_offset = 0;
       }
    }
-
-   /* Update that everything is now vector-sized. */
-   for (int i = 0; i < this->uniforms; i++) {
-      this->uniform_size[i] = 1;
-   }
 }
 
 void
@@ -506,7 +553,6 @@ vec4_visitor::pack_uniform_registers()
     * push constants.
     */
    for (int src = 0; src < uniforms; src++) {
-      assert(src < uniform_array_size);
       int size = chans_used[src];
 
       if (size == 0)
@@ -583,7 +629,8 @@ vec4_visitor::opt_algebraic()
             if (inst->dst.type != inst->src[0].type)
                assert(!"unimplemented: saturate mixed types");
 
-            if (brw_saturate_immediate(inst->dst.type, &inst->src[0])) {
+            if (brw_saturate_immediate(inst->dst.type,
+                                       &inst->src[0].as_brw_reg())) {
                inst->saturate = false;
                progress = true;
             }
@@ -752,7 +799,7 @@ vec4_visitor::move_push_constants_to_pull_constants()
         dst_reg temp = dst_reg(this, glsl_type::vec4_type);
 
         emit_pull_constant_load(block, inst, temp, inst->src[i],
-                                pull_constant_loc[uniform]);
+                                pull_constant_loc[uniform], src_reg());
 
         inst->src[i].file = temp.file;
          inst->src[i].nr = temp.nr;
@@ -998,6 +1045,7 @@ vec4_visitor::opt_register_coalesce()
 
          if (is_nop_mov) {
             inst->remove(block);
+            progress = true;
             continue;
          }
       }
@@ -1488,22 +1536,6 @@ vec4_visitor::lower_attributes_to_hw_regs(const int *attribute_map,
                                           bool interleaved)
 {
    foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
-      /* We have to support ATTR as a destination for GL_FIXED fixup. */
-      if (inst->dst.file == ATTR) {
-         int grf = attribute_map[inst->dst.nr + inst->dst.reg_offset];
-
-         /* All attributes used in the shader need to have been assigned a
-          * hardware register by the caller
-          */
-         assert(grf != 0);
-
-        struct brw_reg reg = attribute_to_hw_reg(grf, interleaved);
-        reg.type = inst->dst.type;
-        reg.writemask = inst->dst.writemask;
-
-         inst->dst = reg;
-      }
-
       for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != ATTR)
            continue;
@@ -1532,7 +1564,7 @@ int
 vec4_vs_visitor::setup_attributes(int payload_reg)
 {
    int nr_attributes;
-   int attribute_map[VERT_ATTRIB_MAX + 1];
+   int attribute_map[VERT_ATTRIB_MAX + 2];
    memset(attribute_map, 0, sizeof(attribute_map));
 
    nr_attributes = 0;
@@ -1547,8 +1579,15 @@ vec4_vs_visitor::setup_attributes(int payload_reg)
     * don't represent it with a flag in inputs_read, so we call it
     * VERT_ATTRIB_MAX.
     */
-   if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid) {
+   if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid ||
+       vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) {
       attribute_map[VERT_ATTRIB_MAX] = payload_reg + nr_attributes;
+      nr_attributes++;
+   }
+
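+   /* gl_DrawID gets a vec4 of its own, one slot after the VERT_ATTRIB_MAX
+    * slot used for the other system values, which is why attribute_map[]
+    * now holds VERT_ATTRIB_MAX + 2 entries.
+    */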
+   if (vs_prog_data->uses_drawid) {
+      attribute_map[VERT_ATTRIB_MAX + 1] = payload_reg + nr_attributes;
+      nr_attributes++;
    }
 
    lower_attributes_to_hw_regs(attribute_map, false /* interleaved */);
@@ -1565,8 +1604,6 @@ vec4_visitor::setup_uniforms(int reg)
     * matter what, or the GPU would hang.
     */
    if (devinfo->gen < 6 && this->uniforms == 0) {
-      assert(this->uniforms < this->uniform_array_size);
-
       stage_prog_data->param =
          reralloc(NULL, stage_prog_data->param, const gl_constant_value *, 4);
       for (unsigned int i = 0; i < 4; i++) {
@@ -1607,6 +1644,36 @@ vec4_vs_visitor::setup_payload(void)
    this->first_non_payload_grf = reg;
 }
 
+bool
+vec4_visitor::lower_minmax()
+{
+   assert(devinfo->gen < 6);
+
+   bool progress = false;
+
+   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
+      const vec4_builder ibld(this, block, inst);
+
+      if (inst->opcode == BRW_OPCODE_SEL &&
+          inst->predicate == BRW_PREDICATE_NONE) {
+         /* FIXME: Using CMP doesn't preserve the NaN propagation semantics of
+          *        the original SEL.L/GE instruction.
+          */
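+         /* An illustrative lowering (disassembly-style mnemonics, default
+          * f0.0 flag assumed):
+          *
+          *    sel.ge      dst, src0, src1
+          * becomes
+          *    cmp.ge.f0.0 null, src0, src1
+          *    (+f0.0) sel dst, src0, src1
+          *
+          * since SEL with an embedded conditional modifier is only available
+          * on Gen6 and later.
+          */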
+         ibld.CMP(ibld.null_reg_d(), inst->src[0], inst->src[1],
+                  inst->conditional_mod);
+         inst->predicate = BRW_PREDICATE_NORMAL;
+         inst->conditional_mod = BRW_CONDITIONAL_NONE;
+
+         progress = true;
+      }
+   }
+
+   if (progress)
+      invalidate_live_intervals();
+
+   return progress;
+}
+
 src_reg
 vec4_visitor::get_timestamp()
 {
@@ -1743,9 +1810,22 @@ vec4_visitor::convert_to_hw_regs()
          case ATTR:
             unreachable("not reached");
          }
+
          src = reg;
       }
 
+      if (inst->is_3src()) {
+         /* 3-src instructions with scalar sources support arbitrary subnr,
+          * but don't actually use swizzles.  Convert swizzle into subnr.
+          */
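+         /* For example (offsets assume a 32-bit type), a scalar source with
+          * swizzle .wwww and subnr 0 ends up with subnr 12, pointing the
+          * region directly at the W channel.
+          */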
+         for (int i = 0; i < 3; i++) {
+            if (inst->src[i].vstride == BRW_VERTICAL_STRIDE_0) {
+               assert(brw_is_single_value_swizzle(inst->src[i].swizzle));
+               inst->src[i].subnr += 4 * BRW_GET_SWZ(inst->src[i].swizzle, 0);
+            }
+         }
+      }
+
       dst_reg &dst = inst->dst;
       struct brw_reg reg;
 
@@ -1765,7 +1845,7 @@ vec4_visitor::convert_to_hw_regs()
 
       case ARF:
       case FIXED_GRF:
-         reg = dst;
+         reg = dst.as_brw_reg();
          break;
 
       case BAD_FILE:
@@ -1831,7 +1911,7 @@ vec4_visitor::run()
 
    if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
       char filename[64];
-      snprintf(filename, 64, "%s-%s-00-start",
+      snprintf(filename, 64, "%s-%s-00-00-start",
                stage_abbrev, nir->info.name);
 
       backend_shader::dump_instructions(filename);
@@ -1866,6 +1946,13 @@ vec4_visitor::run()
       OPT(dead_code_eliminate);
    }
 
+   if (devinfo->gen <= 5 && OPT(lower_minmax)) {
+      OPT(opt_cmod_propagation);
+      OPT(opt_cse);
+      OPT(opt_copy_propagation);
+      OPT(dead_code_eliminate);
+   }
+
    if (failed)
       return false;
 
@@ -1934,11 +2021,14 @@ brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
                unsigned *final_assembly_size,
                char **error_str)
 {
+   const bool is_scalar = compiler->scalar_stage[MESA_SHADER_VERTEX];
    nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
    shader = brw_nir_apply_sampler_key(shader, compiler->devinfo, &key->tex,
-                                      compiler->scalar_stage[MESA_SHADER_VERTEX]);
-   shader = brw_postprocess_nir(shader, compiler->devinfo,
-                                compiler->scalar_stage[MESA_SHADER_VERTEX]);
+                                      is_scalar);
+   brw_nir_lower_vs_inputs(shader, compiler->devinfo, is_scalar,
+                           use_legacy_snorm_formula, key->gl_attrib_wa_flags);
+   brw_nir_lower_vue_outputs(shader, is_scalar);
+   shader = brw_postprocess_nir(shader, compiler->devinfo, is_scalar);
 
    const unsigned *assembly = NULL;
 
@@ -1948,16 +2038,23 @@ brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
     * incoming vertex attribute.  So, add an extra slot.
     */
    if (shader->info.system_values_read &
-       (BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
+       (BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
+        BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
+        BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
         BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID))) {
       nr_attributes++;
    }
 
+   /* gl_DrawID has its very own vec4 */
+   if (shader->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID)) {
+      nr_attributes++;
+   }
+
    /* The 3DSTATE_VS documentation lists the lower bound on "Vertex URB Entry
     * Read Length" as 1 in vec4 mode, and 0 in SIMD8 mode.  Empirically, in
     * vec4 mode, the hardware appears to wedge unless we read something.
     */
-   if (compiler->scalar_stage[MESA_SHADER_VERTEX])
+   if (is_scalar)
       prog_data->base.urb_read_length = DIV_ROUND_UP(nr_attributes, 2);
    else
       prog_data->base.urb_read_length = DIV_ROUND_UP(MAX2(nr_attributes, 1), 2);
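+   /* For example, nr_attributes == 3 gives a read length of 2 in either
+    * mode, while nr_attributes == 0 still gives 1 in vec4 mode (via MAX2)
+    * but 0 in SIMD8 mode.
+    */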
@@ -1976,7 +2073,7 @@ brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
    else
       prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 4);
 
-   if (compiler->scalar_stage[MESA_SHADER_VERTEX]) {
+   if (is_scalar) {
       prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;
 
       fs_visitor v(compiler, log_data, mem_ctx, key, &prog_data->base.base,
@@ -1991,7 +2088,7 @@ brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
 
       fs_generator g(compiler, log_data, mem_ctx, (void *) key,
                      &prog_data->base.base, v.promoted_constants,
-                     v.runtime_check_aads_emit, "VS");
+                     v.runtime_check_aads_emit, MESA_SHADER_VERTEX);
       if (INTEL_DEBUG & DEBUG_VS) {
          const char *debug_name =
             ralloc_asprintf(mem_ctx, "%s vertex shader %s",