i965/fs: No need to set compression control at the top of generate_code().
[mesa.git] / src / mesa / drivers / dri / i965 / brw_fs_generator.cpp
index cac92b37bd5d5803fa8ec3dc7b009dc2faa4a894..3ac27f224b5dfde780f463c43e73ecab6cd910d0 100644 (file)
@@ -54,7 +54,8 @@ brw_file_from_reg(fs_reg *reg)
 }
 
 static struct brw_reg
-brw_reg_from_fs_reg(fs_inst *inst, fs_reg *reg, unsigned gen)
+brw_reg_from_fs_reg(const struct brw_codegen *p,
+                    fs_inst *inst, fs_reg *reg, unsigned gen)
 {
    struct brw_reg brw_reg;
 
@@ -65,8 +66,10 @@ brw_reg_from_fs_reg(fs_inst *inst, fs_reg *reg, unsigned gen)
    case VGRF:
       if (reg->stride == 0) {
          brw_reg = brw_vec1_reg(brw_file_from_reg(reg), reg->nr, 0);
-      } else if (inst->exec_size < 8) {
-         brw_reg = brw_vec8_reg(brw_file_from_reg(reg), reg->nr, 0);
+      } else if (!p->compressed &&
+                 inst->exec_size * reg->stride * type_sz(reg->type) <= 32) {
+         brw_reg = brw_vecn_reg(inst->exec_size, brw_file_from_reg(reg),
+                                reg->nr, 0);
          brw_reg = stride(brw_reg, inst->exec_size * reg->stride,
                           inst->exec_size, reg->stride);
       } else {
@@ -76,11 +79,14 @@ brw_reg_from_fs_reg(fs_inst *inst, fs_reg *reg, unsigned gen)
           * rule implies that elements within a 'Width' cannot cross GRF
           * boundaries.
           *
-          * So, for registers with width > 8, we have to use a width of 8
-          * and trust the compression state to sort out the exec size.
+          * So, for registers that are large enough, we have to split the exec
+          * size in two and trust the compression state to sort it out.
           */
-         brw_reg = brw_vec8_reg(brw_file_from_reg(reg), reg->nr, 0);
-         brw_reg = stride(brw_reg, 8 * reg->stride, 8, reg->stride);
+         assert(inst->exec_size / 2 * reg->stride * type_sz(reg->type) <= 32);
+         brw_reg = brw_vecn_reg(inst->exec_size / 2, brw_file_from_reg(reg),
+                                reg->nr, 0);
+         brw_reg = stride(brw_reg, inst->exec_size / 2 * reg->stride,
+                          inst->exec_size / 2, reg->stride);
       }
 
       brw_reg = retype(brw_reg, reg->type);
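[Editor's note] A minimal standalone sketch of the width selection introduced above, assuming 32-byte GRFs; region_width and its parameters are illustrative stand-ins, not the driver's API:

   #include <assert.h>
   #include <stdio.h>

   #define REG_SIZE 32

   /* Pick the region width: use the full exec size when one row of the
    * region fits in a single GRF, otherwise halve it so that no row
    * crosses a register boundary. */
   static unsigned
   region_width(unsigned exec_size, unsigned stride, unsigned type_sz)
   {
      if (exec_size * stride * type_sz <= REG_SIZE)
         return exec_size;
      assert(exec_size / 2 * stride * type_sz <= REG_SIZE);
      return exec_size / 2;
   }

   int main(void)
   {
      printf("%u\n", region_width(8, 1, 4));   /* 8:  SIMD8 float fits in one GRF */
      printf("%u\n", region_width(16, 1, 4));  /* 8:  SIMD16 float must be split  */
      printf("%u\n", region_width(16, 1, 2));  /* 16: SIMD16 word still fits      */
      return 0;
   }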
@@ -367,29 +373,53 @@ fs_generator::generate_mov_indirect(fs_inst *inst,
       /* The destination stride of an instruction (in bytes) must be greater
        * than or equal to the size of the rest of the instruction.  Since the
        * address register is of type UW, we can't use a D-type instruction.
-       * In order to get around this, re re-type to UW and use a stride.
+       * In order to get around this, we retype to UW and use a stride.
        */
       indirect_byte_offset =
          retype(spread(indirect_byte_offset, 2), BRW_REGISTER_TYPE_UW);
 
+      struct brw_reg ind_src;
       if (devinfo->gen < 8) {
-         /* Prior to broadwell, we have a restriction that the bottom 5 bits
-          * of the base offset and the bottom 5 bits of the indirect must add
-          * to less than 32.  In other words, the hardware needs to be able to
-          * add the bottom five bits of the two to get the subnumber and add
-          * the next 7 bits of each to get the actual register number.  Since
-          * the indirect may cause us to cross a register boundary, this makes
-          * it almost useless.  We could try and do something clever where we
-          * use a actual base offset if base_offset % 32 == 0 but that would
-          * mean we were generating different code depending on the base
-          * offset.  Instead, for the sake of consistency, we'll just do the
-          * add ourselves.
+         /* From the Haswell PRM section "Register Region Restrictions":
+          *
+          *    "The lower bits of the AddressImmediate must not overflow to
+          *    change the register address.  The lower 5 bits of Address
+          *    Immediate when added to lower 5 bits of address register gives
+          *    the sub-register offset. The upper bits of Address Immediate
+          *    when added to upper bits of address register gives the register
+          *    address. Any overflow from sub-register offset is dropped."
+          *
+          * This restriction is only listed in the Haswell PRM but empirical
+          * testing indicates that it applies on all older generations and is
+          * lifted on Broadwell.
+          *
+          * Since the indirect may cause us to cross a register boundary, this
+          * makes the base offset almost useless.  We could try and do
+          * something clever where we use an actual base offset if
+          * base_offset % 32 == 0 but that would mean we were generating
+          * different code depending on the base offset.  Instead, for the
+          * sake of consistency, we'll just do the add ourselves.
           */
          brw_ADD(p, addr, indirect_byte_offset, brw_imm_uw(imm_byte_offset));
-         brw_MOV(p, dst, retype(brw_VxH_indirect(0, 0), dst.type));
+         ind_src = brw_VxH_indirect(0, 0);
       } else {
          brw_MOV(p, addr, indirect_byte_offset);
-         brw_MOV(p, dst, retype(brw_VxH_indirect(0, imm_byte_offset), dst.type));
+         ind_src = brw_VxH_indirect(0, imm_byte_offset);
+      }
+
+      brw_inst *mov = brw_MOV(p, dst, retype(ind_src, dst.type));
+
+      if (devinfo->gen == 6 && dst.file == BRW_MESSAGE_REGISTER_FILE &&
+          !inst->get_next()->is_tail_sentinel() &&
+          ((fs_inst *)inst->get_next())->mlen > 0) {
+         /* From the Sandybridge PRM:
+          *
+          *    "[Errata: DevSNB(SNB)] If MRF register is updated by any
+          *    instruction that “indexed/indirect” source AND is followed by a
+          *    send, the instruction requires a “Switch”. This is to avoid
+          *    race condition where send may dispatch before MRF is updated."
+          */
+         brw_inst_set_thread_control(devinfo, mov, BRW_THREAD_SWITCH);
       }
    }
 }
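[Editor's note] A standalone model of the AddressImmediate restriction quoted above, showing why a non-zero immediate is nearly useless before Broadwell; pre_bdw_indirect_addr is a hypothetical helper modelling the quoted behaviour, not hardware or driver code:

   #include <stdint.h>
   #include <stdio.h>

   /* Lower 5 bits of the address register and the immediate are added to
    * form the sub-register offset, the upper bits are added to form the
    * register number, and any carry out of the low 5 bits is dropped. */
   static uint32_t
   pre_bdw_indirect_addr(uint32_t addr_reg, uint32_t imm)
   {
      uint32_t subreg = (addr_reg & 0x1f) + (imm & 0x1f);   /* carry dropped */
      uint32_t reg    = (addr_reg & ~0x1fu) + (imm & ~0x1fu);
      return reg + (subreg & 0x1f);
   }

   int main(void)
   {
      /* addr = 28, imm = 8: the true byte offset is 36, but the model
       * yields 4 because the sub-register carry is lost.  Hence the
       * explicit ADD instead of a base offset on older generations. */
      printf("%u (expected 36)\n", pre_bdw_indirect_addr(28, 8));
      return 0;
   }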
@@ -455,7 +485,7 @@ fs_generator::generate_cs_terminate(fs_inst *inst, struct brw_reg payload)
 
    insn = brw_next_insn(p, BRW_OPCODE_SEND);
 
-   brw_set_dest(p, insn, brw_null_reg());
+   brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
    brw_set_src0(p, insn, payload);
    brw_set_src1(p, insn, brw_imm_d(0));
 
@@ -479,47 +509,6 @@ fs_generator::generate_cs_terminate(fs_inst *inst, struct brw_reg payload)
    brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
 }
 
-void
-fs_generator::generate_stencil_ref_packing(fs_inst *inst,
-                                           struct brw_reg dst,
-                                           struct brw_reg src)
-{
-   assert(dispatch_width == 8);
-   assert(devinfo->gen >= 9);
-
-   /* Stencil value updates are provided in 8 slots of 1 byte per slot.
-    * Presumably, in order to save memory bandwidth, the stencil reference
-    * values written from the FS need to be packed into 2 dwords (this makes
-    * sense because the stencil values are limited to 1 byte each and a SIMD8
-    * send, so stencil slots 0-3 in dw0, and 4-7 in dw1.)
-    *
-    * The spec is confusing here because in the payload definition of MDP_RTW_S8
-    * (Message Data Payload for Render Target Writes with Stencil 8b) the
-    * stencil value seems to be dw4.0-dw4.7. However, if you look at the type of
-    * dw4 it is type MDPR_STENCIL (Message Data Payload Register) which is the
-    * packed values specified above and diagrammed below:
-    *
-    *     31                             0
-    *     --------------------------------
-    * DW  |                              |
-    * 2-7 |            IGNORED           |
-    *     |                              |
-    *     --------------------------------
-    * DW1 | STC   | STC   | STC   | STC  |
-    *     | slot7 | slot6 | slot5 | slot4|
-    *     --------------------------------
-    * DW0 | STC   | STC   | STC   | STC  |
-    *     | slot3 | slot2 | slot1 | slot0|
-    *     --------------------------------
-    */
-
-   src.vstride = BRW_VERTICAL_STRIDE_4;
-   src.width = BRW_WIDTH_1;
-   src.hstride = BRW_HORIZONTAL_STRIDE_0;
-   assert(src.type == BRW_REGISTER_TYPE_UB);
-   brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_UB), src);
-}
-
 void
 fs_generator::generate_barrier(fs_inst *inst, struct brw_reg src)
 {
@@ -527,22 +516,6 @@ fs_generator::generate_barrier(fs_inst *inst, struct brw_reg src)
    brw_WAIT(p);
 }
 
-void
-fs_generator::generate_blorp_fb_write(fs_inst *inst)
-{
-   brw_fb_WRITE(p,
-                16 /* dispatch_width */,
-                brw_message_reg(inst->base_mrf),
-                brw_reg_from_fs_reg(inst, &inst->src[0], devinfo->gen),
-                BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE,
-                inst->target,
-                inst->mlen,
-                0,
-                true,
-                true,
-                inst->header_size != 0);
-}
-
 void
 fs_generator::generate_linterp(fs_inst *inst,
                             struct brw_reg dst, struct brw_reg *src)
@@ -580,82 +553,6 @@ fs_generator::generate_linterp(fs_inst *inst,
    }
 }
 
-void
-fs_generator::generate_math_gen6(fs_inst *inst,
-                                 struct brw_reg dst,
-                                 struct brw_reg src0,
-                                 struct brw_reg src1)
-{
-   int op = brw_math_function(inst->opcode);
-   bool binop = src1.file != BRW_ARCHITECTURE_REGISTER_FILE;
-
-   if (dispatch_width == 8) {
-      gen6_math(p, dst, op, src0, src1);
-   } else if (dispatch_width == 16) {
-      brw_push_insn_state(p);
-      brw_set_default_exec_size(p, BRW_EXECUTE_8);
-      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
-      gen6_math(p, firsthalf(dst), op, firsthalf(src0), firsthalf(src1));
-      brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
-      gen6_math(p, sechalf(dst), op, sechalf(src0),
-                binop ? sechalf(src1) : brw_null_reg());
-      brw_pop_insn_state(p);
-   }
-}
-
-void
-fs_generator::generate_math_gen4(fs_inst *inst,
-                              struct brw_reg dst,
-                              struct brw_reg src)
-{
-   int op = brw_math_function(inst->opcode);
-
-   assert(inst->mlen >= 1);
-
-   if (dispatch_width == 8) {
-      gen4_math(p, dst,
-                op,
-                inst->base_mrf, src,
-                BRW_MATH_PRECISION_FULL);
-   } else if (dispatch_width == 16) {
-      brw_set_default_exec_size(p, BRW_EXECUTE_8);
-      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
-      gen4_math(p, firsthalf(dst),
-               op,
-               inst->base_mrf, firsthalf(src),
-               BRW_MATH_PRECISION_FULL);
-      brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
-      gen4_math(p, sechalf(dst),
-               op,
-               inst->base_mrf + 1, sechalf(src),
-               BRW_MATH_PRECISION_FULL);
-
-      brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
-   }
-}
-
-void
-fs_generator::generate_math_g45(fs_inst *inst,
-                                struct brw_reg dst,
-                                struct brw_reg src)
-{
-   if (inst->opcode == SHADER_OPCODE_POW ||
-       inst->opcode == SHADER_OPCODE_INT_QUOTIENT ||
-       inst->opcode == SHADER_OPCODE_INT_REMAINDER) {
-      generate_math_gen4(inst, dst, src);
-      return;
-   }
-
-   int op = brw_math_function(inst->opcode);
-
-   assert(inst->mlen >= 1);
-
-   gen4_math(p, dst,
-             op,
-             inst->base_mrf, src,
-             BRW_MATH_PRECISION_FULL);
-}
-
 void
 fs_generator::generate_get_buffer_size(fs_inst *inst,
                                        struct brw_reg dst,
@@ -706,7 +603,6 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
                            struct brw_reg sampler_index)
 {
    int msg_type = -1;
-   int rlen = 4;
    uint32_t simd_mode;
    uint32_t return_format;
    bool is_combined_send = inst->eot;
@@ -768,6 +664,14 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
+      case SHADER_OPCODE_TXL_LZ:
+         assert(devinfo->gen >= 9);
+        if (inst->shadow_compare) {
+            msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_C_LZ;
+         } else {
+            msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LZ;
+         }
+         break;
       case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
@@ -783,6 +687,10 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
       case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
+      case SHADER_OPCODE_TXF_LZ:
+         assert(devinfo->gen >= 9);
+         msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD_LZ;
+         break;
       case SHADER_OPCODE_TXF_CMS_W:
          assert(devinfo->gen >= 9);
          msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W;
@@ -895,15 +803,9 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
    assert(msg_type != -1);
 
    if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
-      rlen = 8;
       dst = vec16(dst);
    }
 
-   if (is_combined_send) {
-      assert(devinfo->gen >= 9 || devinfo->is_cherryview);
-      rlen = 0;
-   }
-
    assert(devinfo->gen < 7 || inst->header_size == 0 ||
           src.file == BRW_GENERAL_REGISTER_FILE);
 
@@ -970,7 +872,7 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
                  surface + base_binding_table_index,
                  sampler % 16,
                  msg_type,
-                 rlen,
+                 inst->regs_written,
                  inst->mlen,
                  inst->header_size != 0,
                  simd_mode,
@@ -988,7 +890,7 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
       brw_set_default_mask_control(p, BRW_MASK_DISABLE);
       brw_set_default_access_mode(p, BRW_ALIGN_1);
 
-      if (memcmp(&surface_reg, &sampler_reg, sizeof(surface_reg)) == 0) {
+      if (brw_regs_equal(&surface_reg, &sampler_reg)) {
          brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
       } else {
          brw_SHL(p, addr, sampler_reg, brw_imm_ud(8));
@@ -1007,7 +909,7 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
                               0 /* surface */,
                               0 /* sampler */,
                               msg_type,
-                              rlen,
+                              inst->regs_written,
                               inst->mlen /* mlen */,
                               inst->header_size != 0 /* header */,
                               simd_mode,
@@ -1092,34 +994,9 @@ fs_generator::generate_ddx(enum opcode opcode,
  */
 void
 fs_generator::generate_ddy(enum opcode opcode,
-                           struct brw_reg dst, struct brw_reg src,
-                           bool negate_value)
+                           struct brw_reg dst, struct brw_reg src)
 {
    if (opcode == FS_OPCODE_DDY_FINE) {
-      /* From the Ivy Bridge PRM, volume 4 part 3, section 3.3.9 (Register
-       * Region Restrictions):
-       *
-       *     In Align16 access mode, SIMD16 is not allowed for DW operations
-       *     and SIMD8 is not allowed for DF operations.
-       *
-       * In this context, "DW operations" means "operations acting on 32-bit
-       * values", so it includes operations on floats.
-       *
-       * Gen4 has a similar restriction.  From the i965 PRM, section 11.5.3
-       * (Instruction Compression -> Rules and Restrictions):
-       *
-       *     A compressed instruction must be in Align1 access mode. Align16
-       *     mode instructions cannot be compressed.
-       *
-       * Similar text exists in the g45 PRM.
-       *
-       * On these platforms, if we're building a SIMD16 shader, we need to
-       * manually unroll to a pair of SIMD8 instructions.
-       */
-      bool unroll_to_simd8 =
-         (dispatch_width == 16 &&
-          (devinfo->gen == 4 || (devinfo->gen == 7 && !devinfo->is_haswell)));
-
       /* produce accurate derivatives */
       struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
                                     src.negate, src.abs,
@@ -1137,24 +1014,7 @@ fs_generator::generate_ddy(enum opcode opcode,
                                     BRW_SWIZZLE_ZWZW, WRITEMASK_XYZW);
       brw_push_insn_state(p);
       brw_set_default_access_mode(p, BRW_ALIGN_16);
-      if (unroll_to_simd8) {
-         brw_set_default_exec_size(p, BRW_EXECUTE_8);
-         brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
-         if (negate_value) {
-            brw_ADD(p, firsthalf(dst), firsthalf(src1), negate(firsthalf(src0)));
-            brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
-            brw_ADD(p, sechalf(dst), sechalf(src1), negate(sechalf(src0)));
-         } else {
-            brw_ADD(p, firsthalf(dst), firsthalf(src0), negate(firsthalf(src1)));
-            brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
-            brw_ADD(p, sechalf(dst), sechalf(src0), negate(sechalf(src1)));
-         }
-      } else {
-         if (negate_value)
-            brw_ADD(p, dst, src1, negate(src0));
-         else
-            brw_ADD(p, dst, src0, negate(src1));
-      }
+      brw_ADD(p, dst, negate(src0), src1);
       brw_pop_insn_state(p);
    } else {
       /* replicate the derivative at the top-left pixel to other pixels */
@@ -1172,10 +1032,7 @@ fs_generator::generate_ddy(enum opcode opcode,
                                     BRW_WIDTH_4,
                                     BRW_HORIZONTAL_STRIDE_0,
                                     BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
-      if (negate_value)
-         brw_ADD(p, dst, src1, negate(src0));
-      else
-         brw_ADD(p, dst, src0, negate(src1));
+      brw_ADD(p, dst, negate(src0), src1);
    }
 }
 
@@ -1288,6 +1145,7 @@ fs_generator::generate_uniform_pull_constant_load_gen7(fs_inst *inst,
       brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
       brw_set_default_mask_control(p, BRW_MASK_DISABLE);
       brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
+      brw_inst_set_exec_size(devinfo, send, BRW_EXECUTE_4);
       brw_pop_insn_state(p);
 
       brw_set_dest(p, send, dst);
@@ -1334,10 +1192,9 @@ fs_generator::generate_uniform_pull_constant_load_gen7(fs_inst *inst,
 }
 
 void
-fs_generator::generate_varying_pull_constant_load(fs_inst *inst,
-                                                  struct brw_reg dst,
-                                                  struct brw_reg index,
-                                                  struct brw_reg offset)
+fs_generator::generate_varying_pull_constant_load_gen4(fs_inst *inst,
+                                                       struct brw_reg dst,
+                                                       struct brw_reg index)
 {
    assert(devinfo->gen < 7); /* Should use the gen7 variant. */
    assert(inst->header_size != 0);
@@ -1369,15 +1226,11 @@ fs_generator::generate_varying_pull_constant_load(fs_inst *inst,
       simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
    }
 
-   struct brw_reg offset_mrf = retype(brw_message_reg(inst->base_mrf + 1),
-                                      BRW_REGISTER_TYPE_D);
-   brw_MOV(p, offset_mrf, offset);
-
    struct brw_reg header = brw_vec8_grf(0, 0);
    gen6_resolve_implied_move(p, &header, inst->base_mrf);
 
    brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
-   brw_inst_set_qtr_control(p->devinfo, send, BRW_COMPRESSION_NONE);
+   brw_inst_set_compression(devinfo, send, false);
    brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW));
    brw_set_src0(p, send, header);
    if (devinfo->gen < 6)
@@ -1688,8 +1541,6 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
       brw_NOP(p);
 
    this->dispatch_width = dispatch_width;
-   if (dispatch_width == 16)
-      brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
 
    int start_offset = p->next_insn_offset;
    int spill_count = 0, fill_count = 0;
@@ -1703,34 +1554,26 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
       unsigned int last_insn_offset = p->next_insn_offset;
       bool multiple_instructions_emitted = false;
 
-      if (unlikely(debug_flag))
-         annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset);
-
-      for (unsigned int i = 0; i < inst->sources; i++) {
-        src[i] = brw_reg_from_fs_reg(inst, &inst->src[i], devinfo->gen);
-
-        /* The accumulator result appears to get used for the
-         * conditional modifier generation.  When negating a UD
-         * value, there is a 33rd bit generated for the sign in the
-         * accumulator value, so now you can't check, for example,
-         * equality with a 32-bit value.  See piglit fs-op-neg-uvec4.
-         */
-        assert(!inst->conditional_mod ||
-               inst->src[i].type != BRW_REGISTER_TYPE_UD ||
-               !inst->src[i].negate);
+      /* From the Broadwell PRM, Volume 7, "3D-Media-GPGPU", in the
+       * "Register Region Restrictions" section: for BDW, SKL:
+       *
+       *    "A POW/FDIV operation must not be followed by an instruction
+       *     that requires two destination registers."
+       *
+       * The documentation is often lacking annotations for Atom parts,
+       * and empirically this affects CHV as well.
+       */
+      if (devinfo->gen >= 8 &&
+          p->nr_insn > 1 &&
+          brw_inst_opcode(devinfo, brw_last_inst) == BRW_OPCODE_MATH &&
+          brw_inst_math_function(devinfo, brw_last_inst) == BRW_MATH_FUNCTION_POW &&
+          inst->dst.component_size(inst->exec_size) > REG_SIZE) {
+         brw_NOP(p);
+         last_insn_offset = p->next_insn_offset;
       }
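[Editor's note] A hedged sketch of the "two destination registers" test used by the new workaround, assuming component_size() amounts to exec size times stride times type size; the helper below is illustrative only:

   #include <stdbool.h>
   #include <stdio.h>

   #define REG_SIZE 32

   /* True when the destination region spans more than one GRF, i.e. the
    * instruction needs two destination registers. */
   static bool
   needs_two_dst_regs(unsigned exec_size, unsigned stride, unsigned type_sz)
   {
      return exec_size * stride * type_sz > REG_SIZE;
   }

   int main(void)
   {
      printf("%d\n", needs_two_dst_regs(8, 1, 4));   /* 0: SIMD8 float, one GRF, no NOP */
      printf("%d\n", needs_two_dst_regs(16, 1, 4));  /* 1: SIMD16 float, two GRFs, NOP  */
      return 0;
   }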
-      dst = brw_reg_from_fs_reg(inst, &inst->dst, devinfo->gen);
 
-      brw_set_default_predicate_control(p, inst->predicate);
-      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
-      brw_set_default_flag_reg(p, 0, inst->flag_subreg);
-      brw_set_default_saturate(p, inst->saturate);
-      brw_set_default_mask_control(p, inst->force_writemask_all);
-      brw_set_default_acc_write_control(p, inst->writes_accumulator);
-      brw_set_default_exec_size(p, cvt(inst->exec_size) - 1);
-
-      assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
-      assert(inst->mlen <= BRW_MAX_MSG_LENGTH);
+      if (unlikely(debug_flag))
+         annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset);
 
       switch (inst->exec_size) {
       case 1:
@@ -1760,6 +1603,33 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
          unreachable("Invalid instruction width");
       }
 
+      for (unsigned int i = 0; i < inst->sources; i++) {
+        src[i] = brw_reg_from_fs_reg(p, inst, &inst->src[i], devinfo->gen);
+
+        /* The accumulator result appears to get used for the
+         * conditional modifier generation.  When negating a UD
+         * value, there is a 33rd bit generated for the sign in the
+         * accumulator value, so now you can't check, for example,
+         * equality with a 32-bit value.  See piglit fs-op-neg-uvec4.
+         */
+        assert(!inst->conditional_mod ||
+               inst->src[i].type != BRW_REGISTER_TYPE_UD ||
+               !inst->src[i].negate);
+      }
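[Editor's note] A rough standalone model of the accumulator comment above: negating a UD source yields a 33-bit value in the accumulator, so a conditional modifier can disagree with the wrapped 32-bit result. Purely illustrative:

   #include <inttypes.h>
   #include <stdint.h>
   #include <stdio.h>

   int main(void)
   {
      uint32_t src = 1;
      uint32_t wrapped = (uint32_t)-src;   /* 0xffffffff: the 32-bit GRF result  */
      int64_t  acc = -(int64_t)src;        /* -1, keeping the 33rd sign bit      */

      printf("wrapped = 0x%" PRIx32 "\n", wrapped);
      printf("acc == wrapped: %d\n", acc == (int64_t)wrapped);  /* prints 0 */
      return 0;
   }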
+      dst = brw_reg_from_fs_reg(p, inst, &inst->dst, devinfo->gen);
+
+      brw_set_default_access_mode(p, BRW_ALIGN_1);
+      brw_set_default_predicate_control(p, inst->predicate);
+      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
+      brw_set_default_flag_reg(p, 0, inst->flag_subreg);
+      brw_set_default_saturate(p, inst->saturate);
+      brw_set_default_mask_control(p, inst->force_writemask_all);
+      brw_set_default_acc_write_control(p, inst->writes_accumulator);
+      brw_set_default_exec_size(p, cvt(inst->exec_size) - 1);
+
+      assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
+      assert(inst->mlen <= BRW_MAX_MSG_LENGTH);
+
       switch (inst->opcode) {
       case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
@@ -1784,45 +1654,13 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
       case BRW_OPCODE_MAD:
          assert(devinfo->gen >= 6);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
-         if (dispatch_width == 16 && !devinfo->supports_simd16_3src) {
-            brw_set_default_exec_size(p, BRW_EXECUTE_8);
-           brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
-            brw_inst *f = brw_MAD(p, firsthalf(dst), firsthalf(src[0]), firsthalf(src[1]), firsthalf(src[2]));
-           brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
-            brw_inst *s = brw_MAD(p, sechalf(dst), sechalf(src[0]), sechalf(src[1]), sechalf(src[2]));
-           brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
-
-            if (inst->conditional_mod) {
-               brw_inst_set_cond_modifier(p->devinfo, f, inst->conditional_mod);
-               brw_inst_set_cond_modifier(p->devinfo, s, inst->conditional_mod);
-               multiple_instructions_emitted = true;
-            }
-        } else {
-           brw_MAD(p, dst, src[0], src[1], src[2]);
-        }
-        brw_set_default_access_mode(p, BRW_ALIGN_1);
+         brw_MAD(p, dst, src[0], src[1], src[2]);
         break;
 
       case BRW_OPCODE_LRP:
          assert(devinfo->gen >= 6);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
-         if (dispatch_width == 16 && !devinfo->supports_simd16_3src) {
-            brw_set_default_exec_size(p, BRW_EXECUTE_8);
-           brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
-            brw_inst *f = brw_LRP(p, firsthalf(dst), firsthalf(src[0]), firsthalf(src[1]), firsthalf(src[2]));
-           brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
-            brw_inst *s = brw_LRP(p, sechalf(dst), sechalf(src[0]), sechalf(src[1]), sechalf(src[2]));
-           brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
-
-            if (inst->conditional_mod) {
-               brw_inst_set_cond_modifier(p->devinfo, f, inst->conditional_mod);
-               brw_inst_set_cond_modifier(p->devinfo, s, inst->conditional_mod);
-               multiple_instructions_emitted = true;
-            }
-        } else {
-           brw_LRP(p, dst, src[0], src[1], src[2]);
-        }
-        brw_set_default_access_mode(p, BRW_ALIGN_1);
+         brw_LRP(p, dst, src[0], src[1], src[2]);
         break;
 
       case BRW_OPCODE_FRC:
@@ -1868,43 +1706,17 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
          brw_F16TO32(p, dst, src[0]);
          break;
       case BRW_OPCODE_CMP:
-         /* The Ivybridge/BayTrail WaCMPInstFlagDepClearedEarly workaround says
-          * that when the destination is a GRF that the dependency-clear bit on
-          * the flag register is cleared early.
-          *
-          * Suggested workarounds are to disable coissuing CMP instructions
-          * or to split CMP(16) instructions into two CMP(8) instructions.
-          *
-          * We choose to split into CMP(8) instructions since disabling
-          * coissuing would affect CMP instructions not otherwise affected by
-          * the errata.
-          */
-         if (dispatch_width == 16 && devinfo->gen == 7 && !devinfo->is_haswell) {
-            if (dst.file == BRW_GENERAL_REGISTER_FILE) {
-               brw_set_default_exec_size(p, BRW_EXECUTE_8);
-               brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
-               brw_CMP(p, firsthalf(dst), inst->conditional_mod,
-                          firsthalf(src[0]), firsthalf(src[1]));
-               brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
-               brw_CMP(p, sechalf(dst), inst->conditional_mod,
-                          sechalf(src[0]), sechalf(src[1]));
-               brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
-
-               multiple_instructions_emitted = true;
-            } else if (dst.file == BRW_ARCHITECTURE_REGISTER_FILE) {
-               /* For unknown reasons, the aforementioned workaround is not
-                * sufficient. Overriding the type when the destination is the
-                * null register is necessary but not sufficient by itself.
-                */
-               assert(dst.nr == BRW_ARF_NULL);
-               dst.type = BRW_REGISTER_TYPE_D;
-               brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
-            } else {
-               unreachable("not reached");
-            }
-         } else {
-            brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
+         if (inst->exec_size >= 16 && devinfo->gen == 7 && !devinfo->is_haswell &&
+             dst.file == BRW_ARCHITECTURE_REGISTER_FILE) {
+            /* For unknown reasons the WaCMPInstFlagDepClearedEarly workaround
+             * implemented in the compiler is not sufficient. Overriding the
+             * type when the destination is the null register is necessary but
+             * not sufficient by itself.
+             */
+            assert(dst.nr == BRW_ARF_NULL);
+            dst.type = BRW_REGISTER_TYPE_D;
          }
+         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
       case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
@@ -1945,60 +1757,17 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
       case BRW_OPCODE_BFE:
          assert(devinfo->gen >= 7);
          brw_set_default_access_mode(p, BRW_ALIGN_16);
-         if (dispatch_width == 16 && !devinfo->supports_simd16_3src) {
-            brw_set_default_exec_size(p, BRW_EXECUTE_8);
-            brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
-            brw_BFE(p, firsthalf(dst), firsthalf(src[0]), firsthalf(src[1]), firsthalf(src[2]));
-            brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
-            brw_BFE(p, sechalf(dst), sechalf(src[0]), sechalf(src[1]), sechalf(src[2]));
-            brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
-         } else {
-            brw_BFE(p, dst, src[0], src[1], src[2]);
-         }
-         brw_set_default_access_mode(p, BRW_ALIGN_1);
+         brw_BFE(p, dst, src[0], src[1], src[2]);
          break;
 
       case BRW_OPCODE_BFI1:
          assert(devinfo->gen >= 7);
-         /* The Haswell WaForceSIMD8ForBFIInstruction workaround says that we
-          * should
-          *
-          *    "Force BFI instructions to be executed always in SIMD8."
-          */
-         if (dispatch_width == 16 && devinfo->is_haswell) {
-            brw_set_default_exec_size(p, BRW_EXECUTE_8);
-            brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
-            brw_BFI1(p, firsthalf(dst), firsthalf(src[0]), firsthalf(src[1]));
-            brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
-            brw_BFI1(p, sechalf(dst), sechalf(src[0]), sechalf(src[1]));
-            brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
-         } else {
-            brw_BFI1(p, dst, src[0], src[1]);
-         }
+         brw_BFI1(p, dst, src[0], src[1]);
          break;
       case BRW_OPCODE_BFI2:
          assert(devinfo->gen >= 7);
          brw_set_default_access_mode(p, BRW_ALIGN_16);
-         /* The Haswell WaForceSIMD8ForBFIInstruction workaround says that we
-          * should
-          *
-          *    "Force BFI instructions to be executed always in SIMD8."
-          *
-          * Otherwise we would be able to emit compressed instructions like we
-          * do for the other three-source instructions.
-          */
-         if (dispatch_width == 16 &&
-             (devinfo->is_haswell || !devinfo->supports_simd16_3src)) {
-            brw_set_default_exec_size(p, BRW_EXECUTE_8);
-            brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
-            brw_BFI2(p, firsthalf(dst), firsthalf(src[0]), firsthalf(src[1]), firsthalf(src[2]));
-            brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
-            brw_BFI2(p, sechalf(dst), sechalf(src[0]), sechalf(src[1]), sechalf(src[2]));
-            brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
-         } else {
-            brw_BFI2(p, dst, src[0], src[1], src[2]);
-         }
-         brw_set_default_access_mode(p, BRW_ALIGN_1);
+         brw_BFI2(p, dst, src[0], src[1], src[2]);
          break;
 
       case BRW_OPCODE_IF:
@@ -2043,30 +1812,36 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
       case SHADER_OPCODE_LOG2:
       case SHADER_OPCODE_SIN:
       case SHADER_OPCODE_COS:
-         assert(devinfo->gen < 6 || inst->mlen == 0);
          assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
-        if (devinfo->gen >= 7) {
-            gen6_math(p, dst, brw_math_function(inst->opcode), src[0],
-                      brw_null_reg());
-        } else if (devinfo->gen == 6) {
-           generate_math_gen6(inst, dst, src[0], brw_null_reg());
-        } else if (devinfo->gen == 5 || devinfo->is_g4x) {
-           generate_math_g45(inst, dst, src[0]);
+        if (devinfo->gen >= 6) {
+            assert(inst->mlen == 0);
+            assert(devinfo->gen >= 7 || inst->exec_size == 8);
+            gen6_math(p, dst, brw_math_function(inst->opcode),
+                      src[0], brw_null_reg());
         } else {
-           generate_math_gen4(inst, dst, src[0]);
+            assert(inst->mlen >= 1);
+            assert(devinfo->gen == 5 || devinfo->is_g4x || inst->exec_size == 8);
+            gen4_math(p, dst,
+                      brw_math_function(inst->opcode),
+                      inst->base_mrf, src[0],
+                      BRW_MATH_PRECISION_FULL);
         }
         break;
       case SHADER_OPCODE_INT_QUOTIENT:
       case SHADER_OPCODE_INT_REMAINDER:
       case SHADER_OPCODE_POW:
-         assert(devinfo->gen < 6 || inst->mlen == 0);
          assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
-        if (devinfo->gen >= 7 && inst->opcode == SHADER_OPCODE_POW) {
+         if (devinfo->gen >= 6) {
+            assert(inst->mlen == 0);
+            assert((devinfo->gen >= 7 && inst->opcode == SHADER_OPCODE_POW) ||
+                   inst->exec_size == 8);
             gen6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
-        } else if (devinfo->gen >= 6) {
-           generate_math_gen6(inst, dst, src[0], src[1]);
-        } else {
-           generate_math_gen4(inst, dst, src[0]);
+         } else {
+            assert(inst->mlen >= 1);
+            assert(inst->exec_size == 8);
+            gen4_math(p, dst, brw_math_function(inst->opcode),
+                      inst->base_mrf, src[0],
+                      BRW_MATH_PRECISION_FULL);
         }
         break;
       case FS_OPCODE_CINTERP:
@@ -2092,11 +1867,13 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
       case FS_OPCODE_TXB:
       case SHADER_OPCODE_TXD:
       case SHADER_OPCODE_TXF:
+      case SHADER_OPCODE_TXF_LZ:
       case SHADER_OPCODE_TXF_CMS:
       case SHADER_OPCODE_TXF_CMS_W:
       case SHADER_OPCODE_TXF_UMS:
       case SHADER_OPCODE_TXF_MCS:
       case SHADER_OPCODE_TXL:
+      case SHADER_OPCODE_TXL_LZ:
       case SHADER_OPCODE_TXS:
       case SHADER_OPCODE_LOD:
       case SHADER_OPCODE_TG4:
@@ -2110,8 +1887,7 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
          break;
       case FS_OPCODE_DDY_COARSE:
       case FS_OPCODE_DDY_FINE:
-         assert(src[1].file == BRW_IMMEDIATE_VALUE);
-         generate_ddy(inst->opcode, dst, src[0], src[1].ud);
+         generate_ddy(inst->opcode, dst, src[0]);
         break;
 
       case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
@@ -2146,15 +1922,17 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
         break;
 
       case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
+         assert(inst->force_writemask_all);
         generate_uniform_pull_constant_load(inst, dst, src[0], src[1]);
         break;
 
       case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
+         assert(inst->force_writemask_all);
         generate_uniform_pull_constant_load_gen7(inst, dst, src[0], src[1]);
         break;
 
-      case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
-        generate_varying_pull_constant_load(inst, dst, src[0], src[1]);
+      case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
+        generate_varying_pull_constant_load_gen4(inst, dst, src[0]);
         break;
 
       case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
@@ -2166,10 +1944,6 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
         generate_fb_write(inst, src[0]);
         break;
 
-      case FS_OPCODE_BLORP_FB_WRITE:
-        generate_blorp_fb_write(inst);
-        break;
-
       case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
          generate_mov_dispatch_to_flags(inst);
          break;
@@ -2233,28 +2007,6 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
          brw_broadcast(p, dst, src[0], src[1]);
          break;
 
-      case SHADER_OPCODE_EXTRACT_BYTE: {
-         assert(src[0].type == BRW_REGISTER_TYPE_D ||
-                src[0].type == BRW_REGISTER_TYPE_UD);
-
-         enum brw_reg_type type =
-            src[0].type == BRW_REGISTER_TYPE_D ? BRW_REGISTER_TYPE_B
-                                               : BRW_REGISTER_TYPE_UB;
-         brw_MOV(p, dst, spread(suboffset(retype(src[0], type), src[1].ud), 4));
-         break;
-      }
-
-      case SHADER_OPCODE_EXTRACT_WORD: {
-         assert(src[0].type == BRW_REGISTER_TYPE_D ||
-                src[0].type == BRW_REGISTER_TYPE_UD);
-
-         enum brw_reg_type type =
-            src[0].type == BRW_REGISTER_TYPE_D ? BRW_REGISTER_TYPE_W
-                                               : BRW_REGISTER_TYPE_UW;
-         brw_MOV(p, dst, spread(suboffset(retype(src[0], type), src[1].ud), 2));
-         break;
-      }
-
       case FS_OPCODE_SET_SAMPLE_ID:
          generate_set_sample_id(inst, dst, src[0], src[1]);
          break;
@@ -2307,10 +2059,6 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
         generate_barrier(inst, src[0]);
         break;
 
-      case FS_OPCODE_PACK_STENCIL_REF:
-         generate_stencil_ref_packing(inst, dst, src[0]);
-         break;
-
       default:
          unreachable("Unsupported opcode");