i965/fs: Fix signedness of local variables and arguments of emit_(un)spill.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_fs_generator.cpp
index 77969c4dc12dfe845a67bc0b64dd03f0e608e95d..2a486513691f1d3393fb020d29385980b8582c4e 100644 (file)
  * native instructions.
  */
 
-#include "main/macros.h"
-#include "brw_context.h"
 #include "brw_eu.h"
 #include "brw_fs.h"
 #include "brw_cfg.h"
+#include "brw_program.h"
 
 static enum brw_reg_file
 brw_file_from_reg(fs_reg *reg)
@@ -55,7 +54,8 @@ brw_file_from_reg(fs_reg *reg)
 }
 
 static struct brw_reg
-brw_reg_from_fs_reg(fs_inst *inst, fs_reg *reg, unsigned gen)
+brw_reg_from_fs_reg(const struct brw_codegen *p,
+                    fs_inst *inst, fs_reg *reg, unsigned gen)
 {
    struct brw_reg brw_reg;
 
@@ -66,8 +66,10 @@ brw_reg_from_fs_reg(fs_inst *inst, fs_reg *reg, unsigned gen)
    case VGRF:
       if (reg->stride == 0) {
          brw_reg = brw_vec1_reg(brw_file_from_reg(reg), reg->nr, 0);
-      } else if (inst->exec_size < 8) {
-         brw_reg = brw_vec8_reg(brw_file_from_reg(reg), reg->nr, 0);
+      } else if (!p->compressed &&
+                 inst->exec_size * reg->stride * type_sz(reg->type) <= 32) {
+         brw_reg = brw_vecn_reg(inst->exec_size, brw_file_from_reg(reg),
+                                reg->nr, 0);
          brw_reg = stride(brw_reg, inst->exec_size * reg->stride,
                           inst->exec_size, reg->stride);
       } else {
@@ -77,11 +79,14 @@ brw_reg_from_fs_reg(fs_inst *inst, fs_reg *reg, unsigned gen)
           * rule implies that elements within a 'Width' cannot cross GRF
           * boundaries.
           *
-          * So, for registers with width > 8, we have to use a width of 8
-          * and trust the compression state to sort out the exec size.
+          * So, for registers that are large enough, we have to split the exec
+          * size in two and trust the compression state to sort it out.
           */
-         brw_reg = brw_vec8_reg(brw_file_from_reg(reg), reg->nr, 0);
-         brw_reg = stride(brw_reg, 8 * reg->stride, 8, reg->stride);
+         assert(inst->exec_size / 2 * reg->stride * type_sz(reg->type) <= 32);
+         brw_reg = brw_vecn_reg(inst->exec_size / 2, brw_file_from_reg(reg),
+                                reg->nr, 0);
+         brw_reg = stride(brw_reg, inst->exec_size / 2 * reg->stride,
+                          inst->exec_size / 2, reg->stride);
       }
 
       brw_reg = retype(brw_reg, reg->type);
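A worked example of the region-size logic above, with illustrative numbers that are not part of the patch: the new test asks whether the whole region fits in a single 32-byte GRF.  A SIMD8 float source with stride 1 covers 8 * 1 * 4 = 32 bytes, so the uncompressed region from brw_vecn_reg() is used directly; a SIMD16 float source covers 64 bytes, so the final branch halves the exec size and describes an 8-wide region per compressed half, satisfying the rule that elements within a Width may not cross GRF boundaries.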
@@ -92,7 +97,7 @@ brw_reg_from_fs_reg(fs_inst *inst, fs_reg *reg, unsigned gen)
    case ARF:
    case FIXED_GRF:
    case IMM:
-      brw_reg = *static_cast<struct brw_reg *>(reg);
+      brw_reg = reg->as_brw_reg();
       break;
    case BAD_FILE:
       /* Probably unused. */
@@ -112,14 +117,14 @@ fs_generator::fs_generator(const struct brw_compiler *compiler, void *log_data,
                            struct brw_stage_prog_data *prog_data,
                            unsigned promoted_constants,
                            bool runtime_check_aads_emit,
-                           const char *stage_abbrev)
+                           gl_shader_stage stage)
 
    : compiler(compiler), log_data(log_data),
      devinfo(compiler->devinfo), key(key),
      prog_data(prog_data),
      promoted_constants(promoted_constants),
      runtime_check_aads_emit(runtime_check_aads_emit), debug_flag(false),
-     stage_abbrev(stage_abbrev), mem_ctx(mem_ctx)
+     stage(stage), mem_ctx(mem_ctx)
 {
    p = rzalloc(mem_ctx, struct brw_codegen);
    brw_init_codegen(devinfo, p, mem_ctx);
@@ -352,23 +357,71 @@ fs_generator::generate_mov_indirect(fs_inst *inst,
 
    unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr;
 
-   /* We use VxH indirect addressing, clobbering a0.0 through a0.7. */
-   struct brw_reg addr = vec8(brw_address_reg(0));
+   if (indirect_byte_offset.file == BRW_IMMEDIATE_VALUE) {
+      imm_byte_offset += indirect_byte_offset.ud;
 
-   /* The destination stride of an instruction (in bytes) must be greater
-    * than or equal to the size of the rest of the instruction.  Since the
-    * address register is of type UW, we can't use a D-type instruction.
-    * In order to get around this, re re-type to UW and use a stride.
-    */
-   indirect_byte_offset =
-      retype(spread(indirect_byte_offset, 2), BRW_REGISTER_TYPE_UW);
+      reg.nr = imm_byte_offset / REG_SIZE;
+      reg.subnr = imm_byte_offset % REG_SIZE;
+      brw_MOV(p, dst, reg);
+   } else {
+      /* Prior to Broadwell, there are only 8 address registers. */
+      assert(inst->exec_size == 8 || devinfo->gen >= 8);
 
-   /* Prior to Broadwell, there are only 8 address registers. */
-   assert(inst->exec_size == 8 || devinfo->gen >= 8);
+      /* We use VxH indirect addressing, clobbering a0.0 through a0.7. */
+      struct brw_reg addr = vec8(brw_address_reg(0));
+
+      /* The destination stride of an instruction (in bytes) must be greater
+       * than or equal to the size of the rest of the instruction.  Since the
+       * address register is of type UW, we can't use a D-type instruction.
+       * In order to get around this, we re-type to UW and use a stride.
+       */
+      indirect_byte_offset =
+         retype(spread(indirect_byte_offset, 2), BRW_REGISTER_TYPE_UW);
 
-   brw_MOV(p, addr, indirect_byte_offset);
-   brw_inst_set_mask_control(devinfo, brw_last_inst, BRW_MASK_DISABLE);
-   brw_MOV(p, dst, retype(brw_VxH_indirect(0, imm_byte_offset), dst.type));
+      struct brw_reg ind_src;
+      if (devinfo->gen < 8) {
+         /* From the Haswell PRM section "Register Region Restrictions":
+          *
+          *    "The lower bits of the AddressImmediate must not overflow to
+          *    change the register address.  The lower 5 bits of Address
+          *    Immediate when added to lower 5 bits of address register gives
+          *    the sub-register offset. The upper bits of Address Immediate
+          *    when added to upper bits of address register gives the register
+          *    address. Any overflow from sub-register offset is dropped."
+          *
+          * This restriction is only listed in the Haswell PRM but empirical
+          * testing indicates that it applies on all older generations and is
+          * lifted on Broadwell.
+          *
+          * Since the indirect may cause us to cross a register boundary, this
+          * makes the base offset almost useless.  We could try and do
+          * something clever where we use an actual base offset if
+          * base_offset % 32 == 0 but that would mean we were generating
+          * different code depending on the base offset.  Instead, for the
+          * sake of consistency, we'll just do the add ourselves.
+          */
+         brw_ADD(p, addr, indirect_byte_offset, brw_imm_uw(imm_byte_offset));
+         ind_src = brw_VxH_indirect(0, 0);
+      } else {
+         brw_MOV(p, addr, indirect_byte_offset);
+         ind_src = brw_VxH_indirect(0, imm_byte_offset);
+      }
+
+      brw_inst *mov = brw_MOV(p, dst, retype(ind_src, dst.type));
+
+      if (devinfo->gen == 6 && dst.file == BRW_MESSAGE_REGISTER_FILE &&
+          !inst->get_next()->is_tail_sentinel() &&
+          ((fs_inst *)inst->get_next())->mlen > 0) {
+         /* From the Sandybridge PRM:
+          *
+          *    "[Errata: DevSNB(SNB)] If MRF register is updated by any
+          *    instruction that “indexed/indirect” source AND is followed by a
+          *    send, the instruction requires a “Switch”. This is to avoid
+          *    race condition where send may dispatch before MRF is updated."
+          */
+         brw_inst_set_thread_control(devinfo, mov, BRW_THREAD_SWITCH);
+      }
+   }
 }
 
 void
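A worked example of why the pre-Broadwell path in generate_mov_indirect() folds the base offset into an ADD, with illustrative numbers that are not from the patch: suppose imm_byte_offset is 36 and a channel's indirect offset in a0 is 28.  Per the Haswell restriction quoted above, the hardware adds only the low 5 bits of the AddressImmediate (36 & 31 = 4) to the low 5 bits of the address register (28), gets 32, and drops the overflow, so the access lands a full register (32 bytes) below the intended 36 + 28 = 64.  Doing the addition in software and using an AddressImmediate of 0 sidesteps the overflow; on gen8+ the restriction is lifted, so the immediate can stay in brw_VxH_indirect().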
@@ -432,7 +485,7 @@ fs_generator::generate_cs_terminate(fs_inst *inst, struct brw_reg payload)
 
    insn = brw_next_insn(p, BRW_OPCODE_SEND);
 
-   brw_set_dest(p, insn, brw_null_reg());
+   brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
    brw_set_src0(p, insn, payload);
    brw_set_src1(p, insn, brw_imm_d(0));
 
@@ -504,22 +557,6 @@ fs_generator::generate_barrier(fs_inst *inst, struct brw_reg src)
    brw_WAIT(p);
 }
 
-void
-fs_generator::generate_blorp_fb_write(fs_inst *inst)
-{
-   brw_fb_WRITE(p,
-                16 /* dispatch_width */,
-                brw_message_reg(inst->base_mrf),
-                brw_reg_from_fs_reg(inst, &inst->src[0], devinfo->gen),
-                BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE,
-                inst->target,
-                inst->mlen,
-                0,
-                true,
-                true,
-                inst->header_size != 0);
-}
-
 void
 fs_generator::generate_linterp(fs_inst *inst,
                             struct brw_reg dst, struct brw_reg *src)
@@ -679,10 +716,10 @@ fs_generator::generate_get_buffer_size(fs_inst *inst,
 
 void
 fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src,
+                           struct brw_reg surface_index,
                            struct brw_reg sampler_index)
 {
    int msg_type = -1;
-   int rlen = 4;
    uint32_t simd_mode;
    uint32_t return_format;
    bool is_combined_send = inst->eot;
@@ -699,6 +736,17 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
       break;
    }
 
+   /* Stomp the resinfo output type to UINT32.  On gens 4-5, the output type
+    * is set as part of the message descriptor.  On gen4, the PRM seems to
+    * allow UINT32 and FLOAT32 (i965 PRM, Vol. 4 Section 4.8.1.1), but on
+    * later gens UINT32 is required.  Once you hit Sandy Bridge, the bit is
+    * gone from the message descriptor entirely and you just get UINT32 all
+    * the time regardless.  Since we can really only do non-UINT32 on gen4,
+    * just stomp it to UINT32 all the time.
+    */
+   if (inst->opcode == SHADER_OPCODE_TXS)
+      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
+
    switch (inst->exec_size) {
    case 8:
       simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
@@ -733,6 +781,14 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
+      case SHADER_OPCODE_TXL_LZ:
+         assert(devinfo->gen >= 9);
+        if (inst->shadow_compare) {
+            msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_C_LZ;
+         } else {
+            msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LZ;
+         }
+         break;
       case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
@@ -748,6 +804,10 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
       case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
+      case SHADER_OPCODE_TXF_LZ:
+         assert(devinfo->gen >= 9);
+         msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD_LZ;
+         break;
       case SHADER_OPCODE_TXF_CMS_W:
          assert(devinfo->gen >= 9);
          msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W;
@@ -860,15 +920,9 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
    assert(msg_type != -1);
 
    if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
-      rlen = 8;
       dst = vec16(dst);
    }
 
-   if (is_combined_send) {
-      assert(devinfo->gen >= 9 || devinfo->is_cherryview);
-      rlen = 0;
-   }
-
    assert(devinfo->gen < 7 || inst->header_size == 0 ||
           src.file == BRW_GENERAL_REGISTER_FILE);
 
@@ -903,6 +957,14 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
             /* Set the offset bits in DWord 2. */
             brw_MOV(p, get_element_ud(header_reg, 2),
                        brw_imm_ud(inst->offset));
+         } else if (stage != MESA_SHADER_VERTEX &&
+                    stage != MESA_SHADER_FRAGMENT) {
+            /* The vertex and fragment stages have g0.2 set to 0, so
+             * header0.2 is 0 when g0 is copied. Other stages may not, so we
+             * must set it to 0 to avoid setting undesirable bits in the
+             * message.
+             */
+            brw_MOV(p, get_element_ud(header_reg, 2), brw_imm_ud(0));
          }
 
          brw_adjust_sampler_state_pointer(p, header_reg, sampler_index);
@@ -915,35 +977,42 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
          ? prog_data->binding_table.gather_texture_start
          : prog_data->binding_table.texture_start;
 
-   if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
+   if (surface_index.file == BRW_IMMEDIATE_VALUE &&
+       sampler_index.file == BRW_IMMEDIATE_VALUE) {
+      uint32_t surface = surface_index.ud;
       uint32_t sampler = sampler_index.ud;
 
       brw_SAMPLE(p,
                  retype(dst, BRW_REGISTER_TYPE_UW),
                  inst->base_mrf,
                  src,
-                 sampler + base_binding_table_index,
+                 surface + base_binding_table_index,
                  sampler % 16,
                  msg_type,
-                 rlen,
+                 inst->regs_written,
                  inst->mlen,
                  inst->header_size != 0,
                  simd_mode,
                  return_format);
 
-      brw_mark_surface_used(prog_data, sampler + base_binding_table_index);
+      brw_mark_surface_used(prog_data, surface + base_binding_table_index);
    } else {
       /* Non-const sampler index */
 
       struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
+      struct brw_reg surface_reg = vec1(retype(surface_index, BRW_REGISTER_TYPE_UD));
       struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD));
 
       brw_push_insn_state(p);
       brw_set_default_mask_control(p, BRW_MASK_DISABLE);
       brw_set_default_access_mode(p, BRW_ALIGN_1);
 
-      /* addr = ((sampler * 0x101) + base_binding_table_index) & 0xfff */
-      brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
+      if (brw_regs_equal(&surface_reg, &sampler_reg)) {
+         brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
+      } else {
+         brw_SHL(p, addr, sampler_reg, brw_imm_ud(8));
+         brw_OR(p, addr, addr, surface_reg);
+      }
       if (base_binding_table_index)
          brw_ADD(p, addr, addr, brw_imm_ud(base_binding_table_index));
       brw_AND(p, addr, addr, brw_imm_ud(0xfff));
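A standalone sketch of the descriptor math above, using a hypothetical helper rather than code from the patch: in the sampler message descriptor the binding-table (surface) index lives in the low byte and the sampler index in the next four bits, hence the final & 0xfff.  When surface and sampler come from the same register, one MUL by 0x101 replicates the value into both byte fields; otherwise the sampler is shifted up by 8 and OR'd with the surface index.

   /* Hypothetical scalar model of the non-constant index path (illustration only). */
   static unsigned
   pack_sampler_descriptor(unsigned surface, unsigned sampler, unsigned base)
   {
      unsigned addr;
      if (surface == sampler)
         addr = sampler * 0x101;           /* same value in bits 0-7 and 8-15 */
      else
         addr = (sampler << 8) | surface;  /* sampler in the high byte, surface in the low byte */
      return (addr + base) & 0xfff;        /* add base_binding_table_index, keep the 12-bit field */
   }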
@@ -957,7 +1026,7 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
                               0 /* surface */,
                               0 /* sampler */,
                               msg_type,
-                              rlen,
+                              inst->regs_written,
                               inst->mlen /* mlen */,
                               inst->header_size != 0 /* header */,
                               simd_mode,
@@ -1042,8 +1111,7 @@ fs_generator::generate_ddx(enum opcode opcode,
  */
 void
 fs_generator::generate_ddy(enum opcode opcode,
-                           struct brw_reg dst, struct brw_reg src,
-                           bool negate_value)
+                           struct brw_reg dst, struct brw_reg src)
 {
    if (opcode == FS_OPCODE_DDY_FINE) {
       /* From the Ivy Bridge PRM, volume 4 part 3, section 3.3.9 (Register
@@ -1063,12 +1131,16 @@ fs_generator::generate_ddy(enum opcode opcode,
        *
        * Similar text exists in the g45 PRM.
        *
+       * Empirically, compressed align16 instructions using odd register
+       * numbers don't appear to work on Sandybridge either.
+       *
        * On these platforms, if we're building a SIMD16 shader, we need to
        * manually unroll to a pair of SIMD8 instructions.
        */
       bool unroll_to_simd8 =
          (dispatch_width == 16 &&
-          (devinfo->gen == 4 || (devinfo->gen == 7 && !devinfo->is_haswell)));
+          (devinfo->gen == 4 || devinfo->gen == 6 ||
+           (devinfo->gen == 7 && !devinfo->is_haswell)));
 
       /* produce accurate derivatives */
       struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
@@ -1090,20 +1162,11 @@ fs_generator::generate_ddy(enum opcode opcode,
       if (unroll_to_simd8) {
          brw_set_default_exec_size(p, BRW_EXECUTE_8);
          brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
-         if (negate_value) {
-            brw_ADD(p, firsthalf(dst), firsthalf(src1), negate(firsthalf(src0)));
-            brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
-            brw_ADD(p, sechalf(dst), sechalf(src1), negate(sechalf(src0)));
-         } else {
-            brw_ADD(p, firsthalf(dst), firsthalf(src0), negate(firsthalf(src1)));
-            brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
-            brw_ADD(p, sechalf(dst), sechalf(src0), negate(sechalf(src1)));
-         }
+         brw_ADD(p, firsthalf(dst), negate(firsthalf(src0)), firsthalf(src1));
+         brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
+         brw_ADD(p, sechalf(dst), negate(sechalf(src0)), sechalf(src1));
       } else {
-         if (negate_value)
-            brw_ADD(p, dst, src1, negate(src0));
-         else
-            brw_ADD(p, dst, src0, negate(src1));
+         brw_ADD(p, dst, negate(src0), src1);
       }
       brw_pop_insn_state(p);
    } else {
@@ -1122,10 +1185,7 @@ fs_generator::generate_ddy(enum opcode opcode,
                                     BRW_WIDTH_4,
                                     BRW_HORIZONTAL_STRIDE_0,
                                     BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
-      if (negate_value)
-         brw_ADD(p, dst, src1, negate(src0));
-      else
-         brw_ADD(p, dst, src0, negate(src1));
+      brw_ADD(p, dst, negate(src0), src1);
    }
 }
 
@@ -1238,6 +1298,7 @@ fs_generator::generate_uniform_pull_constant_load_gen7(fs_inst *inst,
       brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
       brw_set_default_mask_control(p, BRW_MASK_DISABLE);
       brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
+      brw_inst_set_exec_size(devinfo, send, BRW_EXECUTE_4);
       brw_pop_insn_state(p);
 
       brw_set_dest(p, send, dst);
@@ -1653,34 +1714,26 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
       unsigned int last_insn_offset = p->next_insn_offset;
       bool multiple_instructions_emitted = false;
 
-      if (unlikely(debug_flag))
-         annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset);
-
-      for (unsigned int i = 0; i < inst->sources; i++) {
-        src[i] = brw_reg_from_fs_reg(inst, &inst->src[i], devinfo->gen);
-
-        /* The accumulator result appears to get used for the
-         * conditional modifier generation.  When negating a UD
-         * value, there is a 33rd bit generated for the sign in the
-         * accumulator value, so now you can't check, for example,
-         * equality with a 32-bit value.  See piglit fs-op-neg-uvec4.
-         */
-        assert(!inst->conditional_mod ||
-               inst->src[i].type != BRW_REGISTER_TYPE_UD ||
-               !inst->src[i].negate);
+      /* From the Broadwell PRM, Volume 7, "3D-Media-GPGPU", in the
+       * "Register Region Restrictions" section: for BDW, SKL:
+       *
+       *    "A POW/FDIV operation must not be followed by an instruction
+       *     that requires two destination registers."
+       *
+       * The documentation is often lacking annotations for Atom parts,
+       * and empirically this affects CHV as well.
+       */
+      if (devinfo->gen >= 8 &&
+          p->nr_insn > 1 &&
+          brw_inst_opcode(devinfo, brw_last_inst) == BRW_OPCODE_MATH &&
+          brw_inst_math_function(devinfo, brw_last_inst) == BRW_MATH_FUNCTION_POW &&
+          inst->dst.component_size(inst->exec_size) > REG_SIZE) {
+         brw_NOP(p);
+         last_insn_offset = p->next_insn_offset;
       }
-      dst = brw_reg_from_fs_reg(inst, &inst->dst, devinfo->gen);
 
-      brw_set_default_predicate_control(p, inst->predicate);
-      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
-      brw_set_default_flag_reg(p, 0, inst->flag_subreg);
-      brw_set_default_saturate(p, inst->saturate);
-      brw_set_default_mask_control(p, inst->force_writemask_all);
-      brw_set_default_acc_write_control(p, inst->writes_accumulator);
-      brw_set_default_exec_size(p, cvt(inst->exec_size) - 1);
-
-      assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
-      assert(inst->mlen <= BRW_MAX_MSG_LENGTH);
+      if (unlikely(debug_flag))
+         annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset);
 
       switch (inst->exec_size) {
       case 1:
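A concrete instance of the "two destination registers" condition in the POW workaround above, with illustrative numbers not taken from the patch: REG_SIZE is 32 bytes, so a SIMD16 instruction writing a packed float destination covers 16 * 4 = 64 bytes, i.e. two GRFs, and dst.component_size(exec_size) > REG_SIZE holds; if the previous instruction was a MATH POW, the extra NOP is emitted between them.  A SIMD8 float write covers exactly 32 bytes and does not trigger the workaround.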
@@ -1710,6 +1763,32 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
          unreachable("Invalid instruction width");
       }
 
+      for (unsigned int i = 0; i < inst->sources; i++) {
+        src[i] = brw_reg_from_fs_reg(p, inst, &inst->src[i], devinfo->gen);
+
+        /* The accumulator result appears to get used for the
+         * conditional modifier generation.  When negating a UD
+         * value, there is a 33rd bit generated for the sign in the
+         * accumulator value, so now you can't check, for example,
+         * equality with a 32-bit value.  See piglit fs-op-neg-uvec4.
+         */
+        assert(!inst->conditional_mod ||
+               inst->src[i].type != BRW_REGISTER_TYPE_UD ||
+               !inst->src[i].negate);
+      }
+      dst = brw_reg_from_fs_reg(p, inst, &inst->dst, devinfo->gen);
+
+      brw_set_default_predicate_control(p, inst->predicate);
+      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
+      brw_set_default_flag_reg(p, 0, inst->flag_subreg);
+      brw_set_default_saturate(p, inst->saturate);
+      brw_set_default_mask_control(p, inst->force_writemask_all);
+      brw_set_default_acc_write_control(p, inst->writes_accumulator);
+      brw_set_default_exec_size(p, cvt(inst->exec_size) - 1);
+
+      assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
+      assert(inst->mlen <= BRW_MAX_MSG_LENGTH);
+
       switch (inst->opcode) {
       case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
@@ -2042,17 +2121,19 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
       case FS_OPCODE_TXB:
       case SHADER_OPCODE_TXD:
       case SHADER_OPCODE_TXF:
+      case SHADER_OPCODE_TXF_LZ:
       case SHADER_OPCODE_TXF_CMS:
       case SHADER_OPCODE_TXF_CMS_W:
       case SHADER_OPCODE_TXF_UMS:
       case SHADER_OPCODE_TXF_MCS:
       case SHADER_OPCODE_TXL:
+      case SHADER_OPCODE_TXL_LZ:
       case SHADER_OPCODE_TXS:
       case SHADER_OPCODE_LOD:
       case SHADER_OPCODE_TG4:
       case SHADER_OPCODE_TG4_OFFSET:
       case SHADER_OPCODE_SAMPLEINFO:
-        generate_tex(inst, dst, src[0], src[1]);
+        generate_tex(inst, dst, src[0], src[1], src[2]);
         break;
       case FS_OPCODE_DDX_COARSE:
       case FS_OPCODE_DDX_FINE:
@@ -2060,8 +2141,7 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
          break;
       case FS_OPCODE_DDY_COARSE:
       case FS_OPCODE_DDY_FINE:
-         assert(src[1].file == BRW_IMMEDIATE_VALUE);
-         generate_ddy(inst->opcode, dst, src[0], src[1].ud);
+         generate_ddy(inst->opcode, dst, src[0]);
         break;
 
       case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
@@ -2116,10 +2196,6 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
         generate_fb_write(inst, src[0]);
         break;
 
-      case FS_OPCODE_BLORP_FB_WRITE:
-        generate_blorp_fb_write(inst);
-        break;
-
       case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
          generate_mov_dispatch_to_flags(inst);
          break;
@@ -2183,6 +2259,28 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
          brw_broadcast(p, dst, src[0], src[1]);
          break;
 
+      case SHADER_OPCODE_EXTRACT_BYTE: {
+         assert(src[0].type == BRW_REGISTER_TYPE_D ||
+                src[0].type == BRW_REGISTER_TYPE_UD);
+
+         enum brw_reg_type type =
+            src[0].type == BRW_REGISTER_TYPE_D ? BRW_REGISTER_TYPE_B
+                                               : BRW_REGISTER_TYPE_UB;
+         brw_MOV(p, dst, spread(suboffset(retype(src[0], type), src[1].ud), 4));
+         break;
+      }
+
+      case SHADER_OPCODE_EXTRACT_WORD: {
+         assert(src[0].type == BRW_REGISTER_TYPE_D ||
+                src[0].type == BRW_REGISTER_TYPE_UD);
+
+         enum brw_reg_type type =
+            src[0].type == BRW_REGISTER_TYPE_D ? BRW_REGISTER_TYPE_W
+                                               : BRW_REGISTER_TYPE_UW;
+         brw_MOV(p, dst, spread(suboffset(retype(src[0], type), src[1].ud), 2));
+         break;
+      }
+
       case FS_OPCODE_SET_SAMPLE_ID:
          generate_set_sample_id(inst, dst, src[0], src[1]);
          break;
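A worked example of the EXTRACT_BYTE/EXTRACT_WORD regioning a few cases above, illustrative rather than from the patch: for EXTRACT_BYTE with src[1].ud == 2, retyping src[0] to a byte type and applying suboffset() starts the region two bytes in, and spread(..., 4) sets a horizontal stride of four byte-sized elements, so every channel reads byte 2 of its own dword; the MOV then widens the result, sign-extending for the signed B type and zero-extending for UB.  EXTRACT_WORD works the same way with word-sized elements and a stride of two words.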
@@ -2295,8 +2393,9 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
    compiler->shader_debug_log(log_data,
                               "%s SIMD%d shader: %d inst, %d loops, %u cycles, "
                               "%d:%d spills:fills, Promoted %u constants, "
-                              "compacted %d to %d bytes.\n",
-                              stage_abbrev, dispatch_width, before_size / 16,
+                              "compacted %d to %d bytes.",
+                              _mesa_shader_stage_to_abbrev(stage),
+                              dispatch_width, before_size / 16,
                               loop_count, cfg->cycle_count, spill_count,
                               fill_count, promoted_constants, before_size,
                               after_size);