i965: Use unreachable() instead of unconditional assert().
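
The change replaces the assert(!"not reached")-plus-dead-fallback idiom with Mesa's unreachable(msg) macro (roughly: an assert in debug builds plus __builtin_unreachable() where the compiler provides it), which documents intent and lets the dead fallback value or break after it be dropped. Below is a minimal, self-contained sketch of the pattern, with a hypothetical file_from_reg() helper standing in for functions like brw_file_from_reg() in the diff, and the macro paraphrased only so the sketch compiles on its own:

    #include <assert.h>

    /* Paraphrase of Mesa's macro (the real definition lives in Mesa's
     * headers): assert in debug builds, and tell the optimizer the path
     * is dead so no warning about a missing return is emitted.
     */
    #define unreachable(str)       \
       do {                        \
          assert(!str);            \
          __builtin_unreachable(); \
       } while (0)

    /* Old idiom: the unconditional assert compiles away in release
     * builds, so a dummy return value is still needed to keep the
     * compiler quiet about falling off the end of the function.
     */
    static unsigned file_from_reg_old(int file)
    {
       switch (file) {
       case 0: return 10;
       case 1: return 11;
       default:
          assert(!"not reached");
          return 10;   /* dead fallback, only to silence warnings */
       }
    }

    /* New idiom: no dead fallback or break is required after
     * unreachable().
     */
    static unsigned file_from_reg_new(int file)
    {
       switch (file) {
       case 0: return 10;
       case 1: return 11;
       default:
          unreachable("not reached");
       }
    }
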
diff --git a/src/mesa/drivers/dri/i965/brw_fs_generator.cpp b/src/mesa/drivers/dri/i965/brw_fs_generator.cpp
index 5672eaee915254e51ab0cc90a0a6f78ef18ee81b..52e88d41b365b537ccffc253cf73a65e21d516bc 100644
--- a/src/mesa/drivers/dri/i965/brw_fs_generator.cpp
+++ b/src/mesa/drivers/dri/i965/brw_fs_generator.cpp
@@ -37,19 +37,22 @@ extern "C" {
 #include "brw_cfg.h"
 
 fs_generator::fs_generator(struct brw_context *brw,
-                           struct brw_wm_compile *c,
+                           void *mem_ctx,
+                           const struct brw_wm_prog_key *key,
+                           struct brw_wm_prog_data *prog_data,
                            struct gl_shader_program *prog,
                            struct gl_fragment_program *fp,
-                           bool dual_source_output)
-
-   : brw(brw), c(c), prog(prog), fp(fp), dual_source_output(dual_source_output)
+                           bool dual_source_output,
+                           bool runtime_check_aads_emit,
+                           bool debug_flag)
+
+   : brw(brw), key(key), prog_data(prog_data), prog(prog), fp(fp),
+     dual_source_output(dual_source_output),
+     runtime_check_aads_emit(runtime_check_aads_emit), debug_flag(debug_flag),
+     mem_ctx(mem_ctx)
 {
    ctx = &brw->ctx;
 
-   shader = prog ? prog->_LinkedShaders[MESA_SHADER_FRAGMENT] : NULL;
-
-   mem_ctx = c;
-
    p = rzalloc(mem_ctx, struct brw_compile);
    brw_init_compile(brw, p, mem_ctx);
 }
@@ -58,20 +61,11 @@ fs_generator::~fs_generator()
 {
 }
 
-void
-fs_generator::mark_surface_used(unsigned surf_index)
-{
-   assert(surf_index < BRW_MAX_WM_SURFACES);
-
-   c->prog_data.binding_table_size =
-      MAX2(c->prog_data.binding_table_size, surf_index + 1);
-}
-
-void
+bool
 fs_generator::patch_discard_jumps_to_fb_writes()
 {
    if (brw->gen < 6 || this->discard_halt_patches.is_empty())
-      return;
+      return false;
 
    /* There is a somewhat strange undocumented requirement of using
     * HALT, according to the simulator.  If some channel has HALTed to
@@ -84,58 +78,103 @@ fs_generator::patch_discard_jumps_to_fb_writes()
     * included GPU hangs and sparkly rendering on the piglit discard
     * tests.
     */
-   struct brw_instruction *last_halt = gen6_HALT(p);
-   last_halt->bits3.break_cont.uip = 2;
-   last_halt->bits3.break_cont.jip = 2;
+   brw_inst *last_halt = gen6_HALT(p);
+   brw_inst_set_uip(brw, last_halt, 2);
+   brw_inst_set_jip(brw, last_halt, 2);
 
    int ip = p->nr_insn;
 
-   foreach_list(node, &this->discard_halt_patches) {
-      ip_record *patch_ip = (ip_record *)node;
-      struct brw_instruction *patch = &p->store[patch_ip->ip];
+   foreach_in_list(ip_record, patch_ip, &discard_halt_patches) {
+      brw_inst *patch = &p->store[patch_ip->ip];
 
-      assert(patch->header.opcode == BRW_OPCODE_HALT);
+      assert(brw_inst_opcode(brw, patch) == BRW_OPCODE_HALT);
       /* HALT takes a half-instruction distance from the pre-incremented IP. */
-      patch->bits3.break_cont.uip = (ip - patch_ip->ip) * 2;
+      brw_inst_set_uip(brw, patch, (ip - patch_ip->ip) * 2);
    }
 
    this->discard_halt_patches.make_empty();
+   return true;
+}
+
+void
+fs_generator::fire_fb_write(fs_inst *inst,
+                            GLuint base_reg,
+                            struct brw_reg implied_header,
+                            GLuint nr)
+{
+   uint32_t msg_control;
+
+   if (brw->gen < 6) {
+      brw_push_insn_state(p);
+      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
+      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
+      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
+      brw_MOV(p,
+              brw_message_reg(base_reg + 1),
+              brw_vec8_grf(1, 0));
+      brw_pop_insn_state(p);
+   }
+
+   if (this->dual_source_output)
+      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01;
+   else if (dispatch_width == 16)
+      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE;
+   else
+      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;
+
+   uint32_t surf_index =
+      prog_data->binding_table.render_target_start + inst->target;
+
+   brw_fb_WRITE(p,
+                dispatch_width,
+                base_reg,
+                implied_header,
+                msg_control,
+                surf_index,
+                nr,
+                0,
+                inst->eot,
+                inst->header_present);
+
+   brw_mark_surface_used(&prog_data->base, surf_index);
 }
 
 void
 fs_generator::generate_fb_write(fs_inst *inst)
 {
-   bool eot = inst->eot;
    struct brw_reg implied_header;
-   uint32_t msg_control;
 
    /* Header is 2 regs, g0 and g1 are the contents. g0 will be implied
     * move, here's g1.
     */
-   brw_push_insn_state(p);
-   brw_set_mask_control(p, BRW_MASK_DISABLE);
-   brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+   if (inst->header_present) {
+      brw_push_insn_state(p);
+      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
+      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
+      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
 
-   if (fp->UsesKill) {
-      struct brw_reg pixel_mask;
+      /* On HSW, the GPU will use the predicate on SENDC, unless the header is
+       * present.
+       */
+      if ((fp && fp->UsesKill) || key->alpha_test_func) {
+         struct brw_reg pixel_mask;
 
-      if (brw->gen >= 6)
-         pixel_mask = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
-      else
-         pixel_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
+         if (brw->gen >= 6)
+            pixel_mask = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
+         else
+            pixel_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
 
-      brw_MOV(p, pixel_mask, brw_flag_reg(0, 1));
-   }
+         brw_MOV(p, pixel_mask, brw_flag_reg(0, 1));
+      }
 
-   if (inst->header_present) {
       if (brw->gen >= 6) {
-        brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+        brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
         brw_MOV(p,
                 retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD),
                 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
-        brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+        brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
 
-         if (inst->target > 0 && c->key.replicate_alpha) {
+         if (inst->target > 0 && key->replicate_alpha) {
             /* Set "Source0 Alpha Present to RenderTarget" bit in message
              * header.
              */
@@ -156,36 +195,53 @@ fs_generator::generate_fb_write(fs_inst *inst)
         implied_header = brw_null_reg();
       } else {
         implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
-
-        brw_MOV(p,
-                brw_message_reg(inst->base_mrf + 1),
-                brw_vec8_grf(1, 0));
       }
+
+      brw_pop_insn_state(p);
    } else {
       implied_header = brw_null_reg();
    }
 
-   if (this->dual_source_output)
-      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01;
-   else if (dispatch_width == 16)
-      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE;
-   else
-      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;
-
-   brw_pop_insn_state(p);
+   if (!runtime_check_aads_emit) {
+      fire_fb_write(inst, inst->base_mrf, implied_header, inst->mlen);
+   } else {
+      /* This can only happen in gen < 6 */
+      assert(brw->gen < 6);
+
+      struct brw_reg v1_null_ud = vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));
+
+      /* Check runtime bit to detect if we have to send AA data or not */
+      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
+      brw_AND(p,
+              v1_null_ud,
+              retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD),
+              brw_imm_ud(1<<26));
+      brw_inst_set_cond_modifier(brw, brw_last_inst, BRW_CONDITIONAL_NZ);
+
+      int jmp = brw_JMPI(p, brw_imm_ud(0), BRW_PREDICATE_NORMAL) - p->store;
+      brw_inst_set_exec_size(brw, brw_last_inst, BRW_EXECUTE_1);
+      {
+         /* Don't send AA data */
+         fire_fb_write(inst, inst->base_mrf+1, implied_header, inst->mlen-1);
+      }
+      brw_land_fwd_jump(p, jmp);
+      fire_fb_write(inst, inst->base_mrf, implied_header, inst->mlen);
+   }
+}
 
+void
+fs_generator::generate_blorp_fb_write(fs_inst *inst)
+{
    brw_fb_WRITE(p,
-               dispatch_width,
-               inst->base_mrf,
-               implied_header,
-               msg_control,
-               SURF_INDEX_DRAW(inst->target),
-               inst->mlen,
-               0,
-               eot,
-               inst->header_present);
-
-   mark_surface_used(SURF_INDEX_DRAW(inst->target));
+                16 /* dispatch_width */,
+                inst->base_mrf,
+                brw_reg_from_fs_reg(&inst->src[0]),
+                BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE,
+                inst->target,
+                inst->mlen,
+                0,
+                true,
+                inst->header_present);
 }
 
 /* Computes the integer pixel x,y values from the origin.
@@ -213,11 +269,11 @@ fs_generator::generate_pixel_xy(struct brw_reg dst, bool is_x)
       dst = vec16(dst);
    }
 
-   /* We do this 8 or 16-wide, but since the destination is UW we
-    * don't do compression in the 16-wide case.
+   /* We do this SIMD8 or SIMD16, but since the destination is UW we
+    * don't do compression in the SIMD16 case.
     */
    brw_push_insn_state(p);
-   brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
    brw_ADD(p, dst, src, deltas);
    brw_pop_insn_state(p);
 }
@@ -241,72 +297,22 @@ fs_generator::generate_linterp(fs_inst *inst,
 }
 
 void
-fs_generator::generate_math1_gen7(fs_inst *inst,
-                               struct brw_reg dst,
-                               struct brw_reg src0)
-{
-   assert(inst->mlen == 0);
-   brw_math(p, dst,
-           brw_math_function(inst->opcode),
-           0, src0,
-           BRW_MATH_DATA_VECTOR,
-           BRW_MATH_PRECISION_FULL);
-}
-
-void
-fs_generator::generate_math2_gen7(fs_inst *inst,
-                               struct brw_reg dst,
-                               struct brw_reg src0,
-                               struct brw_reg src1)
-{
-   assert(inst->mlen == 0);
-   brw_math2(p, dst, brw_math_function(inst->opcode), src0, src1);
-}
-
-void
-fs_generator::generate_math1_gen6(fs_inst *inst,
-                               struct brw_reg dst,
-                               struct brw_reg src0)
-{
-   int op = brw_math_function(inst->opcode);
-
-   assert(inst->mlen == 0);
-
-   brw_set_compression_control(p, BRW_COMPRESSION_NONE);
-   brw_math(p, dst,
-           op,
-           0, src0,
-           BRW_MATH_DATA_VECTOR,
-           BRW_MATH_PRECISION_FULL);
-
-   if (dispatch_width == 16) {
-      brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
-      brw_math(p, sechalf(dst),
-              op,
-              0, sechalf(src0),
-              BRW_MATH_DATA_VECTOR,
-              BRW_MATH_PRECISION_FULL);
-      brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
-   }
-}
-
-void
-fs_generator::generate_math2_gen6(fs_inst *inst,
-                               struct brw_reg dst,
-                               struct brw_reg src0,
-                               struct brw_reg src1)
+fs_generator::generate_math_gen6(fs_inst *inst,
+                                 struct brw_reg dst,
+                                 struct brw_reg src0,
+                                 struct brw_reg src1)
 {
    int op = brw_math_function(inst->opcode);
+   bool binop = src1.file == BRW_GENERAL_REGISTER_FILE;
 
-   assert(inst->mlen == 0);
-
-   brw_set_compression_control(p, BRW_COMPRESSION_NONE);
-   brw_math2(p, dst, op, src0, src1);
+   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
+   gen6_math(p, dst, op, src0, src1);
 
    if (dispatch_width == 16) {
-      brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
-      brw_math2(p, sechalf(dst), op, sechalf(src0), sechalf(src1));
-      brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+      brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
+      gen6_math(p, sechalf(dst), op, sechalf(src0),
+                binop ? sechalf(src1) : brw_null_reg());
+      brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
    }
 }
 
@@ -319,22 +325,22 @@ fs_generator::generate_math_gen4(fs_inst *inst,
 
    assert(inst->mlen >= 1);
 
-   brw_set_compression_control(p, BRW_COMPRESSION_NONE);
-   brw_math(p, dst,
-           op,
-           inst->base_mrf, src,
-           BRW_MATH_DATA_VECTOR,
-           BRW_MATH_PRECISION_FULL);
+   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
+   gen4_math(p, dst,
+            op,
+            inst->base_mrf, src,
+            BRW_MATH_DATA_VECTOR,
+            BRW_MATH_PRECISION_FULL);
 
    if (dispatch_width == 16) {
-      brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
-      brw_math(p, sechalf(dst),
-              op,
-              inst->base_mrf + 1, sechalf(src),
-              BRW_MATH_DATA_VECTOR,
-              BRW_MATH_PRECISION_FULL);
-
-      brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+      brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
+      gen4_math(p, sechalf(dst),
+               op,
+               inst->base_mrf + 1, sechalf(src),
+               BRW_MATH_DATA_VECTOR,
+               BRW_MATH_PRECISION_FULL);
+
+      brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
    }
 }
 
@@ -354,11 +360,11 @@ fs_generator::generate_math_g45(fs_inst *inst,
 
    assert(inst->mlen >= 1);
 
-   brw_math(p, dst,
-            op,
-            inst->base_mrf, src,
-            BRW_MATH_DATA_VECTOR,
-            BRW_MATH_PRECISION_FULL);
+   gen4_math(p, dst,
+             op,
+             inst->base_mrf, src,
+             BRW_MATH_DATA_VECTOR,
+             BRW_MATH_PRECISION_FULL);
 }
 
 void
@@ -381,7 +387,8 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
       break;
    }
 
-   if (dispatch_width == 16)
+   if (dispatch_width == 16 &&
+      !inst->force_uncompressed && !inst->force_sechalf)
       simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
 
    if (brw->gen >= 5) {
@@ -413,7 +420,7 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
       case SHADER_OPCODE_TXD:
          if (inst->shadow_compare) {
             /* Gen7.5+.  Otherwise, lowered by brw_lower_texture_gradients(). */
-            assert(brw->is_haswell);
+            assert(brw->gen >= 8 || brw->is_haswell);
             msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
          } else {
             msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
@@ -422,22 +429,42 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
       case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
-      case SHADER_OPCODE_TXF_MS:
+      case SHADER_OPCODE_TXF_CMS:
          if (brw->gen >= 7)
             msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
          else
             msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
          break;
+      case SHADER_OPCODE_TXF_UMS:
+         assert(brw->gen >= 7);
+         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DSS;
+         break;
+      case SHADER_OPCODE_TXF_MCS:
+         assert(brw->gen >= 7);
+         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
+         break;
       case SHADER_OPCODE_LOD:
          msg_type = GEN5_SAMPLER_MESSAGE_LOD;
          break;
       case SHADER_OPCODE_TG4:
-         assert(brw->gen >= 6);
-         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
+         if (inst->shadow_compare) {
+            assert(brw->gen >= 7);
+            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
+         } else {
+            assert(brw->gen >= 6);
+            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
+         }
+         break;
+      case SHADER_OPCODE_TG4_OFFSET:
+         assert(brw->gen >= 7);
+         if (inst->shadow_compare) {
+            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
+         } else {
+            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
+         }
          break;
       default:
-        assert(!"not reached");
-        break;
+        unreachable("not reached");
       }
    } else {
       switch (inst->opcode) {
@@ -489,8 +516,7 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
         simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         break;
       default:
-        assert(!"not reached");
-        break;
+        unreachable("not reached");
       }
    }
    assert(msg_type != -1);
@@ -500,34 +526,77 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
       dst = vec16(dst);
    }
 
+   if (brw->gen >= 7 && inst->header_present && dispatch_width == 16) {
+      /* The send-from-GRF for SIMD16 texturing with a header has an extra
+       * hardware register allocated to it, which we need to skip over (since
+       * our coordinates in the payload are in the even-numbered registers,
+       * and the header comes right before the first one).
+       */
+      assert(src.file == BRW_GENERAL_REGISTER_FILE);
+      src.nr++;
+   }
+
    /* Load the message header if present.  If there's a texture offset,
     * we need to set it up explicitly and load the offset bitfield.
     * Otherwise, we can use an implied move from g0 to the first message reg.
     */
-   if (inst->texture_offset) {
-      brw_push_insn_state(p);
-      brw_set_mask_control(p, BRW_MASK_DISABLE);
-      brw_set_compression_control(p, BRW_COMPRESSION_NONE);
-      /* Explicitly set up the message header by copying g0 to the MRF. */
-      brw_MOV(p, retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD),
-                 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
-
-      /* Then set the offset bits in DWord 2. */
-      brw_MOV(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
-                                     inst->base_mrf, 2), BRW_REGISTER_TYPE_UD),
-                 brw_imm_ud(inst->texture_offset));
-      brw_pop_insn_state(p);
-   } else if (inst->header_present) {
-      /* Set up an implied move from g0 to the MRF. */
-      src = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
+   if (inst->header_present) {
+      if (brw->gen < 6 && !inst->texture_offset) {
+         /* Set up an implied move from g0 to the MRF. */
+         src = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
+      } else {
+         struct brw_reg header_reg;
+
+         if (brw->gen >= 7) {
+            header_reg = src;
+         } else {
+            assert(inst->base_mrf != -1);
+            header_reg = brw_message_reg(inst->base_mrf);
+         }
+
+         brw_push_insn_state(p);
+         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
+         brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
+         /* Explicitly set up the message header by copying g0 to the MRF. */
+         brw_MOV(p, header_reg, brw_vec8_grf(0, 0));
+
+         if (inst->texture_offset) {
+            /* Set the offset bits in DWord 2. */
+            brw_MOV(p, get_element_ud(header_reg, 2),
+                       brw_imm_ud(inst->texture_offset));
+         }
+
+         if (inst->sampler >= 16) {
+            /* The "Sampler Index" field can only store values between 0 and 15.
+             * However, we can add an offset to the "Sampler State Pointer"
+             * field, effectively selecting a different set of 16 samplers.
+             *
+             * The "Sampler State Pointer" needs to be aligned to a 32-byte
+             * offset, and each sampler state is only 16-bytes, so we can't
+             * exclusively use the offset - we have to use both.
+             */
+            assert(brw->gen >= 8 || brw->is_haswell);
+            brw_ADD(p,
+                    get_element_ud(header_reg, 3),
+                    get_element_ud(brw_vec8_grf(0, 0), 3),
+                    brw_imm_ud(16 * (inst->sampler / 16) *
+                               sizeof(gen7_sampler_state)));
+         }
+         brw_pop_insn_state(p);
+      }
    }
 
+   uint32_t surface_index = ((inst->opcode == SHADER_OPCODE_TG4 ||
+      inst->opcode == SHADER_OPCODE_TG4_OFFSET)
+      ? prog_data->base.binding_table.gather_texture_start
+      : prog_data->base.binding_table.texture_start) + inst->sampler;
+
    brw_SAMPLE(p,
              retype(dst, BRW_REGISTER_TYPE_UW),
              inst->base_mrf,
              src,
-              SURF_INDEX_TEXTURE(inst->sampler),
-             inst->sampler,
+              surface_index,
+             inst->sampler % 16,
              msg_type,
              rlen,
              inst->mlen,
@@ -535,7 +604,7 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
              simd_mode,
              return_format);
 
-   mark_surface_used(SURF_INDEX_TEXTURE(inst->sampler));
+   brw_mark_surface_used(&prog_data->base, surface_index);
 }
 
 
@@ -564,17 +633,15 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
  * sample_d.  On at least Haswell, sample_d instruction does some
  * optimizations if the same LOD is used for all pixels in the subspan.
  *
- * For DDY, it's harder, as we want to produce the pairs swizzled between each
- * other.  We could probably do it like ddx and swizzle the right order later,
- * but bail for now and just produce
- * ((ss0.tl - ss0.bl)x4 (ss1.tl - ss1.bl)x4)
+ * For DDY, we need to use ALIGN16 mode since it's capable of doing the
+ * appropriate swizzling.
  */
 void
 fs_generator::generate_ddx(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
 {
    unsigned vstride, width;
 
-   if (c->key.high_quality_derivatives) {
+   if (key->high_quality_derivatives) {
       /* produce accurate derivatives */
       vstride = BRW_VERTICAL_STRIDE_2;
       width = BRW_WIDTH_2;
@@ -608,22 +675,82 @@ void
 fs_generator::generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src,
                          bool negate_value)
 {
-   struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
-                                BRW_REGISTER_TYPE_F,
-                                BRW_VERTICAL_STRIDE_4,
-                                BRW_WIDTH_4,
-                                BRW_HORIZONTAL_STRIDE_0,
-                                BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
-   struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
-                                BRW_REGISTER_TYPE_F,
-                                BRW_VERTICAL_STRIDE_4,
-                                BRW_WIDTH_4,
-                                BRW_HORIZONTAL_STRIDE_0,
-                                BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
-   if (negate_value)
-      brw_ADD(p, dst, src1, negate(src0));
-   else
-      brw_ADD(p, dst, src0, negate(src1));
+   if (key->high_quality_derivatives) {
+      /* From the Ivy Bridge PRM, volume 4 part 3, section 3.3.9 (Register
+       * Region Restrictions):
+       *
+       *     In Align16 access mode, SIMD16 is not allowed for DW operations
+       *     and SIMD8 is not allowed for DF operations.
+       *
+       * In this context, "DW operations" means "operations acting on 32-bit
+       * values", so it includes operations on floats.
+       *
+       * Gen4 has a similar restriction.  From the i965 PRM, section 11.5.3
+       * (Instruction Compression -> Rules and Restrictions):
+       *
+       *     A compressed instruction must be in Align1 access mode. Align16
+       *     mode instructions cannot be compressed.
+       *
+       * Similar text exists in the g45 PRM.
+       *
+       * On these platforms, if we're building a SIMD16 shader, we need to
+       * manually unroll to a pair of SIMD8 instructions.
+       */
+      bool unroll_to_simd8 =
+         (dispatch_width == 16 &&
+          (brw->gen == 4 || (brw->gen == 7 && !brw->is_haswell)));
+
+      /* produce accurate derivatives */
+      struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
+                                    BRW_REGISTER_TYPE_F,
+                                    BRW_VERTICAL_STRIDE_4,
+                                    BRW_WIDTH_4,
+                                    BRW_HORIZONTAL_STRIDE_1,
+                                    BRW_SWIZZLE_XYXY, WRITEMASK_XYZW);
+      struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
+                                    BRW_REGISTER_TYPE_F,
+                                    BRW_VERTICAL_STRIDE_4,
+                                    BRW_WIDTH_4,
+                                    BRW_HORIZONTAL_STRIDE_1,
+                                    BRW_SWIZZLE_ZWZW, WRITEMASK_XYZW);
+      brw_push_insn_state(p);
+      brw_set_default_access_mode(p, BRW_ALIGN_16);
+      if (unroll_to_simd8)
+         brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
+      if (negate_value)
+         brw_ADD(p, dst, src1, negate(src0));
+      else
+         brw_ADD(p, dst, src0, negate(src1));
+      if (unroll_to_simd8) {
+         brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
+         src0 = sechalf(src0);
+         src1 = sechalf(src1);
+         dst = sechalf(dst);
+         if (negate_value)
+            brw_ADD(p, dst, src1, negate(src0));
+         else
+            brw_ADD(p, dst, src0, negate(src1));
+      }
+      brw_pop_insn_state(p);
+   } else {
+      /* replicate the derivative at the top-left pixel to other pixels */
+      struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
+                                    BRW_REGISTER_TYPE_F,
+                                    BRW_VERTICAL_STRIDE_4,
+                                    BRW_WIDTH_4,
+                                    BRW_HORIZONTAL_STRIDE_0,
+                                    BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
+      struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
+                                    BRW_REGISTER_TYPE_F,
+                                    BRW_VERTICAL_STRIDE_4,
+                                    BRW_WIDTH_4,
+                                    BRW_HORIZONTAL_STRIDE_0,
+                                    BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
+      if (negate_value)
+         brw_ADD(p, dst, src1, negate(src0));
+      else
+         brw_ADD(p, dst, src0, negate(src1));
+   }
 }
 
 void
@@ -638,30 +765,36 @@ fs_generator::generate_discard_jump(fs_inst *inst)
    this->discard_halt_patches.push_tail(new(mem_ctx) ip_record(p->nr_insn));
 
    brw_push_insn_state(p);
-   brw_set_mask_control(p, BRW_MASK_DISABLE);
+   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
    gen6_HALT(p);
    brw_pop_insn_state(p);
 }
 
 void
-fs_generator::generate_spill(fs_inst *inst, struct brw_reg src)
+fs_generator::generate_scratch_write(fs_inst *inst, struct brw_reg src)
 {
    assert(inst->mlen != 0);
 
    brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_UD),
           retype(src, BRW_REGISTER_TYPE_UD));
-   brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf), 1,
-                                inst->offset);
+   brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf),
+                                 dispatch_width / 8, inst->offset);
 }
 
 void
-fs_generator::generate_unspill(fs_inst *inst, struct brw_reg dst)
+fs_generator::generate_scratch_read(fs_inst *inst, struct brw_reg dst)
 {
    assert(inst->mlen != 0);
 
-   brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf), 1,
-                               inst->offset);
+   brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf),
+                                dispatch_width / 8, inst->offset);
+}
+
+void
+fs_generator::generate_scratch_read_gen7(fs_inst *inst, struct brw_reg dst)
+{
+   gen7_block_read_scratch(p, dst, dispatch_width / 8, inst->offset);
 }
 
 void
@@ -683,7 +816,7 @@ fs_generator::generate_uniform_pull_constant_load(fs_inst *inst,
    brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf),
                        read_offset, surf_index);
 
-   mark_surface_used(surf_index);
+   brw_mark_surface_used(&prog_data->base, surf_index);
 }
 
 void
@@ -703,9 +836,9 @@ fs_generator::generate_uniform_pull_constant_load_gen7(fs_inst *inst,
    offset = brw_vec1_grf(offset.nr, 0);
 
    brw_push_insn_state(p);
-   brw_set_compression_control(p, BRW_COMPRESSION_NONE);
-   brw_set_mask_control(p, BRW_MASK_DISABLE);
-   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
+   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
+   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
+   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
    brw_pop_insn_state(p);
 
    /* We use the SIMD4x2 mode because we want to end up with 4 components in
@@ -725,7 +858,7 @@ fs_generator::generate_uniform_pull_constant_load_gen7(fs_inst *inst,
                            BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                            0);
 
-   mark_surface_used(surf_index);
+   brw_mark_surface_used(&prog_data->base, surf_index);
 }
 
 void
@@ -771,12 +904,12 @@ fs_generator::generate_varying_pull_constant_load(fs_inst *inst,
    struct brw_reg header = brw_vec8_grf(0, 0);
    gen6_resolve_implied_move(p, &header, inst->base_mrf);
 
-   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
-   send->header.compression_control = BRW_COMPRESSION_NONE;
-   brw_set_dest(p, send, dst);
+   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
+   brw_inst_set_qtr_control(brw, send, BRW_COMPRESSION_NONE);
+   brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW));
    brw_set_src0(p, send, header);
    if (brw->gen < 6)
-      send->header.destreg__conditionalmod = inst->base_mrf;
+      brw_inst_set_base_mrf(brw, send, inst->base_mrf);
 
    /* Our surface is set up as floats, regardless of what actual data is
     * stored in it.
@@ -792,7 +925,7 @@ fs_generator::generate_varying_pull_constant_load(fs_inst *inst,
                            simd_mode,
                            return_format);
 
-   mark_surface_used(surf_index);
+   brw_mark_surface_used(&prog_data->base, surf_index);
 }
 
 void
@@ -823,7 +956,7 @@ fs_generator::generate_varying_pull_constant_load_gen7(fs_inst *inst,
       simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
    }
 
-   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
+   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
    brw_set_dest(p, send, dst);
    brw_set_src0(p, send, offset);
    brw_set_sampler_message(p, send,
@@ -836,7 +969,7 @@ fs_generator::generate_varying_pull_constant_load_gen7(fs_inst *inst,
                            simd_mode,
                            0);
 
-   mark_surface_used(surf_index);
+   brw_mark_surface_used(&prog_data->base, surf_index);
 }
 
 /**
@@ -857,7 +990,7 @@ fs_generator::generate_mov_dispatch_to_flags(fs_inst *inst)
       dispatch_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
 
    brw_push_insn_state(p);
-   brw_set_mask_control(p, BRW_MASK_DISABLE);
+   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
    brw_MOV(p, flags, dispatch_mask);
    brw_pop_insn_state(p);
 }
@@ -866,8 +999,6 @@ fs_generator::generate_mov_dispatch_to_flags(fs_inst *inst)
 static uint32_t brw_file_from_reg(fs_reg *reg)
 {
    switch (reg->file) {
-   case ARF:
-      return BRW_ARCHITECTURE_REGISTER_FILE;
    case GRF:
       return BRW_GENERAL_REGISTER_FILE;
    case MRF:
@@ -875,28 +1006,27 @@ static uint32_t brw_file_from_reg(fs_reg *reg)
    case IMM:
       return BRW_IMMEDIATE_VALUE;
    default:
-      assert(!"not reached");
-      return BRW_GENERAL_REGISTER_FILE;
+      unreachable("not reached");
    }
 }
 
-static struct brw_reg
+struct brw_reg
 brw_reg_from_fs_reg(fs_reg *reg)
 {
    struct brw_reg brw_reg;
 
    switch (reg->file) {
    case GRF:
-   case ARF:
    case MRF:
-      if (reg->smear == -1) {
-        brw_reg = brw_vec8_reg(brw_file_from_reg(reg), reg->reg, 0);
+      if (reg->stride == 0) {
+         brw_reg = brw_vec1_reg(brw_file_from_reg(reg), reg->reg, 0);
       } else {
-        brw_reg = brw_vec1_reg(brw_file_from_reg(reg), reg->reg, reg->smear);
+         brw_reg = brw_vec8_reg(brw_file_from_reg(reg), reg->reg, 0);
+         brw_reg = stride(brw_reg, 8 * reg->stride, 8, reg->stride);
       }
+
       brw_reg = retype(brw_reg, reg->type);
-      if (reg->sechalf)
-        brw_reg = sechalf(brw_reg);
+      brw_reg = byte_offset(brw_reg, reg->subreg_offset);
       break;
    case IMM:
       switch (reg->type) {
@@ -910,12 +1040,11 @@ brw_reg_from_fs_reg(fs_reg *reg)
         brw_reg = brw_imm_ud(reg->imm.u);
         break;
       default:
-        assert(!"not reached");
-        brw_reg = brw_null_reg();
-        break;
+        unreachable("not reached");
       }
       break;
    case HW_REG:
+      assert(reg->type == reg->fixed_hw_reg.type);
       brw_reg = reg->fixed_hw_reg;
       break;
    case BAD_FILE:
@@ -923,13 +1052,9 @@ brw_reg_from_fs_reg(fs_reg *reg)
       brw_reg = brw_null_reg();
       break;
    case UNIFORM:
-      assert(!"not reached");
-      brw_reg = brw_null_reg();
-      break;
+      unreachable("not reached");
    default:
-      assert(!"not reached");
-      brw_reg = brw_null_reg();
-      break;
+      unreachable("not reached");
    }
    if (reg->abs)
       brw_reg = brw_abs(brw_reg);
@@ -954,12 +1079,72 @@ fs_generator::generate_set_simd4x2_offset(fs_inst *inst,
    assert(value.file == BRW_IMMEDIATE_VALUE);
 
    brw_push_insn_state(p);
-   brw_set_compression_control(p, BRW_COMPRESSION_NONE);
-   brw_set_mask_control(p, BRW_MASK_DISABLE);
+   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
+   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
    brw_MOV(p, retype(brw_vec1_reg(dst.file, dst.nr, 0), value.type), value);
    brw_pop_insn_state(p);
 }
 
+/* Sets vstride=16, width=8, hstride=2 or vstride=0, width=1, hstride=0
+ * (when mask is passed as a uniform) of register mask before moving it
+ * to register dst.
+ */
+void
+fs_generator::generate_set_omask(fs_inst *inst,
+                                 struct brw_reg dst,
+                                 struct brw_reg mask)
+{
+   bool stride_8_8_1 =
+    (mask.vstride == BRW_VERTICAL_STRIDE_8 &&
+     mask.width == BRW_WIDTH_8 &&
+     mask.hstride == BRW_HORIZONTAL_STRIDE_1);
+
+   bool stride_0_1_0 =
+    (mask.vstride == BRW_VERTICAL_STRIDE_0 &&
+     mask.width == BRW_WIDTH_1 &&
+     mask.hstride == BRW_HORIZONTAL_STRIDE_0);
+
+   assert(stride_8_8_1 || stride_0_1_0);
+   assert(dst.type == BRW_REGISTER_TYPE_UW);
+
+   if (dispatch_width == 16)
+      dst = vec16(dst);
+   brw_push_insn_state(p);
+   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
+   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
+
+   if (stride_8_8_1) {
+      brw_MOV(p, dst, retype(stride(mask, 16, 8, 2), dst.type));
+   } else if (stride_0_1_0) {
+      brw_MOV(p, dst, retype(mask, dst.type));
+   }
+   brw_pop_insn_state(p);
+}
+
+/* Sets vstride=1, width=4, hstride=0 of register src1 during
+ * the ADD instruction.
+ */
+void
+fs_generator::generate_set_sample_id(fs_inst *inst,
+                                     struct brw_reg dst,
+                                     struct brw_reg src0,
+                                     struct brw_reg src1)
+{
+   assert(dst.type == BRW_REGISTER_TYPE_D ||
+          dst.type == BRW_REGISTER_TYPE_UD);
+   assert(src0.type == BRW_REGISTER_TYPE_D ||
+          src0.type == BRW_REGISTER_TYPE_UD);
+
+   brw_push_insn_state(p);
+   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
+   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
+   struct brw_reg reg = retype(stride(src1, 1, 4, 0), BRW_REGISTER_TYPE_UW);
+   brw_ADD(p, dst, src0, reg);
+   if (dispatch_width == 16)
+      brw_ADD(p, offset(dst, 1), offset(src0, 1), suboffset(reg, 2));
+   brw_pop_insn_state(p);
+}
+
 /**
  * Change the register's data type from UD to W, doubling the strides in order
  * to compensate for halving the data type width.
@@ -1057,7 +1242,7 @@ fs_generator::generate_shader_time_add(fs_inst *inst,
 {
    assert(brw->gen >= 7);
    brw_push_insn_state(p);
-   brw_set_mask_control(p, true);
+   brw_set_default_mask_control(p, true);
 
    assert(payload.file == BRW_GENERAL_REGISTER_FILE);
    struct brw_reg payload_offset = retype(brw_vec1_grf(payload.nr, 0),
@@ -1080,79 +1265,65 @@ fs_generator::generate_shader_time_add(fs_inst *inst,
     */
    brw_MOV(p, payload_offset, offset);
    brw_MOV(p, payload_value, value);
-   brw_shader_time_add(p, payload, SURF_INDEX_WM_SHADER_TIME);
+   brw_shader_time_add(p, payload,
+                       prog_data->base.binding_table.shader_time_start);
    brw_pop_insn_state(p);
 
-   mark_surface_used(SURF_INDEX_WM_SHADER_TIME);
+   brw_mark_surface_used(&prog_data->base,
+                         prog_data->base.binding_table.shader_time_start);
+}
+
+void
+fs_generator::generate_untyped_atomic(fs_inst *inst, struct brw_reg dst,
+                                      struct brw_reg atomic_op,
+                                      struct brw_reg surf_index)
+{
+   assert(atomic_op.file == BRW_IMMEDIATE_VALUE &&
+          atomic_op.type == BRW_REGISTER_TYPE_UD &&
+          surf_index.file == BRW_IMMEDIATE_VALUE &&
+         surf_index.type == BRW_REGISTER_TYPE_UD);
+
+   brw_untyped_atomic(p, dst, brw_message_reg(inst->base_mrf),
+                      atomic_op.dw1.ud, surf_index.dw1.ud,
+                      inst->mlen, dispatch_width / 8);
+
+   brw_mark_surface_used(&prog_data->base, surf_index.dw1.ud);
+}
+
+void
+fs_generator::generate_untyped_surface_read(fs_inst *inst, struct brw_reg dst,
+                                            struct brw_reg surf_index)
+{
+   assert(surf_index.file == BRW_IMMEDIATE_VALUE &&
+         surf_index.type == BRW_REGISTER_TYPE_UD);
+
+   brw_untyped_surface_read(p, dst, brw_message_reg(inst->base_mrf),
+                            surf_index.dw1.ud,
+                            inst->mlen, dispatch_width / 8);
+
+   brw_mark_surface_used(&prog_data->base, surf_index.dw1.ud);
 }
 
 void
 fs_generator::generate_code(exec_list *instructions)
 {
-   int last_native_insn_offset = p->next_insn_offset;
-   const char *last_annotation_string = NULL;
-   const void *last_annotation_ir = NULL;
-
-   if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
-      if (shader) {
-         printf("Native code for fragment shader %d (%d-wide dispatch):\n",
-                prog->Name, dispatch_width);
-      } else {
-         printf("Native code for fragment program %d (%d-wide dispatch):\n",
-                fp->Base.Id, dispatch_width);
-      }
-   }
+   int start_offset = p->next_insn_offset;
+
+   struct annotation_info annotation;
+   memset(&annotation, 0, sizeof(annotation));
 
    cfg_t *cfg = NULL;
-   if (unlikely(INTEL_DEBUG & DEBUG_WM))
-      cfg = new(mem_ctx) cfg_t(mem_ctx, instructions);
+   if (unlikely(debug_flag))
+      cfg = new(mem_ctx) cfg_t(instructions);
 
-   foreach_list(node, instructions) {
-      fs_inst *inst = (fs_inst *)node;
+   foreach_in_list(fs_inst, inst, instructions) {
       struct brw_reg src[3], dst;
+      unsigned int last_insn_offset = p->next_insn_offset;
 
-      if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
-        foreach_list(node, &cfg->block_list) {
-           bblock_link *link = (bblock_link *)node;
-           bblock_t *block = link->block;
-
-           if (block->start == inst) {
-              printf("   START B%d", block->block_num);
-              foreach_list(predecessor_node, &block->parents) {
-                 bblock_link *predecessor_link =
-                    (bblock_link *)predecessor_node;
-                 bblock_t *predecessor_block = predecessor_link->block;
-                 printf(" <-B%d", predecessor_block->block_num);
-              }
-              printf("\n");
-           }
-        }
+      if (unlikely(debug_flag))
+         annotate(brw, &annotation, cfg, inst, p->next_insn_offset);
 
-        if (last_annotation_ir != inst->ir) {
-           last_annotation_ir = inst->ir;
-           if (last_annotation_ir) {
-              printf("   ");
-               if (shader)
-                  ((ir_instruction *)inst->ir)->print();
-               else {
-                  const prog_instruction *fpi;
-                  fpi = (const prog_instruction *)inst->ir;
-                  printf("%d: ", (int)(fpi - fp->Base.Instructions));
-                  _mesa_fprint_instruction_opt(stdout,
-                                               fpi,
-                                               0, PROG_PRINT_DEBUG, NULL);
-               }
-              printf("\n");
-           }
-        }
-        if (last_annotation_string != inst->annotation) {
-           last_annotation_string = inst->annotation;
-           if (last_annotation_string)
-              printf("   %s\n", last_annotation_string);
-        }
-      }
-
-      for (unsigned int i = 0; i < 3; i++) {
+      for (unsigned int i = 0; i < inst->sources; i++) {
         src[i] = brw_reg_from_fs_reg(&inst->src[i]);
 
         /* The accumulator result appears to get used for the
@@ -1167,19 +1338,19 @@ fs_generator::generate_code(exec_list *instructions)
       }
       dst = brw_reg_from_fs_reg(&inst->dst);
 
-      brw_set_conditionalmod(p, inst->conditional_mod);
-      brw_set_predicate_control(p, inst->predicate);
-      brw_set_predicate_inverse(p, inst->predicate_inverse);
-      brw_set_flag_reg(p, 0, inst->flag_subreg);
-      brw_set_saturate(p, inst->saturate);
-      brw_set_mask_control(p, inst->force_writemask_all);
+      brw_set_default_predicate_control(p, inst->predicate);
+      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
+      brw_set_default_flag_reg(p, 0, inst->flag_subreg);
+      brw_set_default_saturate(p, inst->saturate);
+      brw_set_default_mask_control(p, inst->force_writemask_all);
+      brw_set_default_acc_write_control(p, inst->writes_accumulator);
 
       if (inst->force_uncompressed || dispatch_width == 8) {
-        brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+        brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
       } else if (inst->force_sechalf) {
-        brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
+        brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
       } else {
-        brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+        brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
       }
 
       switch (inst->opcode) {
@@ -1192,38 +1363,41 @@ fs_generator::generate_code(exec_list *instructions)
       case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;
+      case BRW_OPCODE_AVG:
+        brw_AVG(p, dst, src[0], src[1]);
+        break;
       case BRW_OPCODE_MACH:
-        brw_set_acc_write_control(p, 1);
         brw_MACH(p, dst, src[0], src[1]);
-        brw_set_acc_write_control(p, 0);
         break;
 
       case BRW_OPCODE_MAD:
-        brw_set_access_mode(p, BRW_ALIGN_16);
-        if (dispatch_width == 16) {
-           brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+         assert(brw->gen >= 6);
+        brw_set_default_access_mode(p, BRW_ALIGN_16);
+         if (dispatch_width == 16 && brw->gen < 8 && !brw->is_haswell) {
+           brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
            brw_MAD(p, dst, src[0], src[1], src[2]);
-           brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
+           brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
            brw_MAD(p, sechalf(dst), sechalf(src[0]), sechalf(src[1]), sechalf(src[2]));
-           brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+           brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
         } else {
            brw_MAD(p, dst, src[0], src[1], src[2]);
         }
-        brw_set_access_mode(p, BRW_ALIGN_1);
+        brw_set_default_access_mode(p, BRW_ALIGN_1);
         break;
 
       case BRW_OPCODE_LRP:
-        brw_set_access_mode(p, BRW_ALIGN_16);
-        if (dispatch_width == 16) {
-           brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+         assert(brw->gen >= 6);
+        brw_set_default_access_mode(p, BRW_ALIGN_16);
+         if (dispatch_width == 16 && brw->gen < 8 && !brw->is_haswell) {
+           brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
            brw_LRP(p, dst, src[0], src[1], src[2]);
-           brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
+           brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
            brw_LRP(p, sechalf(dst), sechalf(src[0]), sechalf(src[1]), sechalf(src[2]));
-           brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+           brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
         } else {
            brw_LRP(p, dst, src[0], src[1], src[2]);
         }
-        brw_set_access_mode(p, BRW_ALIGN_1);
+        brw_set_default_access_mode(p, BRW_ALIGN_1);
         break;
 
       case BRW_OPCODE_FRC:
@@ -1261,9 +1435,11 @@ fs_generator::generate_code(exec_list *instructions)
         brw_SHL(p, dst, src[0], src[1]);
         break;
       case BRW_OPCODE_F32TO16:
+         assert(brw->gen >= 7);
          brw_F32TO16(p, dst, src[0]);
          break;
       case BRW_OPCODE_F16TO32:
+         assert(brw->gen >= 7);
          brw_F16TO32(p, dst, src[0]);
          break;
       case BRW_OPCODE_CMP:
@@ -1273,52 +1449,91 @@ fs_generator::generate_code(exec_list *instructions)
         brw_SEL(p, dst, src[0], src[1]);
         break;
       case BRW_OPCODE_BFREV:
+         assert(brw->gen >= 7);
          /* BFREV only supports UD type for src and dst. */
          brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
                       retype(src[0], BRW_REGISTER_TYPE_UD));
          break;
       case BRW_OPCODE_FBH:
+         assert(brw->gen >= 7);
          /* FBH only supports UD type for dst. */
          brw_FBH(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
          break;
       case BRW_OPCODE_FBL:
+         assert(brw->gen >= 7);
          /* FBL only supports UD type for dst. */
          brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
          break;
       case BRW_OPCODE_CBIT:
+         assert(brw->gen >= 7);
          /* CBIT only supports UD type for dst. */
          brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
          break;
+      case BRW_OPCODE_ADDC:
+         assert(brw->gen >= 7);
+         brw_ADDC(p, dst, src[0], src[1]);
+         break;
+      case BRW_OPCODE_SUBB:
+         assert(brw->gen >= 7);
+         brw_SUBB(p, dst, src[0], src[1]);
+         break;
+      case BRW_OPCODE_MAC:
+         brw_MAC(p, dst, src[0], src[1]);
+         break;
 
       case BRW_OPCODE_BFE:
-         brw_set_access_mode(p, BRW_ALIGN_16);
-         if (dispatch_width == 16) {
-            brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+         assert(brw->gen >= 7);
+         brw_set_default_access_mode(p, BRW_ALIGN_16);
+         if (dispatch_width == 16 && brw->gen < 8 && !brw->is_haswell) {
+            brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
             brw_BFE(p, dst, src[0], src[1], src[2]);
-            brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
+            brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
             brw_BFE(p, sechalf(dst), sechalf(src[0]), sechalf(src[1]), sechalf(src[2]));
-            brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+            brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
          } else {
             brw_BFE(p, dst, src[0], src[1], src[2]);
          }
-         brw_set_access_mode(p, BRW_ALIGN_1);
+         brw_set_default_access_mode(p, BRW_ALIGN_1);
          break;
 
       case BRW_OPCODE_BFI1:
-         brw_BFI1(p, dst, src[0], src[1]);
+         assert(brw->gen >= 7);
+         /* The Haswell WaForceSIMD8ForBFIInstruction workaround says that we
+          * should
+          *
+          *    "Force BFI instructions to be executed always in SIMD8."
+          */
+         if (dispatch_width == 16 && brw->is_haswell) {
+            brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
+            brw_BFI1(p, dst, src[0], src[1]);
+            brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
+            brw_BFI1(p, sechalf(dst), sechalf(src[0]), sechalf(src[1]));
+            brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+         } else {
+            brw_BFI1(p, dst, src[0], src[1]);
+         }
          break;
       case BRW_OPCODE_BFI2:
-         brw_set_access_mode(p, BRW_ALIGN_16);
+         assert(brw->gen >= 7);
+         brw_set_default_access_mode(p, BRW_ALIGN_16);
+         /* The Haswell WaForceSIMD8ForBFIInstruction workaround says that we
+          * should
+          *
+          *    "Force BFI instructions to be executed always in SIMD8."
+          *
+          * Otherwise we would be able to emit compressed instructions like we
+          * do for the other three-source instructions.
+          */
          if (dispatch_width == 16) {
-            brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+            brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
             brw_BFI2(p, dst, src[0], src[1], src[2]);
-            brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
+            brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
             brw_BFI2(p, sechalf(dst), sechalf(src[0]), sechalf(src[1]), sechalf(src[2]));
-            brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+            brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
          } else {
             brw_BFI2(p, dst, src[0], src[1], src[2]);
          }
-         brw_set_access_mode(p, BRW_ALIGN_1);
+         brw_set_default_access_mode(p, BRW_ALIGN_1);
          break;
 
       case BRW_OPCODE_IF:
@@ -1344,7 +1559,7 @@ fs_generator::generate_code(exec_list *instructions)
 
       case BRW_OPCODE_BREAK:
         brw_BREAK(p);
-        brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+        brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         break;
       case BRW_OPCODE_CONTINUE:
         /* FINISHME: We need to write the loop instruction support still. */
@@ -1352,7 +1567,7 @@ fs_generator::generate_code(exec_list *instructions)
            gen6_CONT(p);
         else
            brw_CONT(p);
-        brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+        brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         break;
 
       case BRW_OPCODE_WHILE:
@@ -1366,10 +1581,12 @@ fs_generator::generate_code(exec_list *instructions)
       case SHADER_OPCODE_LOG2:
       case SHADER_OPCODE_SIN:
       case SHADER_OPCODE_COS:
+         assert(brw->gen < 6 || inst->mlen == 0);
         if (brw->gen >= 7) {
-           generate_math1_gen7(inst, dst, src[0]);
+            gen6_math(p, dst, brw_math_function(inst->opcode), src[0],
+                      brw_null_reg());
         } else if (brw->gen == 6) {
-           generate_math1_gen6(inst, dst, src[0]);
+           generate_math_gen6(inst, dst, src[0], brw_null_reg());
         } else if (brw->gen == 5 || brw->is_g4x) {
            generate_math_g45(inst, dst, src[0]);
         } else {
@@ -1379,10 +1596,11 @@ fs_generator::generate_code(exec_list *instructions)
       case SHADER_OPCODE_INT_QUOTIENT:
       case SHADER_OPCODE_INT_REMAINDER:
       case SHADER_OPCODE_POW:
+         assert(brw->gen < 6 || inst->mlen == 0);
         if (brw->gen >= 7) {
-           generate_math2_gen7(inst, dst, src[0], src[1]);
+            gen6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
         } else if (brw->gen == 6) {
-           generate_math2_gen6(inst, dst, src[0], src[1]);
+           generate_math_gen6(inst, dst, src[0], src[1]);
         } else {
            generate_math_gen4(inst, dst, src[0]);
         }
@@ -1403,11 +1621,14 @@ fs_generator::generate_code(exec_list *instructions)
       case FS_OPCODE_TXB:
       case SHADER_OPCODE_TXD:
       case SHADER_OPCODE_TXF:
-      case SHADER_OPCODE_TXF_MS:
+      case SHADER_OPCODE_TXF_CMS:
+      case SHADER_OPCODE_TXF_UMS:
+      case SHADER_OPCODE_TXF_MCS:
       case SHADER_OPCODE_TXL:
       case SHADER_OPCODE_TXS:
       case SHADER_OPCODE_LOD:
       case SHADER_OPCODE_TG4:
+      case SHADER_OPCODE_TG4_OFFSET:
         generate_tex(inst, dst, src[0]);
         break;
       case FS_OPCODE_DDX:
@@ -1415,18 +1636,22 @@ fs_generator::generate_code(exec_list *instructions)
         break;
       case FS_OPCODE_DDY:
          /* Make sure fp->UsesDFdy flag got set (otherwise there's no
-          * guarantee that c->key.render_to_fbo is set).
+          * guarantee that key->render_to_fbo is set).
           */
          assert(fp->UsesDFdy);
-        generate_ddy(inst, dst, src[0], c->key.render_to_fbo);
+        generate_ddy(inst, dst, src[0], key->render_to_fbo);
+        break;
+
+      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
+        generate_scratch_write(inst, src[0]);
         break;
 
-      case FS_OPCODE_SPILL:
-        generate_spill(inst, src[0]);
+      case SHADER_OPCODE_GEN4_SCRATCH_READ:
+        generate_scratch_read(inst, dst);
         break;
 
-      case FS_OPCODE_UNSPILL:
-        generate_unspill(inst, dst);
+      case SHADER_OPCODE_GEN7_SCRATCH_READ:
+        generate_scratch_read_gen7(inst, dst);
         break;
 
       case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
@@ -1449,6 +1674,10 @@ fs_generator::generate_code(exec_list *instructions)
         generate_fb_write(inst);
         break;
 
+      case FS_OPCODE_BLORP_FB_WRITE:
+        generate_blorp_fb_write(inst);
+        break;
+
       case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
          generate_mov_dispatch_to_flags(inst);
          break;
@@ -1461,10 +1690,26 @@ fs_generator::generate_code(exec_list *instructions)
          generate_shader_time_add(inst, src[0], src[1], src[2]);
          break;
 
+      case SHADER_OPCODE_UNTYPED_ATOMIC:
+         generate_untyped_atomic(inst, dst, src[0], src[1]);
+         break;
+
+      case SHADER_OPCODE_UNTYPED_SURFACE_READ:
+         generate_untyped_surface_read(inst, dst, src[0]);
+         break;
+
       case FS_OPCODE_SET_SIMD4X2_OFFSET:
          generate_set_simd4x2_offset(inst, dst, src[0]);
          break;
 
+      case FS_OPCODE_SET_OMASK:
+         generate_set_omask(inst, dst, src[0]);
+         break;
+
+      case FS_OPCODE_SET_SAMPLE_ID:
+         generate_set_sample_id(inst, dst, src[0], src[1]);
+         break;
+
       case FS_OPCODE_PACK_HALF_2x16_SPLIT:
           generate_pack_half_2x16_split(inst, dst, src[0], src[1]);
           break;
@@ -1478,7 +1723,11 @@ fs_generator::generate_code(exec_list *instructions)
          /* This is the place where the final HALT needs to be inserted if
           * we've emitted any discards.  If not, this will emit no code.
           */
-         patch_discard_jumps_to_fb_writes();
+         if (!patch_discard_jumps_to_fb_writes()) {
+            if (unlikely(debug_flag)) {
+               annotation.ann_count--;
+            }
+         }
          break;
 
       default:
@@ -1489,45 +1738,55 @@ fs_generator::generate_code(exec_list *instructions)
            _mesa_problem(ctx, "Unsupported opcode %d in FS", inst->opcode);
         }
         abort();
-      }
 
-      if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
-        brw_dump_compile(p, stdout,
-                         last_native_insn_offset, p->next_insn_offset);
-
-        foreach_list(node, &cfg->block_list) {
-           bblock_link *link = (bblock_link *)node;
-           bblock_t *block = link->block;
-
-           if (block->end == inst) {
-              printf("   END B%d", block->block_num);
-              foreach_list(successor_node, &block->children) {
-                 bblock_link *successor_link =
-                    (bblock_link *)successor_node;
-                 bblock_t *successor_block = successor_link->block;
-                 printf(" ->B%d", successor_block->block_num);
-              }
-              printf("\n");
-           }
-        }
+      case SHADER_OPCODE_LOAD_PAYLOAD:
+         unreachable("Should be lowered by lower_load_payload()");
       }
 
-      last_native_insn_offset = p->next_insn_offset;
-   }
+      if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
+         assert(p->next_insn_offset == last_insn_offset + 16 ||
+                !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
+                 "emitting more than 1 instruction");
 
-   if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
-      printf("\n");
+         brw_inst *last = &p->store[last_insn_offset / 16];
+
+         brw_inst_set_cond_modifier(brw, last, inst->conditional_mod);
+         brw_inst_set_no_dd_clear(brw, last, inst->no_dd_clear);
+         brw_inst_set_no_dd_check(brw, last, inst->no_dd_check);
+      }
    }
 
    brw_set_uip_jip(p);
+   annotation_finalize(&annotation, p->next_insn_offset);
+
+   int before_size = p->next_insn_offset - start_offset;
+   brw_compact_instructions(p, start_offset, annotation.ann_count,
+                            annotation.ann);
+   int after_size = p->next_insn_offset - start_offset;
+
+   if (unlikely(debug_flag)) {
+      if (prog) {
+         fprintf(stderr,
+                 "Native code for %s fragment shader %d (SIMD%d dispatch):\n",
+                 prog->Label ? prog->Label : "unnamed",
+                 prog->Name, dispatch_width);
+      } else if (fp) {
+         fprintf(stderr,
+                 "Native code for fragment program %d (SIMD%d dispatch):\n",
+                 fp->Base.Id, dispatch_width);
+      } else {
+         fprintf(stderr, "Native code for blorp program (SIMD%d dispatch):\n",
+                 dispatch_width);
+      }
+      fprintf(stderr, "SIMD%d shader: %d instructions. Compacted %d to %d"
+                      " bytes (%.0f%%)\n",
+              dispatch_width, before_size / 16, before_size, after_size,
+              100.0f * (before_size - after_size) / before_size);
 
-   /* OK, while the INTEL_DEBUG=wm above is very nice for debugging FS
-    * emit issues, it doesn't get the jump distances into the output,
-    * which is often something we want to debug.  So this is here in
-    * case you're doing that.
-    */
-   if (0) {
-      brw_dump_compile(p, stdout, 0, p->next_insn_offset);
+      const struct gl_program *prog = fp ? &fp->Base : NULL;
+
+      dump_assembly(p->store, annotation.ann_count, annotation.ann, brw, prog);
+      ralloc_free(annotation.ann);
    }
 }
 
@@ -1536,25 +1795,23 @@ fs_generator::generate_assembly(exec_list *simd8_instructions,
                                 exec_list *simd16_instructions,
                                 unsigned *assembly_size)
 {
-   dispatch_width = 8;
-   generate_code(simd8_instructions);
+   assert(simd8_instructions || simd16_instructions);
 
-   if (simd16_instructions) {
-      /* We have to do a compaction pass now, or the one at the end of
-       * execution will squash down where our prog_offset start needs
-       * to be.
-       */
-      brw_compact_instructions(p);
+   if (simd8_instructions) {
+      dispatch_width = 8;
+      generate_code(simd8_instructions);
+   }
 
+   if (simd16_instructions) {
       /* align to 64 byte boundary. */
-      while ((p->nr_insn * sizeof(struct brw_instruction)) % 64) {
+      while (p->next_insn_offset % 64) {
          brw_NOP(p);
       }
 
-      /* Save off the start of this 16-wide program */
-      c->prog_data.prog_offset_16 = p->nr_insn * sizeof(struct brw_instruction);
+      /* Save off the start of this SIMD16 program */
+      prog_data->prog_offset_16 = p->next_insn_offset;
 
-      brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+      brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
 
       dispatch_width = 16;
       generate_code(simd16_instructions);