intel/eu: Use descriptor constructors for sampler messages.
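
Instead of filling out the sampler SEND descriptor with
brw_set_sampler_message() after the instruction has been emitted, build
it up front with the brw_message_desc() and brw_sampler_desc()
constructors and hand the result to brw_set_desc() or
brw_send_indirect_message().  For indirect sends the constant part of
the descriptor is now passed alongside the address register instead of
being patched into the SEND afterwards.

The new pattern, as used in generate_tex() below:

   brw_send_indirect_message(
      p, BRW_SFID_SAMPLER, dst, src, addr,
      brw_message_desc(devinfo, inst->mlen, inst->size_written / REG_SIZE,
                       inst->header_size) |
      brw_sampler_desc(devinfo, 0 /* surface */, 0 /* sampler */,
                       msg_type, simd_mode, return_format));
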
src/intel/compiler/brw_fs_generator.cpp
index e6fb7c92d402b5975336c054259a9e8f5970424f..49cc0302fae04721558e0c27b5d61b809bce5e08 100644
@@ -175,14 +175,13 @@ brw_reg_from_fs_reg(const struct gen_device_info *devinfo, fs_inst *inst,
 
 fs_generator::fs_generator(const struct brw_compiler *compiler, void *log_data,
                            void *mem_ctx,
-                           const void *key,
                            struct brw_stage_prog_data *prog_data,
                            unsigned promoted_constants,
                            bool runtime_check_aads_emit,
                            gl_shader_stage stage)
 
    : compiler(compiler), log_data(log_data),
-     devinfo(compiler->devinfo), key(key),
+     devinfo(compiler->devinfo),
      prog_data(prog_data),
      promoted_constants(promoted_constants),
      runtime_check_aads_emit(runtime_check_aads_emit), debug_flag(false),
@@ -267,21 +266,35 @@ fs_generator::fire_fb_write(fs_inst *inst,
       brw_set_default_mask_control(p, BRW_MASK_DISABLE);
       brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
       brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
-      brw_MOV(p, offset(payload, 1), brw_vec8_grf(1, 0));
+      brw_MOV(p, offset(retype(payload, BRW_REGISTER_TYPE_UD), 1),
+              offset(retype(implied_header, BRW_REGISTER_TYPE_UD), 1));
       brw_pop_insn_state(p);
    }
 
-   if (inst->opcode == FS_OPCODE_REP_FB_WRITE)
+   if (inst->opcode == FS_OPCODE_REP_FB_WRITE) {
+      assert(inst->group == 0 && inst->exec_size == 16);
       msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED;
-   else if (prog_data->dual_src_blend) {
-      if (!inst->group)
+
+   } else if (prog_data->dual_src_blend) {
+      assert(inst->exec_size == 8);
+
+      if (inst->group % 16 == 0)
          msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01;
-      else
+      else if (inst->group % 16 == 8)
          msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23;
-   } else if (inst->exec_size == 16)
-      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE;
-   else
-      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;
+      else
+         unreachable("Invalid dual-source FB write instruction group");
+
+   } else {
+      assert(inst->group == 0 || (inst->group == 16 && inst->exec_size == 16));
+
+      if (inst->exec_size == 16)
+         msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE;
+      else if (inst->exec_size == 8)
+         msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;
+      else
+         unreachable("Invalid FB write execution size");
+   }
 
    /* We assume render targets start at 0, because headerless FB write
     * messages set "Render Target Index" to 0.  Using a different binding
@@ -289,20 +302,19 @@ fs_generator::fire_fb_write(fs_inst *inst,
     */
    const uint32_t surf_index = inst->target;
 
-   bool last_render_target = inst->eot ||
-                             (prog_data->dual_src_blend && dispatch_width == 16);
-
+   brw_inst *insn = brw_fb_WRITE(p,
+                                 payload,
+                                 retype(implied_header, BRW_REGISTER_TYPE_UW),
+                                 msg_control,
+                                 surf_index,
+                                 nr,
+                                 0,
+                                 inst->eot,
+                                 inst->last_rt,
+                                 inst->header_size != 0);
 
-   brw_fb_WRITE(p,
-                payload,
-                implied_header,
-                msg_control,
-                surf_index,
-                nr,
-                0,
-                inst->eot,
-                last_render_target,
-                inst->header_size != 0);
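+   /* inst->group / 16 selects the render target message's slot group, i.e.
+    * whether this write applies to channels 0-15 or 16-31 of the dispatch.
+    */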
+   if (devinfo->gen >= 6)
+      brw_inst_set_rt_slot_group(devinfo, insn, inst->group / 16);
 
    brw_mark_surface_used(&prog_data->base, surf_index);
 }
@@ -310,86 +322,16 @@ fs_generator::fire_fb_write(fs_inst *inst,
 void
 fs_generator::generate_fb_write(fs_inst *inst, struct brw_reg payload)
 {
-   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
-   const brw_wm_prog_key * const key = (brw_wm_prog_key * const) this->key;
-   struct brw_reg implied_header;
-
    if (devinfo->gen < 8 && !devinfo->is_haswell) {
       brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
    }
 
+   const struct brw_reg implied_header =
+      devinfo->gen < 6 ? payload : brw_null_reg();
+
    if (inst->base_mrf >= 0)
       payload = brw_message_reg(inst->base_mrf);
 
-   /* Header is 2 regs, g0 and g1 are the contents. g0 will be implied
-    * move, here's g1.
-    */
-   if (inst->header_size != 0) {
-      brw_push_insn_state(p);
-      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
-      brw_set_default_exec_size(p, BRW_EXECUTE_1);
-      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
-      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
-      brw_set_default_flag_reg(p, 0, 0);
-
-      /* On HSW, the GPU will use the predicate on SENDC, unless the header is
-       * present.
-       */
-      if (prog_data->uses_kill) {
-         struct brw_reg pixel_mask;
-
-         if (devinfo->gen >= 6)
-            pixel_mask = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
-         else
-            pixel_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
-
-         brw_MOV(p, pixel_mask, brw_flag_reg(0, 1));
-      }
-
-      if (devinfo->gen >= 6) {
-         brw_push_insn_state(p);
-         brw_set_default_exec_size(p, BRW_EXECUTE_16);
-        brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
-        brw_MOV(p,
-                retype(payload, BRW_REGISTER_TYPE_UD),
-                retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
-         brw_pop_insn_state(p);
-
-         if (inst->target > 0 && key->replicate_alpha) {
-            /* Set "Source0 Alpha Present to RenderTarget" bit in message
-             * header.
-             */
-            brw_OR(p,
-                  vec1(retype(payload, BRW_REGISTER_TYPE_UD)),
-                  vec1(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)),
-                  brw_imm_ud(0x1 << 11));
-         }
-
-        if (inst->target > 0) {
-           /* Set the render target index for choosing BLEND_STATE. */
-           brw_MOV(p, retype(vec1(suboffset(payload, 2)),
-                              BRW_REGISTER_TYPE_UD),
-                   brw_imm_ud(inst->target));
-        }
-
-         /* Set computes stencil to render target */
-         if (prog_data->computed_stencil) {
-            brw_OR(p,
-                   vec1(retype(payload, BRW_REGISTER_TYPE_UD)),
-                   vec1(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)),
-                   brw_imm_ud(0x1 << 14));
-         }
-
-        implied_header = brw_null_reg();
-      } else {
-        implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
-      }
-
-      brw_pop_insn_state(p);
-   } else {
-      implied_header = brw_null_reg();
-   }
-
    if (!runtime_check_aads_emit) {
       fire_fb_write(inst, payload, implied_header, inst->mlen);
    } else {
@@ -540,6 +482,106 @@ fs_generator::generate_mov_indirect(fs_inst *inst,
    }
 }
 
+void
+fs_generator::generate_shuffle(fs_inst *inst,
+                               struct brw_reg dst,
+                               struct brw_reg src,
+                               struct brw_reg idx)
+{
+   /* Ivy Bridge has some strange behavior that makes this a real pain to
+    * implement for 64-bit values so we just don't bother.
+    */
+   assert(devinfo->gen >= 8 || devinfo->is_haswell || type_sz(src.type) <= 4);
+
+   /* Because we're using the address register, we're limited to 8-wide
+    * execution on gen7.  On gen8, we're limited to 16-wide by the address
+    * register file and 8-wide for 64-bit types.  We could try and make this
+    * instruction splittable higher up in the compiler but that gets weird
+    * because it reads all of the channels regardless of execution size.  It's
+    * easier just to split it here.
+    */
+   const unsigned lower_width =
+      (devinfo->gen <= 7 || type_sz(src.type) > 4) ?
+      8 : MIN2(16, inst->exec_size);
+
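+   /* For example, a 16-wide shuffle of 64-bit data on gen8 is emitted as two
+    * 8-wide halves by the loop below.
+    */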
+   brw_set_default_exec_size(p, cvt(lower_width) - 1);
+   for (unsigned group = 0; group < inst->exec_size; group += lower_width) {
+      brw_set_default_group(p, group);
+
+      if ((src.vstride == 0 && src.hstride == 0) ||
+          idx.file == BRW_IMMEDIATE_VALUE) {
+         /* Trivial, the source is already uniform or the index is a constant.
+          * We will typically not get here if the optimizer is doing its job,
+          * but asserting would be mean.
+          */
+         const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
+         brw_MOV(p, suboffset(dst, group), stride(suboffset(src, i), 0, 1, 0));
+      } else {
+         /* We use VxH indirect addressing, clobbering a0.0 through a0.7. */
+         struct brw_reg addr = vec8(brw_address_reg(0));
+
+         struct brw_reg group_idx = suboffset(idx, group);
+
+         if (lower_width == 8 && group_idx.width == BRW_WIDTH_16) {
+            /* Things get grumpy if the register is too wide. */
+            group_idx.width--;
+            group_idx.vstride--;
+         }
+
+         assert(type_sz(group_idx.type) <= 4);
+         if (type_sz(group_idx.type) == 4) {
+            /* The destination stride of an instruction (in bytes) must be
+             * greater than or equal to the size of the execution data type.
+             * Since the address register is of type UW, we can't use a
+             * D-type instruction.  To get around this, we retype the index
+             * to W and read it with a stride of 2.
+             */
+            group_idx = retype(spread(group_idx, 2), BRW_REGISTER_TYPE_W);
+         }
+
+         /* Take into account the component size and horizontal stride. */
+         assert(src.vstride == src.hstride + src.width);
+         brw_SHL(p, addr, group_idx,
+                 brw_imm_uw(_mesa_logbase2(type_sz(src.type)) +
+                            src.hstride - 1));
+
+         /* Add on the register start offset */
+         brw_ADD(p, addr, addr, brw_imm_uw(src.nr * REG_SIZE + src.subnr));
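+         /* For a tightly packed F source (type size 4, encoded hstride 1),
+          * the shift above is log2(4) + 1 - 1 == 2, so each address channel
+          * now holds src.nr * REG_SIZE + src.subnr + 4 * idx, i.e. the byte
+          * address of the selected channel, which the VxH MOV below reads.
+          */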
+
+         if (type_sz(src.type) > 4 &&
+             ((devinfo->gen == 7 && !devinfo->is_haswell) ||
+              devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
+            /* IVB has an issue (which we found empirically) where it reads
+             * two address register components per channel for indirectly
+             * addressed 64-bit sources.
+             *
+             * From the Cherryview PRM Vol 7. "Register Region Restrictions":
+             *
+             *    "When source or destination datatype is 64b or operation is
+             *    integer DWord multiply, indirect addressing must not be
+             *    used."
+             *
+             * To work around both of these, we do two integer MOVs instead of
+             * one 64-bit MOV.  Because no double value should ever cross a
+             * register boundary, it's safe to use the immediate offset in the
+             * indirect here to handle adding 4 bytes to the offset and avoid
+             * the extra ADD to the register file.
+             */
+            struct brw_reg gdst = suboffset(dst, group);
+            struct brw_reg dst_d = retype(spread(gdst, 2),
+                                          BRW_REGISTER_TYPE_D);
+            brw_MOV(p, dst_d,
+                    retype(brw_VxH_indirect(0, 0), BRW_REGISTER_TYPE_D));
+            brw_MOV(p, byte_offset(dst_d, 4),
+                    retype(brw_VxH_indirect(0, 4), BRW_REGISTER_TYPE_D));
+         } else {
+            brw_MOV(p, suboffset(dst, group),
+                    retype(brw_VxH_indirect(0, 0), src.type));
+         }
+      }
+   }
+}
+
 void
 fs_generator::generate_urb_read(fs_inst *inst,
                                 struct brw_reg dst,
@@ -640,7 +682,7 @@ fs_generator::generate_cs_terminate(fs_inst *inst, struct brw_reg payload)
 }
 
 void
-fs_generator::generate_barrier(fs_inst *inst, struct brw_reg src)
+fs_generator::generate_barrier(fs_inst *, struct brw_reg src)
 {
    brw_barrier(p, src);
    brw_WAIT(p);
@@ -681,6 +723,7 @@ fs_generator::generate_linterp(fs_inst *inst,
       struct brw_reg dwQ = suboffset(interp, 1);
       struct brw_reg dwR = suboffset(interp, 3);
 
+      brw_push_insn_state(p);
       brw_set_default_exec_size(p, BRW_EXECUTE_8);
 
       if (inst->exec_size == 8) {
@@ -695,16 +738,14 @@ fs_generator::generate_linterp(fs_inst *inst,
           */
          brw_inst_set_saturate(p->devinfo, i[0], false);
       } else {
-         brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
+         brw_set_default_group(p, inst->group);
          i[0] = brw_MAD(p,            acc, dwR, offset(delta_x, 0), dwP);
          i[1] = brw_MAD(p, offset(dst, 0), acc, offset(delta_x, 1), dwQ);
 
-         brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
+         brw_set_default_group(p, inst->group + 8);
          i[2] = brw_MAD(p,            acc, dwR, offset(delta_y, 0), dwP);
          i[3] = brw_MAD(p, offset(dst, 1), acc, offset(delta_y, 1), dwQ);
 
-         brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
-
          brw_inst_set_cond_modifier(p->devinfo, i[1], inst->conditional_mod);
          brw_inst_set_cond_modifier(p->devinfo, i[3], inst->conditional_mod);
 
@@ -716,12 +757,65 @@ fs_generator::generate_linterp(fs_inst *inst,
          brw_inst_set_saturate(p->devinfo, i[2], false);
       }
 
+      brw_pop_insn_state(p);
+
       return true;
-   } else if (devinfo->has_pln &&
-              (devinfo->gen >= 7 || (delta_x.nr & 1) == 0)) {
-      brw_PLN(p, dst, interp, delta_x);
+   } else if (devinfo->has_pln) {
+      if (devinfo->gen <= 6 && (delta_x.nr & 1) != 0) {
+         /* From the Sandy Bridge PRM Vol. 4, Pt. 2, Section 8.3.53, "Plane":
+          *
+          *    "[DevSNB]:<src1> must be even register aligned."
+          *
+          * This restriction is lifted on Ivy Bridge.
+          *
+          * This means that we need to split PLN into LINE+MAC on-the-fly.
+          * Unfortunately, the inputs are laid out for PLN and not LINE+MAC so
+          * we have to split into SIMD8 pieces.  For gen4 (!has_pln), the
+          * coordinate registers are laid out differently so we leave it as a
+          * SIMD16 instruction.
+          */
+         assert(inst->exec_size == 8 || inst->exec_size == 16);
+         assert(inst->group % 16 == 0);
 
-      return false;
+         brw_push_insn_state(p);
+         brw_set_default_exec_size(p, BRW_EXECUTE_8);
+
+         /* Thanks to two accumulators, we can emit all the LINEs and then all
+          * the MACs.  This improves parallelism a bit.
+          */
+         for (unsigned g = 0; g < inst->exec_size / 8; g++) {
+            brw_inst *line = brw_LINE(p, brw_null_reg(), interp,
+                                      offset(delta_x, g * 2));
+            brw_inst_set_group(devinfo, line, inst->group + g * 8);
+
+            /* LINE writes the accumulator automatically on gen4-5.  On Sandy
+             * Bridge and later, we have to explicitly enable it.
+             */
+            if (devinfo->gen >= 6)
+               brw_inst_set_acc_wr_control(p->devinfo, line, true);
+
+            /* brw_set_default_saturate() is called before emitting
+             * instructions, so the saturate bit is set in each instruction;
+             * we need to clear it on the LINE instructions here.
+             */
+            brw_inst_set_saturate(p->devinfo, line, false);
+         }
+
+         for (unsigned g = 0; g < inst->exec_size / 8; g++) {
+            brw_inst *mac = brw_MAC(p, offset(dst, g), suboffset(interp, 1),
+                                    offset(delta_x, g * 2 + 1));
+            brw_inst_set_group(devinfo, mac, inst->group + g * 8);
+            brw_inst_set_cond_modifier(p->devinfo, mac, inst->conditional_mod);
+         }
+
+         brw_pop_insn_state(p);
+
+         return true;
+      } else {
+         brw_PLN(p, dst, interp, delta_x);
+
+         return false;
+      }
    } else {
       i[0] = brw_LINE(p, brw_null_reg(), interp, delta_x);
       i[1] = brw_MAC(p, dst, suboffset(interp, 1), delta_y);
@@ -793,6 +887,11 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
    uint32_t return_format;
    bool is_combined_send = inst->eot;
 
+   /* A sampler EOT message narrower than the dispatch width would kill the
+    * thread prematurely.
+    */
+   assert(!is_combined_send || inst->exec_size == dispatch_width);
+
    switch (dst.type) {
    case BRW_REGISTER_TYPE_D:
       return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
@@ -1001,19 +1100,13 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
     * we need to set it up explicitly and load the offset bitfield.
     * Otherwise, we can use an implied move from g0 to the first message reg.
     */
-   if (inst->header_size != 0) {
+   if (inst->header_size != 0 && devinfo->gen < 7) {
       if (devinfo->gen < 6 && !inst->offset) {
          /* Set up an implied move from g0 to the MRF. */
          src = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
       } else {
-         struct brw_reg header_reg;
-
-         if (devinfo->gen >= 7) {
-            header_reg = src;
-         } else {
-            assert(inst->base_mrf != -1);
-            header_reg = brw_message_reg(inst->base_mrf);
-         }
+         assert(inst->base_mrf != -1);
+         struct brw_reg header_reg = brw_message_reg(inst->base_mrf);
 
          brw_push_insn_state(p);
          brw_set_default_exec_size(p, BRW_EXECUTE_8);
@@ -1027,17 +1120,8 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
             /* Set the offset bits in DWord 2. */
             brw_MOV(p, get_element_ud(header_reg, 2),
                        brw_imm_ud(inst->offset));
-         } else if (stage != MESA_SHADER_VERTEX &&
-                    stage != MESA_SHADER_FRAGMENT) {
-            /* The vertex and fragment stages have g0.2 set to 0, so
-             * header0.2 is 0 when g0 is copied. Other stages may not, so we
-             * must set it to 0 to avoid setting undesirable bits in the
-             * message.
-             */
-            brw_MOV(p, get_element_ud(header_reg, 2), brw_imm_ud(0));
          }
 
-         brw_adjust_sampler_state_pointer(p, header_reg, sampler_index);
          brw_pop_insn_state(p);
       }
    }
@@ -1095,17 +1179,16 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
       brw_pop_insn_state(p);
 
       /* dst = send(offset, a0.0 | <descriptor>) */
-      brw_inst *insn = brw_send_indirect_message(
-         p, BRW_SFID_SAMPLER, dst, src, addr);
-      brw_set_sampler_message(p, insn,
-                              0 /* surface */,
-                              0 /* sampler */,
-                              msg_type,
-                              inst->size_written / REG_SIZE,
-                              inst->mlen /* mlen */,
-                              inst->header_size != 0 /* header */,
-                              simd_mode,
-                              return_format);
+      brw_send_indirect_message(
+         p, BRW_SFID_SAMPLER, dst, src, addr,
+         brw_message_desc(devinfo, inst->mlen, inst->size_written / REG_SIZE,
+                          inst->header_size) |
+         brw_sampler_desc(devinfo,
+                          0 /* surface */,
+                          0 /* sampler */,
+                          msg_type,
+                          simd_mode,
+                          return_format));
 
       /* visitor knows more than we do about the surface limit required,
        * so has already done marking.
@@ -1187,15 +1270,45 @@ fs_generator::generate_ddy(const fs_inst *inst,
 {
    if (inst->opcode == FS_OPCODE_DDY_FINE) {
       /* produce accurate derivatives */
-      struct brw_reg src0 = stride(src, 4, 4, 1);
-      struct brw_reg src1 = stride(src, 4, 4, 1);
-      src0.swizzle = BRW_SWIZZLE_XYXY;
-      src1.swizzle = BRW_SWIZZLE_ZWZW;
+      if (devinfo->gen >= 11) {
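+         /* Gen11 no longer supports the Align16 access mode used by the
+          * swizzle-based path below, so do it with explicit 4-wide ADDs
+          * instead: each ADD computes bottom-row minus top-row for one 2x2
+          * quad, with the <0;2,1> source regions replicating the row values
+          * across the quad.
+          */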
+         src = stride(src, 0, 2, 1);
+         struct brw_reg src_0  = byte_offset(src,  0 * sizeof(float));
+         struct brw_reg src_2  = byte_offset(src,  2 * sizeof(float));
+         struct brw_reg src_4  = byte_offset(src,  4 * sizeof(float));
+         struct brw_reg src_6  = byte_offset(src,  6 * sizeof(float));
+         struct brw_reg src_8  = byte_offset(src,  8 * sizeof(float));
+         struct brw_reg src_10 = byte_offset(src, 10 * sizeof(float));
+         struct brw_reg src_12 = byte_offset(src, 12 * sizeof(float));
+         struct brw_reg src_14 = byte_offset(src, 14 * sizeof(float));
+
+         struct brw_reg dst_0  = byte_offset(dst,  0 * sizeof(float));
+         struct brw_reg dst_4  = byte_offset(dst,  4 * sizeof(float));
+         struct brw_reg dst_8  = byte_offset(dst,  8 * sizeof(float));
+         struct brw_reg dst_12 = byte_offset(dst, 12 * sizeof(float));
 
-      brw_push_insn_state(p);
-      brw_set_default_access_mode(p, BRW_ALIGN_16);
-      brw_ADD(p, dst, negate(src0), src1);
-      brw_pop_insn_state(p);
+         brw_push_insn_state(p);
+         brw_set_default_exec_size(p, BRW_EXECUTE_4);
+
+         brw_ADD(p, dst_0, negate(src_0), src_2);
+         brw_ADD(p, dst_4, negate(src_4), src_6);
+
+         if (inst->exec_size == 16) {
+            brw_ADD(p, dst_8,  negate(src_8),  src_10);
+            brw_ADD(p, dst_12, negate(src_12), src_14);
+         }
+
+         brw_pop_insn_state(p);
+      } else {
+         struct brw_reg src0 = stride(src, 4, 4, 1);
+         struct brw_reg src1 = stride(src, 4, 4, 1);
+         src0.swizzle = BRW_SWIZZLE_XYXY;
+         src1.swizzle = BRW_SWIZZLE_ZWZW;
+
+         brw_push_insn_state(p);
+         brw_set_default_access_mode(p, BRW_ALIGN_16);
+         brw_ADD(p, dst, negate(src0), src1);
+         brw_pop_insn_state(p);
+      }
    } else {
       /* replicate the derivative at the top-left pixel to other pixels */
       struct brw_reg src0 = stride(src, 4, 4, 0);
@@ -1208,7 +1321,7 @@ fs_generator::generate_ddy(const fs_inst *inst,
 }
 
 void
-fs_generator::generate_discard_jump(fs_inst *inst)
+fs_generator::generate_discard_jump(fs_inst *)
 {
    assert(devinfo->gen >= 6);
 
@@ -1335,7 +1448,7 @@ fs_generator::generate_uniform_pull_constant_load_gen7(fs_inst *inst,
       brw_inst *insn = brw_send_indirect_message(
          p, GEN6_SFID_DATAPORT_CONSTANT_CACHE,
          retype(dst, BRW_REGISTER_TYPE_UD),
-         retype(payload, BRW_REGISTER_TYPE_UD), addr);
+         retype(payload, BRW_REGISTER_TYPE_UD), addr, 0);
       brw_set_dp_read_message(p, insn, 0 /* surface */,
                               BRW_DATAPORT_OWORD_BLOCK_DWORDS(inst->exec_size),
                               GEN7_DATAPORT_DC_OWORD_BLOCK_READ,
@@ -1389,6 +1502,7 @@ fs_generator::generate_varying_pull_constant_load_gen4(fs_inst *inst,
 
    brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
    brw_inst_set_compression(devinfo, send, false);
+   brw_inst_set_sfid(devinfo, send, BRW_SFID_SAMPLER);
    brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW));
    brw_set_src0(p, send, header);
    if (devinfo->gen < 6)
@@ -1398,15 +1512,11 @@ fs_generator::generate_varying_pull_constant_load_gen4(fs_inst *inst,
     * stored in it.
     */
    uint32_t return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
-   brw_set_sampler_message(p, send,
-                           surf_index,
-                           0, /* sampler (unused) */
-                           msg_type,
-                           rlen,
-                           inst->mlen,
-                           inst->header_size != 0,
-                           simd_mode,
-                           return_format);
+   brw_set_desc(p, send,
+                brw_message_desc(devinfo, inst->mlen, rlen, inst->header_size) |
+                brw_sampler_desc(devinfo, surf_index,
+                                 0, /* sampler (unused) */
+                                 msg_type, simd_mode, return_format));
 }
 
 void
@@ -1440,17 +1550,15 @@ fs_generator::generate_varying_pull_constant_load_gen7(fs_inst *inst,
       uint32_t surf_index = index.ud;
 
       brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
+      brw_inst_set_sfid(devinfo, send, BRW_SFID_SAMPLER);
       brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW));
       brw_set_src0(p, send, offset);
-      brw_set_sampler_message(p, send,
-                              surf_index,
-                              0, /* LD message ignores sampler unit */
-                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
-                              rlen,
-                              mlen,
-                              false, /* no header */
-                              simd_mode,
-                              0);
+      brw_set_desc(p, send,
+                   brw_message_desc(devinfo, mlen, rlen, false) |
+                   brw_sampler_desc(devinfo, surf_index,
+                                    0, /* LD message ignores sampler unit */
+                                    GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
+                                    simd_mode, 0));
 
    } else {
 
@@ -1469,45 +1577,19 @@ fs_generator::generate_varying_pull_constant_load_gen7(fs_inst *inst,
       brw_pop_insn_state(p);
 
       /* dst = send(offset, a0.0 | <descriptor>) */
-      brw_inst *insn = brw_send_indirect_message(
+      brw_send_indirect_message(
          p, BRW_SFID_SAMPLER, retype(dst, BRW_REGISTER_TYPE_UW),
-         offset, addr);
-      brw_set_sampler_message(p, insn,
-                              0 /* surface */,
-                              0 /* sampler */,
-                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
-                              rlen /* rlen */,
-                              mlen /* mlen */,
-                              false /* header */,
-                              simd_mode,
-                              0);
+         offset, addr,
+         brw_message_desc(devinfo, mlen, rlen, false) |
+         brw_sampler_desc(devinfo,
+                          0 /* surface */,
+                          0 /* sampler */,
+                          GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
+                          simd_mode,
+                          0));
    }
 }
 
-/**
- * Cause the current pixel/sample mask (from R1.7 bits 15:0) to be transferred
- * into the flags register (f0.0).
- *
- * Used only on Gen6 and above.
- */
-void
-fs_generator::generate_mov_dispatch_to_flags(fs_inst *inst)
-{
-   struct brw_reg flags = brw_flag_reg(0, inst->flag_subreg);
-   struct brw_reg dispatch_mask;
-
-   if (devinfo->gen >= 6)
-      dispatch_mask = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
-   else
-      dispatch_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
-
-   brw_push_insn_state(p);
-   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
-   brw_set_default_exec_size(p, BRW_EXECUTE_1);
-   brw_MOV(p, flags, dispatch_mask);
-   brw_pop_insn_state(p);
-}
-
 void
 fs_generator::generate_pixel_interpolator_query(fs_inst *inst,
                                                 struct brw_reg dst,
@@ -1515,16 +1597,18 @@ fs_generator::generate_pixel_interpolator_query(fs_inst *inst,
                                                 struct brw_reg msg_data,
                                                 unsigned msg_type)
 {
-   assert(inst->size_written % REG_SIZE == 0);
+   const bool has_payload = inst->src[0].file != BAD_FILE;
    assert(msg_data.type == BRW_REGISTER_TYPE_UD);
+   assert(inst->size_written % REG_SIZE == 0);
 
    brw_pixel_interpolator_query(p,
          retype(dst, BRW_REGISTER_TYPE_UW),
-         src,
+         /* If we don't have a payload, what we send doesn't matter */
+         has_payload ? src : brw_vec8_grf(0, 0),
          inst->pi_noperspective,
          msg_type,
          msg_data,
-         inst->mlen,
+         has_payload ? 2 * inst->exec_size / 8 : 1,
          inst->size_written / REG_SIZE);
 }
 
@@ -1542,22 +1626,24 @@ fs_generator::generate_set_sample_id(fs_inst *inst,
    assert(src0.type == BRW_REGISTER_TYPE_D ||
           src0.type == BRW_REGISTER_TYPE_UD);
 
-   struct brw_reg reg = stride(src1, 1, 4, 0);
-   if (devinfo->gen >= 8 || inst->exec_size == 8) {
-      brw_ADD(p, dst, src0, reg);
-   } else if (inst->exec_size == 16) {
-      brw_push_insn_state(p);
-      brw_set_default_exec_size(p, BRW_EXECUTE_8);
-      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
-      brw_ADD(p, firsthalf(dst), firsthalf(src0), reg);
-      brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
-      brw_ADD(p, sechalf(dst), sechalf(src0), suboffset(reg, 2));
-      brw_pop_insn_state(p);
+   const struct brw_reg reg = stride(src1, 1, 4, 0);
+   const unsigned lower_size = MIN2(inst->exec_size,
+                                    devinfo->gen >= 8 ? 16 : 8);
+
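+   /* Emit one ADD per lower_size-wide group.  The offset() expression below
+    * advances src0 by however many whole registers the preceding groups'
+    * channels occupy in its region, or not at all if src0 is a scalar
+    * (vstride == 0).
+    */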
+   for (unsigned i = 0; i < inst->exec_size / lower_size; i++) {
+      brw_inst *insn = brw_ADD(p, offset(dst, i * lower_size / 8),
+                               offset(src0, (src0.vstride == 0 ? 0 : (1 << (src0.vstride - 1)) *
+                                             (i * lower_size / (1 << src0.width))) *
+                                            type_sz(src0.type) / REG_SIZE),
+                               suboffset(reg, i * lower_size / 4));
+      brw_inst_set_exec_size(devinfo, insn, cvt(lower_size) - 1);
+      brw_inst_set_group(devinfo, insn, inst->group + lower_size * i);
+      brw_inst_set_compression(devinfo, insn, lower_size > 8);
    }
 }
 
 void
-fs_generator::generate_pack_half_2x16_split(fs_inst *inst,
+fs_generator::generate_pack_half_2x16_split(fs_inst *,
                                             struct brw_reg dst,
                                             struct brw_reg x,
                                             struct brw_reg y)
@@ -1625,7 +1711,7 @@ fs_generator::generate_unpack_half_2x16_split(fs_inst *inst,
 }
 
 void
-fs_generator::generate_shader_time_add(fs_inst *inst,
+fs_generator::generate_shader_time_add(fs_inst *,
                                        struct brw_reg payload,
                                        struct brw_reg offset,
                                        struct brw_reg value)
@@ -1749,7 +1835,13 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
       brw_set_default_access_mode(p, BRW_ALIGN_1);
       brw_set_default_predicate_control(p, inst->predicate);
       brw_set_default_predicate_inverse(p, inst->predicate_inverse);
-      brw_set_default_flag_reg(p, 0, inst->flag_subreg);
+      /* On gen7 and above, hardware automatically adds the group onto the
+       * flag subregister number.  On Sandy Bridge and older, we have to do it
+       * ourselves.
+       */
+      const unsigned flag_subreg = inst->flag_subreg +
+         (devinfo->gen >= 7 ? 0 : inst->group / 16);
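+      /* For example, the second SIMD16 half (group 16) of a SIMD32 shader
+       * with inst->flag_subreg == 0 uses f0.0 on gen7+ (the hardware adds
+       * the group itself) but f0.1 on gen6.
+       */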
+      brw_set_default_flag_reg(p, flag_subreg / 2, flag_subreg % 2);
       brw_set_default_saturate(p, inst->saturate);
       brw_set_default_mask_control(p, inst->force_writemask_all);
       brw_set_default_acc_write_control(p, inst->writes_accumulator);
@@ -1796,7 +1888,7 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
         break;
 
       case BRW_OPCODE_LRP:
-         assert(devinfo->gen >= 6);
+         assert(devinfo->gen >= 6 && devinfo->gen <= 10);
          if (devinfo->gen < 10)
             brw_set_default_access_mode(p, BRW_ALIGN_16);
          brw_LRP(p, dst, src[0], src[1], src[2]);
@@ -1860,6 +1952,12 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
       case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;
+      case BRW_OPCODE_CSEL:
+         assert(devinfo->gen >= 8);
+         if (devinfo->gen < 10)
+            brw_set_default_access_mode(p, BRW_ALIGN_16);
+         brw_CSEL(p, dst, src[0], src[1], src[2]);
+         break;
       case BRW_OPCODE_BFREV:
          assert(devinfo->gen >= 7);
          brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
@@ -1918,7 +2016,7 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
            assert(devinfo->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
-           brw_IF(p, brw_inst_exec_size(devinfo, p->current));
+           brw_IF(p, brw_get_default_exec_size(p));
         }
         break;
 
@@ -1930,7 +2028,7 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
         break;
 
       case BRW_OPCODE_DO:
-        brw_DO(p, brw_inst_exec_size(devinfo, p->current));
+        brw_DO(p, brw_get_default_exec_size(p));
         break;
 
       case BRW_OPCODE_BREAK:
@@ -1984,9 +2082,6 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
                       BRW_MATH_PRECISION_FULL);
         }
         break;
-      case FS_OPCODE_CINTERP:
-        brw_MOV(p, dst, src[0]);
-        break;
       case FS_OPCODE_LINTERP:
         multiple_instructions_emitted = generate_linterp(inst, dst, src);
         break;
@@ -2088,10 +2183,6 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
          generate_fb_read(inst, dst, src[0]);
          break;
 
-      case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
-         generate_mov_dispatch_to_flags(inst);
-         break;
-
       case FS_OPCODE_DISCARD_JUMP:
          generate_discard_jump(inst);
          break;
@@ -2103,10 +2194,12 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
       case SHADER_OPCODE_UNTYPED_ATOMIC:
          assert(src[2].file == BRW_IMMEDIATE_VALUE);
          brw_untyped_atomic(p, dst, src[0], src[1], src[2].ud,
-                            inst->mlen, !inst->dst.is_null());
+                            inst->mlen, !inst->dst.is_null(),
+                            inst->header_size);
          break;
 
       case SHADER_OPCODE_UNTYPED_SURFACE_READ:
+         assert(!inst->header_size);
          assert(src[2].file == BRW_IMMEDIATE_VALUE);
          brw_untyped_surface_read(p, dst, src[0], src[1],
                                   inst->mlen, src[2].ud);
@@ -2115,10 +2208,12 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
       case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
          assert(src[2].file == BRW_IMMEDIATE_VALUE);
          brw_untyped_surface_write(p, src[0], src[1],
-                                   inst->mlen, src[2].ud);
+                                   inst->mlen, src[2].ud,
+                                   inst->header_size);
          break;
 
       case SHADER_OPCODE_BYTE_SCATTERED_READ:
+         assert(!inst->header_size);
          assert(src[2].file == BRW_IMMEDIATE_VALUE);
          brw_byte_scattered_read(p, dst, src[0], src[1],
                                  inst->mlen, src[2].ud);
@@ -2127,28 +2222,37 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
       case SHADER_OPCODE_BYTE_SCATTERED_WRITE:
          assert(src[2].file == BRW_IMMEDIATE_VALUE);
          brw_byte_scattered_write(p, src[0], src[1],
-                                  inst->mlen, src[2].ud);
+                                  inst->mlen, src[2].ud,
+                                  inst->header_size);
          break;
 
       case SHADER_OPCODE_TYPED_ATOMIC:
          assert(src[2].file == BRW_IMMEDIATE_VALUE);
          brw_typed_atomic(p, dst, src[0], src[1],
-                          src[2].ud, inst->mlen, !inst->dst.is_null());
+                          src[2].ud, inst->mlen, !inst->dst.is_null(),
+                          inst->header_size);
          break;
 
       case SHADER_OPCODE_TYPED_SURFACE_READ:
          assert(src[2].file == BRW_IMMEDIATE_VALUE);
          brw_typed_surface_read(p, dst, src[0], src[1],
-                                inst->mlen, src[2].ud);
+                                inst->mlen, src[2].ud,
+                                inst->header_size);
          break;
 
       case SHADER_OPCODE_TYPED_SURFACE_WRITE:
          assert(src[2].file == BRW_IMMEDIATE_VALUE);
-         brw_typed_surface_write(p, src[0], src[1], inst->mlen, src[2].ud);
+         brw_typed_surface_write(p, src[0], src[1], inst->mlen, src[2].ud,
+                                 inst->header_size);
          break;
 
       case SHADER_OPCODE_MEMORY_FENCE:
-         brw_memory_fence(p, dst);
+         brw_memory_fence(p, dst, BRW_OPCODE_SEND);
+         break;
+
+      case SHADER_OPCODE_INTERLOCK:
+         /* The interlock is basically a memory fence issued via sendc */
+         brw_memory_fence(p, dst, BRW_OPCODE_SENDC);
          break;
 
       case SHADER_OPCODE_FIND_LIVE_CHANNEL: {
@@ -2166,6 +2270,77 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
          brw_broadcast(p, dst, src[0], src[1]);
          break;
 
+      case SHADER_OPCODE_SHUFFLE:
+         generate_shuffle(inst, dst, src[0], src[1]);
+         break;
+
+      case SHADER_OPCODE_SEL_EXEC:
+         assert(inst->force_writemask_all);
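+         /* Write src1 to every channel first, then let the execution mask
+          * overwrite the enabled channels with src0.
+          */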
+         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
+         brw_MOV(p, dst, src[1]);
+         brw_set_default_mask_control(p, BRW_MASK_ENABLE);
+         brw_MOV(p, dst, src[0]);
+         break;
+
+      case SHADER_OPCODE_QUAD_SWIZZLE:
+         /* This only works on 8-wide 32-bit values */
+         assert(inst->exec_size == 8);
+         assert(type_sz(src[0].type) == 4);
+         assert(inst->force_writemask_all);
+         assert(src[1].file == BRW_IMMEDIATE_VALUE);
+         assert(src[1].type == BRW_REGISTER_TYPE_UD);
+
+         if (src[0].file == BRW_IMMEDIATE_VALUE ||
+             (src[0].vstride == 0 && src[0].hstride == 0)) {
+            /* The value is uniform across all channels */
+            brw_MOV(p, dst, src[0]);
+         } else {
+            brw_set_default_access_mode(p, BRW_ALIGN_16);
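+            /* In Align16 mode the immediate swizzle in src1 applies to each
+             * group of four channels, e.g. BRW_SWIZZLE_XXXX broadcasts the
+             * first channel of every quad.
+             */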
+            struct brw_reg swiz_src = stride(src[0], 4, 4, 1);
+            swiz_src.swizzle = inst->src[1].ud;
+            brw_MOV(p, dst, swiz_src);
+         }
+         break;
+
+      case SHADER_OPCODE_CLUSTER_BROADCAST: {
+         assert(src[0].type == dst.type);
+         assert(!src[0].negate && !src[0].abs);
+         assert(src[1].file == BRW_IMMEDIATE_VALUE);
+         assert(src[1].type == BRW_REGISTER_TYPE_UD);
+         assert(src[2].file == BRW_IMMEDIATE_VALUE);
+         assert(src[2].type == BRW_REGISTER_TYPE_UD);
+         const unsigned component = src[1].ud;
+         const unsigned cluster_size = src[2].ud;
+         struct brw_reg strided = stride(suboffset(src[0], component),
+                                         cluster_size, cluster_size, 0);
+         if (type_sz(src[0].type) > 4 &&
+             (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
+            /* IVB has an issue (which we found empirically) where it reads
+             * two address register components per channel for indirectly
+             * addressed 64-bit sources.
+             *
+             * From the Cherryview PRM Vol 7. "Register Region Restrictions":
+             *
+             *    "When source or destination datatype is 64b or operation is
+             *    integer DWord multiply, indirect addressing must not be
+             *    used."
+             *
+             * To work around both of these, we do two integer MOVs instead of
+             * one 64-bit MOV.  Because no double value should ever cross a
+             * register boundary, it's safe to use the immediate offset in the
+             * indirect here to handle adding 4 bytes to the offset and avoid
+             * the extra ADD to the register file.
+             */
+            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
+                       subscript(strided, BRW_REGISTER_TYPE_D, 0));
+            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
+                       subscript(strided, BRW_REGISTER_TYPE_D, 1));
+         } else {
+            brw_MOV(p, dst, strided);
+         }
+         break;
+      }
+
       case FS_OPCODE_SET_SAMPLE_ID:
          generate_set_sample_id(inst, dst, src[0], src[1]);
          break;
@@ -2295,7 +2470,7 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
 }
 
 const unsigned *
-fs_generator::get_assembly(unsigned int *assembly_size)
+fs_generator::get_assembly()
 {
-   return brw_get_program(p, assembly_size);
+   return brw_get_program(p, &prog_data->program_size);
 }