i965: Be more aggressive in tracking live/dead intervals within loops.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_fs.cpp
index 0353bb54e7236e919b691e9470fa5f1005a15aad..174f622d5955356d09111020e47e9114c88c8c37 100644
@@ -48,11 +48,10 @@ extern "C" {
 #include "../glsl/ir_optimization.h"
 #include "../glsl/ir_print_visitor.h"
 
-static int using_new_fs = -1;
 static struct brw_reg brw_reg_from_fs_reg(class fs_reg *reg);
 
 struct gl_shader *
-brw_new_shader(GLcontext *ctx, GLuint name, GLuint type)
+brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
 {
    struct brw_shader *shader;
 
@@ -67,7 +66,7 @@ brw_new_shader(GLcontext *ctx, GLuint name, GLuint type)
 }
 
 struct gl_shader_program *
-brw_new_shader_program(GLcontext *ctx, GLuint name)
+brw_new_shader_program(struct gl_context *ctx, GLuint name)
 {
    struct brw_shader_program *prog;
    prog = talloc_zero(NULL, struct brw_shader_program);
@@ -79,7 +78,7 @@ brw_new_shader_program(GLcontext *ctx, GLuint name)
 }
 
 GLboolean
-brw_compile_shader(GLcontext *ctx, struct gl_shader *shader)
+brw_compile_shader(struct gl_context *ctx, struct gl_shader *shader)
 {
    if (!_mesa_ir_compile_shader(ctx, shader))
       return GL_FALSE;
@@ -88,64 +87,60 @@ brw_compile_shader(GLcontext *ctx, struct gl_shader *shader)
 }
 
 GLboolean
-brw_link_shader(GLcontext *ctx, struct gl_shader_program *prog)
+brw_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
 {
    struct intel_context *intel = intel_context(ctx);
-   if (using_new_fs == -1)
-      using_new_fs = getenv("INTEL_NEW_FS") != NULL;
-
-   for (unsigned i = 0; i < prog->_NumLinkedShaders; i++) {
-      struct brw_shader *shader = (struct brw_shader *)prog->_LinkedShaders[i];
-
-      if (using_new_fs && shader->base.Type == GL_FRAGMENT_SHADER) {
-        void *mem_ctx = talloc_new(NULL);
-        bool progress;
-
-        if (shader->ir)
-           talloc_free(shader->ir);
-        shader->ir = new(shader) exec_list;
-        clone_ir_list(mem_ctx, shader->ir, shader->base.ir);
-
-        do_mat_op_to_vec(shader->ir);
-        do_mod_to_fract(shader->ir);
-        do_div_to_mul_rcp(shader->ir);
-        do_sub_to_add_neg(shader->ir);
-        do_explog_to_explog2(shader->ir);
-        do_lower_texture_projection(shader->ir);
-        brw_do_cubemap_normalize(shader->ir);
-
-        do {
-           progress = false;
-
-           brw_do_channel_expressions(shader->ir);
-           brw_do_vector_splitting(shader->ir);
-
-           progress = do_lower_jumps(shader->ir, true, true,
-                                     true, /* main return */
-                                     false, /* continue */
-                                     false /* loops */
-                                     ) || progress;
-
-           progress = do_common_optimization(shader->ir, true, 32) || progress;
-
-           progress = lower_noise(shader->ir) || progress;
-           progress =
-              lower_variable_index_to_cond_assign(shader->ir,
-                                                  GL_TRUE, /* input */
-                                                  GL_TRUE, /* output */
-                                                  GL_TRUE, /* temp */
-                                                  GL_TRUE /* uniform */
-                                                  ) || progress;
-           if (intel->gen == 6) {
-              progress = do_if_to_cond_assign(shader->ir) || progress;
-           }
-        } while (progress);
 
-        validate_ir_tree(shader->ir);
+   struct brw_shader *shader =
+      (struct brw_shader *)prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
+   if (shader != NULL) {
+      void *mem_ctx = talloc_new(NULL);
+      bool progress;
 
-        reparent_ir(shader->ir, shader->ir);
-        talloc_free(mem_ctx);
-      }
+      if (shader->ir)
+        talloc_free(shader->ir);
+      shader->ir = new(shader) exec_list;
+      clone_ir_list(mem_ctx, shader->ir, shader->base.ir);
+
+      do_mat_op_to_vec(shader->ir);
+      do_mod_to_fract(shader->ir);
+      do_div_to_mul_rcp(shader->ir);
+      do_sub_to_add_neg(shader->ir);
+      do_explog_to_explog2(shader->ir);
+      do_lower_texture_projection(shader->ir);
+      brw_do_cubemap_normalize(shader->ir);
+
+      do {
+        progress = false;
+
+        brw_do_channel_expressions(shader->ir);
+        brw_do_vector_splitting(shader->ir);
+
+        progress = do_lower_jumps(shader->ir, true, true,
+                                  true, /* main return */
+                                  false, /* continue */
+                                  false /* loops */
+                                  ) || progress;
+
+        progress = do_common_optimization(shader->ir, true, 32) || progress;
+
+        progress = lower_noise(shader->ir) || progress;
+        progress =
+           lower_variable_index_to_cond_assign(shader->ir,
+                                               GL_TRUE, /* input */
+                                               GL_TRUE, /* output */
+                                               GL_TRUE, /* temp */
+                                               GL_TRUE /* uniform */
+                                               ) || progress;
+        if (intel->gen == 6) {
+           progress = do_if_to_cond_assign(shader->ir) || progress;
+        }
+      } while (progress);
+
+      validate_ir_tree(shader->ir);
+
+      reparent_ir(shader->ir, shader->ir);
+      talloc_free(mem_ctx);
    }
 
    if (!_mesa_ir_link_shader(ctx, prog))
@@ -184,9 +179,6 @@ type_size(const struct glsl_type *type)
    }
 }
 
-static const fs_reg reg_undef;
-static const fs_reg reg_null(ARF, BRW_ARF_NULL);
-
 int
 fs_visitor::virtual_grf_alloc(int size)
 {
@@ -214,6 +206,15 @@ fs_reg::fs_reg(enum register_file file, int hw_reg)
    this->type = BRW_REGISTER_TYPE_F;
 }
 
+/** Fixed HW reg constructor. */
+fs_reg::fs_reg(enum register_file file, int hw_reg, uint32_t type)
+{
+   init();
+   this->file = file;
+   this->hw_reg = hw_reg;
+   this->type = type;
+}
+
 int
 brw_type_for_base_type(const struct glsl_type *type)
 {
@@ -498,7 +499,6 @@ fs_visitor::emit_frontfacing_interpolation(ir_variable *ir)
                   *reg,
                   fs_reg(1)));
    } else {
-      fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
       struct brw_reg r1_6ud = retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD);
       /* bit 31 is "primitive is back face", so checking < (1 << 31) gives
        * us front face
@@ -530,6 +530,18 @@ fs_visitor::emit_math(fs_opcodes opcode, fs_reg dst, fs_reg src)
       assert(!"not reached: bad math opcode");
       return NULL;
    }
+
+   /* Can't do hstride == 0 args to gen6 math, so expand it out.  We
+    * might be able to do better by doing execsize = 1 math and then
+    * expanding that result out, but we would need to be careful with
+    * masking.
+    */
+   if (intel->gen >= 6 && src.file == UNIFORM) {
+      fs_reg expanded = fs_reg(this, glsl_type::float_type);
+      emit(fs_inst(BRW_OPCODE_MOV, expanded, src));
+      src = expanded;
+   }
+
    fs_inst *inst = emit(fs_inst(opcode, dst, src));
 
    if (intel->gen < 6) {
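
A side note on the expansion above, as a standalone C++ sketch (not part of the patch): a UNIFORM argument is a <0,1,0> region -- hstride 0, the same scalar broadcast to every channel -- which gen6 math cannot consume, so the MOV first replicates it into an ordinary <8,8,1> GRF region.

    #include <cstdio>

    int main()
    {
       float uniform = 3.14f;             /* hstride 0: a single element */
       float expanded[8];                 /* a full SIMD8 GRF            */

       /* What the emitted BRW_OPCODE_MOV effectively does: */
       for (int ch = 0; ch < 8; ch++)
          expanded[ch] = uniform;

       for (int ch = 0; ch < 8; ch++)
          printf("%.2f ", expanded[ch]);  /* 3.14 printed eight times    */
       printf("\n");
       return 0;
    }
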
@@ -549,10 +561,23 @@ fs_visitor::emit_math(fs_opcodes opcode, fs_reg dst, fs_reg src0, fs_reg src1)
    assert(opcode == FS_OPCODE_POW);
 
    if (intel->gen >= 6) {
+      /* Can't do hstride == 0 args to gen6 math, so expand it out. */
+      if (src0.file == UNIFORM) {
+        fs_reg expanded = fs_reg(this, glsl_type::float_type);
+        emit(fs_inst(BRW_OPCODE_MOV, expanded, src0));
+        src0 = expanded;
+      }
+
+      if (src1.file == UNIFORM) {
+        fs_reg expanded = fs_reg(this, glsl_type::float_type);
+        emit(fs_inst(BRW_OPCODE_MOV, expanded, src1));
+        src1 = expanded;
+      }
+
       inst = emit(fs_inst(opcode, dst, src0, src1));
    } else {
       emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + 1), src1));
-      inst = emit(fs_inst(opcode, dst, src0, reg_null));
+      inst = emit(fs_inst(opcode, dst, src0, reg_null_f));
 
       inst->base_mrf = base_mrf;
       inst->mlen = 2;
@@ -658,7 +683,6 @@ fs_visitor::visit(ir_expression *ir)
 {
    unsigned int operand;
    fs_reg op[2], temp;
-   fs_reg result;
    fs_inst *inst;
 
    for (operand = 0; operand < ir->get_num_operands(); operand++) {
@@ -687,7 +711,10 @@ fs_visitor::visit(ir_expression *ir)
 
    switch (ir->operation) {
    case ir_unop_logic_not:
-      emit(fs_inst(BRW_OPCODE_ADD, this->result, op[0], fs_reg(-1)));
+      /* Note that BRW_OPCODE_NOT is inappropriate here: it takes the
+       * one's complement of the whole register, not just bit 0.
+       */
+      emit(fs_inst(BRW_OPCODE_XOR, this->result, op[0], fs_reg(1)));
       break;
    case ir_unop_neg:
       op[0].negate = !op[0].negate;
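
A standalone sketch, not from the patch, of why the rewrite uses XOR rather than NOT to invert a canonical 0/1 boolean:

    #include <cassert>
    #include <cstdint>

    int main()
    {
       uint32_t canonical_true = 1, canonical_false = 0;

       /* NOT flips every bit, so a NOTed "true" is 0xfffffffe -- still
        * nonzero, i.e. still true when later tested against zero.
        */
       assert(~canonical_true == 0xfffffffeu);

       /* XOR with 1 flips only bit 0, preserving the 0/1 encoding. */
       assert((canonical_true ^ 1u) == canonical_false);
       assert((canonical_false ^ 1u) == canonical_true);
       return 0;
    }
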
@@ -702,12 +729,12 @@ fs_visitor::visit(ir_expression *ir)
 
       emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(0.0f)));
 
-      inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null, op[0], fs_reg(0.0f)));
+      inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f)));
       inst->conditional_mod = BRW_CONDITIONAL_G;
       inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(1.0f)));
       inst->predicated = true;
 
-      inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null, op[0], fs_reg(0.0f)));
+      inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f)));
       inst->conditional_mod = BRW_CONDITIONAL_L;
       inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f)));
       inst->predicated = true;
@@ -824,8 +851,6 @@ fs_visitor::visit(ir_expression *ir)
    case ir_unop_i2f:
    case ir_unop_b2f:
    case ir_unop_b2i:
-      emit(fs_inst(BRW_OPCODE_MOV, this->result, op[0]));
-      break;
    case ir_unop_f2i:
       emit(fs_inst(BRW_OPCODE_MOV, this->result, op[0]));
       break;
@@ -833,12 +858,15 @@ fs_visitor::visit(ir_expression *ir)
    case ir_unop_i2b:
       inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], fs_reg(0.0f)));
       inst->conditional_mod = BRW_CONDITIONAL_NZ;
+      inst = emit(fs_inst(BRW_OPCODE_AND, this->result,
+                         this->result, fs_reg(1)));
+      break;
 
    case ir_unop_trunc:
-      emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0]));
+      emit(fs_inst(BRW_OPCODE_RNDZ, this->result, op[0]));
       break;
    case ir_unop_ceil:
-      op[0].negate = ~op[0].negate;
+      op[0].negate = !op[0].negate;
       inst = emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0]));
       this->result.negate = true;
       break;
@@ -848,6 +876,9 @@ fs_visitor::visit(ir_expression *ir)
    case ir_unop_fract:
       inst = emit(fs_inst(BRW_OPCODE_FRC, this->result, op[0]));
       break;
+   case ir_unop_round_even:
+      emit(fs_inst(BRW_OPCODE_RNDE, this->result, op[0]));
+      break;
 
    case ir_binop_min:
       inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
@@ -904,6 +935,7 @@ fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r,
       for (unsigned int i = 0; i < type->length; i++) {
         emit_assignment_writes(l, r, type->fields.array, predicated);
       }
+      break;
 
    case GLSL_TYPE_STRUCT:
       for (unsigned int i = 0; i < type->length; i++) {
@@ -938,10 +970,7 @@ fs_visitor::visit(ir_assignment *ir)
    assert(r.file != BAD_FILE);
 
    if (ir->condition) {
-      /* Get the condition bool into the predicate. */
-      ir->condition->accept(this);
-      inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null, this->result, fs_reg(0)));
-      inst->conditional_mod = BRW_CONDITIONAL_NZ;
+      emit_bool_to_cond_code(ir->condition);
    }
 
    if (ir->lhs->type->is_scalar() ||
@@ -1017,7 +1046,7 @@ fs_visitor::emit_texture_gen4(ir_texture *ir, fs_reg dst, fs_reg coordinate)
        */
       assert(ir->op == ir_txb || ir->op == ir_txl);
 
-      for (int i = 0; i < ir->coordinate->type->vector_elements * 2;) {
+      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2),
                      coordinate));
         coordinate.reg_offset++;
@@ -1146,6 +1175,7 @@ fs_visitor::emit_texture_gen5(ir_texture *ir, fs_reg dst, fs_reg coordinate)
 void
 fs_visitor::visit(ir_texture *ir)
 {
+   int sampler;
    fs_inst *inst = NULL;
 
    ir->coordinate->accept(this);
@@ -1154,6 +1184,44 @@ fs_visitor::visit(ir_texture *ir)
    /* Should be lowered by do_lower_texture_projection */
    assert(!ir->projector);
 
+   sampler = _mesa_get_sampler_uniform_value(ir->sampler,
+                                            ctx->Shader.CurrentProgram,
+                                            &brw->fragment_program->Base);
+   sampler = c->fp->program.Base.SamplerUnits[sampler];
+
+   /* The 965 requires the EU to do the normalization of GL rectangle
+    * texture coordinates.  We use the program parameter state
+    * tracking to get the scaling factor.
+    */
+   if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) {
+      struct gl_program_parameter_list *params = c->fp->program.Base.Parameters;
+      int tokens[STATE_LENGTH] = {
+        STATE_INTERNAL,
+        STATE_TEXRECT_SCALE,
+        sampler,
+        0,
+        0
+      };
+
+      fs_reg scale_x = fs_reg(UNIFORM, c->prog_data.nr_params);
+      fs_reg scale_y = fs_reg(UNIFORM, c->prog_data.nr_params + 1);
+      GLuint index = _mesa_add_state_reference(params,
+                                              (gl_state_index *)tokens);
+      float *vec_values = this->fp->Base.Parameters->ParameterValues[index];
+
+      c->prog_data.param[c->prog_data.nr_params++] = &vec_values[0];
+      c->prog_data.param[c->prog_data.nr_params++] = &vec_values[1];
+
+      fs_reg dst = fs_reg(this, ir->coordinate->type);
+      fs_reg src = coordinate;
+      coordinate = dst;
+
+      emit(fs_inst(BRW_OPCODE_MUL, dst, src, scale_x));
+      dst.reg_offset++;
+      src.reg_offset++;
+      emit(fs_inst(BRW_OPCODE_MUL, dst, src, scale_y));
+   }
+
    /* Writemasking doesn't eliminate channels on SIMD8 texture
     * samples, so don't worry about them.
     */
@@ -1165,11 +1233,7 @@ fs_visitor::visit(ir_texture *ir)
       inst = emit_texture_gen5(ir, dst, coordinate);
    }
 
-   inst->sampler =
-      _mesa_get_sampler_uniform_value(ir->sampler,
-                                     ctx->Shader.CurrentProgram,
-                                     &brw->fragment_program->Base);
-   inst->sampler = c->fp->program.Base.SamplerUnits[inst->sampler];
+   inst->sampler = sampler;
 
    this->result = dst;
 
@@ -1244,8 +1308,8 @@ fs_visitor::visit(ir_discard *ir)
 
    assert(ir->condition == NULL); /* FINISHME */
 
-   emit(fs_inst(FS_OPCODE_DISCARD_NOT, temp, reg_null));
-   emit(fs_inst(FS_OPCODE_DISCARD_AND, reg_null, temp));
+   emit(fs_inst(FS_OPCODE_DISCARD_NOT, temp, reg_null_d));
+   emit(fs_inst(FS_OPCODE_DISCARD_AND, reg_null_d, temp));
    kill_emitted = true;
 }
 
@@ -1276,6 +1340,206 @@ fs_visitor::visit(ir_constant *ir)
    }
 }
 
+void
+fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir)
+{
+   ir_expression *expr = ir->as_expression();
+
+   if (expr) {
+      fs_reg op[2];
+      fs_inst *inst;
+
+      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
+        assert(expr->operands[i]->type->is_scalar());
+
+        expr->operands[i]->accept(this);
+        op[i] = this->result;
+      }
+
+      switch (expr->operation) {
+      case ir_unop_logic_not:
+        inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d, op[0], fs_reg(1)));
+        inst->conditional_mod = BRW_CONDITIONAL_Z;
+        break;
+
+      case ir_binop_logic_xor:
+        inst = emit(fs_inst(BRW_OPCODE_XOR, reg_null_d, op[0], op[1]));
+        inst->conditional_mod = BRW_CONDITIONAL_NZ;
+        break;
+
+      case ir_binop_logic_or:
+        inst = emit(fs_inst(BRW_OPCODE_OR, reg_null_d, op[0], op[1]));
+        inst->conditional_mod = BRW_CONDITIONAL_NZ;
+        break;
+
+      case ir_binop_logic_and:
+        inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d, op[0], op[1]));
+        inst->conditional_mod = BRW_CONDITIONAL_NZ;
+        break;
+
+      case ir_unop_f2b:
+        if (intel->gen >= 6) {
+           inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d,
+                               op[0], fs_reg(0.0f)));
+        } else {
+           inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, op[0]));
+        }
+        inst->conditional_mod = BRW_CONDITIONAL_NZ;
+        break;
+
+      case ir_unop_i2b:
+        if (intel->gen >= 6) {
+           inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0)));
+        } else {
+           inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, op[0]));
+        }
+        inst->conditional_mod = BRW_CONDITIONAL_NZ;
+        break;
+
+      case ir_binop_greater:
+        inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
+        inst->conditional_mod = BRW_CONDITIONAL_G;
+        break;
+      case ir_binop_gequal:
+        inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
+        inst->conditional_mod = BRW_CONDITIONAL_GE;
+        break;
+      case ir_binop_less:
+        inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
+        inst->conditional_mod = BRW_CONDITIONAL_L;
+        break;
+      case ir_binop_lequal:
+        inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
+        inst->conditional_mod = BRW_CONDITIONAL_LE;
+        break;
+      case ir_binop_equal:
+      case ir_binop_all_equal:
+        inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
+        inst->conditional_mod = BRW_CONDITIONAL_Z;
+        break;
+      case ir_binop_nequal:
+      case ir_binop_any_nequal:
+        inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
+        inst->conditional_mod = BRW_CONDITIONAL_NZ;
+        break;
+      default:
+        assert(!"not reached");
+        this->fail = true;
+        break;
+      }
+      return;
+   }
+
+   ir->accept(this);
+
+   if (intel->gen >= 6) {
+      fs_inst *inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d,
+                                  this->result, fs_reg(1)));
+      inst->conditional_mod = BRW_CONDITIONAL_NZ;
+   } else {
+      fs_inst *inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, this->result));
+      inst->conditional_mod = BRW_CONDITIONAL_NZ;
+   }
+}
+
+/**
+ * Emit a gen6 IF statement with the comparison folded into the IF
+ * instruction.
+ */
+void
+fs_visitor::emit_if_gen6(ir_if *ir)
+{
+   ir_expression *expr = ir->condition->as_expression();
+
+   if (expr) {
+      fs_reg op[2];
+      fs_inst *inst;
+      fs_reg temp;
+
+      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
+        assert(expr->operands[i]->type->is_scalar());
+
+        expr->operands[i]->accept(this);
+        op[i] = this->result;
+      }
+
+      switch (expr->operation) {
+      case ir_unop_logic_not:
+        inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(1)));
+        inst->conditional_mod = BRW_CONDITIONAL_Z;
+        return;
+
+      case ir_binop_logic_xor:
+        inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
+        inst->conditional_mod = BRW_CONDITIONAL_NZ;
+        return;
+
+      case ir_binop_logic_or:
+        temp = fs_reg(this, glsl_type::bool_type);
+        emit(fs_inst(BRW_OPCODE_OR, temp, op[0], op[1]));
+        inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0)));
+        inst->conditional_mod = BRW_CONDITIONAL_NZ;
+        return;
+
+      case ir_binop_logic_and:
+        temp = fs_reg(this, glsl_type::bool_type);
+        emit(fs_inst(BRW_OPCODE_AND, temp, op[0], op[1]));
+        inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0)));
+        inst->conditional_mod = BRW_CONDITIONAL_NZ;
+        return;
+
+      case ir_unop_f2b:
+        inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_f, op[0], fs_reg(0.0f)));
+        inst->conditional_mod = BRW_CONDITIONAL_NZ;
+        return;
+
+      case ir_unop_i2b:
+        inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)));
+        inst->conditional_mod = BRW_CONDITIONAL_NZ;
+        return;
+
+      case ir_binop_greater:
+        inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
+        inst->conditional_mod = BRW_CONDITIONAL_G;
+        return;
+      case ir_binop_gequal:
+        inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
+        inst->conditional_mod = BRW_CONDITIONAL_GE;
+        return;
+      case ir_binop_less:
+        inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
+        inst->conditional_mod = BRW_CONDITIONAL_L;
+        return;
+      case ir_binop_lequal:
+        inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
+        inst->conditional_mod = BRW_CONDITIONAL_LE;
+        return;
+      case ir_binop_equal:
+      case ir_binop_all_equal:
+        inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
+        inst->conditional_mod = BRW_CONDITIONAL_Z;
+        return;
+      case ir_binop_nequal:
+      case ir_binop_any_nequal:
+        inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
+        inst->conditional_mod = BRW_CONDITIONAL_NZ;
+        return;
+      default:
+        assert(!"not reached");
+        inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)));
+        inst->conditional_mod = BRW_CONDITIONAL_NZ;
+        this->fail = true;
+        return;
+      }
+      return;
+   }
+
+   ir->condition->accept(this);
+
+   fs_inst *inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, this->result, fs_reg(0)));
+   inst->conditional_mod = BRW_CONDITIONAL_NZ;
+}
+
 void
 fs_visitor::visit(ir_if *ir)
 {
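
The two paths above must agree on semantics: emit_if_gen6() folds the comparison into the IF, while the pre-gen6 path first materializes a canonical bool and tests it against zero. A small standalone C++ equivalence check of the two shapes (illustrative only):

    #include <cassert>

    static bool if_gen6_style(float a, float b)
    {
       return a < b;                /* IF with conditional_mod = L      */
    }

    static bool pre_gen6_style(float a, float b)
    {
       int cond = (a < b) ? 1 : 0;  /* CMP writing a canonical 0/1      */
       return cond != 0;            /* flag test + predicated IF        */
    }

    int main()
    {
       float cases[][2] = { {0, 1}, {1, 0}, {2, 2} };
       for (auto &c : cases)
          assert(if_gen6_style(c[0], c[1]) == pre_gen6_style(c[0], c[1]));
       return 0;
    }
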
@@ -1286,13 +1550,14 @@ fs_visitor::visit(ir_if *ir)
     */
    this->base_ir = ir->condition;
 
-   /* Generate the condition into the condition code. */
-   ir->condition->accept(this);
-   inst = emit(fs_inst(BRW_OPCODE_MOV, fs_reg(brw_null_reg()), this->result));
-   inst->conditional_mod = BRW_CONDITIONAL_NZ;
+   if (intel->gen >= 6) {
+      emit_if_gen6(ir);
+   } else {
+      emit_bool_to_cond_code(ir->condition);
 
-   inst = emit(fs_inst(BRW_OPCODE_IF));
-   inst->predicated = true;
+      inst = emit(fs_inst(BRW_OPCODE_IF));
+      inst->predicated = true;
+   }
 
    foreach_iter(exec_list_iterator, iter, ir->then_instructions) {
       ir_instruction *ir = (ir_instruction *)iter.get();
@@ -1339,7 +1604,7 @@ fs_visitor::visit(ir_loop *ir)
       this->base_ir = ir->to;
       ir->to->accept(this);
 
-      fs_inst *inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null,
+      fs_inst *inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d,
                                   counter, this->result));
       switch (ir->cmp) {
       case ir_binop_equal:
@@ -1554,19 +1819,28 @@ fs_visitor::emit_interpolation_setup_gen6()
 
    /* If the pixel centers end up used, the setup is the same as for gen4. */
    this->current_annotation = "compute pixel centers";
-   this->pixel_x = fs_reg(this, glsl_type::uint_type);
-   this->pixel_y = fs_reg(this, glsl_type::uint_type);
-   this->pixel_x.type = BRW_REGISTER_TYPE_UW;
-   this->pixel_y.type = BRW_REGISTER_TYPE_UW;
+   fs_reg int_pixel_x = fs_reg(this, glsl_type::uint_type);
+   fs_reg int_pixel_y = fs_reg(this, glsl_type::uint_type);
+   int_pixel_x.type = BRW_REGISTER_TYPE_UW;
+   int_pixel_y.type = BRW_REGISTER_TYPE_UW;
    emit(fs_inst(BRW_OPCODE_ADD,
-               this->pixel_x,
+               int_pixel_x,
                fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
                fs_reg(brw_imm_v(0x10101010))));
    emit(fs_inst(BRW_OPCODE_ADD,
-               this->pixel_y,
+               int_pixel_y,
                fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
                fs_reg(brw_imm_v(0x11001100))));
 
+   /* As of gen6, we can no longer mix float and int sources.  We have
+    * to turn the integer pixel centers into floats for their actual
+    * use.
+    */
+   this->pixel_x = fs_reg(this, glsl_type::float_type);
+   this->pixel_y = fs_reg(this, glsl_type::float_type);
+   emit(fs_inst(BRW_OPCODE_MOV, this->pixel_x, int_pixel_x));
+   emit(fs_inst(BRW_OPCODE_MOV, this->pixel_y, int_pixel_y));
+
    this->current_annotation = "compute 1/pos.w";
    this->wpos_w = fs_reg(brw_vec8_grf(c->key.source_w_reg, 0));
    this->pixel_w = fs_reg(this, glsl_type::float_type);
@@ -1825,17 +2099,17 @@ fs_visitor::generate_tex(fs_inst *inst, struct brw_reg dst)
          */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
         if (inst->shadow_compare) {
-           assert(inst->mlen == 5);
+           assert(inst->mlen == 6);
         } else {
-           assert(inst->mlen <= 6);
+           assert(inst->mlen <= 4);
         }
         break;
       case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
-           assert(inst->mlen == 5);
+           assert(inst->mlen == 6);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
         } else {
-           assert(inst->mlen == 8);
+           assert(inst->mlen == 9);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
            simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         }
@@ -1948,6 +2222,47 @@ fs_visitor::generate_discard_and(fs_inst *inst, struct brw_reg mask)
    brw_pop_insn_state(p);
 }
 
+void
+fs_visitor::generate_spill(fs_inst *inst, struct brw_reg src)
+{
+   assert(inst->mlen != 0);
+
+   brw_MOV(p,
+          retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_UD),
+          retype(src, BRW_REGISTER_TYPE_UD));
+   brw_oword_block_write(p, brw_message_reg(inst->base_mrf), 1, inst->offset);
+}
+
+void
+fs_visitor::generate_unspill(fs_inst *inst, struct brw_reg dst)
+{
+   assert(inst->mlen != 0);
+
+   /* Clear any post-destination dependencies that would be ignored by
+    * the block read.  See the B-Spec for the pre-gen5 send instruction.
+    *
+    * This could use a better solution, since texture sampling and
+    * math reads could potentially run into it as well -- anywhere
+    * that we have a SEND with a destination that is a register that
+    * was written but not read within the last N instructions (what's
+    * N?  unsure).  This is rare because of dead code elimination, but
+    * not impossible.
+    */
+   if (intel->gen == 4 && !intel->is_g4x)
+      brw_MOV(p, brw_null_reg(), dst);
+
+   brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf), 1,
+                       inst->offset);
+
+   if (intel->gen == 4 && !intel->is_g4x) {
+      /* gen4 errata: destination from a send can't be used as a
+       * destination until it's been read.  Just read it so we don't
+       * have to worry.
+       */
+      brw_MOV(p, brw_null_reg(), dst);
+   }
+}
+
 void
 fs_visitor::assign_curb_setup()
 {
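
A hedged sketch of how scratch offsets for these SPILL/UNSPILL messages might be laid out -- one REG_SIZE slot per allocated register of each spilled virtual GRF, with inst->offset selecting the slot passed to brw_oword_block_write()/read(). Names and numbers here are hypothetical:

    #include <cstdio>

    static const int REG_SIZE = 32;   /* one GRF is 32 bytes on i965 */

    int main()
    {
       int virtual_grf_sizes[] = { 0, 1, 4, 2 };  /* vgrf 0 unused */
       int next_scratch_offset = 0;

       for (int i = 1; i < 4; i++) {
          int offset = next_scratch_offset;       /* inst->offset   */
          next_scratch_offset += virtual_grf_sizes[i] * REG_SIZE;
          printf("vgrf%d spills at scratch offset %d\n", i, offset);
       }
       return 0;
    }
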
@@ -2032,218 +2347,90 @@ fs_visitor::assign_urb_setup()
    this->first_non_payload_grf = urb_start + c->prog_data.urb_read_length;
 }
 
-static void
-assign_reg(int *reg_hw_locations, fs_reg *reg)
-{
-   if (reg->file == GRF && reg->reg != 0) {
-      reg->hw_reg = reg_hw_locations[reg->reg] + reg->reg_offset;
-      reg->reg = 0;
-   }
-}
-
-void
-fs_visitor::assign_regs_trivial()
-{
-   int last_grf = 0;
-   int hw_reg_mapping[this->virtual_grf_next];
-   int i;
-
-   hw_reg_mapping[0] = 0;
-   hw_reg_mapping[1] = this->first_non_payload_grf;
-   for (i = 2; i < this->virtual_grf_next; i++) {
-      hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
-                          this->virtual_grf_sizes[i - 1]);
-   }
-   last_grf = hw_reg_mapping[i - 1] + this->virtual_grf_sizes[i - 1];
-
-   foreach_iter(exec_list_iterator, iter, this->instructions) {
-      fs_inst *inst = (fs_inst *)iter.get();
-
-      assign_reg(hw_reg_mapping, &inst->dst);
-      assign_reg(hw_reg_mapping, &inst->src[0]);
-      assign_reg(hw_reg_mapping, &inst->src[1]);
-   }
-
-   this->grf_used = last_grf + 1;
-}
-
+/**
+ * Split large virtual GRFs into separate components if we can.
+ *
+ * This mostly duplicates what brw_fs_vector_splitting does, but that
+ * pass is very conservative, because it's afraid of doing splitting
+ * that doesn't result in real progress after the rest of the
+ * optimization phases, which would cause infinite looping in
+ * optimization.  We can do it once here, safely.  This also has the
+ * opportunity to split interpolated values, or maybe even uniforms,
+ * which we don't have at the IR level.
+ *
+ * We want to split, because virtual GRFs are what we register
+ * allocate and spill (due to contiguity requirements for some
+ * instructions), and they're what we naturally generate in the
+ * codegen process, but most virtual GRFs don't actually need to be
+ * contiguous sets of GRFs.  If we split, we'll end up with reduced
+ * live intervals and better dead code elimination and coalescing.
+ */
 void
-fs_visitor::assign_regs()
+fs_visitor::split_virtual_grfs()
 {
-   int last_grf = 0;
-   int hw_reg_mapping[this->virtual_grf_next + 1];
-   int base_reg_count = BRW_MAX_GRF - this->first_non_payload_grf;
-   int class_sizes[base_reg_count];
-   int class_count = 0;
-   int aligned_pair_class = -1;
-
-   /* Set up the register classes.
-    *
-    * The base registers store a scalar value.  For texture samples,
-    * we get virtual GRFs composed of 4 contiguous hw register.  For
-    * structures and arrays, we store them as contiguous larger things
-    * than that, though we should be able to do better most of the
-    * time.
-    */
-   class_sizes[class_count++] = 1;
-   if (brw->has_pln && intel->gen < 6) {
-      /* Always set up the (unaligned) pairs for gen5, so we can find
-       * them for making the aligned pair class.
-       */
-      class_sizes[class_count++] = 2;
-   }
-   for (int r = 1; r < this->virtual_grf_next; r++) {
-      int i;
-
-      for (i = 0; i < class_count; i++) {
-        if (class_sizes[i] == this->virtual_grf_sizes[r])
-           break;
-      }
-      if (i == class_count) {
-        if (this->virtual_grf_sizes[r] >= base_reg_count) {
-           fprintf(stderr, "Object too large to register allocate.\n");
-           this->fail = true;
-        }
-
-        class_sizes[class_count++] = this->virtual_grf_sizes[r];
-      }
-   }
-
-   int ra_reg_count = 0;
-   int class_base_reg[class_count];
-   int class_reg_count[class_count];
-   int classes[class_count + 1];
-
-   for (int i = 0; i < class_count; i++) {
-      class_base_reg[i] = ra_reg_count;
-      class_reg_count[i] = base_reg_count - (class_sizes[i] - 1);
-      ra_reg_count += class_reg_count[i];
-   }
-
-   struct ra_regs *regs = ra_alloc_reg_set(ra_reg_count);
-   for (int i = 0; i < class_count; i++) {
-      classes[i] = ra_alloc_reg_class(regs);
-
-      for (int i_r = 0; i_r < class_reg_count[i]; i_r++) {
-        ra_class_add_reg(regs, classes[i], class_base_reg[i] + i_r);
-      }
-
-      /* Add conflicts between our contiguous registers aliasing
-       * base regs and other register classes' contiguous registers
-       * that alias base regs, or the base regs themselves for classes[0].
-       */
-      for (int c = 0; c <= i; c++) {
-        for (int i_r = 0; i_r < class_reg_count[i]; i_r++) {
-           for (int c_r = MAX2(0, i_r - (class_sizes[c] - 1));
-                c_r < MIN2(class_reg_count[c], i_r + class_sizes[i]);
-                c_r++) {
-
-              if (0) {
-                 printf("%d/%d conflicts %d/%d\n",
-                        class_sizes[i], this->first_non_payload_grf + i_r,
-                        class_sizes[c], this->first_non_payload_grf + c_r);
-              }
+   int num_vars = this->virtual_grf_next;
+   bool split_grf[num_vars];
+   int new_virtual_grf[num_vars];
 
-              ra_add_reg_conflict(regs,
-                                  class_base_reg[i] + i_r,
-                                  class_base_reg[c] + c_r);
-           }
-        }
-      }
+   /* Try to split anything larger than one register. */
+   for (int i = 0; i < num_vars; i++) {
+      if (this->virtual_grf_sizes[i] != 1)
+        split_grf[i] = true;
+      else
+        split_grf[i] = false;
    }
 
-   /* Add a special class for aligned pairs, which we'll put delta_x/y
-    * in on gen5 so that we can do PLN.
-    */
-   if (brw->has_pln && intel->gen < 6) {
-      int reg_count = (base_reg_count - 1) / 2;
-      int unaligned_pair_class = 1;
-      assert(class_sizes[unaligned_pair_class] == 2);
-
-      aligned_pair_class = class_count;
-      classes[aligned_pair_class] = ra_alloc_reg_class(regs);
-      class_base_reg[aligned_pair_class] = 0;
-      class_reg_count[aligned_pair_class] = 0;
-      int start = (this->first_non_payload_grf & 1) ? 1 : 0;
-
-      for (int i = 0; i < reg_count; i++) {
-        ra_class_add_reg(regs, classes[aligned_pair_class],
-                         class_base_reg[unaligned_pair_class] + i * 2 + start);
-      }
-      class_count++;
+   if (brw->has_pln) {
+      /* PLN opcodes rely on the delta_xy being contiguous. */
+      split_grf[this->delta_x.reg] = false;
    }
 
-   ra_set_finalize(regs);
-
-   struct ra_graph *g = ra_alloc_interference_graph(regs,
-                                                   this->virtual_grf_next);
-   /* Node 0 is just a placeholder to keep virtual_grf[] mapping 1:1
-    * with nodes.
-    */
-   ra_set_node_class(g, 0, classes[0]);
-
-   for (int i = 1; i < this->virtual_grf_next; i++) {
-      for (int c = 0; c < class_count; c++) {
-        if (class_sizes[c] == this->virtual_grf_sizes[i]) {
-           if (aligned_pair_class >= 0 &&
-               this->delta_x.reg == i) {
-              ra_set_node_class(g, i, classes[aligned_pair_class]);
-           } else {
-              ra_set_node_class(g, i, classes[c]);
-           }
-           break;
-        }
-      }
+   foreach_iter(exec_list_iterator, iter, this->instructions) {
+      fs_inst *inst = (fs_inst *)iter.get();
 
-      for (int j = 1; j < i; j++) {
-        if (virtual_grf_interferes(i, j)) {
-           ra_add_node_interference(g, i, j);
-        }
+      /* Texturing produces 4 contiguous registers, so no splitting. */
+      if ((inst->opcode == FS_OPCODE_TEX ||
+          inst->opcode == FS_OPCODE_TXB ||
+          inst->opcode == FS_OPCODE_TXL) &&
+         inst->dst.file == GRF) {
+        split_grf[inst->dst.reg] = false;
       }
    }
 
-   /* FINISHME: Handle spilling */
-   if (!ra_allocate_no_spills(g)) {
-      fprintf(stderr, "Failed to allocate registers.\n");
-      this->fail = true;
-      return;
-   }
-
-   /* Get the chosen virtual registers for each node, and map virtual
-    * regs in the register classes back down to real hardware reg
-    * numbers.
+   /* Allocate new space for split regs.  Note that the virtual
+    * numbers will be contiguous.
     */
-   hw_reg_mapping[0] = 0; /* unused */
-   for (int i = 1; i < this->virtual_grf_next; i++) {
-      int reg = ra_get_node_reg(g, i);
-      int hw_reg = -1;
-
-      for (int c = 0; c < class_count; c++) {
-        if (reg >= class_base_reg[c] &&
-            reg < class_base_reg[c] + class_reg_count[c]) {
-           hw_reg = reg - class_base_reg[c];
-           break;
+   for (int i = 0; i < num_vars; i++) {
+      if (split_grf[i]) {
+        new_virtual_grf[i] = virtual_grf_alloc(1);
+        for (int j = 2; j < this->virtual_grf_sizes[i]; j++) {
+           int reg = virtual_grf_alloc(1);
+           assert(reg == new_virtual_grf[i] + j - 1);
         }
+        this->virtual_grf_sizes[i] = 1;
       }
-
-      assert(hw_reg != -1);
-      hw_reg_mapping[i] = this->first_non_payload_grf + hw_reg;
-      last_grf = MAX2(last_grf,
-                     hw_reg_mapping[i] + this->virtual_grf_sizes[i] - 1);
    }
 
    foreach_iter(exec_list_iterator, iter, this->instructions) {
       fs_inst *inst = (fs_inst *)iter.get();
 
-      assign_reg(hw_reg_mapping, &inst->dst);
-      assign_reg(hw_reg_mapping, &inst->src[0]);
-      assign_reg(hw_reg_mapping, &inst->src[1]);
+      if (inst->dst.file == GRF &&
+         split_grf[inst->dst.reg] &&
+         inst->dst.reg_offset != 0) {
+        inst->dst.reg = (new_virtual_grf[inst->dst.reg] +
+                         inst->dst.reg_offset - 1);
+        inst->dst.reg_offset = 0;
+      }
+      for (int i = 0; i < 3; i++) {
+        if (inst->src[i].file == GRF &&
+            split_grf[inst->src[i].reg] &&
+            inst->src[i].reg_offset != 0) {
+           inst->src[i].reg = (new_virtual_grf[inst->src[i].reg] +
+                               inst->src[i].reg_offset - 1);
+           inst->src[i].reg_offset = 0;
+        }
+      }
    }
-
-   this->grf_used = last_grf + 1;
-
-   talloc_free(g);
-   talloc_free(regs);
 }
 
 void
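
A worked, standalone example of the remapping performed above: a size-4 virtual GRF (e.g. a structure or array temporary) keeps reg_offset 0 in place, while offsets 1..3 move to freshly allocated single-register vgrfs. All numbers are illustrative:

    #include <cassert>

    int main()
    {
       int virtual_grf_next = 6;      /* vgrfs 0..5 exist; vgrf 5 has size 4 */
       int new_virtual_grf_5 = virtual_grf_next;  /* first new single reg    */
       virtual_grf_next += 3;         /* offsets 1..3 -> vgrfs 6, 7, 8       */

       /* A source reading vgrf5 at reg_offset 2 is rewritten to: */
       int reg_offset = 2;
       int new_reg = new_virtual_grf_5 + reg_offset - 1;
       assert(new_reg == 7);          /* vgrf 7, with reg_offset reset to 0  */
       return 0;
    }
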
@@ -2254,6 +2441,7 @@ fs_visitor::calculate_live_intervals()
    int *use = talloc_array(mem_ctx, int, num_vars);
    int loop_depth = 0;
    int loop_start = 0;
+   int bb_header_ip = 0;
 
    for (int i = 0; i < num_vars; i++) {
       def[i] = 1 << 30;
@@ -2271,12 +2459,8 @@ fs_visitor::calculate_live_intervals()
         loop_depth--;
 
         if (loop_depth == 0) {
-           /* FINISHME:
-            *
-            * Patches up any vars marked for use within the loop as
-            * live until the end.  This is conservative, as there
-            * will often be variables defined and used inside the
-            * loop but dead at the end of the loop body.
+           /* Patch up the uses of vars marked as live across the
+            * whole loop.
             */
            for (int i = 0; i < num_vars; i++) {
               if (use[i] == loop_start) {
@@ -2285,22 +2469,53 @@ fs_visitor::calculate_live_intervals()
            }
         }
       } else {
-        int eip = ip;
-
-        if (loop_depth)
-           eip = loop_start;
-
         for (unsigned int i = 0; i < 3; i++) {
            if (inst->src[i].file == GRF && inst->src[i].reg != 0) {
-              use[inst->src[i].reg] = MAX2(use[inst->src[i].reg], eip);
+              int reg = inst->src[i].reg;
+
+              if (!loop_depth || (this->virtual_grf_sizes[reg] == 1 &&
+                                  def[reg] >= bb_header_ip)) {
+                 use[reg] = ip;
+              } else {
+                 def[reg] = MIN2(loop_start, def[reg]);
+                 use[reg] = loop_start;
+
+                 /* Nothing else will now push our start later in
+                  * the loop, because def[reg] already points before
+                  * the bb header.
+                  */
+              }
            }
         }
         if (inst->dst.file == GRF && inst->dst.reg != 0) {
-           def[inst->dst.reg] = MIN2(def[inst->dst.reg], eip);
+           int reg = inst->dst.reg;
+
+           if (!loop_depth || (this->virtual_grf_sizes[reg] == 1 &&
+                               !inst->predicated)) {
+              def[reg] = MIN2(def[reg], ip);
+           } else {
+              def[reg] = MIN2(def[reg], loop_start);
+           }
         }
       }
 
       ip++;
+
+      /* Set the basic block header IP.  This is used for determining
+       * whether a complete def of a single-register virtual GRF in a
+       * loop dominates a use in the same basic block.  It's a quick
+       * way to reduce the live interval range of most registers used
+       * in a loop.
+       */
+      if (inst->opcode == BRW_OPCODE_IF ||
+         inst->opcode == BRW_OPCODE_ELSE ||
+         inst->opcode == BRW_OPCODE_ENDIF ||
+         inst->opcode == BRW_OPCODE_DO ||
+         inst->opcode == BRW_OPCODE_WHILE ||
+         inst->opcode == BRW_OPCODE_BREAK ||
+         inst->opcode == BRW_OPCODE_CONTINUE) {
+        bb_header_ip = ip;
+      }
    }
 
    talloc_free(this->virtual_grf_def);
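
A standalone trace of the new interval rule, matching the use-handling code above on a hypothetical loop body; this is the heart of the commit:

    #include <cassert>
    #include <algorithm>

    /* Hypothetical loop:
     *   ip 10: DO                (loop_start = 10; header IP becomes 11)
     *   ip 11: ADD vgrf3, ...    (def inside the loop body)
     *   ip 12: MUL ..., vgrf3    (use in the same basic block)
     *   ip 13: WHILE
     * vgrf3 is single-register and fully defined after the last bb
     * header, so its interval stays [11,12] rather than being
     * stretched across the whole loop as the old code did.
     */
    int main()
    {
       int loop_start = 10, bb_header_ip = 11;
       int def = 11, size = 1;
       bool in_loop = true;

       int ip = 12, use;
       if (!in_loop || (size == 1 && def >= bb_header_ip)) {
          use = ip;                          /* tight interval          */
       } else {
          def = std::min(loop_start, def);   /* conservative fallback:  */
          use = loop_start;                  /* live across the loop    */
       }
       assert(def == 11 && use == 12);
       return 0;
    }
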
@@ -2487,6 +2702,10 @@ fs_visitor::register_coalesce()
         continue;
       }
 
+      /* Update live interval so we don't have to recalculate. */
+      this->virtual_grf_use[inst->src[0].reg] = MAX2(virtual_grf_use[inst->src[0].reg],
+                                                    virtual_grf_use[inst->dst.reg]);
+
       /* Rewrite the later usage to point at the source of the move to
        * be removed.
        */
@@ -2759,6 +2978,9 @@ fs_visitor::generate_code()
       case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
+      case BRW_OPCODE_RNDE:
+        brw_RNDE(p, dst, src[0]);
+        break;
       case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;
@@ -2794,10 +3016,16 @@ fs_visitor::generate_code()
 
       case BRW_OPCODE_IF:
         assert(if_stack_depth < 16);
-        if_stack[if_stack_depth] = brw_IF(p, BRW_EXECUTE_8);
+        if (inst->src[0].file != BAD_FILE) {
+           assert(intel->gen >= 6);
+           if_stack[if_stack_depth] = brw_IF_gen6(p, inst->conditional_mod, src[0], src[1]);
+        } else {
+           if_stack[if_stack_depth] = brw_IF(p, BRW_EXECUTE_8);
+        }
         if_depth_in_loop[loop_stack_depth]++;
         if_stack_depth++;
         break;
+
       case BRW_OPCODE_ELSE:
         if_stack[if_stack_depth - 1] =
            brw_ELSE(p, if_stack[if_stack_depth - 1]);
@@ -2877,6 +3105,15 @@ fs_visitor::generate_code()
       case FS_OPCODE_DDY:
         generate_ddy(inst, dst, src[0]);
         break;
+
+      case FS_OPCODE_SPILL:
+        generate_spill(inst, src[0]);
+        break;
+
+      case FS_OPCODE_UNSPILL:
+        generate_unspill(inst, dst);
+        break;
+
       case FS_OPCODE_FB_WRITE:
         generate_fb_write(inst);
         break;
@@ -2918,22 +3155,14 @@ brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c)
 {
    struct brw_compile *p = &c->func;
    struct intel_context *intel = &brw->intel;
-   GLcontext *ctx = &intel->ctx;
-   struct brw_shader *shader = NULL;
+   struct gl_context *ctx = &intel->ctx;
    struct gl_shader_program *prog = ctx->Shader.CurrentProgram;
 
    if (!prog)
       return GL_FALSE;
 
-   if (!using_new_fs)
-      return GL_FALSE;
-
-   for (unsigned int i = 0; i < prog->_NumLinkedShaders; i++) {
-      if (prog->_LinkedShaders[i]->Type == GL_FRAGMENT_SHADER) {
-        shader = (struct brw_shader *)prog->_LinkedShaders[i];
-        break;
-      }
-   }
+   struct brw_shader *shader =
+      (struct brw_shader *) prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
    if (!shader)
       return GL_FALSE;
 
@@ -2974,13 +3203,15 @@ brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c)
       }
 
       v.emit_fb_writes();
+
+      v.split_virtual_grfs();
+
       v.assign_curb_setup();
       v.assign_urb_setup();
 
       bool progress;
       do {
         progress = false;
-
         v.calculate_live_intervals();
         progress = v.propagate_constants() || progress;
         progress = v.register_coalesce() || progress;
@@ -2988,10 +3219,25 @@ brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c)
         progress = v.dead_code_eliminate() || progress;
       } while (progress);
 
+      if (0) {
+        /* Debug of register spilling: Go spill everything. */
+        int virtual_grf_count = v.virtual_grf_next;
+        for (int i = 1; i < virtual_grf_count; i++) {
+           v.spill_reg(i);
+        }
+        v.calculate_live_intervals();
+      }
+
       if (0)
         v.assign_regs_trivial();
-      else
-        v.assign_regs();
+      else {
+        while (!v.assign_regs()) {
+           if (v.fail)
+              break;
+
+           v.calculate_live_intervals();
+        }
+      }
    }
 
    if (!v.fail)
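
A minimal sketch of the allocate/spill/retry loop above, assuming -- hypothetically, since the real work happens inside fs_visitor::assign_regs() and spill_reg() -- that a failed coloring spills one candidate to scratch and returns false, so the caller recomputes liveness and retries:

    #include <cstdio>

    struct allocator {
       int pressure = 3, available = 2;
       bool fail = false;

       bool assign_regs() {
          if (pressure <= available)
             return true;           /* graph coloring succeeded      */
          pressure--;               /* spill_reg(): one vgrf moves   */
          return false;             /* to scratch; caller retries    */
       }
       void calculate_live_intervals() { /* liveness changed by spill */ }
    };

    int main()
    {
       allocator v;
       while (!v.assign_regs()) {
          if (v.fail)
             break;
          v.calculate_live_intervals();
       }
       printf("allocated with pressure %d\n", v.pressure);
       return 0;
    }
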
@@ -3021,13 +3267,19 @@ brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c)
            if (last_annotation_string)
               printf("   %s\n", last_annotation_string);
         }
+        if (0) {
+           printf("0x%08x 0x%08x 0x%08x 0x%08x ",
+                  ((uint32_t *)&p->store[i])[3],
+                  ((uint32_t *)&p->store[i])[2],
+                  ((uint32_t *)&p->store[i])[1],
+                  ((uint32_t *)&p->store[i])[0]);
+        }
         brw_disasm(stdout, &p->store[i], intel->gen);
       }
       printf("\n");
    }
 
    c->prog_data.total_grf = v.grf_used;
-   c->prog_data.total_scratch = 0;
 
    return GL_TRUE;
 }