#include "brw_eu.h"
#include "brw_wm.h"
}
-#include "brw_shader.h"
#include "brw_fs.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_optimization.h"
-#include "glsl/ir_print_visitor.h"
void
fs_visitor::visit(ir_variable *ir)
if (variable_storage(ir))
return;
- if (ir->mode == ir_var_in) {
+ if (ir->mode == ir_var_shader_in) {
if (!strcmp(ir->name, "gl_FragCoord")) {
reg = emit_fragcoord_interpolation(ir);
} else if (!strcmp(ir->name, "gl_FrontFacing")) {
assert(reg);
hash_table_insert(this->variable_ht, reg, ir);
return;
- } else if (ir->mode == ir_var_out) {
+ } else if (ir->mode == ir_var_shader_out) {
reg = new(this->mem_ctx) fs_reg(this, ir->type);
if (ir->index > 0) {
/* Writing gl_FragColor outputs to all color regions. */
for (unsigned int i = 0; i < MAX2(c->key.nr_color_regions, 1); i++) {
this->outputs[i] = *reg;
+ this->output_components[i] = 4;
}
} else if (ir->location == FRAG_RESULT_DEPTH) {
- this->frag_depth = ir;
+ this->frag_depth = *reg;
} else {
/* gl_FragData or a user-defined FS output */
assert(ir->location >= FRAG_RESULT_DATA0 &&
ir->location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);
+ int vector_elements =
+ ir->type->is_array() ? ir->type->fields.array->vector_elements
+ : ir->type->vector_elements;
+
/* General color output. */
for (unsigned int i = 0; i < MAX2(1, ir->type->length); i++) {
int output = ir->location - FRAG_RESULT_DATA0 + i;
this->outputs[output] = *reg;
- this->outputs[output].reg_offset += 4 * i;
+ this->outputs[output].reg_offset += vector_elements * i;
+ this->output_components[output] = vector_elements;
}
}
} else if (ir->mode == ir_var_uniform) {
int param_index = c->prog_data.nr_params;
- if (c->dispatch_width == 16) {
+ /* Thanks to the lower_ubo_reference pass, we will see only
+ * ir_binop_ubo_load expressions and not ir_dereference_variable for UBO
+ * variables, so no need for them to be in variable_ht.
+ */
+ if (ir->is_in_uniform_block())
+ return;
+
+ if (dispatch_width == 16) {
if (!variable_storage(ir)) {
fail("Failed to find uniform '%s' in 16-wide\n", ir->name);
}
return;
}
+ param_size[param_index] = type_size(ir->type);
if (!strncmp(ir->name, "gl_", 3)) {
setup_builtin_uniform_values(ir);
} else {
- setup_uniform_values(ir->location, ir->type);
+ setup_uniform_values(ir);
}
reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index);
void
fs_visitor::visit(ir_dereference_array *ir)
{
- ir_constant *index;
- int element_size;
+ ir_constant *constant_index;
+ fs_reg src;
+ int element_size = type_size(ir->type);
+
+ constant_index = ir->array_index->as_constant();
ir->array->accept(this);
- index = ir->array_index->as_constant();
+ src = this->result;
+ src.type = brw_type_for_base_type(ir->type);
- element_size = type_size(ir->type);
- this->result.type = brw_type_for_base_type(ir->type);
+ if (constant_index) {
+ assert(src.file == UNIFORM || src.file == GRF);
+ src.reg_offset += constant_index->value.i[0] * element_size;
+ } else {
+ /* Variable index array dereference. We attach the variable index
+ * component to the reg as a pointer to a register containing the
+ * offset. Currently only uniform arrays are supported in this patch,
+ * and that reladdr pointer is resolved by
+ * move_uniform_array_access_to_pull_constants(). All other array types
+ * are lowered by lower_variable_index_to_cond_assign().
+ */
+ ir->array_index->accept(this);
+
+ fs_reg index_reg;
+ index_reg = fs_reg(this, glsl_type::int_type);
+ emit(BRW_OPCODE_MUL, index_reg, this->result, fs_reg(element_size));
+
+ if (src.reladdr) {
+ emit(BRW_OPCODE_ADD, index_reg, *src.reladdr, index_reg);
+ }
+
+ src.reladdr = ralloc(mem_ctx, fs_reg);
+ memcpy(src.reladdr, &index_reg, sizeof(index_reg));
+ }
+ this->result = src;
+}
+
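+/* Emit a linear interpolation dst = x * (1 - a) + y * a, using the LRP
+ * instruction where the hardware and operands allow it, and falling back to
+ * MUL/ADD otherwise.
+ */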
+void
+fs_visitor::emit_lrp(fs_reg dst, fs_reg x, fs_reg y, fs_reg a)
+{
+ if (brw->gen < 6 ||
+ !x.is_valid_3src() ||
+ !y.is_valid_3src() ||
+ !a.is_valid_3src()) {
+ /* We can't use the LRP instruction. Emit x*(1-a) + y*a. */
+ fs_reg y_times_a = fs_reg(this, glsl_type::float_type);
+ fs_reg one_minus_a = fs_reg(this, glsl_type::float_type);
+ fs_reg x_times_one_minus_a = fs_reg(this, glsl_type::float_type);
+
+ emit(MUL(y_times_a, y, a));
+
+ a.negate = !a.negate;
+ emit(ADD(one_minus_a, a, fs_reg(1.0f)));
+ emit(MUL(x_times_one_minus_a, x, one_minus_a));
+
+ emit(ADD(dst, x_times_one_minus_a, y_times_a));
+ } else {
+ /* The LRP instruction actually does op1 * op0 + op2 * (1 - op0), so
+ * we need to reorder the operands.
+ */
+ emit(LRP(dst, a, y, x));
+ }
+}
+
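+/* Emit a min or max of src0 and src1 into dst: a conditionally-modified SEL
+ * on gen6+, or a CMP followed by a predicated SEL on earlier generations.
+ */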
+void
+fs_visitor::emit_minmax(uint32_t conditionalmod, fs_reg dst,
+ fs_reg src0, fs_reg src1)
+{
+ fs_inst *inst;
- if (index) {
- assert(this->result.file == UNIFORM || this->result.file == GRF);
- this->result.reg_offset += index->value.i[0] * element_size;
+ if (brw->gen >= 6) {
+ inst = emit(BRW_OPCODE_SEL, dst, src0, src1);
+ inst->conditional_mod = conditionalmod;
} else {
- assert(!"FINISHME: non-constant array element");
+ emit(CMP(reg_null_d, src0, src1, conditionalmod));
+
+ inst = emit(BRW_OPCODE_SEL, dst, src0, src1);
+ inst->predicate = BRW_PREDICATE_NORMAL;
}
}
* src, generate a saturated MOV
*/
fs_inst *modify = get_instruction_generating_reg(pre_inst, last_inst, src);
- if (!modify || modify->regs_written() != 1) {
- fs_inst *inst = emit(BRW_OPCODE_MOV, this->result, src);
+ if (!modify || modify->regs_written != 1) {
+ this->result = fs_reg(this, ir->type);
+ fs_inst *inst = emit(MOV(this->result, src));
inst->saturate = true;
} else {
modify->saturate = true;
fs_visitor::try_emit_mad(ir_expression *ir, int mul_arg)
{
/* 3-src instructions were introduced in gen6. */
- if (intel->gen < 6)
+ if (brw->gen < 6)
return false;
/* MAD can only handle floating-point data. */
fs_visitor::visit(ir_expression *ir)
{
unsigned int operand;
- fs_reg op[2], temp;
+ fs_reg op[3], temp;
fs_inst *inst;
- assert(ir->get_num_operands() <= 2);
+ assert(ir->get_num_operands() <= 3);
if (try_emit_saturate(ir))
return;
for (operand = 0; operand < ir->get_num_operands(); operand++) {
ir->operands[operand]->accept(this);
if (this->result.file == BAD_FILE) {
- ir_print_visitor v;
fail("Failed to get tree for expression operand:\n");
- ir->operands[operand]->accept(&v);
+ ir->operands[operand]->print();
+ printf("\n");
}
+ assert(this->result.is_valid_3src());
op[operand] = this->result;
/* Matrix expression operands should have been broken down to vector
/* Note that BRW_OPCODE_NOT is not appropriate here, since it is
* one's complement of the whole register, not just bit 0.
*/
- emit(BRW_OPCODE_XOR, this->result, op[0], fs_reg(1));
+ emit(XOR(this->result, op[0], fs_reg(1)));
break;
case ir_unop_neg:
op[0].negate = !op[0].negate;
- this->result = op[0];
+ emit(MOV(this->result, op[0]));
break;
case ir_unop_abs:
op[0].abs = true;
op[0].negate = false;
- this->result = op[0];
+ emit(MOV(this->result, op[0]));
break;
case ir_unop_sign:
temp = fs_reg(this, ir->type);
- emit(BRW_OPCODE_MOV, this->result, fs_reg(0.0f));
+ emit(MOV(this->result, fs_reg(0.0f)));
- inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f));
- inst->conditional_mod = BRW_CONDITIONAL_G;
- inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(1.0f));
- inst->predicated = true;
+ emit(CMP(reg_null_f, op[0], fs_reg(0.0f), BRW_CONDITIONAL_G));
+ inst = emit(MOV(this->result, fs_reg(1.0f)));
+ inst->predicate = BRW_PREDICATE_NORMAL;
- inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f));
- inst->conditional_mod = BRW_CONDITIONAL_L;
- inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f));
- inst->predicated = true;
+ emit(CMP(reg_null_f, op[0], fs_reg(0.0f), BRW_CONDITIONAL_L));
+ inst = emit(MOV(this->result, fs_reg(-1.0f)));
+ inst->predicate = BRW_PREDICATE_NORMAL;
break;
case ir_unop_rcp:
break;
case ir_binop_add:
- emit(BRW_OPCODE_ADD, this->result, op[0], op[1]);
+ emit(ADD(this->result, op[0], op[1]));
break;
case ir_binop_sub:
assert(!"not reached: should be handled by ir_sub_to_add_neg");
* FINISHME: Emit just the MUL if we know an operand is small
* enough.
*/
- if (intel->gen >= 7 && c->dispatch_width == 16)
+ if (brw->gen >= 7 && dispatch_width == 16)
fail("16-wide explicit accumulator operands unsupported\n");
struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D);
- emit(BRW_OPCODE_MUL, acc, op[0], op[1]);
- emit(BRW_OPCODE_MACH, reg_null_d, op[0], op[1]);
- emit(BRW_OPCODE_MOV, this->result, fs_reg(acc));
+ emit(MUL(acc, op[0], op[1]));
+ emit(MACH(reg_null_d, op[0], op[1]));
+ emit(MOV(this->result, fs_reg(acc)));
} else {
- emit(BRW_OPCODE_MUL, this->result, op[0], op[1]);
+ emit(MUL(this->result, op[0], op[1]));
}
break;
case ir_binop_div:
- if (intel->gen >= 7 && c->dispatch_width == 16)
- fail("16-wide INTDIV unsupported\n");
-
/* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */
assert(ir->type->is_integer());
emit_math(SHADER_OPCODE_INT_QUOTIENT, this->result, op[0], op[1]);
break;
case ir_binop_mod:
- if (intel->gen >= 7 && c->dispatch_width == 16)
- fail("16-wide INTDIV unsupported\n");
-
/* Floating point should be lowered by MOD_TO_FRACT in the compiler. */
assert(ir->type->is_integer());
emit_math(SHADER_OPCODE_INT_REMAINDER, this->result, op[0], op[1]);
case ir_binop_all_equal:
case ir_binop_nequal:
case ir_binop_any_nequal:
- temp = this->result;
- /* original gen4 does implicit conversion before comparison. */
- if (intel->gen < 5)
- temp.type = op[0].type;
-
- resolve_ud_negate(&op[0]);
- resolve_ud_negate(&op[1]);
-
resolve_bool_comparison(ir->operands[0], &op[0]);
resolve_bool_comparison(ir->operands[1], &op[1]);
- inst = emit(BRW_OPCODE_CMP, temp, op[0], op[1]);
- inst->conditional_mod = brw_conditional_for_comparison(ir->operation);
+ emit(CMP(this->result, op[0], op[1],
+ brw_conditional_for_comparison(ir->operation)));
break;
case ir_binop_logic_xor:
- emit(BRW_OPCODE_XOR, this->result, op[0], op[1]);
+ emit(XOR(this->result, op[0], op[1]));
break;
case ir_binop_logic_or:
- emit(BRW_OPCODE_OR, this->result, op[0], op[1]);
+ emit(OR(this->result, op[0], op[1]));
break;
case ir_binop_logic_and:
- emit(BRW_OPCODE_AND, this->result, op[0], op[1]);
+ emit(AND(this->result, op[0], op[1]));
break;
case ir_binop_dot:
assert(!"not reached: should be handled by lower_quadop_vector");
break;
+ case ir_binop_vector_extract:
+ assert(!"not reached: should be handled by lower_vec_index_to_cond_assign()");
+ break;
+
+ case ir_triop_vector_insert:
+ assert(!"not reached: should be handled by lower_vector_insert()");
+ break;
+
case ir_unop_sqrt:
emit_math(SHADER_OPCODE_SQRT, this->result, op[0]);
break;
emit_math(SHADER_OPCODE_RSQ, this->result, op[0]);
break;
+ case ir_unop_bitcast_i2f:
+ case ir_unop_bitcast_u2f:
+ op[0].type = BRW_REGISTER_TYPE_F;
+ this->result = op[0];
+ break;
case ir_unop_i2u:
+ case ir_unop_bitcast_f2u:
op[0].type = BRW_REGISTER_TYPE_UD;
this->result = op[0];
break;
case ir_unop_u2i:
+ case ir_unop_bitcast_f2i:
op[0].type = BRW_REGISTER_TYPE_D;
this->result = op[0];
break;
case ir_unop_i2f:
case ir_unop_u2f:
case ir_unop_f2i:
- emit(BRW_OPCODE_MOV, this->result, op[0]);
+ case ir_unop_f2u:
+ emit(MOV(this->result, op[0]));
break;
case ir_unop_b2i:
- inst = emit(BRW_OPCODE_AND, this->result, op[0], fs_reg(1));
+ emit(AND(this->result, op[0], fs_reg(1)));
break;
case ir_unop_b2f:
temp = fs_reg(this, glsl_type::int_type);
- emit(BRW_OPCODE_AND, temp, op[0], fs_reg(1));
- emit(BRW_OPCODE_MOV, this->result, temp);
+ emit(AND(temp, op[0], fs_reg(1)));
+ emit(MOV(this->result, temp));
break;
case ir_unop_f2b:
+ emit(CMP(this->result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ));
+ break;
case ir_unop_i2b:
- temp = this->result;
- /* original gen4 does implicit conversion before comparison. */
- if (intel->gen < 5)
- temp.type = op[0].type;
-
- resolve_ud_negate(&op[0]);
-
- inst = emit(BRW_OPCODE_CMP, temp, op[0], fs_reg(0.0f));
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
+ emit(CMP(this->result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
break;
case ir_unop_trunc:
- emit(BRW_OPCODE_RNDZ, this->result, op[0]);
+ emit(RNDZ(this->result, op[0]));
break;
case ir_unop_ceil:
op[0].negate = !op[0].negate;
- inst = emit(BRW_OPCODE_RNDD, this->result, op[0]);
+ emit(RNDD(this->result, op[0]));
this->result.negate = true;
break;
case ir_unop_floor:
- inst = emit(BRW_OPCODE_RNDD, this->result, op[0]);
+ emit(RNDD(this->result, op[0]));
break;
case ir_unop_fract:
- inst = emit(BRW_OPCODE_FRC, this->result, op[0]);
+ emit(FRC(this->result, op[0]));
break;
case ir_unop_round_even:
- emit(BRW_OPCODE_RNDE, this->result, op[0]);
+ emit(RNDE(this->result, op[0]));
break;
case ir_binop_min:
+ case ir_binop_max:
resolve_ud_negate(&op[0]);
resolve_ud_negate(&op[1]);
+ emit_minmax(ir->operation == ir_binop_min ?
+ BRW_CONDITIONAL_L : BRW_CONDITIONAL_GE,
+ this->result, op[0], op[1]);
+ break;
+ case ir_unop_pack_snorm_2x16:
+ case ir_unop_pack_snorm_4x8:
+ case ir_unop_pack_unorm_2x16:
+ case ir_unop_pack_unorm_4x8:
+ case ir_unop_unpack_snorm_2x16:
+ case ir_unop_unpack_snorm_4x8:
+ case ir_unop_unpack_unorm_2x16:
+ case ir_unop_unpack_unorm_4x8:
+ case ir_unop_unpack_half_2x16:
+ case ir_unop_pack_half_2x16:
+ assert(!"not reached: should be handled by lower_packing_builtins");
+ break;
+ case ir_unop_unpack_half_2x16_split_x:
+ emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, this->result, op[0]);
+ break;
+ case ir_unop_unpack_half_2x16_split_y:
+ emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, this->result, op[0]);
+ break;
+ case ir_binop_pow:
+ emit_math(SHADER_OPCODE_POW, this->result, op[0], op[1]);
+ break;
- if (intel->gen >= 6) {
- inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
- inst->conditional_mod = BRW_CONDITIONAL_L;
- } else {
- /* Unalias the destination */
- this->result = fs_reg(this, ir->type);
-
- inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]);
- inst->conditional_mod = BRW_CONDITIONAL_L;
-
- inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
- inst->predicated = true;
- }
+ case ir_unop_bitfield_reverse:
+ emit(BFREV(this->result, op[0]));
break;
- case ir_binop_max:
- resolve_ud_negate(&op[0]);
- resolve_ud_negate(&op[1]);
+ case ir_unop_bit_count:
+ emit(CBIT(this->result, op[0]));
+ break;
+ case ir_unop_find_msb:
+ temp = fs_reg(this, glsl_type::uint_type);
+ emit(FBH(temp, op[0]));
- if (intel->gen >= 6) {
- inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
- inst->conditional_mod = BRW_CONDITIONAL_GE;
- } else {
- /* Unalias the destination */
- this->result = fs_reg(this, ir->type);
+ /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
+ * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
+ * subtract the result from 31 to convert the MSB count into an LSB count.
+ */
- inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]);
- inst->conditional_mod = BRW_CONDITIONAL_G;
+ /* FBH only supports UD type for dst, so use a MOV to convert UD to D. */
+ emit(MOV(this->result, temp));
+ emit(CMP(reg_null_d, this->result, fs_reg(-1), BRW_CONDITIONAL_NZ));
- inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
- inst->predicated = true;
- }
+ temp.negate = true;
+ inst = emit(ADD(this->result, temp, fs_reg(31)));
+ inst->predicate = BRW_PREDICATE_NORMAL;
break;
-
- case ir_binop_pow:
- emit_math(SHADER_OPCODE_POW, this->result, op[0], op[1]);
+ case ir_unop_find_lsb:
+ emit(FBL(this->result, op[0]));
+ break;
+ case ir_triop_bitfield_extract:
+ /* Note that the instruction's argument order is reversed from GLSL
+ * and the IR.
+ */
+ emit(BFE(this->result, op[2], op[1], op[0]));
+ break;
+ case ir_binop_bfm:
+ emit(BFI1(this->result, op[0], op[1]));
+ break;
+ case ir_triop_bfi:
+ emit(BFI2(this->result, op[0], op[1], op[2]));
+ break;
+ case ir_quadop_bitfield_insert:
+ assert(!"not reached: should be handled by "
+ "lower_instructions::bitfield_insert_to_bfm_bfi");
break;
case ir_unop_bit_not:
- inst = emit(BRW_OPCODE_NOT, this->result, op[0]);
+ emit(NOT(this->result, op[0]));
break;
case ir_binop_bit_and:
- inst = emit(BRW_OPCODE_AND, this->result, op[0], op[1]);
+ emit(AND(this->result, op[0], op[1]));
break;
case ir_binop_bit_xor:
- inst = emit(BRW_OPCODE_XOR, this->result, op[0], op[1]);
+ emit(XOR(this->result, op[0], op[1]));
break;
case ir_binop_bit_or:
- inst = emit(BRW_OPCODE_OR, this->result, op[0], op[1]);
+ emit(OR(this->result, op[0], op[1]));
break;
case ir_binop_lshift:
- inst = emit(BRW_OPCODE_SHL, this->result, op[0], op[1]);
+ emit(SHL(this->result, op[0], op[1]));
break;
case ir_binop_rshift:
if (ir->type->base_type == GLSL_TYPE_INT)
- inst = emit(BRW_OPCODE_ASR, this->result, op[0], op[1]);
+ emit(ASR(this->result, op[0], op[1]));
else
- inst = emit(BRW_OPCODE_SHR, this->result, op[0], op[1]);
+ emit(SHR(this->result, op[0], op[1]));
+ break;
+ case ir_binop_pack_half_2x16_split:
+ emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, this->result, op[0], op[1]);
+ break;
+ case ir_binop_ubo_load: {
+ /* This IR node takes a constant uniform block and a constant or
+ * variable byte offset within the block and loads a vector from that.
+ */
+ ir_constant *uniform_block = ir->operands[0]->as_constant();
+ ir_constant *const_offset = ir->operands[1]->as_constant();
+ fs_reg surf_index = fs_reg((unsigned)SURF_INDEX_WM_UBO(uniform_block->value.u[0]));
+ if (const_offset) {
+ fs_reg packed_consts = fs_reg(this, glsl_type::float_type);
+ packed_consts.type = result.type;
+
+ fs_reg const_offset_reg = fs_reg(const_offset->value.u[0] & ~15);
+ emit(fs_inst(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
+ packed_consts, surf_index, const_offset_reg));
+
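+ /* Pick out the dword we want within the 16-byte-aligned block we loaded. */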
+ packed_consts.smear = const_offset->value.u[0] % 16 / 4;
+ for (int i = 0; i < ir->type->vector_elements; i++) {
+ /* UBO bools are any nonzero value. We consider bools to be
+ * values with the low bit set to 1. Convert them using CMP.
+ */
+ if (ir->type->base_type == GLSL_TYPE_BOOL) {
+ emit(CMP(result, packed_consts, fs_reg(0u), BRW_CONDITIONAL_NZ));
+ } else {
+ emit(MOV(result, packed_consts));
+ }
+
+ packed_consts.smear++;
+ result.reg_offset++;
+
+ /* The std140 packing rules don't allow vectors to cross 16-byte
+ * boundaries, and a reg is 32 bytes.
+ */
+ assert(packed_consts.smear < 8);
+ }
+ } else {
+ /* Turn the byte offset into a dword offset. */
+ fs_reg base_offset = fs_reg(this, glsl_type::int_type);
+ emit(SHR(base_offset, op[1], fs_reg(2)));
+
+ for (int i = 0; i < ir->type->vector_elements; i++) {
+ emit(VARYING_PULL_CONSTANT_LOAD(result, surf_index,
+ base_offset, i));
+
+ if (ir->type->base_type == GLSL_TYPE_BOOL)
+ emit(CMP(result, result, fs_reg(0), BRW_CONDITIONAL_NZ));
+
+ result.reg_offset++;
+ }
+ }
+
+ result.reg_offset = 0;
+ break;
+ }
+
+ case ir_triop_fma:
+ /* Note that the instruction's argument order is reversed from GLSL
+ * and the IR.
+ */
+ emit(MAD(this->result, op[2], op[1], op[0]));
+ break;
+
+ case ir_triop_lrp:
+ emit_lrp(this->result, op[0], op[1], op[2]);
break;
}
}
r.type = brw_type_for_base_type(type);
if (predicated || !l.equals(r)) {
- fs_inst *inst = emit(BRW_OPCODE_MOV, l, r);
- inst->predicated = predicated;
+ fs_inst *inst = emit(MOV(l, r));
+ inst->predicate = predicated ? BRW_PREDICATE_NORMAL : BRW_PREDICATE_NONE;
}
l.reg_offset++;
case GLSL_TYPE_SAMPLER:
break;
- default:
+ case GLSL_TYPE_VOID:
+ case GLSL_TYPE_ERROR:
+ case GLSL_TYPE_INTERFACE:
assert(!"not reached");
break;
}
/* If last_rhs_inst wrote a different number of components than our LHS,
* we can't safely rewrite it.
*/
- if (ir->lhs->type->vector_elements != modify->regs_written())
+ if (virtual_grf_sizes[dst.reg] != modify->regs_written)
return false;
/* Success! Rewrite the instruction. */
ir->lhs->type->is_vector()) {
for (int i = 0; i < ir->lhs->type->vector_elements; i++) {
if (ir->write_mask & (1 << i)) {
- inst = emit(BRW_OPCODE_MOV, l, r);
+ inst = emit(MOV(l, r));
if (ir->condition)
- inst->predicated = true;
+ inst->predicate = BRW_PREDICATE_NORMAL;
r.reg_offset++;
}
l.reg_offset++;
fs_inst *
fs_visitor::emit_texture_gen4(ir_texture *ir, fs_reg dst, fs_reg coordinate,
- int sampler)
+ fs_reg shadow_c, fs_reg lod, fs_reg dPdy)
{
int mlen;
int base_mrf = 1;
/* g0 header. */
mlen = 1;
- if (ir->shadow_comparitor && ir->op != ir_txd) {
+ if (ir->shadow_comparitor) {
for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen + i), coordinate));
coordinate.reg_offset++;
}
/* gen4's SIMD8 sampler always has the slots for u,v,r present. */
/* There's no plain shadow compare message, so we use shadow
* compare with a bias of 0.0.
*/
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), fs_reg(0.0f));
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), fs_reg(0.0f)));
mlen++;
- } else if (ir->op == ir_txb) {
- ir->lod_info.bias->accept(this);
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
+ } else if (ir->op == ir_txb || ir->op == ir_txl) {
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), lod));
mlen++;
} else {
- assert(ir->op == ir_txl);
- ir->lod_info.lod->accept(this);
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
- mlen++;
+ assert(!"Should not get here.");
}
- ir->shadow_comparitor->accept(this);
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), shadow_c));
mlen++;
} else if (ir->op == ir_tex) {
for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen + i), coordinate));
coordinate.reg_offset++;
}
+ /* zero the others. */
+ for (int i = ir->coordinate->type->vector_elements; i<3; i++) {
+ emit(MOV(fs_reg(MRF, base_mrf + mlen + i), fs_reg(0.0f)));
+ }
/* gen4's SIMD8 sampler always has the slots for u,v,r present. */
mlen += 3;
} else if (ir->op == ir_txd) {
- ir->lod_info.grad.dPdx->accept(this);
- fs_reg dPdx = this->result;
-
- ir->lod_info.grad.dPdy->accept(this);
- fs_reg dPdy = this->result;
+ fs_reg &dPdx = lod;
for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen + i), coordinate));
coordinate.reg_offset++;
}
/* the slots for u and v are always present, but r is optional */
* m5 m6 m7 m8 m9 m10
*/
for (int i = 0; i < ir->lod_info.grad.dPdx->type->vector_elements; i++) {
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), dPdx));
dPdx.reg_offset++;
}
mlen += MAX2(ir->lod_info.grad.dPdx->type->vector_elements, 2);
for (int i = 0; i < ir->lod_info.grad.dPdy->type->vector_elements; i++) {
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), dPdy));
dPdy.reg_offset++;
}
mlen += MAX2(ir->lod_info.grad.dPdy->type->vector_elements, 2);
} else if (ir->op == ir_txs) {
/* There's no SIMD8 resinfo message on Gen4. Use SIMD16 instead. */
simd16 = true;
- ir->lod_info.lod->accept(this);
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), this->result);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), lod));
mlen += 2;
} else {
/* Oh joy. gen4 doesn't have SIMD8 non-shadow-compare bias/lod
assert(ir->op == ir_txb || ir->op == ir_txl || ir->op == ir_txf);
for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2, coordinate.type),
- coordinate);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen + i * 2, coordinate.type),
+ coordinate));
coordinate.reg_offset++;
}
* be necessary for TXF (ld), but seems wise to do for all messages.
*/
for (int i = ir->coordinate->type->vector_elements; i < 3; i++) {
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2), fs_reg(0.0f));
+ emit(MOV(fs_reg(MRF, base_mrf + mlen + i * 2), fs_reg(0.0f)));
}
/* lod/bias appears after u/v/r. */
mlen += 6;
- if (ir->op == ir_txb) {
- ir->lod_info.bias->accept(this);
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
- mlen++;
- } else {
- ir->lod_info.lod->accept(this);
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, this->result.type),
- this->result);
- mlen++;
- }
+ emit(MOV(fs_reg(MRF, base_mrf + mlen, lod.type), lod));
+ mlen++;
/* The unused upper half. */
mlen++;
* this weirdness around to the expected layout.
*/
orig_dst = dst;
- const glsl_type *vec_type =
- glsl_type::get_instance(ir->type->base_type, 4, 1);
- dst = fs_reg(this, glsl_type::get_array_instance(vec_type, 2));
- dst.type = intel->is_g4x ? brw_type_for_base_type(ir->type)
- : BRW_REGISTER_TYPE_F;
+ dst = fs_reg(GRF, virtual_grf_alloc(8),
+ (brw->is_g4x ?
+ brw_type_for_base_type(ir->type) :
+ BRW_REGISTER_TYPE_F));
}
fs_inst *inst = NULL;
case ir_txf:
inst = emit(SHADER_OPCODE_TXF, dst);
break;
+ default:
+ fail("unrecognized texture opcode");
}
inst->base_mrf = base_mrf;
inst->mlen = mlen;
inst->header_present = true;
+ inst->regs_written = simd16 ? 8 : 4;
if (simd16) {
for (int i = 0; i < 4; i++) {
- emit(BRW_OPCODE_MOV, orig_dst, dst);
+ emit(MOV(orig_dst, dst));
orig_dst.reg_offset++;
dst.reg_offset += 2;
}
*/
fs_inst *
fs_visitor::emit_texture_gen5(ir_texture *ir, fs_reg dst, fs_reg coordinate,
- int sampler)
+ fs_reg shadow_c, fs_reg lod, fs_reg lod2,
+ fs_reg sample_index)
{
int mlen = 0;
int base_mrf = 2;
- int reg_width = c->dispatch_width / 8;
+ int reg_width = dispatch_width / 8;
bool header_present = false;
const int vector_elements =
ir->coordinate ? ir->coordinate->type->vector_elements : 0;
- if (ir->offset) {
- /* The offsets set up by the ir_texture visitor are in the
- * m1 header, so we can't go headerless.
+ if (ir->offset != NULL && ir->op == ir_txf) {
+ /* It appears that the ld instruction used for txf does its
+ * address bounds check before adding in the offset. To work
+ * around this, just add the integer offset to the integer texel
+ * coordinate, and don't put the offset in the header.
*/
- header_present = true;
- mlen++;
- base_mrf--;
- }
+ ir_constant *offset = ir->offset->as_constant();
+ for (int i = 0; i < vector_elements; i++) {
+ emit(ADD(fs_reg(MRF, base_mrf + mlen + i * reg_width, coordinate.type),
+ coordinate,
+ offset->value.i[i]));
+ coordinate.reg_offset++;
+ }
+ } else {
+ if (ir->offset) {
+ /* The offsets set up by the ir_texture visitor are in the
+ * m1 header, so we can't go headerless.
+ */
+ header_present = true;
+ mlen++;
+ base_mrf--;
+ }
- for (int i = 0; i < vector_elements; i++) {
- emit(BRW_OPCODE_MOV,
- fs_reg(MRF, base_mrf + mlen + i * reg_width, coordinate.type),
- coordinate);
- coordinate.reg_offset++;
+ for (int i = 0; i < vector_elements; i++) {
+ emit(MOV(fs_reg(MRF, base_mrf + mlen + i * reg_width, coordinate.type),
+ coordinate));
+ coordinate.reg_offset++;
+ }
}
mlen += vector_elements * reg_width;
- if (ir->shadow_comparitor && ir->op != ir_txd) {
+ if (ir->shadow_comparitor) {
mlen = MAX2(mlen, header_present + 4 * reg_width);
- ir->shadow_comparitor->accept(this);
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), shadow_c));
mlen += reg_width;
}
inst = emit(SHADER_OPCODE_TEX, dst);
break;
case ir_txb:
- ir->lod_info.bias->accept(this);
mlen = MAX2(mlen, header_present + 4 * reg_width);
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), lod));
mlen += reg_width;
inst = emit(FS_OPCODE_TXB, dst);
-
break;
case ir_txl:
- ir->lod_info.lod->accept(this);
mlen = MAX2(mlen, header_present + 4 * reg_width);
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), lod));
mlen += reg_width;
inst = emit(SHADER_OPCODE_TXL, dst);
break;
case ir_txd: {
- ir->lod_info.grad.dPdx->accept(this);
- fs_reg dPdx = this->result;
-
- ir->lod_info.grad.dPdy->accept(this);
- fs_reg dPdy = this->result;
-
mlen = MAX2(mlen, header_present + 4 * reg_width); /* skip over 'ai' */
/**
* - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z
*/
for (int i = 0; i < ir->lod_info.grad.dPdx->type->vector_elements; i++) {
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx);
- dPdx.reg_offset++;
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), lod));
+ lod.reg_offset++;
mlen += reg_width;
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy);
- dPdy.reg_offset++;
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), lod2));
+ lod2.reg_offset++;
mlen += reg_width;
}
break;
}
case ir_txs:
- ir->lod_info.lod->accept(this);
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), this->result);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), lod));
mlen += reg_width;
inst = emit(SHADER_OPCODE_TXS, dst);
break;
case ir_txf:
mlen = header_present + 4 * reg_width;
-
- ir->lod_info.lod->accept(this);
- emit(BRW_OPCODE_MOV,
- fs_reg(MRF, base_mrf + mlen - reg_width, BRW_REGISTER_TYPE_UD),
- this->result);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen - reg_width, BRW_REGISTER_TYPE_UD), lod));
inst = emit(SHADER_OPCODE_TXF, dst);
break;
+ case ir_txf_ms:
+ mlen = header_present + 4 * reg_width;
+
+ /* lod */
+ emit(MOV(fs_reg(MRF, base_mrf + mlen - reg_width, BRW_REGISTER_TYPE_UD), fs_reg(0)));
+ /* sample index */
+ emit(MOV(fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), sample_index));
+ mlen += reg_width;
+ inst = emit(SHADER_OPCODE_TXF_MS, dst);
+ break;
+ case ir_lod:
+ inst = emit(SHADER_OPCODE_LOD, dst);
+ break;
}
inst->base_mrf = base_mrf;
inst->mlen = mlen;
inst->header_present = header_present;
+ inst->regs_written = 4;
if (mlen > 11) {
fail("Message length >11 disallowed by hardware\n");
fs_inst *
fs_visitor::emit_texture_gen7(ir_texture *ir, fs_reg dst, fs_reg coordinate,
- int sampler)
+ fs_reg shadow_c, fs_reg lod, fs_reg lod2,
+ fs_reg sample_index)
{
int mlen = 0;
int base_mrf = 2;
- int reg_width = c->dispatch_width / 8;
+ int reg_width = dispatch_width / 8;
bool header_present = false;
int offsets[3];
base_mrf--;
}
- if (ir->shadow_comparitor && ir->op != ir_txd) {
- ir->shadow_comparitor->accept(this);
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
+ if (ir->shadow_comparitor) {
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), shadow_c));
mlen += reg_width;
}
/* Set up the LOD info */
switch (ir->op) {
case ir_tex:
+ case ir_lod:
break;
case ir_txb:
- ir->lod_info.bias->accept(this);
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), lod));
mlen += reg_width;
break;
case ir_txl:
- ir->lod_info.lod->accept(this);
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), lod));
mlen += reg_width;
break;
case ir_txd: {
- if (c->dispatch_width == 16)
+ if (dispatch_width == 16)
fail("Gen7 does not support sample_d/sample_d_c in SIMD16 mode.");
- ir->lod_info.grad.dPdx->accept(this);
- fs_reg dPdx = this->result;
-
- ir->lod_info.grad.dPdy->accept(this);
- fs_reg dPdy = this->result;
-
/* Load dPdx and the coordinate together:
* [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z
*/
for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), coordinate));
coordinate.reg_offset++;
mlen += reg_width;
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx);
- dPdx.reg_offset++;
- mlen += reg_width;
-
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy);
- dPdy.reg_offset++;
- mlen += reg_width;
+ /* For cube map array, the coordinate is (u,v,r,ai) but there are
+ * only derivatives for (u, v, r).
+ */
+ if (i < ir->lod_info.grad.dPdx->type->vector_elements) {
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), lod));
+ lod.reg_offset++;
+ mlen += reg_width;
+
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), lod2));
+ lod2.reg_offset++;
+ mlen += reg_width;
+ }
}
break;
}
case ir_txs:
- ir->lod_info.lod->accept(this);
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), this->result);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), lod));
mlen += reg_width;
break;
case ir_txf:
}
/* Unfortunately, the parameters for LD are intermixed: u, lod, v, r. */
- emit(BRW_OPCODE_ADD,
- fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), coordinate, offsets[0]);
+ emit(ADD(fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D),
+ coordinate, offsets[0]));
coordinate.reg_offset++;
mlen += reg_width;
- ir->lod_info.lod->accept(this);
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), this->result);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), lod));
mlen += reg_width;
for (int i = 1; i < ir->coordinate->type->vector_elements; i++) {
- emit(BRW_OPCODE_ADD,
- fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), coordinate, offsets[i]);
+ emit(ADD(fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D),
+ coordinate, offsets[i]));
coordinate.reg_offset++;
mlen += reg_width;
}
break;
+ case ir_txf_ms:
+ emit(MOV(fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), sample_index));
+ mlen += reg_width;
+
+ /* constant zero MCS; we arrange to never actually have a compressed
+ * multisample surface here for now. TODO: issue ld_mcs to get this first,
+ * if we ever support texturing from compressed multisample surfaces
+ */
+ emit(MOV(fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), fs_reg(0u)));
+ mlen += reg_width;
+
+ /* there is no offsetting for this message; just copy in the integer
+ * texture coordinates
+ */
+ for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
+ emit(MOV(fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D),
+ coordinate));
+ coordinate.reg_offset++;
+ mlen += reg_width;
+ }
+ break;
}
/* Set up the coordinate (except for cases where it was done above) */
- if (ir->op != ir_txd && ir->op != ir_txs && ir->op != ir_txf) {
+ if (ir->op != ir_txd && ir->op != ir_txs && ir->op != ir_txf && ir->op != ir_txf_ms) {
for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
- emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate);
+ emit(MOV(fs_reg(MRF, base_mrf + mlen), coordinate));
coordinate.reg_offset++;
mlen += reg_width;
}
case ir_txl: inst = emit(SHADER_OPCODE_TXL, dst); break;
case ir_txd: inst = emit(SHADER_OPCODE_TXD, dst); break;
case ir_txf: inst = emit(SHADER_OPCODE_TXF, dst); break;
+ case ir_txf_ms: inst = emit(SHADER_OPCODE_TXF_MS, dst); break;
case ir_txs: inst = emit(SHADER_OPCODE_TXS, dst); break;
+ case ir_lod: inst = emit(SHADER_OPCODE_LOD, dst); break;
}
inst->base_mrf = base_mrf;
inst->mlen = mlen;
inst->header_present = header_present;
+ inst->regs_written = 4;
if (mlen > 11) {
fail("Message length >11 disallowed by hardware\n");
return inst;
}
-void
-fs_visitor::visit(ir_texture *ir)
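+/* Normalize GL_TEXTURE_RECTANGLE coordinates and apply GL_CLAMP where the
+ * hardware can't, returning the coordinate register to use in the sampler
+ * message.
+ */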
+fs_reg
+fs_visitor::rescale_texcoord(ir_texture *ir, fs_reg coordinate,
+ bool is_rect, int sampler, int texunit)
{
fs_inst *inst = NULL;
-
- int sampler = _mesa_get_sampler_uniform_value(ir->sampler, prog, &fp->Base);
- sampler = fp->Base.SamplerUnits[sampler];
-
- /* Our hardware doesn't have a sample_d_c message, so shadow compares
- * for textureGrad/TXD need to be emulated with instructions.
- */
- bool hw_compare_supported = ir->op != ir_txd;
- if (ir->shadow_comparitor && !hw_compare_supported) {
- assert(c->key.tex.compare_funcs[sampler] != GL_NONE);
- /* No need to even sample for GL_ALWAYS or GL_NEVER...bail early */
- if (c->key.tex.compare_funcs[sampler] == GL_ALWAYS)
- return swizzle_result(ir, fs_reg(1.0f), sampler);
- else if (c->key.tex.compare_funcs[sampler] == GL_NEVER)
- return swizzle_result(ir, fs_reg(0.0f), sampler);
- }
-
- if (ir->coordinate)
- ir->coordinate->accept(this);
- fs_reg coordinate = this->result;
-
- if (ir->offset != NULL && !(intel->gen == 7 && ir->op == ir_txf)) {
- uint32_t offset_bits = brw_texture_offset(ir->offset->as_constant());
-
- /* Explicitly set up the message header by copying g0 to msg reg m1. */
- emit(BRW_OPCODE_MOV, fs_reg(MRF, 1, BRW_REGISTER_TYPE_UD),
- fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)));
-
- /* Then set the offset bits in DWord 2 of the message header. */
- emit(BRW_OPCODE_MOV,
- fs_reg(retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 1, 2),
- BRW_REGISTER_TYPE_UD)),
- fs_reg(brw_imm_uw(offset_bits)));
- }
-
- /* Should be lowered by do_lower_texture_projection */
- assert(!ir->projector);
-
bool needs_gl_clamp = true;
-
fs_reg scale_x, scale_y;
/* The 965 requires the EU to do the normalization of GL rectangle
* texture coordinates. We use the program parameter state
* tracking to get the scaling factor.
*/
- if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT &&
- (intel->gen < 6 ||
- (intel->gen >= 6 && (c->key.tex.gl_clamp_mask[0] & (1 << sampler) ||
+ if (is_rect &&
+ (brw->gen < 6 ||
+ (brw->gen >= 6 && (c->key.tex.gl_clamp_mask[0] & (1 << sampler) ||
c->key.tex.gl_clamp_mask[1] & (1 << sampler))))) {
- struct gl_program_parameter_list *params = c->fp->program.Base.Parameters;
+ struct gl_program_parameter_list *params = fp->Base.Parameters;
int tokens[STATE_LENGTH] = {
STATE_INTERNAL,
STATE_TEXRECT_SCALE,
- sampler,
+ texunit,
0,
0
};
- if (c->dispatch_width == 16) {
+ if (dispatch_width == 16) {
fail("rectangle scale uniform setup not supported on 16-wide\n");
- this->result = fs_reg(this, ir->type);
- return;
+ return coordinate;
}
- c->prog_data.param_convert[c->prog_data.nr_params] =
- PARAM_NO_CONVERT;
- c->prog_data.param_convert[c->prog_data.nr_params + 1] =
- PARAM_NO_CONVERT;
-
scale_x = fs_reg(UNIFORM, c->prog_data.nr_params);
scale_y = fs_reg(UNIFORM, c->prog_data.nr_params + 1);
GLuint index = _mesa_add_state_reference(params,
(gl_state_index *)tokens);
-
- this->param_index[c->prog_data.nr_params] = index;
- this->param_offset[c->prog_data.nr_params] = 0;
- c->prog_data.nr_params++;
- this->param_index[c->prog_data.nr_params] = index;
- this->param_offset[c->prog_data.nr_params] = 1;
- c->prog_data.nr_params++;
+ c->prog_data.param[c->prog_data.nr_params++] =
+ &fp->Base.Parameters->ParameterValues[index][0].f;
+ c->prog_data.param[c->prog_data.nr_params++] =
+ &fp->Base.Parameters->ParameterValues[index][1].f;
}
/* The 965 requires the EU to do the normalization of GL rectangle
* texture coordinates. We use the program parameter state
* tracking to get the scaling factor.
*/
- if (intel->gen < 6 &&
- ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) {
+ if (brw->gen < 6 && is_rect) {
fs_reg dst = fs_reg(this, ir->coordinate->type);
fs_reg src = coordinate;
coordinate = dst;
- emit(BRW_OPCODE_MUL, dst, src, scale_x);
+ emit(MUL(dst, src, scale_x));
dst.reg_offset++;
src.reg_offset++;
- emit(BRW_OPCODE_MUL, dst, src, scale_y);
- } else if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) {
+ emit(MUL(dst, src, scale_y));
+ } else if (is_rect) {
/* On gen6+, the sampler handles the rectangle coordinates
* natively, without needing rescaling. But that means we have
* to do GL_CLAMP clamping at the [0, width], [0, height] scale,
* parameter type, so just invert back.
*/
fs_reg limit = fs_reg(this, glsl_type::float_type);
- emit(BRW_OPCODE_MOV, limit, i == 0 ? scale_x : scale_y);
+ emit(MOV(limit, i == 0 ? scale_x : scale_y));
emit(SHADER_OPCODE_RCP, limit, limit);
inst = emit(BRW_OPCODE_SEL, chan, chan, limit);
fs_reg chan = coordinate;
chan.reg_offset += i;
- fs_inst *inst = emit(BRW_OPCODE_MOV, chan, chan);
+ fs_inst *inst = emit(MOV(chan, chan));
inst->saturate = true;
}
}
}
+ return coordinate;
+}
- /* Writemasking doesn't eliminate channels on SIMD8 texture
- * samples, so don't worry about them.
+void
+fs_visitor::visit(ir_texture *ir)
+{
+ fs_inst *inst = NULL;
+
+ int sampler =
+ _mesa_get_sampler_uniform_value(ir->sampler, shader_prog, &fp->Base);
+ /* FINISHME: We're failing to recompile our programs when the sampler is
+ * updated. This only matters for the texture rectangle scale parameters
+ * (pre-gen6, or gen6+ with GL_CLAMP).
*/
- fs_reg dst = fs_reg(this, glsl_type::get_instance(ir->type->base_type, 4, 1));
+ int texunit = fp->Base.SamplerUnits[sampler];
- if (intel->gen >= 7) {
- inst = emit_texture_gen7(ir, dst, coordinate, sampler);
- } else if (intel->gen >= 5) {
- inst = emit_texture_gen5(ir, dst, coordinate, sampler);
- } else {
- inst = emit_texture_gen4(ir, dst, coordinate, sampler);
- }
+ /* Should be lowered by do_lower_texture_projection */
+ assert(!ir->projector);
- /* If there's an offset, we already set up m1. To avoid the implied move,
- * use the null register. Otherwise, we want an implied move from g0.
+ /* Generate code to compute all the subexpression trees. This has to be
+ * done before loading any values into MRFs for the sampler message since
+ * generating these values may involve SEND messages that need the MRFs.
*/
- if (ir->offset != NULL || !inst->header_present)
- inst->src[0] = reg_undef;
- else
- inst->src[0] = fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW));
+ fs_reg coordinate;
+ if (ir->coordinate) {
+ ir->coordinate->accept(this);
- inst->sampler = sampler;
+ coordinate = rescale_texcoord(ir, this->result,
+ ir->sampler->type->sampler_dimensionality ==
+ GLSL_SAMPLER_DIM_RECT,
+ sampler, texunit);
+ }
+ fs_reg shadow_comparitor;
if (ir->shadow_comparitor) {
- if (hw_compare_supported) {
- inst->shadow_compare = true;
- } else {
- ir->shadow_comparitor->accept(this);
- fs_reg ref = this->result;
-
- fs_reg value = dst;
- dst = fs_reg(this, glsl_type::vec4_type);
-
- /* FINISHME: This needs to be done pre-filtering. */
-
- uint32_t conditional = 0;
- switch (c->key.tex.compare_funcs[sampler]) {
- /* GL_ALWAYS and GL_NEVER were handled at the top of the function */
- case GL_LESS: conditional = BRW_CONDITIONAL_L; break;
- case GL_GREATER: conditional = BRW_CONDITIONAL_G; break;
- case GL_LEQUAL: conditional = BRW_CONDITIONAL_LE; break;
- case GL_GEQUAL: conditional = BRW_CONDITIONAL_GE; break;
- case GL_EQUAL: conditional = BRW_CONDITIONAL_EQ; break;
- case GL_NOTEQUAL: conditional = BRW_CONDITIONAL_NEQ; break;
- default: assert(!"Should not get here: bad shadow compare function");
- }
+ ir->shadow_comparitor->accept(this);
+ shadow_comparitor = this->result;
+ }
- /* Use conditional moves to load 0 or 1 as the result */
- this->current_annotation = "manual shadow comparison";
- for (int i = 0; i < 4; i++) {
- inst = emit(BRW_OPCODE_MOV, dst, fs_reg(0.0f));
+ fs_reg lod, lod2, sample_index;
+ switch (ir->op) {
+ case ir_tex:
+ case ir_lod:
+ break;
+ case ir_txb:
+ ir->lod_info.bias->accept(this);
+ lod = this->result;
+ break;
+ case ir_txd:
+ ir->lod_info.grad.dPdx->accept(this);
+ lod = this->result;
- inst = emit(BRW_OPCODE_CMP, reg_null_f, ref, value);
- inst->conditional_mod = conditional;
+ ir->lod_info.grad.dPdy->accept(this);
+ lod2 = this->result;
+ break;
+ case ir_txf:
+ case ir_txl:
+ case ir_txs:
+ ir->lod_info.lod->accept(this);
+ lod = this->result;
+ break;
+ case ir_txf_ms:
+ ir->lod_info.sample_index->accept(this);
+ sample_index = this->result;
+ break;
+ };
- inst = emit(BRW_OPCODE_MOV, dst, fs_reg(1.0f));
- inst->predicated = true;
+ /* Writemasking doesn't eliminate channels on SIMD8 texture
+ * samples, so don't worry about them.
+ */
+ fs_reg dst = fs_reg(this, glsl_type::get_instance(ir->type->base_type, 4, 1));
- dst.reg_offset++;
- value.reg_offset++;
- }
- dst.reg_offset = 0;
+ if (brw->gen >= 7) {
+ inst = emit_texture_gen7(ir, dst, coordinate, shadow_comparitor,
+ lod, lod2, sample_index);
+ } else if (brw->gen >= 5) {
+ inst = emit_texture_gen5(ir, dst, coordinate, shadow_comparitor,
+ lod, lod2, sample_index);
+ } else {
+ inst = emit_texture_gen4(ir, dst, coordinate, shadow_comparitor,
+ lod, lod2);
+ }
+
+ /* The header is set up by generate_tex() when necessary. */
+ inst->src[0] = reg_undef;
+
+ if (ir->offset != NULL && ir->op != ir_txf)
+ inst->texture_offset = brw_texture_offset(ir->offset->as_constant());
+
+ inst->sampler = sampler;
+
+ if (ir->shadow_comparitor)
+ inst->shadow_compare = true;
+
+ /* fixup #layers for cube map arrays */
+ if (ir->op == ir_txs) {
+ glsl_type const *type = ir->sampler->type;
+ if (type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE &&
+ type->sampler_array) {
+ fs_reg depth = dst;
+ depth.reg_offset = 2;
+ emit_math(SHADER_OPCODE_INT_QUOTIENT, depth, depth, fs_reg(6));
}
}
{
this->result = orig_val;
- if (ir->op == ir_txs)
+ if (ir->op == ir_txs || ir->op == ir_lod)
return;
if (ir->type == glsl_type::float_type) {
l.reg_offset += i;
if (swiz == SWIZZLE_ZERO) {
- emit(BRW_OPCODE_MOV, l, fs_reg(0.0f));
+ emit(MOV(l, fs_reg(0.0f)));
} else if (swiz == SWIZZLE_ONE) {
- emit(BRW_OPCODE_MOV, l, fs_reg(1.0f));
+ emit(MOV(l, fs_reg(1.0f)));
} else {
fs_reg r = orig_val;
r.reg_offset += GET_SWZ(c->key.tex.swizzles[sampler], i);
- emit(BRW_OPCODE_MOV, l, r);
+ emit(MOV(l, r));
}
}
this->result = swizzled_result;
}
channel.reg_offset += swiz;
- emit(BRW_OPCODE_MOV, result, channel);
+ emit(MOV(result, channel));
result.reg_offset++;
}
}
{
assert(ir->condition == NULL); /* FINISHME */
- emit(FS_OPCODE_DISCARD);
- kill_emitted = true;
+ /* We track our discarded pixels in f0.1. By predicating on it, we can
+ * update just the flag bits that aren't yet discarded. By emitting a
+ * CMP of g0 != g0, all our currently executing channels will get turned
+ * off.
+ */
+ fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
+ BRW_REGISTER_TYPE_UW));
+ fs_inst *cmp = emit(CMP(reg_null_f, some_reg, some_reg,
+ BRW_CONDITIONAL_NZ));
+ cmp->predicate = BRW_PREDICATE_NORMAL;
+ cmp->flag_subreg = 1;
+
+ if (brw->gen >= 6) {
+ /* For performance, after a discard, jump to the end of the shader.
+ * However, many people will do foliage by discarding based on a
+ * texture's alpha mask, and then continue on to texture with the
+ * remaining pixels. To avoid trashing the derivatives for those
+ * texture samples, we'll only jump if all of the pixels in the subspan
+ * have been discarded.
+ */
+ fs_inst *discard_jump = emit(FS_OPCODE_DISCARD_JUMP);
+ discard_jump->flag_subreg = 1;
+ discard_jump->predicate = BRW_PREDICATE_ALIGN1_ANY4H;
+ discard_jump->predicate_inverse = true;
+ }
}
void
dst_reg.type = src_reg.type;
for (unsigned j = 0; j < size; j++) {
- emit(BRW_OPCODE_MOV, dst_reg, src_reg);
+ emit(MOV(dst_reg, src_reg));
src_reg.reg_offset++;
dst_reg.reg_offset++;
}
dst_reg.type = src_reg.type;
for (unsigned j = 0; j < size; j++) {
- emit(BRW_OPCODE_MOV, dst_reg, src_reg);
+ emit(MOV(dst_reg, src_reg));
src_reg.reg_offset++;
dst_reg.reg_offset++;
}
for (unsigned i = 0; i < size; i++) {
switch (ir->type->base_type) {
case GLSL_TYPE_FLOAT:
- emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.f[i]));
+ emit(MOV(dst_reg, fs_reg(ir->value.f[i])));
break;
case GLSL_TYPE_UINT:
- emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.u[i]));
+ emit(MOV(dst_reg, fs_reg(ir->value.u[i])));
break;
case GLSL_TYPE_INT:
- emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.i[i]));
+ emit(MOV(dst_reg, fs_reg(ir->value.i[i])));
break;
case GLSL_TYPE_BOOL:
- emit(BRW_OPCODE_MOV, dst_reg, fs_reg((int)ir->value.b[i]));
+ emit(MOV(dst_reg, fs_reg((int)ir->value.b[i])));
break;
default:
assert(!"Non-float/uint/int/bool constant");
switch (expr->operation) {
case ir_unop_logic_not:
- inst = emit(BRW_OPCODE_AND, reg_null_d, op[0], fs_reg(1));
+ inst = emit(AND(reg_null_d, op[0], fs_reg(1)));
inst->conditional_mod = BRW_CONDITIONAL_Z;
break;
goto out;
case ir_unop_f2b:
- if (intel->gen >= 6) {
- inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0.0f));
+ if (brw->gen >= 6) {
+ emit(CMP(reg_null_d, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ));
} else {
- inst = emit(BRW_OPCODE_MOV, reg_null_f, op[0]);
+ inst = emit(MOV(reg_null_f, op[0]));
+ inst->conditional_mod = BRW_CONDITIONAL_NZ;
}
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
break;
case ir_unop_i2b:
- if (intel->gen >= 6) {
- inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0));
+ if (brw->gen >= 6) {
+ emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
} else {
- inst = emit(BRW_OPCODE_MOV, reg_null_d, op[0]);
+ inst = emit(MOV(reg_null_d, op[0]));
+ inst->conditional_mod = BRW_CONDITIONAL_NZ;
}
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
break;
case ir_binop_greater:
resolve_bool_comparison(expr->operands[0], &op[0]);
resolve_bool_comparison(expr->operands[1], &op[1]);
- inst = emit(BRW_OPCODE_CMP, reg_null_cmp, op[0], op[1]);
- inst->conditional_mod =
- brw_conditional_for_comparison(expr->operation);
+ emit(CMP(reg_null_d, op[0], op[1],
+ brw_conditional_for_comparison(expr->operation)));
break;
default:
out:
ir->accept(this);
- fs_inst *inst = emit(BRW_OPCODE_AND, reg_null_d, this->result, fs_reg(1));
+ fs_inst *inst = emit(AND(reg_null_d, this->result, fs_reg(1)));
inst->conditional_mod = BRW_CONDITIONAL_NZ;
}
switch (expr->operation) {
case ir_unop_logic_not:
- inst = emit(BRW_OPCODE_IF, temp, op[0], fs_reg(0));
- inst->conditional_mod = BRW_CONDITIONAL_Z;
- return;
-
case ir_binop_logic_xor:
- inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]);
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
- return;
-
case ir_binop_logic_or:
- temp = fs_reg(this, glsl_type::bool_type);
- emit(BRW_OPCODE_OR, temp, op[0], op[1]);
- inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0));
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
- return;
-
case ir_binop_logic_and:
- temp = fs_reg(this, glsl_type::bool_type);
- emit(BRW_OPCODE_AND, temp, op[0], op[1]);
- inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0));
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
- return;
+ /* For operations on bool arguments, only the low bit of the bool is
+ * valid, and the others are undefined. Fall back to the condition
+ * code path.
+ */
+ break;
case ir_unop_f2b:
inst = emit(BRW_OPCODE_IF, reg_null_f, op[0], fs_reg(0));
return;
case ir_unop_i2b:
- inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0));
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
+ emit(IF(op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
return;
case ir_binop_greater:
case ir_binop_all_equal:
case ir_binop_nequal:
case ir_binop_any_nequal:
- inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]);
- inst->conditional_mod =
- brw_conditional_for_comparison(expr->operation);
+ resolve_bool_comparison(expr->operands[0], &op[0]);
+ resolve_bool_comparison(expr->operands[1], &op[1]);
+
+ emit(IF(op[0], op[1],
+ brw_conditional_for_comparison(expr->operation)));
return;
default:
assert(!"not reached");
- inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0));
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
+ emit(IF(op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
fail("bad condition\n");
return;
}
- return;
}
- ir->condition->accept(this);
+ emit_bool_to_cond_code(ir->condition);
+ fs_inst *inst = emit(BRW_OPCODE_IF);
+ inst->predicate = BRW_PREDICATE_NORMAL;
+}
- fs_inst *inst = emit(BRW_OPCODE_IF, reg_null_d, this->result, fs_reg(0));
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
+/**
+ * Try to replace IF/MOV/ELSE/MOV/ENDIF with SEL.
+ *
+ * Many GLSL shaders contain the following pattern:
+ *
+ * x = condition ? foo : bar
+ *
+ * The compiler emits an ir_if tree for this, since each subexpression might be
+ * a complex tree that could have side-effects or short-circuit logic.
+ *
+ * However, the common case is to simply select one of two constants or
+ * variable values---which is exactly what SEL is for. In this case, the
+ * assembly looks like:
+ *
+ * (+f0) IF
+ * MOV dst src0
+ * ELSE
+ * MOV dst src1
+ * ENDIF
+ *
+ * which can be easily translated into:
+ *
+ * (+f0) SEL dst src0 src1
+ *
+ * If src0 is an immediate value, we promote it to a temporary GRF.
+ */
+void
+fs_visitor::try_replace_with_sel()
+{
+ fs_inst *endif_inst = (fs_inst *) instructions.get_tail();
+ assert(endif_inst->opcode == BRW_OPCODE_ENDIF);
+
+ /* Pattern match in reverse: IF, MOV, ELSE, MOV, ENDIF. */
+ int opcodes[] = {
+ BRW_OPCODE_IF, BRW_OPCODE_MOV, BRW_OPCODE_ELSE, BRW_OPCODE_MOV,
+ };
+
+ fs_inst *match = (fs_inst *) endif_inst->prev;
+ for (int i = 0; i < 4; i++) {
+ if (match->is_head_sentinel() || match->opcode != opcodes[4-i-1])
+ return;
+ match = (fs_inst *) match->prev;
+ }
+
+ /* The opcodes match; it looks like the right sequence of instructions. */
+ fs_inst *else_mov = (fs_inst *) endif_inst->prev;
+ fs_inst *then_mov = (fs_inst *) else_mov->prev->prev;
+ fs_inst *if_inst = (fs_inst *) then_mov->prev;
+
+ /* Check that the MOVs are the right form. */
+ if (then_mov->dst.equals(else_mov->dst) &&
+ !then_mov->is_partial_write() &&
+ !else_mov->is_partial_write()) {
+
+ /* Remove the matched instructions; we'll emit a SEL to replace them. */
+ while (!if_inst->next->is_tail_sentinel())
+ if_inst->next->remove();
+ if_inst->remove();
+
+ /* Only the last source register can be a constant, so if the MOV in
+ * the "then" clause uses a constant, we need to put it in a temporary.
+ */
+ fs_reg src0(then_mov->src[0]);
+ if (src0.file == IMM) {
+ src0 = fs_reg(this, glsl_type::float_type);
+ src0.type = then_mov->src[0].type;
+ emit(MOV(src0, then_mov->src[0]));
+ }
+
+ fs_inst *sel;
+ if (if_inst->conditional_mod) {
+ /* Sandybridge-specific IF with embedded comparison */
+ emit(CMP(reg_null_d, if_inst->src[0], if_inst->src[1],
+ if_inst->conditional_mod));
+ sel = emit(BRW_OPCODE_SEL, then_mov->dst, src0, else_mov->src[0]);
+ sel->predicate = BRW_PREDICATE_NORMAL;
+ } else {
+ /* Separate CMP and IF instructions */
+ sel = emit(BRW_OPCODE_SEL, then_mov->dst, src0, else_mov->src[0]);
+ sel->predicate = if_inst->predicate;
+ sel->predicate_inverse = if_inst->predicate_inverse;
+ }
+ }
}
void
fs_visitor::visit(ir_if *ir)
{
- fs_inst *inst;
-
- if (intel->gen < 6 && c->dispatch_width == 16) {
+ if (brw->gen < 6 && dispatch_width == 16) {
fail("Can't support (non-uniform) control flow on 16-wide\n");
}
*/
this->base_ir = ir->condition;
- if (intel->gen == 6) {
+ if (brw->gen == 6) {
emit_if_gen6(ir);
} else {
emit_bool_to_cond_code(ir->condition);
- inst = emit(BRW_OPCODE_IF);
- inst->predicated = true;
+ emit(IF(BRW_PREDICATE_NORMAL));
}
foreach_list(node, &ir->then_instructions) {
}
emit(BRW_OPCODE_ENDIF);
+
+ try_replace_with_sel();
}
void
{
fs_reg counter = reg_undef;
- if (intel->gen < 6 && c->dispatch_width == 16) {
+ if (brw->gen < 6 && dispatch_width == 16) {
fail("Can't support (non-uniform) control flow on 16-wide\n");
}
this->base_ir = ir->from;
ir->from->accept(this);
- emit(BRW_OPCODE_MOV, counter, this->result);
+ emit(MOV(counter, this->result));
}
}
this->base_ir = ir->to;
ir->to->accept(this);
- fs_inst *inst = emit(BRW_OPCODE_CMP, reg_null_cmp, counter, this->result);
- inst->conditional_mod = brw_conditional_for_comparison(ir->cmp);
+ emit(CMP(reg_null_d, counter, this->result,
+ brw_conditional_for_comparison(ir->cmp)));
- inst = emit(BRW_OPCODE_BREAK);
- inst->predicated = true;
+ fs_inst *inst = emit(BRW_OPCODE_BREAK);
+ inst->predicate = BRW_PREDICATE_NORMAL;
}
foreach_list(node, &ir->body_instructions) {
if (ir->increment) {
this->base_ir = ir->increment;
ir->increment->accept(this);
- emit(BRW_OPCODE_ADD, counter, counter, this->result);
+ emit(ADD(counter, counter, this->result));
}
this->base_ir = NULL;
(void)ir;
}
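+/* EmitVertex()/EndPrimitive() are geometry-shader-only, so they should never
+ * reach the fragment shader visitor.
+ */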
+void
+fs_visitor::visit(ir_emit_vertex *)
+{
+ assert(!"not reached");
+}
+
+void
+fs_visitor::visit(ir_end_primitive *)
+{
+ assert(!"not reached");
+}
+
fs_inst *
fs_visitor::emit(fs_inst inst)
{
fs_inst *list_inst = new(mem_ctx) fs_inst;
*list_inst = inst;
+ emit(list_inst);
+ return list_inst;
+}
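+/* Annotate the instruction with the current IR node and compression state,
+ * then append it to the instruction stream.
+ */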
+fs_inst *
+fs_visitor::emit(fs_inst *inst)
+{
if (force_uncompressed_stack > 0)
- list_inst->force_uncompressed = true;
+ inst->force_uncompressed = true;
else if (force_sechalf_stack > 0)
- list_inst->force_sechalf = true;
+ inst->force_sechalf = true;
- list_inst->annotation = this->current_annotation;
- list_inst->ir = this->base_ir;
+ inst->annotation = this->current_annotation;
+ inst->ir = this->base_ir;
- this->instructions.push_tail(list_inst);
+ this->instructions.push_tail(inst);
- return list_inst;
+ return inst;
+}
+
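+/* Emit every instruction in the list in order, splicing them onto the end of
+ * the visitor's instruction stream.
+ */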
+void
+fs_visitor::emit(exec_list list)
+{
+ foreach_list_safe(node, &list) {
+ fs_inst *inst = (fs_inst *)node;
+ inst->remove();
+ emit(inst);
+ }
}
/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
void
fs_visitor::emit_dummy_fs()
{
- int reg_width = c->dispatch_width / 8;
+ int reg_width = dispatch_width / 8;
/* Everyone's favorite color. */
- emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 0 * reg_width), fs_reg(1.0f));
- emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 1 * reg_width), fs_reg(0.0f));
- emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 2 * reg_width), fs_reg(1.0f));
- emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 3 * reg_width), fs_reg(0.0f));
+ emit(MOV(fs_reg(MRF, 2 + 0 * reg_width), fs_reg(1.0f)));
+ emit(MOV(fs_reg(MRF, 2 + 1 * reg_width), fs_reg(0.0f)));
+ emit(MOV(fs_reg(MRF, 2 + 2 * reg_width), fs_reg(1.0f)));
+ emit(MOV(fs_reg(MRF, 2 + 3 * reg_width), fs_reg(0.0f)));
fs_inst *write;
write = emit(FS_OPCODE_FB_WRITE, fs_reg(0), fs_reg(0));
this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
fs_reg(this, glsl_type::float_type);
}
- emit(BRW_OPCODE_ADD, this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
- this->pixel_x, fs_reg(negate(brw_vec1_grf(1, 0))));
- emit(BRW_OPCODE_ADD, this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
- this->pixel_y, fs_reg(negate(brw_vec1_grf(1, 1))));
+ emit(ADD(this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
+ this->pixel_x, fs_reg(negate(brw_vec1_grf(1, 0)))));
+ emit(ADD(this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
+ this->pixel_y, fs_reg(negate(brw_vec1_grf(1, 1)))));
this->current_annotation = "compute pos.w and 1/pos.w";
/* Compute wpos.w. It's always in our setup, since it's needed to
emit(FS_OPCODE_LINTERP, wpos_w,
this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
- interp_reg(FRAG_ATTRIB_WPOS, 3));
+ interp_reg(VARYING_SLOT_POS, 3));
/* Compute the pixel 1/W value from wpos.w. */
this->pixel_w = fs_reg(this, glsl_type::float_type);
emit_math(SHADER_OPCODE_RCP, this->pixel_w, wpos_w);
fs_reg int_pixel_y = fs_reg(this, glsl_type::uint_type);
int_pixel_x.type = BRW_REGISTER_TYPE_UW;
int_pixel_y.type = BRW_REGISTER_TYPE_UW;
- emit(BRW_OPCODE_ADD,
- int_pixel_x,
- fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
- fs_reg(brw_imm_v(0x10101010)));
- emit(BRW_OPCODE_ADD,
- int_pixel_y,
- fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
- fs_reg(brw_imm_v(0x11001100)));
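+   /* The packed vector immediates expand to per-channel offsets of
+    * {0, 1, 0, 1, ...} in X and {0, 0, 1, 1, ...} in Y, turning each
+    * subspan's upper-left position from the payload (g1) into per-pixel
+    * coordinates.
+    */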
+ emit(ADD(int_pixel_x,
+ fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
+ fs_reg(brw_imm_v(0x10101010))));
+ emit(ADD(int_pixel_y,
+ fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
+ fs_reg(brw_imm_v(0x11001100))));
/* As of gen6, we can no longer mix float and int sources. We have
* to turn the integer pixel centers into floats for their actual
*/
this->pixel_x = fs_reg(this, glsl_type::float_type);
this->pixel_y = fs_reg(this, glsl_type::float_type);
- emit(BRW_OPCODE_MOV, this->pixel_x, int_pixel_x);
- emit(BRW_OPCODE_MOV, this->pixel_y, int_pixel_y);
+ emit(MOV(this->pixel_x, int_pixel_x));
+ emit(MOV(this->pixel_y, int_pixel_y));
this->current_annotation = "compute pos.w";
this->pixel_w = fs_reg(brw_vec8_grf(c->source_w_reg, 0));
void
fs_visitor::emit_color_write(int target, int index, int first_color_mrf)
{
- int reg_width = c->dispatch_width / 8;
+ int reg_width = dispatch_width / 8;
fs_inst *inst;
fs_reg color = outputs[target];
fs_reg mrf;
color.reg_offset += index;
- if (c->dispatch_width == 8 || intel->gen >= 6) {
+ if (dispatch_width == 8 || brw->gen >= 6) {
/* SIMD8 write looks like:
* m + 0: r0
* m + 1: r1
* m + 6: a0
* m + 7: a1
*/
- inst = emit(BRW_OPCODE_MOV,
- fs_reg(MRF, first_color_mrf + index * reg_width, color.type),
- color);
+ inst = emit(MOV(fs_reg(MRF, first_color_mrf + index * reg_width,
+ color.type),
+ color));
inst->saturate = c->key.clamp_fragment_color;
} else {
/* pre-gen6 SIMD16 single source DP write looks like:
* usual destination + 1 for the second half we get
* destination + 4.
*/
- inst = emit(BRW_OPCODE_MOV,
- fs_reg(MRF, BRW_MRF_COMPR4 + first_color_mrf + index,
- color.type),
- color);
+ inst = emit(MOV(fs_reg(MRF, BRW_MRF_COMPR4 + first_color_mrf + index,
+ color.type),
+ color));
inst->saturate = c->key.clamp_fragment_color;
} else {
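+      /* Fallback path: write the two SIMD8 halves of the SIMD16 color as
+       * separate moves, forcing first-half and then second-half execution.
+       */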
push_force_uncompressed();
- inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index,
- color.type),
- color);
+ inst = emit(MOV(fs_reg(MRF, first_color_mrf + index, color.type),
+ color));
inst->saturate = c->key.clamp_fragment_color;
pop_force_uncompressed();
push_force_sechalf();
color.sechalf = true;
- inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index + 4,
- color.type),
- color);
+ inst = emit(MOV(fs_reg(MRF, first_color_mrf + index + 4, color.type),
+ color));
inst->saturate = c->key.clamp_fragment_color;
pop_force_sechalf();
color.sechalf = false;
*/
int base_mrf = 1;
int nr = base_mrf;
- int reg_width = c->dispatch_width / 8;
+ int reg_width = dispatch_width / 8;
bool do_dual_src = this->dual_src_output.file != BAD_FILE;
+ bool src0_alpha_to_render_target = false;
- if (c->dispatch_width == 16 && do_dual_src) {
+ if (dispatch_width == 16 && do_dual_src) {
fail("GL_ARB_blend_func_extended not yet supported in 16-wide.");
do_dual_src = false;
}
* dispatched. This field is only required for the end-of-
* thread message and on all dual-source messages."
*/
- if (intel->gen >= 6 &&
- !this->kill_emitted &&
+ if (brw->gen >= 6 &&
+ !this->fp->UsesKill &&
!do_dual_src &&
c->key.nr_color_regions == 1) {
header_present = false;
}
if (header_present) {
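+      /* With replicated alpha (alpha test or alpha-to-coverage and multiple
+       * color buffers), render targets other than 0 also need RT 0's alpha,
+       * carried as an extra src0 alpha payload register.
+       */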
+ src0_alpha_to_render_target = brw->gen >= 6 &&
+ !do_dual_src &&
+ c->key.replicate_alpha;
/* m2, m3 header */
nr += 2;
}
if (c->aa_dest_stencil_reg) {
push_force_uncompressed();
- emit(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
- fs_reg(brw_vec8_grf(c->aa_dest_stencil_reg, 0)));
+ emit(MOV(fs_reg(MRF, nr++),
+ fs_reg(brw_vec8_grf(c->aa_dest_stencil_reg, 0))));
pop_force_uncompressed();
}
nr += 4 * reg_width;
if (do_dual_src)
nr += 4;
+ if (src0_alpha_to_render_target)
+ nr += reg_width;
if (c->source_depth_to_render_target) {
- if (intel->gen == 6 && c->dispatch_width == 16) {
+ if (brw->gen == 6 && dispatch_width == 16) {
/* For outputting oDepth on gen6, SIMD8 writes have to be
* used. This would require 8-wide moves of each half to
* message regs, kind of like pre-gen5 SIMD16 FB writes.
fail("Missing support for simd16 depth writes on gen6\n");
}
- if (c->computes_depth) {
+ if (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
/* Hand over gl_FragDepth. */
- assert(this->frag_depth);
- fs_reg depth = *(variable_storage(this->frag_depth));
-
- emit(BRW_OPCODE_MOV, fs_reg(MRF, nr), depth);
+ assert(this->frag_depth.file != BAD_FILE);
+ emit(MOV(fs_reg(MRF, nr), this->frag_depth));
} else {
/* Pass through the payload depth. */
- emit(BRW_OPCODE_MOV, fs_reg(MRF, nr),
- fs_reg(brw_vec8_grf(c->source_depth_reg, 0)));
+ emit(MOV(fs_reg(MRF, nr),
+ fs_reg(brw_vec8_grf(c->source_depth_reg, 0))));
}
nr += reg_width;
}
if (c->dest_depth_reg) {
- emit(BRW_OPCODE_MOV, fs_reg(MRF, nr),
- fs_reg(brw_vec8_grf(c->dest_depth_reg, 0)));
+ emit(MOV(fs_reg(MRF, nr),
+ fs_reg(brw_vec8_grf(c->dest_depth_reg, 0))));
nr += reg_width;
}
this->current_annotation = ralloc_asprintf(this->mem_ctx,
"FB write src0");
for (int i = 0; i < 4; i++) {
- fs_inst *inst = emit(BRW_OPCODE_MOV,
- fs_reg(MRF, color_mrf + i, src0.type),
- src0);
+ fs_inst *inst = emit(MOV(fs_reg(MRF, color_mrf + i, src0.type), src0));
src0.reg_offset++;
inst->saturate = c->key.clamp_fragment_color;
}
this->current_annotation = ralloc_asprintf(this->mem_ctx,
"FB write src1");
for (int i = 0; i < 4; i++) {
- fs_inst *inst = emit(BRW_OPCODE_MOV,
- fs_reg(MRF, color_mrf + 4 + i, src1.type),
- src1);
+ fs_inst *inst = emit(MOV(fs_reg(MRF, color_mrf + 4 + i, src1.type),
+ src1));
src1.reg_offset++;
inst->saturate = c->key.clamp_fragment_color;
}
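+      /* This FB write is the last message of the thread, so finish the
+       * shader-time bookkeeping before emitting it.
+       */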
+ if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+ emit_shader_time_end();
+
fs_inst *inst = emit(FS_OPCODE_FB_WRITE);
inst->target = 0;
inst->base_mrf = base_mrf;
this->current_annotation = ralloc_asprintf(this->mem_ctx,
"FB write target %d",
target);
- for (int i = 0; i < 4; i++)
- emit_color_write(target, i, color_mrf);
+   /* If src0_alpha_to_render_target is true, include source zero alpha
+    * data in the RenderTargetWrite message for targets > 0.
+    */
+ int write_color_mrf = color_mrf;
+ if (src0_alpha_to_render_target && target != 0) {
+ fs_inst *inst;
+ fs_reg color = outputs[0];
+ color.reg_offset += 3;
+
+ inst = emit(MOV(fs_reg(MRF, write_color_mrf, color.type),
+ color));
+ inst->saturate = c->key.clamp_fragment_color;
+ write_color_mrf = color_mrf + reg_width;
+ }
+
+ for (unsigned i = 0; i < this->output_components[target]; i++)
+ emit_color_write(target, i, write_color_mrf);
+
+ bool eot = false;
+ if (target == c->key.nr_color_regions - 1) {
+ eot = true;
+
+ if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+ emit_shader_time_end();
+ }
fs_inst *inst = emit(FS_OPCODE_FB_WRITE);
inst->target = target;
inst->base_mrf = base_mrf;
- inst->mlen = nr - base_mrf;
- if (target == c->key.nr_color_regions - 1)
- inst->eot = true;
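+   /* Target 0 does not carry the extra src0 alpha payload, so its message
+    * is reg_width registers shorter.
+    */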
+ if (src0_alpha_to_render_target && target == 0)
+ inst->mlen = nr - base_mrf - reg_width;
+ else
+ inst->mlen = nr - base_mrf;
+ inst->eot = eot;
inst->header_present = header_present;
}
if (c->key.nr_color_regions == 0) {
- if (c->key.alpha_test) {
- /* If the alpha test is enabled but there's no color buffer,
- * we still need to send alpha out the pipeline to our null
- * renderbuffer.
- */
- emit_color_write(0, 3, color_mrf);
- }
+   /* Even if there are no color buffers enabled, we still need to send
+ * alpha out the pipeline to our null renderbuffer to support
+ * alpha-testing, alpha-to-coverage, and so on.
+ */
+ emit_color_write(0, 3, color_mrf);
+
+ if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+ emit_shader_time_end();
fs_inst *inst = emit(FS_OPCODE_FB_WRITE);
inst->base_mrf = base_mrf;
return;
fs_reg temp = fs_reg(this, glsl_type::uint_type);
- emit(BRW_OPCODE_MOV, temp, *reg);
+ emit(MOV(temp, *reg));
*reg = temp;
}
return;
fs_reg temp = fs_reg(this, glsl_type::bool_type);
- emit(BRW_OPCODE_AND, temp, *reg, fs_reg(1));
+ emit(AND(temp, *reg, fs_reg(1)));
*reg = temp;
}
+
+fs_visitor::fs_visitor(struct brw_context *brw,
+ struct brw_wm_compile *c,
+ struct gl_shader_program *shader_prog,
+ struct gl_fragment_program *fp,
+ unsigned dispatch_width)
+ : dispatch_width(dispatch_width)
+{
+ this->c = c;
+ this->brw = brw;
+ this->fp = fp;
+ this->shader_prog = shader_prog;
+ this->ctx = &brw->ctx;
+ this->mem_ctx = ralloc_context(NULL);
+ if (shader_prog)
+ shader = (struct brw_shader *)
+ shader_prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
+ else
+ shader = NULL;
+ this->failed = false;
+ this->variable_ht = hash_table_ctor(0,
+ hash_table_pointer_hash,
+ hash_table_pointer_compare);
+
+ memset(this->outputs, 0, sizeof(this->outputs));
+ memset(this->output_components, 0, sizeof(this->output_components));
+ this->first_non_payload_grf = 0;
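+   /* Gen7+ has no MRF file; the top of the GRF space stands in for the
+    * MRFs, so virtual GRF allocation has to stay below GEN7_MRF_HACK_START.
+    */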
+ this->max_grf = brw->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;
+
+ this->current_annotation = NULL;
+ this->base_ir = NULL;
+
+ this->virtual_grf_sizes = NULL;
+ this->virtual_grf_count = 0;
+ this->virtual_grf_array_size = 0;
+ this->virtual_grf_start = NULL;
+ this->virtual_grf_end = NULL;
+ this->live_intervals_valid = false;
+
+ this->params_remap = NULL;
+ this->nr_params_remap = 0;
+
+ this->force_uncompressed_stack = 0;
+ this->force_sechalf_stack = 0;
+
+ memset(&this->param_size, 0, sizeof(this->param_size));
+}
+
+fs_visitor::~fs_visitor()
+{
+ ralloc_free(this->mem_ctx);
+ hash_table_dtor(this->variable_ht);
+}