this->no_dd_check = false;
this->writes_accumulator = false;
this->conditional_mod = BRW_CONDITIONAL_NONE;
- this->sampler = 0;
this->texture_offset = 0;
this->target = 0;
this->shadow_compare = false;
}
return size;
case GLSL_TYPE_SAMPLER:
- /* Samplers take up one slot in UNIFORMS[], but they're baked in
- * at link time.
+ /* Samplers take up no register space, since they're baked in at
+ * link time.
*/
- return 1;
+ return 0;
case GLSL_TYPE_ATOMIC_UINT:
return 0;
case GLSL_TYPE_IMAGE:
int i;
for (i = 0; i < uniform_vector_size[uniforms]; i++) {
- stage_prog_data->param[uniforms * 4 + i] = &components->f;
+ stage_prog_data->param[uniforms * 4 + i] = components;
components++;
}
for (; i < 4; i++) {
- static float zero = 0;
+ static gl_constant_value zero = { 0.0 };
stage_prog_data->param[uniforms * 4 + i] = &zero;
}
this->userplane[i] = dst_reg(UNIFORM, this->uniforms);
this->userplane[i].type = BRW_REGISTER_TYPE_F;
for (int j = 0; j < 4; ++j) {
- stage_prog_data->param[this->uniforms * 4 + j] = &clip_planes[i][j];
+ stage_prog_data->param[this->uniforms * 4 + j] =
+ (gl_constant_value *) &clip_planes[i][j];
}
++this->uniforms;
}
*/
int index = _mesa_add_state_reference(this->prog->Parameters,
(gl_state_index *)slots[i].tokens);
- float *values = &this->prog->Parameters->ParameterValues[index][0].f;
+ gl_constant_value *values =
+ &this->prog->Parameters->ParameterValues[index][0];
assert(this->uniforms < uniform_array_size);
this->uniform_vector_size[this->uniforms] = 0;
*predicate = BRW_PREDICATE_NORMAL;
- if (expr) {
- src_reg op[2];
+ if (expr && expr->operation != ir_binop_ubo_load) {
+ src_reg op[3];
vec4_instruction *inst;
- assert(expr->get_num_operands() <= 2);
+ assert(expr->get_num_operands() <= 3);
for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
expr->operands[i]->accept(this);
op[i] = this->result;
brw_conditional_for_comparison(expr->operation)));
break;
+ case ir_triop_csel: {
+ /* Expand the boolean condition into the flag register. */
+ inst = emit(MOV(dst_null_d(), op[0]));
+ inst->conditional_mod = BRW_CONDITIONAL_NZ;
+
+ /* Select which boolean to return. */
+ dst_reg temp(this, expr->operands[1]->type);
+ inst = emit(BRW_OPCODE_SEL, temp, op[1], op[2]);
+ inst->predicate = BRW_PREDICATE_NORMAL;
+
+ /* Expand the result to a condition code. */
+ inst = emit(MOV(dst_null_d(), src_reg(temp)));
+ inst->conditional_mod = BRW_CONDITIONAL_NZ;
+ break;
+ }
+
default:
unreachable("not reached");
}
{
ir_expression *expr = ir->condition->as_expression();
- if (expr) {
- src_reg op[2];
+ if (expr && expr->operation != ir_binop_ubo_load) {
+ src_reg op[3];
dst_reg temp;
- assert(expr->get_num_operands() <= 2);
+ assert(expr->get_num_operands() <= 3);
for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
expr->operands[i]->accept(this);
op[i] = this->result;
emit(IF(BRW_PREDICATE_ALIGN16_ANY4H));
return;
+ case ir_triop_csel: {
+ /* Expand the boolean condition into the flag register. */
+ vec4_instruction *inst = emit(MOV(dst_null_d(), op[0]));
+ inst->conditional_mod = BRW_CONDITIONAL_NZ;
+
+ /* Select which boolean to return. */
+ dst_reg temp(this, expr->operands[1]->type);
+ inst = emit(BRW_OPCODE_SEL, temp, op[1], op[2]);
+ inst->predicate = BRW_PREDICATE_NORMAL;
+
+ emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
+ return;
+ }
+
default:
unreachable("not reached");
}
* ir_binop_ubo_load expressions and not ir_dereference_variable for UBO
* variables, so no need for them to be in variable_ht.
*
- * Atomic counters take no uniform storage, no need to do
- * anything here.
+ * Some uniforms, such as samplers and atomic counters, have no actual
+ * storage, so we should ignore them.
*/
- if (ir->is_in_uniform_block() || ir->type->contains_atomic())
+ if (ir->is_in_uniform_block() || type_size(ir->type) == 0)
return;
/* Track how big the whole uniform variable is, in case we need to put a
}
}
-bool
-vec4_visitor::try_emit_sat(ir_expression *ir)
-{
- ir_rvalue *sat_src = ir->as_rvalue_to_saturate();
- if (!sat_src)
- return false;
-
- sat_src->accept(this);
- src_reg src = this->result;
-
- this->result = src_reg(this, ir->type);
- vec4_instruction *inst;
- inst = emit(MOV(dst_reg(this->result), src));
- inst->saturate = true;
-
- return true;
-}
-
bool
vec4_visitor::try_emit_mad(ir_expression *ir)
{
bool
vec4_visitor::try_emit_b2f_of_compare(ir_expression *ir)
{
+ /* This optimization relies on CMP setting the destination to 0 when
+ * false. Early hardware only sets the least significant bit, and
+ * leaves the other bits undefined. So we can't use it.
+ */
+ if (brw->gen < 6)
+ return false;
+
ir_expression *const cmp = ir->operands[0]->as_expression();
if (cmp == NULL)
return true;
}
-void
-vec4_visitor::emit_bool_comparison(unsigned int op,
- dst_reg dst, src_reg src0, src_reg src1)
-{
- /* original gen4 does destination conversion before comparison. */
- if (brw->gen < 5)
- dst.type = src0.type;
-
- emit(CMP(dst, src0, src1, brw_conditional_for_comparison(op)));
-
- dst.type = BRW_REGISTER_TYPE_D;
- emit(AND(dst, src_reg(dst), src_reg(0x1)));
-}
-
void
vec4_visitor::emit_minmax(enum brw_conditional_mod conditionalmod, dst_reg dst,
src_reg src0, src_reg src1)
dst_reg result_dst;
vec4_instruction *inst;
- if (try_emit_sat(ir))
- return;
-
if (ir->operation == ir_binop_add) {
if (try_emit_mad(ir))
return;
switch (ir->operation) {
case ir_unop_logic_not:
- /* Note that BRW_OPCODE_NOT is not appropriate here, since it is
- * ones complement of the whole register, not just bit 0.
- */
- emit(XOR(result_dst, op[0], src_reg(1)));
+ if (ctx->Const.UniformBooleanTrue != 1) {
+ emit(NOT(result_dst, op[0]));
+ } else {
+ emit(XOR(result_dst, op[0], src_reg(1)));
+ }
break;
case ir_unop_neg:
op[0].negate = !op[0].negate;
break;
case ir_unop_dFdx:
+ case ir_unop_dFdx_coarse:
+ case ir_unop_dFdx_fine:
case ir_unop_dFdy:
+ case ir_unop_dFdy_coarse:
+ case ir_unop_dFdy_fine:
unreachable("derivatives not valid in vertex shader");
case ir_unop_bitfield_reverse:
case ir_unop_find_lsb:
emit(FBL(result_dst, op[0]));
break;
+ case ir_unop_saturate:
+ inst = emit(MOV(result_dst, op[0]));
+ inst->saturate = true;
+ break;
case ir_unop_noise:
unreachable("not reached: should be handled by lower_noise");
case ir_binop_nequal: {
emit(CMP(result_dst, op[0], op[1],
brw_conditional_for_comparison(ir->operation)));
- emit(AND(result_dst, result_src, src_reg(0x1)));
+ if (ctx->Const.UniformBooleanTrue == 1) {
+ emit(AND(result_dst, result_src, src_reg(1)));
+ }
break;
}
ir->operands[1]->type->is_vector()) {
emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
emit(MOV(result_dst, src_reg(0)));
- inst = emit(MOV(result_dst, src_reg(1)));
+ inst = emit(MOV(result_dst, src_reg(ctx->Const.UniformBooleanTrue)));
inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
} else {
emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_Z));
- emit(AND(result_dst, result_src, src_reg(0x1)));
+ if (ctx->Const.UniformBooleanTrue == 1) {
+ emit(AND(result_dst, result_src, src_reg(1)));
+ }
}
break;
case ir_binop_any_nequal:
emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
emit(MOV(result_dst, src_reg(0)));
- inst = emit(MOV(result_dst, src_reg(1)));
+ inst = emit(MOV(result_dst, src_reg(ctx->Const.UniformBooleanTrue)));
inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
} else {
emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_NZ));
- emit(AND(result_dst, result_src, src_reg(0x1)));
+ if (ctx->Const.UniformBooleanTrue == 1) {
+ emit(AND(result_dst, result_src, src_reg(1)));
+ }
}
break;
emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
emit(MOV(result_dst, src_reg(0)));
- inst = emit(MOV(result_dst, src_reg(1)));
+ inst = emit(MOV(result_dst, src_reg(ctx->Const.UniformBooleanTrue)));
inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
break;
case ir_unop_i2u:
case ir_unop_u2i:
case ir_unop_u2f:
- case ir_unop_b2f:
- case ir_unop_b2i:
case ir_unop_f2i:
case ir_unop_f2u:
emit(MOV(result_dst, op[0]));
break;
+ case ir_unop_b2i:
+ if (ctx->Const.UniformBooleanTrue != 1) {
+ emit(AND(result_dst, op[0], src_reg(1)));
+ } else {
+ emit(MOV(result_dst, op[0]));
+ }
+ break;
+ case ir_unop_b2f:
+ if (ctx->Const.UniformBooleanTrue != 1) {
+ op[0].type = BRW_REGISTER_TYPE_UD;
+ result_dst.type = BRW_REGISTER_TYPE_UD;
+ emit(AND(result_dst, op[0], src_reg(0x3f800000u)));
+ result_dst.type = BRW_REGISTER_TYPE_F;
+ } else {
+ emit(MOV(result_dst, op[0]));
+ }
+ break;
case ir_unop_f2b:
- case ir_unop_i2b: {
+ case ir_unop_i2b:
emit(CMP(result_dst, op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
- emit(AND(result_dst, result_src, src_reg(1)));
+ if (ctx->Const.UniformBooleanTrue == 1) {
+ emit(AND(result_dst, result_src, src_reg(1)));
+ }
break;
- }
case ir_unop_trunc:
emit(RNDZ(result_dst, op[0]));
break;
case ir_binop_ubo_load: {
- ir_constant *uniform_block = ir->operands[0]->as_constant();
+ ir_constant *const_uniform_block = ir->operands[0]->as_constant();
ir_constant *const_offset_ir = ir->operands[1]->as_constant();
unsigned const_offset = const_offset_ir ? const_offset_ir->value.u[0] : 0;
src_reg offset;
src_reg packed_consts = src_reg(this, glsl_type::vec4_type);
packed_consts.type = result.type;
- src_reg surf_index =
- src_reg(prog_data->base.binding_table.ubo_start + uniform_block->value.u[0]);
+ src_reg surf_index;
+
+ if (const_uniform_block) {
+ /* The block index is a constant, so just emit the binding table entry
+ * as an immediate.
+ */
+ surf_index = src_reg(prog_data->base.binding_table.ubo_start +
+ const_uniform_block->value.u[0]);
+ } else {
+ /* The block index is not a constant. Evaluate the index expression
+ * per-channel and add the base UBO index; the generator will select
+ * a value from any live channel.
+ */
+ surf_index = src_reg(this, glsl_type::uint_type);
+ emit(ADD(dst_reg(surf_index), op[0],
+ src_reg(prog_data->base.binding_table.ubo_start)));
+
+ /* Assume this may touch any UBO. It would be nice to provide
+ * a tighter bound, but the array information is already lowered away.
+ */
+ brw_mark_surface_used(&prog_data->base,
+ prog_data->base.binding_table.ubo_start +
+ shader_prog->NumUniformBlocks - 1);
+ }
+
if (const_offset_ir) {
if (brw->gen >= 8) {
/* Store the offset in a GRF so we can send-from-GRF. */
const_offset % 16 / 4,
const_offset % 16 / 4);
- /* UBO bools are any nonzero int. We store bools as either 0 or 1. */
+ /* UBO bools are any nonzero int. We need to convert them to use the
+ * value of true stored in ctx->Const.UniformBooleanTrue.
+ */
if (ir->type->base_type == GLSL_TYPE_BOOL) {
emit(CMP(result_dst, packed_consts, src_reg(0u),
BRW_CONDITIONAL_NZ));
- emit(AND(result_dst, result, src_reg(0x1)));
+ if (ctx->Const.UniformBooleanTrue == 1) {
+ emit(AND(result_dst, result, src_reg(1)));
+ }
} else {
emit(MOV(result_dst, packed_consts));
}
emit(MOV(*dst, src_reg(ir->value.u[i])));
break;
case GLSL_TYPE_BOOL:
- emit(MOV(*dst, src_reg(ir->value.b[i])));
+ emit(MOV(*dst,
+ src_reg(ir->value.b[i] != 0 ? ctx->Const.UniformBooleanTrue
+ : 0)));
break;
default:
unreachable("Non-float/uint/int/bool constant");
ir->actual_parameters.get_head());
ir_variable *location = deref->variable_referenced();
unsigned surf_index = (prog_data->base.binding_table.abo_start +
- location->data.atomic.buffer_index);
+ location->data.binding);
/* Calculate the surface offset */
src_reg offset(this, glsl_type::uint_type);
}
src_reg
-vec4_visitor::emit_mcs_fetch(ir_texture *ir, src_reg coordinate, int sampler)
+vec4_visitor::emit_mcs_fetch(ir_texture *ir, src_reg coordinate, src_reg sampler)
{
vec4_instruction *inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXF_MCS);
inst->base_mrf = 2;
inst->mlen = 1;
- inst->sampler = sampler;
inst->dst = dst_reg(this, glsl_type::uvec4_type);
inst->dst.writemask = WRITEMASK_XYZW;
+ inst->src[1] = sampler;
+
/* parameters are: u, v, r, lod; lod will always be zero due to api restrictions */
int param_base = inst->base_mrf;
int coord_mask = (1 << ir->coordinate->type->vector_elements) - 1;
return src_reg(inst->dst);
}
+/* Returns true if @sampler may refer to a sampler index >= 16, which
+ * requires the "high sampler" message path (an extra message header on
+ * hardware that supports it).
+ */
+static bool
+is_high_sampler(struct brw_context *brw, src_reg sampler)
+{
+   /* Pre-Haswell Gen7 and earlier never take the high-sampler path. */
+   if (brw->gen < 8 && !brw->is_haswell)
+      return false;
+
+   /* A non-immediate (dynamically indexed) sampler could resolve to any
+    * index at runtime, so conservatively treat it as potentially >= 16.
+    * An immediate sampler is "high" only if its value is >= 16.
+    */
+   return sampler.file != IMM || sampler.fixed_hw_reg.dw1.ud >= 16;
+}
+
void
vec4_visitor::visit(ir_texture *ir)
{
- int sampler =
+ uint32_t sampler =
_mesa_get_sampler_uniform_value(ir->sampler, shader_prog, prog);
+ ir_rvalue *nonconst_sampler_index =
+ _mesa_get_sampler_array_nonconst_index(ir->sampler);
+
+ /* Handle non-constant sampler array indexing */
+ src_reg sampler_reg;
+ if (nonconst_sampler_index) {
+ /* The highest sampler which may be used by this operation is
+ * the last element of the array. Mark it here, because the generator
+ * doesn't have enough information to determine the bound.
+ */
+ uint32_t array_size = ir->sampler->as_dereference_array()
+ ->array->type->array_size();
+
+ uint32_t max_used = sampler + array_size - 1;
+ if (ir->op == ir_tg4 && brw->gen < 8) {
+ max_used += prog_data->base.binding_table.gather_texture_start;
+ } else {
+ max_used += prog_data->base.binding_table.texture_start;
+ }
+
+ brw_mark_surface_used(&prog_data->base, max_used);
+
+ /* Emit code to evaluate the actual indexing expression */
+ nonconst_sampler_index->accept(this);
+ dst_reg temp(this, glsl_type::uint_type);
+ emit(ADD(temp, this->result, src_reg(sampler)))
+ ->force_writemask_all = true;
+ sampler_reg = src_reg(temp);
+ } else {
+ /* Single sampler, or constant array index; the indexing expression
+ * is just an immediate.
+ */
+ sampler_reg = src_reg(sampler);
+ }
+
/* When tg4 is used with the degenerate ZERO/ONE swizzles, don't bother
* emitting anything other than setting up the constant result.
*/
sample_index_type = ir->lod_info.sample_index->type;
if (brw->gen >= 7 && key->tex.compressed_multisample_layout_mask & (1<<sampler))
- mcs = emit_mcs_fetch(ir, coordinate, sampler);
+ mcs = emit_mcs_fetch(ir, coordinate, sampler_reg);
else
mcs = src_reg(0u);
break;
break;
}
- vec4_instruction *inst = NULL;
+ enum opcode opcode;
switch (ir->op) {
- case ir_tex:
- case ir_txl:
- inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXL);
- break;
- case ir_txd:
- inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXD);
- break;
- case ir_txf:
- inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXF);
- break;
- case ir_txf_ms:
- inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXF_CMS);
- break;
- case ir_txs:
- inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXS);
- break;
- case ir_tg4:
- if (has_nonconstant_offset)
- inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TG4_OFFSET);
- else
- inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TG4);
- break;
- case ir_query_levels:
- inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXS);
- break;
+ case ir_tex: opcode = SHADER_OPCODE_TXL; break;
+ case ir_txl: opcode = SHADER_OPCODE_TXL; break;
+ case ir_txd: opcode = SHADER_OPCODE_TXD; break;
+ case ir_txf: opcode = SHADER_OPCODE_TXF; break;
+ case ir_txf_ms: opcode = SHADER_OPCODE_TXF_CMS; break;
+ case ir_txs: opcode = SHADER_OPCODE_TXS; break;
+ case ir_tg4: opcode = has_nonconstant_offset
+ ? SHADER_OPCODE_TG4_OFFSET : SHADER_OPCODE_TG4; break;
+ case ir_query_levels: opcode = SHADER_OPCODE_TXS; break;
case ir_txb:
unreachable("TXB is not valid for vertex shaders.");
case ir_lod:
unreachable("Unrecognized tex op");
}
+ vec4_instruction *inst = new(mem_ctx) vec4_instruction(this, opcode);
+
if (ir->offset != NULL && ir->op != ir_txf)
inst->texture_offset = brw_texture_offset(ctx, ir->offset->as_constant());
*/
inst->header_present =
brw->gen < 5 || inst->texture_offset != 0 || ir->op == ir_tg4 ||
- sampler >= 16;
+ is_high_sampler(brw, sampler_reg);
inst->base_mrf = 2;
inst->mlen = inst->header_present + 1; /* always at least one */
- inst->sampler = sampler;
inst->dst = dst_reg(this, ir->type);
inst->dst.writemask = WRITEMASK_XYZW;
inst->shadow_compare = ir->shadow_comparitor != NULL;
+ inst->src[1] = sampler_reg;
+
/* MRF for the first parameter */
int param_base = inst->base_mrf + inst->header_present;
} else if (ir->op == ir_txf_ms) {
emit(MOV(dst_reg(MRF, param_base + 1, sample_index_type, WRITEMASK_X),
sample_index));
- if (brw->gen >= 7)
+ if (brw->gen >= 7) {
/* MCS data is in the first channel of `mcs`, but we need to get it into
* the .y channel of the second vec4 of params, so replicate .x across
* the whole vec4 and then mask off everything except .y
mcs.swizzle = BRW_SWIZZLE_XXXX;
emit(MOV(dst_reg(MRF, param_base + 1, glsl_type::uint_type, WRITEMASK_Y),
mcs));
+ }
inst->mlen++;
} else if (ir->op == ir_txd) {
const glsl_type *type = lod_type;
* Set up the gather channel based on the swizzle, for gather4.
*/
uint32_t
-vec4_visitor::gather_channel(ir_texture *ir, int sampler)
+vec4_visitor::gather_channel(ir_texture *ir, uint32_t sampler)
{
ir_constant *chan = ir->lod_info.component->as_constant();
int swiz = GET_SWZ(key->tex.swizzles[sampler], chan->value.i[0]);
}
void
-vec4_visitor::swizzle_result(ir_texture *ir, src_reg orig_val, int sampler)
+vec4_visitor::swizzle_result(ir_texture *ir, src_reg orig_val, uint32_t sampler)
{
int s = key->tex.swizzles[sampler];
* add it.
*/
if (pull_constant_loc[uniform] == -1) {
- const float **values = &stage_prog_data->param[uniform * 4];
+ const gl_constant_value **values =
+ &stage_prog_data->param[uniform * 4];
pull_constant_loc[uniform] = stage_prog_data->nr_pull_params / 4;