emit(VEC4_OPCODE_PACK_BYTES, dst, bytes);
}
-void
-vec4_visitor::visit_instructions(const exec_list *list)
-{
- foreach_in_list(ir_instruction, ir, list) {
- base_ir = ir;
- ir->accept(this);
- }
-}
-
/**
* Returns the minimum number of vec4 elements needed to pack a type.
*
this->type = brw_type_for_base_type(type);
}
-void
-vec4_visitor::setup_vec4_uniform_value(unsigned param_offset,
- const gl_constant_value *values,
- unsigned n)
-{
- static const gl_constant_value zero = { 0 };
-
- assert(param_offset % 4 == 0);
-
- for (unsigned i = 0; i < n; ++i)
- stage_prog_data->param[param_offset + i] = &values[i];
-
- for (unsigned i = n; i < 4; ++i)
- stage_prog_data->param[param_offset + i] = &zero;
-
- uniform_vector_size[param_offset / 4] = n;
-}
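/* Editorial sketch (not part of the patch): packing a vec3 uniform at
 * param_offset 8 fills three slots and zero-pads the fourth, so the vec4
 * slot is always fully initialized:
 *
 *   setup_vec4_uniform_value(8, values, 3);
 *   // stage_prog_data->param[8..10] = &values[0..2]
 *   // stage_prog_data->param[11]    = &zero
 *   // uniform_vector_size[2]        = 3
 */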
-
-/* Our support for uniforms is piggy-backed on the struct
- * gl_fragment_program, because that's where the values actually
- * get stored, rather than in some global gl_shader_program uniform
- * store.
- */
-void
-vec4_visitor::setup_uniform_values(ir_variable *ir)
-{
- int namelen = strlen(ir->name);
-
- /* The data for our (non-builtin) uniforms is stored in a series of
- * gl_uniform_driver_storage structs for each subcomponent that
- * glGetUniformLocation() could name. We know it's been set up in the same
- * order we'd walk the type, so walk the list of storage and find anything
- * with our name, or the prefix of a component that starts with our name.
- */
- for (unsigned u = 0; u < shader_prog->NumUniformStorage; u++) {
- struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];
-
- if (storage->builtin)
- continue;
-
- if (strncmp(ir->name, storage->name, namelen) != 0 ||
- (storage->name[namelen] != 0 &&
- storage->name[namelen] != '.' &&
- storage->name[namelen] != '[')) {
- continue;
- }
-
- const unsigned vector_count = (MAX2(storage->array_elements, 1) *
- storage->type->matrix_columns);
- const unsigned vector_size = storage->type->vector_elements;
-
- for (unsigned s = 0; s < vector_count; s++) {
- setup_vec4_uniform_value(uniforms * 4,
- &storage->storage[s * vector_size],
- vector_size);
- uniforms++;
- }
- }
-}
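/* Editorial sketch (not part of the patch) of the prefix matching above,
 * for `uniform vec4 color[2];` with ir->name == "color":
 *
 *   "color"    -> match (storage->name[namelen] == 0)
 *   "color[0]" -> match (next character is '[')
 *   "colormap" -> no match (next character is 'm')
 *
 * Each matching entry then contributes MAX2(array_elements, 1) *
 * matrix_columns vec4 slots via setup_vec4_uniform_value().
 */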
-
-/* Our support for builtin uniforms is even scarier than non-builtin.
- * It sits on top of the PROG_STATE_VAR parameters that are
- * automatically updated from GL context state.
- */
-void
-vec4_visitor::setup_builtin_uniform_values(ir_variable *ir)
-{
- const ir_state_slot *const slots = ir->get_state_slots();
- assert(slots != NULL);
-
- for (unsigned int i = 0; i < ir->get_num_state_slots(); i++) {
- /* This state reference has already been setup by ir_to_mesa,
- * but we'll get the same index back here. We can reference
- * ParameterValues directly, since unlike brw_fs.cpp, we never
- * add new state references during compile.
- */
- int index = _mesa_add_state_reference(this->prog->Parameters,
- (gl_state_index *)slots[i].tokens);
- gl_constant_value *values =
- &this->prog->Parameters->ParameterValues[index][0];
-
- assert(this->uniforms < uniform_array_size);
-
- for (unsigned j = 0; j < 4; j++)
- stage_prog_data->param[this->uniforms * 4 + j] =
- &values[GET_SWZ(slots[i].swizzle, j)];
-
- this->uniform_vector_size[this->uniforms] =
- (ir->type->is_scalar() || ir->type->is_vector() ||
- ir->type->is_matrix() ? ir->type->vector_elements : 4);
-
- this->uniforms++;
- }
-}
-
-dst_reg *
-vec4_visitor::variable_storage(ir_variable *var)
-{
- return (dst_reg *)hash_table_find(this->variable_ht, var);
-}
-
-void
-vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir,
- enum brw_predicate *predicate)
-{
- ir_expression *expr = ir->as_expression();
-
- *predicate = BRW_PREDICATE_NORMAL;
-
- if (expr && expr->operation != ir_binop_ubo_load) {
- src_reg op[3];
- vec4_instruction *inst;
-
- assert(expr->get_num_operands() <= 3);
- for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
- expr->operands[i]->accept(this);
- op[i] = this->result;
-
- resolve_ud_negate(&op[i]);
- }
-
- switch (expr->operation) {
- case ir_unop_logic_not:
- inst = emit(AND(dst_null_d(), op[0], src_reg(1)));
- inst->conditional_mod = BRW_CONDITIONAL_Z;
- break;
-
- case ir_binop_logic_xor:
- if (devinfo->gen <= 5) {
- src_reg temp = src_reg(this, ir->type);
- emit(XOR(dst_reg(temp), op[0], op[1]));
- inst = emit(AND(dst_null_d(), temp, src_reg(1)));
- } else {
- inst = emit(XOR(dst_null_d(), op[0], op[1]));
- }
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
- break;
-
- case ir_binop_logic_or:
- if (devinfo->gen <= 5) {
- src_reg temp = src_reg(this, ir->type);
- emit(OR(dst_reg(temp), op[0], op[1]));
- inst = emit(AND(dst_null_d(), temp, src_reg(1)));
- } else {
- inst = emit(OR(dst_null_d(), op[0], op[1]));
- }
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
- break;
-
- case ir_binop_logic_and:
- if (devinfo->gen <= 5) {
- src_reg temp = src_reg(this, ir->type);
- emit(AND(dst_reg(temp), op[0], op[1]));
- inst = emit(AND(dst_null_d(), temp, src_reg(1)));
- } else {
- inst = emit(AND(dst_null_d(), op[0], op[1]));
- }
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
- break;
-
- case ir_unop_f2b:
- if (devinfo->gen >= 6) {
- emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
- } else {
- inst = emit(MOV(dst_null_f(), op[0]));
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
- }
- break;
-
- case ir_unop_i2b:
- if (devinfo->gen >= 6) {
- emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
- } else {
- inst = emit(MOV(dst_null_d(), op[0]));
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
- }
- break;
-
- case ir_binop_all_equal:
- if (devinfo->gen <= 5) {
- resolve_bool_comparison(expr->operands[0], &op[0]);
- resolve_bool_comparison(expr->operands[1], &op[1]);
- }
- inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
- *predicate = BRW_PREDICATE_ALIGN16_ALL4H;
- break;
-
- case ir_binop_any_nequal:
- if (devinfo->gen <= 5) {
- resolve_bool_comparison(expr->operands[0], &op[0]);
- resolve_bool_comparison(expr->operands[1], &op[1]);
- }
- inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
- *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
- break;
-
- case ir_unop_any:
- if (devinfo->gen <= 5) {
- resolve_bool_comparison(expr->operands[0], &op[0]);
- }
- inst = emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
- *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
- break;
-
- case ir_binop_greater:
- case ir_binop_gequal:
- case ir_binop_less:
- case ir_binop_lequal:
- case ir_binop_equal:
- case ir_binop_nequal:
- if (devinfo->gen <= 5) {
- resolve_bool_comparison(expr->operands[0], &op[0]);
- resolve_bool_comparison(expr->operands[1], &op[1]);
- }
- emit(CMP(dst_null_d(), op[0], op[1],
- brw_conditional_for_comparison(expr->operation)));
- break;
-
- case ir_triop_csel: {
- /* Expand the boolean condition into the flag register. */
- inst = emit(MOV(dst_null_d(), op[0]));
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
-
- /* Select which boolean to return. */
- dst_reg temp(this, expr->operands[1]->type);
- inst = emit(BRW_OPCODE_SEL, temp, op[1], op[2]);
- inst->predicate = BRW_PREDICATE_NORMAL;
-
- /* Expand the result to a condition code. */
- inst = emit(MOV(dst_null_d(), src_reg(temp)));
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
- break;
- }
-
- default:
- unreachable("not reached");
- }
- return;
- }
-
- ir->accept(this);
-
- resolve_ud_negate(&this->result);
-
- vec4_instruction *inst = emit(AND(dst_null_d(), this->result, src_reg(1)));
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
-}
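/* Editorial usage sketch: callers fold a boolean rvalue into the flag
 * register and then predicate the next instruction on it, as the csel
 * handling in visit(ir_expression) does:
 *
 *   enum brw_predicate predicate;
 *   emit_bool_to_cond_code(ir->operands[0], &predicate);
 *   inst = emit(BRW_OPCODE_SEL, result_dst, op[1], op[2]);
 *   inst->predicate = predicate;
 */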
-
-/**
- * Emit a gen6 IF statement with the comparison folded into the IF
- * instruction.
- */
-void
-vec4_visitor::emit_if_gen6(ir_if *ir)
-{
- ir_expression *expr = ir->condition->as_expression();
-
- if (expr && expr->operation != ir_binop_ubo_load) {
- src_reg op[3];
- dst_reg temp;
-
- assert(expr->get_num_operands() <= 3);
- for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
- expr->operands[i]->accept(this);
- op[i] = this->result;
- }
-
- switch (expr->operation) {
- case ir_unop_logic_not:
- emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_Z));
- return;
-
- case ir_binop_logic_xor:
- emit(IF(op[0], op[1], BRW_CONDITIONAL_NZ));
- return;
-
- case ir_binop_logic_or:
- temp = dst_reg(this, glsl_type::bool_type);
- emit(OR(temp, op[0], op[1]));
- emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
- return;
-
- case ir_binop_logic_and:
- temp = dst_reg(this, glsl_type::bool_type);
- emit(AND(temp, op[0], op[1]));
- emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
- return;
-
- case ir_unop_f2b:
- emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
- return;
-
- case ir_unop_i2b:
- emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
- return;
-
- case ir_binop_greater:
- case ir_binop_gequal:
- case ir_binop_less:
- case ir_binop_lequal:
- case ir_binop_equal:
- case ir_binop_nequal:
- emit(IF(op[0], op[1],
- brw_conditional_for_comparison(expr->operation)));
- return;
-
- case ir_binop_all_equal:
- emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
- emit(IF(BRW_PREDICATE_ALIGN16_ALL4H));
- return;
-
- case ir_binop_any_nequal:
- emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
- emit(IF(BRW_PREDICATE_ALIGN16_ANY4H));
- return;
-
- case ir_unop_any:
- emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
- emit(IF(BRW_PREDICATE_ALIGN16_ANY4H));
- return;
-
- case ir_triop_csel: {
- /* Expand the boolean condition into the flag register. */
- vec4_instruction *inst = emit(MOV(dst_null_d(), op[0]));
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
-
- /* Select which boolean to return. */
- dst_reg temp(this, expr->operands[1]->type);
- inst = emit(BRW_OPCODE_SEL, temp, op[1], op[2]);
- inst->predicate = BRW_PREDICATE_NORMAL;
-
- emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
- return;
- }
-
- default:
- unreachable("not reached");
- }
- return;
- }
-
- ir->condition->accept(this);
-
- emit(IF(this->result, src_reg(0), BRW_CONDITIONAL_NZ));
-}
-
-void
-vec4_visitor::visit(ir_variable *ir)
-{
- dst_reg *reg = NULL;
-
- if (variable_storage(ir))
- return;
-
- switch (ir->data.mode) {
- case ir_var_shader_in:
- assert(ir->data.location != -1);
- reg = new(mem_ctx) dst_reg(ATTR, ir->data.location);
- break;
-
- case ir_var_shader_out:
- assert(ir->data.location != -1);
- reg = new(mem_ctx) dst_reg(this, ir->type);
-
- for (int i = 0; i < type_size_vec4(ir->type); i++) {
- output_reg[ir->data.location + i] = *reg;
- output_reg[ir->data.location + i].reg_offset = i;
- output_reg_annotation[ir->data.location + i] = ir->name;
- }
- break;
-
- case ir_var_auto:
- case ir_var_temporary:
- reg = new(mem_ctx) dst_reg(this, ir->type);
- break;
-
- case ir_var_uniform:
- case ir_var_shader_storage:
- reg = new(this->mem_ctx) dst_reg(UNIFORM, this->uniforms);
-
- /* Thanks to the lower_ubo_reference pass, we will see only
- * ir_binop_{ubo,ssbo}_load expressions and not ir_dereference_variable
- * for UBO/SSBO variables, so no need for them to be in variable_ht.
- *
- * Some uniforms, such as samplers and atomic counters, have no actual
- * storage, so we should ignore them.
- */
- if (ir->is_in_buffer_block() || type_size_vec4(ir->type) == 0)
- return;
-
- /* Track how big the whole uniform variable is, in case we need to put a
- * copy of its data into pull constants for array access.
- */
- assert(this->uniforms < uniform_array_size);
- this->uniform_size[this->uniforms] = type_size_vec4(ir->type);
-
- if (!strncmp(ir->name, "gl_", 3)) {
- setup_builtin_uniform_values(ir);
- } else {
- setup_uniform_values(ir);
- }
- break;
-
- case ir_var_system_value:
- reg = make_reg_for_system_value(ir->data.location, ir->type);
- break;
-
- default:
- unreachable("not reached");
- }
-
- reg->type = brw_type_for_base_type(ir->type);
- hash_table_insert(this->variable_ht, reg, ir);
-}
-
-void
-vec4_visitor::visit(ir_loop *ir)
-{
- /* We don't want debugging output to print the whole body of the
- * loop as the annotation.
- */
- this->base_ir = NULL;
-
- emit(BRW_OPCODE_DO);
-
- visit_instructions(&ir->body_instructions);
-
- emit(BRW_OPCODE_WHILE);
-}
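/* Editorial note: together with visit(ir_loop_jump) below, a GLSL loop
 * lowers to the bracketed sequence
 *
 *   DO
 *     ...body...        // BREAK / CONTINUE emitted for loop jumps
 *   WHILE
 */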
-
-void
-vec4_visitor::visit(ir_loop_jump *ir)
-{
- switch (ir->mode) {
- case ir_loop_jump::jump_break:
- emit(BRW_OPCODE_BREAK);
- break;
- case ir_loop_jump::jump_continue:
- emit(BRW_OPCODE_CONTINUE);
- break;
- }
-}
-
-
-void
-vec4_visitor::visit(ir_function_signature *)
-{
- unreachable("not reached");
-}
-
-void
-vec4_visitor::visit(ir_function *ir)
-{
- /* Ignore function bodies other than main() -- we shouldn't see calls to
- * them since they should all be inlined.
- */
- if (strcmp(ir->name, "main") == 0) {
- const ir_function_signature *sig;
- exec_list empty;
-
- sig = ir->matching_signature(NULL, &empty, false);
-
- assert(sig);
-
- visit_instructions(&sig->body);
- }
-}
-
-bool
-vec4_visitor::try_emit_mad(ir_expression *ir)
-{
- /* 3-src instructions were introduced in gen6. */
- if (devinfo->gen < 6)
- return false;
-
- /* MAD can only handle floating-point data. */
- if (ir->type->base_type != GLSL_TYPE_FLOAT)
- return false;
-
- ir_rvalue *nonmul;
- ir_expression *mul;
- bool mul_negate, mul_abs;
-
- for (int i = 0; i < 2; i++) {
- mul_negate = false;
- mul_abs = false;
-
- mul = ir->operands[i]->as_expression();
- nonmul = ir->operands[1 - i];
-
- if (mul && mul->operation == ir_unop_abs) {
- mul = mul->operands[0]->as_expression();
- mul_abs = true;
- } else if (mul && mul->operation == ir_unop_neg) {
- mul = mul->operands[0]->as_expression();
- mul_negate = true;
- }
-
- if (mul && mul->operation == ir_binop_mul)
- break;
- }
-
- if (!mul || mul->operation != ir_binop_mul)
- return false;
-
- nonmul->accept(this);
- src_reg src0 = fix_3src_operand(this->result);
-
- mul->operands[0]->accept(this);
- src_reg src1 = fix_3src_operand(this->result);
- src1.negate ^= mul_negate;
- src1.abs = mul_abs;
- if (mul_abs)
- src1.negate = false;
-
- mul->operands[1]->accept(this);
- src_reg src2 = fix_3src_operand(this->result);
- src2.abs = mul_abs;
- if (mul_abs)
- src2.negate = false;
-
- this->result = src_reg(this, ir->type);
- emit(BRW_OPCODE_MAD, dst_reg(this->result), src0, src1, src2);
-
- return true;
-}
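/* Editorial worked example: for `a + b * c` the loop above finds the
 * multiply in either add operand and emits a single fused instruction,
 * with the non-multiply operand in src0:
 *
 *   MAD dst, a, b, c        // dst = b * c + a
 *
 * A negated multiply, `a + -(b * c)`, folds into src1.negate instead of
 * costing an extra MOV.
 */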
-
-bool
-vec4_visitor::try_emit_b2f_of_compare(ir_expression *ir)
-{
- /* This optimization relies on CMP setting the destination to 0 when
- * false. Early hardware only sets the least significant bit, and
- * leaves the other bits undefined. So we can't use it.
- */
- if (devinfo->gen < 6)
- return false;
-
- ir_expression *const cmp = ir->operands[0]->as_expression();
-
- if (cmp == NULL)
- return false;
-
- switch (cmp->operation) {
- case ir_binop_less:
- case ir_binop_greater:
- case ir_binop_lequal:
- case ir_binop_gequal:
- case ir_binop_equal:
- case ir_binop_nequal:
- break;
-
- default:
- return false;
- }
-
- cmp->operands[0]->accept(this);
- const src_reg cmp_src0 = this->result;
-
- cmp->operands[1]->accept(this);
- const src_reg cmp_src1 = this->result;
-
- this->result = src_reg(this, ir->type);
-
- emit(CMP(dst_reg(this->result), cmp_src0, cmp_src1,
- brw_conditional_for_comparison(cmp->operation)));
-
- /* If the comparison is false, this->result will just happen to be zero.
- */
- vec4_instruction *const inst = emit(BRW_OPCODE_SEL, dst_reg(this->result),
- this->result, src_reg(1.0f));
- inst->predicate = BRW_PREDICATE_NORMAL;
- inst->predicate_inverse = true;
-
- return true;
-}
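/* Editorial worked example: `float(a < b)` becomes
 *
 *   CMP.L dst, a, b          // writes 0 where the comparison fails
 *   SEL   dst, dst, 1.0f     // inverse-predicated
 *
 * The inverse-predicated SEL keeps the 0 channels and overwrites the
 * "true" channels with 1.0f, so no separate b2f conversion is needed.
 */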
-
-vec4_instruction *
-vec4_visitor::emit_minmax(enum brw_conditional_mod conditionalmod, dst_reg dst,
- src_reg src0, src_reg src1)
-{
- vec4_instruction *inst;
-
- if (devinfo->gen >= 6) {
- inst = emit(BRW_OPCODE_SEL, dst, src0, src1);
- inst->conditional_mod = conditionalmod;
- } else {
- emit(CMP(dst, src0, src1, conditionalmod));
-
- inst = emit(BRW_OPCODE_SEL, dst, src0, src1);
- inst->predicate = BRW_PREDICATE_NORMAL;
- }
-
- return inst;
-}
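/* Editorial sketch for min(a, b): Gen6+ uses a conditional-mod SEL, while
 * Gen4-5 must set the flag with a CMP first:
 *
 *   Gen6+:   SEL.L dst, a, b          // dst = a < b ? a : b
 *   Gen4-5:  CMP.L dst, a, b
 *            (+f0) SEL dst, a, b
 */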
-
-vec4_instruction *
-vec4_visitor::emit_lrp(const dst_reg &dst,
- const src_reg &x, const src_reg &y, const src_reg &a)
-{
- if (devinfo->gen >= 6) {
- /* Note that the instruction's argument order is reversed from GLSL
- * and the IR.
- */
- return emit(LRP(dst, fix_3src_operand(a), fix_3src_operand(y),
- fix_3src_operand(x)));
- } else {
- /* Earlier generations don't support three source operations, so we
- * need to emit x*(1-a) + y*a.
- */
- dst_reg y_times_a = dst_reg(this, glsl_type::vec4_type);
- dst_reg one_minus_a = dst_reg(this, glsl_type::vec4_type);
- dst_reg x_times_one_minus_a = dst_reg(this, glsl_type::vec4_type);
- y_times_a.writemask = dst.writemask;
- one_minus_a.writemask = dst.writemask;
- x_times_one_minus_a.writemask = dst.writemask;
-
- emit(MUL(y_times_a, y, a));
- emit(ADD(one_minus_a, negate(a), src_reg(1.0f)));
- emit(MUL(x_times_one_minus_a, x, src_reg(one_minus_a)));
- return emit(ADD(dst, src_reg(x_times_one_minus_a), src_reg(y_times_a)));
- }
-}
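/* Editorial check of the pre-Gen6 expansion, lrp(x, y, a) = x*(1-a) + y*a,
 * with x = 2, y = 6, a = 0.25:
 *
 *   y_times_a           = 6 * 0.25    = 1.5
 *   one_minus_a         = -0.25 + 1.0 = 0.75
 *   x_times_one_minus_a = 2 * 0.75    = 1.5
 *   dst                 = 1.5 + 1.5   = 3.0  (== mix(2.0, 6.0, 0.25))
 */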
-
-/**
- * Emits the instructions needed to perform a pull constant load. before_block
- * and before_inst can be NULL, in which case the instructions will be
- * appended to the end of the instruction list.
- */
-void
-vec4_visitor::emit_pull_constant_load_reg(dst_reg dst,
- src_reg surf_index,
- src_reg offset_reg,
- bblock_t *before_block,
- vec4_instruction *before_inst)
-{
- assert((before_inst == NULL && before_block == NULL) ||
- (before_inst && before_block));
-
- vec4_instruction *pull;
-
- if (devinfo->gen >= 9) {
- /* Gen9+ needs a message header in order to use SIMD4x2 mode */
- src_reg header(this, glsl_type::uvec4_type, 2);
-
- pull = new(mem_ctx)
- vec4_instruction(VS_OPCODE_SET_SIMD4X2_HEADER_GEN9,
- dst_reg(header));
-
- if (before_inst)
- emit_before(before_block, before_inst, pull);
- else
- emit(pull);
-
- dst_reg index_reg = retype(offset(dst_reg(header), 1),
- offset_reg.type);
- pull = MOV(writemask(index_reg, WRITEMASK_X), offset_reg);
-
- if (before_inst)
- emit_before(before_block, before_inst, pull);
- else
- emit(pull);
-
- pull = new(mem_ctx) vec4_instruction(VS_OPCODE_PULL_CONSTANT_LOAD_GEN7,
- dst,
- surf_index,
- header);
- pull->mlen = 2;
- pull->header_size = 1;
- } else if (devinfo->gen >= 7) {
- dst_reg grf_offset = dst_reg(this, glsl_type::int_type);
-
- grf_offset.type = offset_reg.type;
-
- pull = MOV(grf_offset, offset_reg);
-
- if (before_inst)
- emit_before(before_block, before_inst, pull);
- else
- emit(pull);
-
- pull = new(mem_ctx) vec4_instruction(VS_OPCODE_PULL_CONSTANT_LOAD_GEN7,
- dst,
- surf_index,
- src_reg(grf_offset));
- pull->mlen = 1;
- } else {
- pull = new(mem_ctx) vec4_instruction(VS_OPCODE_PULL_CONSTANT_LOAD,
- dst,
- surf_index,
- offset_reg);
- pull->base_mrf = FIRST_SPILL_MRF(devinfo->gen) + 1;
- pull->mlen = 1;
- }
-
- if (before_inst)
- emit_before(before_block, before_inst, pull);
- else
- emit(pull);
-}
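/* Editorial summary of the three message forms above (mnemonics are
 * descriptive, not exact disassembly):
 *
 *   Gen9+:  build the SIMD4x2 header, MOV the offset into header.1.x, then
 *           PULL_CONSTANT_LOAD_GEN7 dst, surf_index, header     (mlen 2)
 *   Gen7-8: MOV grf_offset, offset_reg, then
 *           PULL_CONSTANT_LOAD_GEN7 dst, surf_index, grf_offset (mlen 1)
 *   Gen4-6: PULL_CONSTANT_LOAD dst, surf_index, offset_reg via an MRF
 *           at FIRST_SPILL_MRF + 1                              (mlen 1)
 */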
-
-src_reg
-vec4_visitor::emit_uniformize(const src_reg &src)
-{
- const src_reg chan_index(this, glsl_type::uint_type);
- const dst_reg dst = retype(dst_reg(this, glsl_type::uint_type),
- src.type);
-
- emit(SHADER_OPCODE_FIND_LIVE_CHANNEL, dst_reg(chan_index))
- ->force_writemask_all = true;
- emit(SHADER_OPCODE_BROADCAST, dst, src, chan_index)
- ->force_writemask_all = true;
-
- return src_reg(dst);
-}
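/* Editorial sketch: emit_uniformize() turns a per-channel value into one
 * that is uniform across execution channels, e.g. a dynamically indexed
 * surface index:
 *
 *   FIND_LIVE_CHANNEL chan_index          // pick any enabled channel
 *   BROADCAST dst, src, chan_index        // splat that channel's value
 *
 * Both run with force_writemask_all so every channel of dst is written.
 */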
-
-void
-vec4_visitor::visit(ir_expression *ir)
-{
- unsigned int operand;
- src_reg op[ARRAY_SIZE(ir->operands)];
- vec4_instruction *inst;
-
- if (ir->operation == ir_binop_add) {
- if (try_emit_mad(ir))
- return;
- }
-
- if (ir->operation == ir_unop_b2f) {
- if (try_emit_b2f_of_compare(ir))
- return;
- }
-
- /* Storage for our result. Ideally for an assignment we'd be using
- * the actual storage for the result here, instead.
- */
- dst_reg result_dst(this, ir->type);
- src_reg result_src(result_dst);
-
- if (ir->operation == ir_triop_csel) {
- ir->operands[1]->accept(this);
- op[1] = this->result;
- ir->operands[2]->accept(this);
- op[2] = this->result;
-
- enum brw_predicate predicate;
- emit_bool_to_cond_code(ir->operands[0], &predicate);
- inst = emit(BRW_OPCODE_SEL, result_dst, op[1], op[2]);
- inst->predicate = predicate;
- this->result = result_src;
- return;
- }
-
- for (operand = 0; operand < ir->get_num_operands(); operand++) {
- this->result.file = BAD_FILE;
- ir->operands[operand]->accept(this);
- if (this->result.file == BAD_FILE) {
- fprintf(stderr, "Failed to get tree for expression operand:\n");
- ir->operands[operand]->fprint(stderr);
- exit(1);
- }
- op[operand] = this->result;
-
- /* Matrix expression operands should have been broken down to vector
- * operations already.
- */
- assert(!ir->operands[operand]->type->is_matrix());
- }
-
- /* If nothing special happens, this is the result. */
- this->result = result_src;
-
- switch (ir->operation) {
- case ir_unop_logic_not:
- emit(NOT(result_dst, op[0]));
- break;
- case ir_unop_neg:
- op[0].negate = !op[0].negate;
- emit(MOV(result_dst, op[0]));
- break;
- case ir_unop_abs:
- op[0].abs = true;
- op[0].negate = false;
- emit(MOV(result_dst, op[0]));
- break;
-
- case ir_unop_sign:
- if (ir->type->is_float()) {
- /* AND(val, 0x80000000) gives the sign bit.
- *
- * A predicated OR then ORs 1.0 (0x3f800000) with the sign bit if val is
- * not zero.
- */
- emit(CMP(dst_null_f(), op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
-
- op[0].type = BRW_REGISTER_TYPE_UD;
- result_dst.type = BRW_REGISTER_TYPE_UD;
- emit(AND(result_dst, op[0], src_reg(0x80000000u)));
-
- inst = emit(OR(result_dst, src_reg(result_dst), src_reg(0x3f800000u)));
- inst->predicate = BRW_PREDICATE_NORMAL;
-
- this->result.type = BRW_REGISTER_TYPE_F;
- } else {
- /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
- * -> non-negative val generates 0x00000000.
- * Predicated OR sets 1 if val is positive.
- */
- emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_G));
-
- emit(ASR(result_dst, op[0], src_reg(31)));
-
- inst = emit(OR(result_dst, src_reg(result_dst), src_reg(1)));
- inst->predicate = BRW_PREDICATE_NORMAL;
- }
- break;
-
- case ir_unop_rcp:
- emit_math(SHADER_OPCODE_RCP, result_dst, op[0]);
- break;
-
- case ir_unop_exp2:
- emit_math(SHADER_OPCODE_EXP2, result_dst, op[0]);
- break;
- case ir_unop_log2:
- emit_math(SHADER_OPCODE_LOG2, result_dst, op[0]);
- break;
- case ir_unop_exp:
- case ir_unop_log:
- unreachable("not reached: should be handled by ir_explog_to_explog2");
- case ir_unop_sin:
- emit_math(SHADER_OPCODE_SIN, result_dst, op[0]);
- break;
- case ir_unop_cos:
- emit_math(SHADER_OPCODE_COS, result_dst, op[0]);
- break;
-
- case ir_unop_dFdx:
- case ir_unop_dFdx_coarse:
- case ir_unop_dFdx_fine:
- case ir_unop_dFdy:
- case ir_unop_dFdy_coarse:
- case ir_unop_dFdy_fine:
- unreachable("derivatives not valid in vertex shader");
-
- case ir_unop_bitfield_reverse:
- emit(BFREV(result_dst, op[0]));
- break;
- case ir_unop_bit_count:
- emit(CBIT(result_dst, op[0]));
- break;
- case ir_unop_find_msb: {
- src_reg temp = src_reg(this, glsl_type::uint_type);
-
- inst = emit(FBH(dst_reg(temp), op[0]));
- inst->dst.writemask = WRITEMASK_XYZW;
-
- /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
- * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
- * subtract the result from 31 to convert the MSB count into an LSB count.
- */
-
- /* FBH only supports UD type for dst, so use a MOV to convert UD to D. */
- temp.swizzle = BRW_SWIZZLE_NOOP;
- emit(MOV(result_dst, temp));
-
- src_reg src_tmp = src_reg(result_dst);
- emit(CMP(dst_null_d(), src_tmp, src_reg(-1), BRW_CONDITIONAL_NZ));
-
- src_tmp.negate = true;
- inst = emit(ADD(result_dst, src_tmp, src_reg(31)));
- inst->predicate = BRW_PREDICATE_NORMAL;
- break;
- }
- case ir_unop_find_lsb:
- emit(FBL(result_dst, op[0]));
- break;
- case ir_unop_saturate:
- inst = emit(MOV(result_dst, op[0]));
- inst->saturate = true;
- break;
-
- case ir_unop_noise:
- unreachable("not reached: should be handled by lower_noise");
-
- case ir_unop_subroutine_to_int:
- emit(MOV(result_dst, op[0]));
- break;
-
- case ir_unop_ssbo_unsized_array_length:
- unreachable("not reached: should be handled by lower_ubo_reference");
- break;
-
- case ir_binop_add:
- emit(ADD(result_dst, op[0], op[1]));
- break;
- case ir_binop_sub:
- unreachable("not reached: should be handled by ir_sub_to_add_neg");
-
- case ir_binop_mul:
- if (devinfo->gen < 8 && ir->type->is_integer()) {
- /* For integer multiplication, the MUL uses the low 16 bits of one of
- * the operands (src0 through SNB, src1 on IVB and later). The MACH
- * accumulates the contribution of the upper 16 bits of that
- * operand. If we can determine that one of the args is in the low
- * 16 bits, though, we can just emit a single MUL.
- */
- if (ir->operands[0]->is_uint16_constant()) {
- if (devinfo->gen < 7)
- emit(MUL(result_dst, op[0], op[1]));
- else
- emit(MUL(result_dst, op[1], op[0]));
- } else if (ir->operands[1]->is_uint16_constant()) {
- if (devinfo->gen < 7)
- emit(MUL(result_dst, op[1], op[0]));
- else
- emit(MUL(result_dst, op[0], op[1]));
- } else {
- struct brw_reg acc = retype(brw_acc_reg(8), result_dst.type);
-
- emit(MUL(acc, op[0], op[1]));
- emit(MACH(dst_null_d(), op[0], op[1]));
- emit(MOV(result_dst, src_reg(acc)));
- }
- } else {
- emit(MUL(result_dst, op[0], op[1]));
- }
- break;
- case ir_binop_imul_high: {
- struct brw_reg acc = retype(brw_acc_reg(8), result_dst.type);
-
- emit(MUL(acc, op[0], op[1]));
- emit(MACH(result_dst, op[0], op[1]));
- break;
- }
- case ir_binop_div:
- /* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */
- assert(ir->type->is_integer());
- emit_math(SHADER_OPCODE_INT_QUOTIENT, result_dst, op[0], op[1]);
- break;
-
- case ir_binop_carry:
- unreachable("Should have been lowered by carry_to_arith().");
-
- case ir_binop_borrow:
- unreachable("Should have been lowered by borrow_to_arith().");
-
- case ir_binop_mod:
- /* Floating point should be lowered by MOD_TO_FLOOR in the compiler. */
- assert(ir->type->is_integer());
- emit_math(SHADER_OPCODE_INT_REMAINDER, result_dst, op[0], op[1]);
- break;
-
- case ir_binop_less:
- case ir_binop_greater:
- case ir_binop_lequal:
- case ir_binop_gequal:
- case ir_binop_equal:
- case ir_binop_nequal: {
- if (devinfo->gen <= 5) {
- resolve_bool_comparison(ir->operands[0], &op[0]);
- resolve_bool_comparison(ir->operands[1], &op[1]);
- }
- emit(CMP(result_dst, op[0], op[1],
- brw_conditional_for_comparison(ir->operation)));
- break;
- }
-
- case ir_binop_all_equal:
- if (devinfo->gen <= 5) {
- resolve_bool_comparison(ir->operands[0], &op[0]);
- resolve_bool_comparison(ir->operands[1], &op[1]);
- }
-
- /* "==" operator producing a scalar boolean. */
- if (ir->operands[0]->type->is_vector() ||
- ir->operands[1]->type->is_vector()) {
- emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
- emit(MOV(result_dst, src_reg(0)));
- inst = emit(MOV(result_dst, src_reg(~0)));
- inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
- } else {
- emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_Z));
- }
- break;
- case ir_binop_any_nequal:
- if (devinfo->gen <= 5) {
- resolve_bool_comparison(ir->operands[0], &op[0]);
- resolve_bool_comparison(ir->operands[1], &op[1]);
- }
-
- /* "!=" operator producing a scalar boolean. */
- if (ir->operands[0]->type->is_vector() ||
- ir->operands[1]->type->is_vector()) {
- emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
-
- emit(MOV(result_dst, src_reg(0)));
- inst = emit(MOV(result_dst, src_reg(~0)));
- inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
- } else {
- emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_NZ));
- }
- break;
-
- case ir_unop_any:
- if (devinfo->gen <= 5) {
- resolve_bool_comparison(ir->operands[0], &op[0]);
- }
- emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
- emit(MOV(result_dst, src_reg(0)));
-
- inst = emit(MOV(result_dst, src_reg(~0)));
- inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
- break;
-
- case ir_binop_logic_xor:
- emit(XOR(result_dst, op[0], op[1]));
- break;
-
- case ir_binop_logic_or:
- emit(OR(result_dst, op[0], op[1]));
- break;
-
- case ir_binop_logic_and:
- emit(AND(result_dst, op[0], op[1]));
- break;
-
- case ir_binop_dot:
- assert(ir->operands[0]->type->is_vector());
- assert(ir->operands[0]->type == ir->operands[1]->type);
- emit_dp(result_dst, op[0], op[1], ir->operands[0]->type->vector_elements);
- break;
-
- case ir_unop_sqrt:
- emit_math(SHADER_OPCODE_SQRT, result_dst, op[0]);
- break;
- case ir_unop_rsq:
- emit_math(SHADER_OPCODE_RSQ, result_dst, op[0]);
- break;
-
- case ir_unop_bitcast_i2f:
- case ir_unop_bitcast_u2f:
- this->result = op[0];
- this->result.type = BRW_REGISTER_TYPE_F;
- break;
-
- case ir_unop_bitcast_f2i:
- this->result = op[0];
- this->result.type = BRW_REGISTER_TYPE_D;
- break;
-
- case ir_unop_bitcast_f2u:
- this->result = op[0];
- this->result.type = BRW_REGISTER_TYPE_UD;
- break;
-
- case ir_unop_i2f:
- case ir_unop_i2u:
- case ir_unop_u2i:
- case ir_unop_u2f:
- case ir_unop_f2i:
- case ir_unop_f2u:
- emit(MOV(result_dst, op[0]));
- break;
- case ir_unop_b2i:
- case ir_unop_b2f:
- if (devinfo->gen <= 5) {
- resolve_bool_comparison(ir->operands[0], &op[0]);
- }
- emit(MOV(result_dst, negate(op[0])));
- break;
- case ir_unop_f2b:
- emit(CMP(result_dst, op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
- break;
- case ir_unop_i2b:
- emit(CMP(result_dst, op[0], src_reg(0), BRW_CONDITIONAL_NZ));
- break;
-
- case ir_unop_trunc:
- emit(RNDZ(result_dst, op[0]));
- break;
- case ir_unop_ceil: {
- src_reg tmp = src_reg(this, ir->type);
- op[0].negate = !op[0].negate;
- emit(RNDD(dst_reg(tmp), op[0]));
- tmp.negate = true;
- emit(MOV(result_dst, tmp));
- }
- break;
- case ir_unop_floor:
- inst = emit(RNDD(result_dst, op[0]));
- break;
- case ir_unop_fract:
- inst = emit(FRC(result_dst, op[0]));
- break;
- case ir_unop_round_even:
- emit(RNDE(result_dst, op[0]));
- break;
-
- case ir_unop_get_buffer_size:
- unreachable("not reached: not implemented");
- break;
-
- case ir_binop_min:
- emit_minmax(BRW_CONDITIONAL_L, result_dst, op[0], op[1]);
- break;
- case ir_binop_max:
- emit_minmax(BRW_CONDITIONAL_GE, result_dst, op[0], op[1]);
- break;
-
- case ir_binop_pow:
- emit_math(SHADER_OPCODE_POW, result_dst, op[0], op[1]);
- break;
-
- case ir_unop_bit_not:
- inst = emit(NOT(result_dst, op[0]));
- break;
- case ir_binop_bit_and:
- inst = emit(AND(result_dst, op[0], op[1]));
- break;
- case ir_binop_bit_xor:
- inst = emit(XOR(result_dst, op[0], op[1]));
- break;
- case ir_binop_bit_or:
- inst = emit(OR(result_dst, op[0], op[1]));
- break;
-
- case ir_binop_lshift:
- inst = emit(SHL(result_dst, op[0], op[1]));
- break;
-
- case ir_binop_rshift:
- if (ir->type->base_type == GLSL_TYPE_INT)
- inst = emit(ASR(result_dst, op[0], op[1]));
- else
- inst = emit(SHR(result_dst, op[0], op[1]));
- break;
-
- case ir_binop_bfm:
- emit(BFI1(result_dst, op[0], op[1]));
- break;
-
- case ir_binop_ubo_load: {
- ir_constant *const_uniform_block = ir->operands[0]->as_constant();
- ir_constant *const_offset_ir = ir->operands[1]->as_constant();
- unsigned const_offset = const_offset_ir ? const_offset_ir->value.u[0] : 0;
- src_reg offset;
-
- /* Now, load the vector from that offset. */
- assert(ir->type->is_vector() || ir->type->is_scalar());
-
- src_reg packed_consts = src_reg(this, glsl_type::vec4_type);
- packed_consts.type = result.type;
- src_reg surf_index;
-
- if (const_uniform_block) {
- /* The block index is a constant, so just emit the binding table entry
- * as an immediate.
- */
- surf_index = src_reg(prog_data->base.binding_table.ubo_start +
- const_uniform_block->value.u[0]);
- } else {
- /* The block index is not a constant. Evaluate the index expression
- * per-channel and add the base UBO index; we have to select a value
- * from any live channel.
- */
- surf_index = src_reg(this, glsl_type::uint_type);
- emit(ADD(dst_reg(surf_index), op[0],
- src_reg(prog_data->base.binding_table.ubo_start)));
- surf_index = emit_uniformize(surf_index);
-
- /* Assume this may touch any UBO. It would be nice to provide
- * a tighter bound, but the array information is already lowered away.
- */
- brw_mark_surface_used(&prog_data->base,
- prog_data->base.binding_table.ubo_start +
- shader_prog->NumBufferInterfaceBlocks - 1);
- }
-
- if (const_offset_ir) {
- if (devinfo->gen >= 8) {
- /* Store the offset in a GRF so we can send-from-GRF. */
- offset = src_reg(this, glsl_type::int_type);
- emit(MOV(dst_reg(offset), src_reg(const_offset / 16)));
- } else {
- /* Immediates are fine on older generations since they'll be moved
- * to a (potentially fake) MRF at the generator level.
- */
- offset = src_reg(const_offset / 16);
- }
- } else {
- offset = src_reg(this, glsl_type::uint_type);
- emit(SHR(dst_reg(offset), op[1], src_reg(4u)));
- }
-
- emit_pull_constant_load_reg(dst_reg(packed_consts),
- surf_index,
- offset,
- NULL, NULL /* before_block/inst */);
-
- packed_consts.swizzle = brw_swizzle_for_size(ir->type->vector_elements);
- packed_consts.swizzle += BRW_SWIZZLE4(const_offset % 16 / 4,
- const_offset % 16 / 4,
- const_offset % 16 / 4,
- const_offset % 16 / 4);
-
- /* UBO bools are any nonzero int. We need to convert them to 0/~0. */
- if (ir->type->base_type == GLSL_TYPE_BOOL) {
- emit(CMP(result_dst, packed_consts, src_reg(0u),
- BRW_CONDITIONAL_NZ));
- } else {
- emit(MOV(result_dst, packed_consts));
- }
- break;
- }
-
- case ir_binop_vector_extract:
- unreachable("should have been lowered by vec_index_to_cond_assign");
-
- case ir_triop_fma:
- op[0] = fix_3src_operand(op[0]);
- op[1] = fix_3src_operand(op[1]);
- op[2] = fix_3src_operand(op[2]);
- /* Note that the instruction's argument order is reversed from GLSL
- * and the IR.
- */
- emit(MAD(result_dst, op[2], op[1], op[0]));
- break;
-
- case ir_triop_lrp:
- emit_lrp(result_dst, op[0], op[1], op[2]);
- break;
-
- case ir_triop_csel:
- unreachable("already handled above");
- break;
-
- case ir_triop_bfi:
- op[0] = fix_3src_operand(op[0]);
- op[1] = fix_3src_operand(op[1]);
- op[2] = fix_3src_operand(op[2]);
- emit(BFI2(result_dst, op[0], op[1], op[2]));
- break;
-
- case ir_triop_bitfield_extract:
- op[0] = fix_3src_operand(op[0]);
- op[1] = fix_3src_operand(op[1]);
- op[2] = fix_3src_operand(op[2]);
- /* Note that the instruction's argument order is reversed from GLSL
- * and the IR.
- */
- emit(BFE(result_dst, op[2], op[1], op[0]));
- break;
-
- case ir_triop_vector_insert:
- unreachable("should have been lowered by lower_vector_insert");
-
- case ir_quadop_bitfield_insert:
- unreachable("not reached: should be handled by "
- "bitfield_insert_to_bfm_bfi\n");
-
- case ir_quadop_vector:
- unreachable("not reached: should be handled by lower_quadop_vector");
-
- case ir_unop_pack_half_2x16:
- emit_pack_half_2x16(result_dst, op[0]);
- break;
- case ir_unop_unpack_half_2x16:
- emit_unpack_half_2x16(result_dst, op[0]);
- break;
- case ir_unop_unpack_unorm_4x8:
- emit_unpack_unorm_4x8(result_dst, op[0]);
- break;
- case ir_unop_unpack_snorm_4x8:
- emit_unpack_snorm_4x8(result_dst, op[0]);
- break;
- case ir_unop_pack_unorm_4x8:
- emit_pack_unorm_4x8(result_dst, op[0]);
- break;
- case ir_unop_pack_snorm_4x8:
- emit_pack_snorm_4x8(result_dst, op[0]);
- break;
- case ir_unop_pack_snorm_2x16:
- case ir_unop_pack_unorm_2x16:
- case ir_unop_unpack_snorm_2x16:
- case ir_unop_unpack_unorm_2x16:
- unreachable("not reached: should be handled by lower_packing_builtins");
- case ir_unop_unpack_half_2x16_split_x:
- case ir_unop_unpack_half_2x16_split_y:
- case ir_binop_pack_half_2x16_split:
- case ir_unop_interpolate_at_centroid:
- case ir_binop_interpolate_at_sample:
- case ir_binop_interpolate_at_offset:
- unreachable("not reached: should not occur in vertex shader");
- case ir_binop_ldexp:
- unreachable("not reached: should be handled by ldexp_to_arith()");
- case ir_unop_d2f:
- case ir_unop_f2d:
- case ir_unop_d2i:
- case ir_unop_i2d:
- case ir_unop_d2u:
- case ir_unop_u2d:
- case ir_unop_d2b:
- case ir_unop_pack_double_2x32:
- case ir_unop_unpack_double_2x32:
- case ir_unop_frexp_sig:
- case ir_unop_frexp_exp:
- unreachable("fp64 todo");
- }
-}
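/* Editorial worked example for the ir_binop_ubo_load case above: a vec2
 * read at constant byte offset 24 loads register offset 24/16 = 1 and
 * starts at component (24 % 16) / 4 = 2, so brw_swizzle_for_size(2)
 * (.xyyy) shifted by two becomes .zwww before the final MOV or CMP.
 */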
-
-
-void
-vec4_visitor::visit(ir_swizzle *ir)
-{
- /* Note that this is only swizzles in expressions, not those on the left
- * hand side of an assignment, which do write masking. See ir_assignment
- * for that.
- */
- const unsigned swz = brw_compose_swizzle(
- brw_swizzle_for_size(ir->type->vector_elements),
- BRW_SWIZZLE4(ir->mask.x, ir->mask.y, ir->mask.z, ir->mask.w));
-
- ir->val->accept(this);
- this->result = swizzle(this->result, swz);
-}
-
-void
-vec4_visitor::visit(ir_dereference_variable *ir)
-{
- const struct glsl_type *type = ir->type;
- dst_reg *reg = variable_storage(ir->var);
-
- if (!reg) {
- fail("Failed to find variable storage for %s\n", ir->var->name);
- this->result = src_reg(brw_null_reg());
- return;
- }
-
- this->result = src_reg(*reg);
-
- /* System values get their swizzle from the dst_reg writemask */
- if (ir->var->data.mode == ir_var_system_value)
- return;
-
- if (type->is_scalar() || type->is_vector() || type->is_matrix())
- this->result.swizzle = brw_swizzle_for_size(type->vector_elements);
-}
-
-
-int
-vec4_visitor::compute_array_stride(ir_dereference_array *ir)
-{
- /* Under normal circumstances array elements are stored consecutively, so
- * the stride is equal to the size of the array element.
- */
- return type_size_vec4(ir->type);
-}
-
-
-void
-vec4_visitor::visit(ir_dereference_array *ir)
-{
- ir_constant *constant_index;
- src_reg src;
- int array_stride = compute_array_stride(ir);
-
- constant_index = ir->array_index->constant_expression_value();
-
- ir->array->accept(this);
- src = this->result;
-
- if (constant_index) {
- src.reg_offset += constant_index->value.i[0] * array_stride;
- } else {
- /* Variable index array dereference. It eats the "vec4" of the
- * base of the array and an index that offsets the Mesa register
- * index.
- */
- ir->array_index->accept(this);
-
- src_reg index_reg;
-
- if (array_stride == 1) {
- index_reg = this->result;
- } else {
- index_reg = src_reg(this, glsl_type::int_type);
-
- emit(MUL(dst_reg(index_reg), this->result, src_reg(array_stride)));
- }
-
- if (src.reladdr) {
- src_reg temp = src_reg(this, glsl_type::int_type);
-
- emit(ADD(dst_reg(temp), *src.reladdr, index_reg));
-
- index_reg = temp;
- }
-
- src.reladdr = ralloc(mem_ctx, src_reg);
- memcpy(src.reladdr, &index_reg, sizeof(index_reg));
- }
-
- /* If the type is smaller than a vec4, replicate the last channel out. */
- if (ir->type->is_scalar() || ir->type->is_vector() || ir->type->is_matrix())
- src.swizzle = brw_swizzle_for_size(ir->type->vector_elements);
- else
- src.swizzle = BRW_SWIZZLE_NOOP;
- src.type = brw_type_for_base_type(ir->type);
-
- this->result = src;
-}
-
-void
-vec4_visitor::visit(ir_dereference_record *ir)
-{
- unsigned int i;
- const glsl_type *struct_type = ir->record->type;
- int offset = 0;
-
- ir->record->accept(this);
-
- for (i = 0; i < struct_type->length; i++) {
- if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
- break;
- offset += type_size_vec4(struct_type->fields.structure[i].type);
- }
-
- /* If the type is smaller than a vec4, replicate the last channel out. */
- if (ir->type->is_scalar() || ir->type->is_vector() || ir->type->is_matrix())
- this->result.swizzle = brw_swizzle_for_size(ir->type->vector_elements);
- else
- this->result.swizzle = BRW_SWIZZLE_NOOP;
- this->result.type = brw_type_for_base_type(ir->type);
-
- this->result.reg_offset += offset;
-}
-
-/**
- * We want to be careful in assignment setup to hit the actual storage
- * instead of potentially using a temporary like we might with the
- * ir_dereference handler.
- */
-static dst_reg
-get_assignment_lhs(ir_dereference *ir, vec4_visitor *v)
-{
- /* The LHS must be a dereference. If the LHS is a variable indexed array
- * access of a vector, it must be separated into a series conditional moves
- * before reaching this point (see ir_vec_index_to_cond_assign).
- */
- assert(ir->as_dereference());
- ir_dereference_array *deref_array = ir->as_dereference_array();
- if (deref_array) {
- assert(!deref_array->array->type->is_vector());
- }
-
- /* Use the rvalue deref handler for the most part. We'll ignore
- * swizzles in it and write swizzles using writemask, though.
- */
- ir->accept(v);
- return dst_reg(v->result);
-}
-
-void
-vec4_visitor::emit_block_move(dst_reg *dst, src_reg *src,
- const struct glsl_type *type,
- enum brw_predicate predicate)
-{
- if (type->base_type == GLSL_TYPE_STRUCT) {
- for (unsigned int i = 0; i < type->length; i++) {
- emit_block_move(dst, src, type->fields.structure[i].type, predicate);
- }
- return;
- }
-
- if (type->is_array()) {
- for (unsigned int i = 0; i < type->length; i++) {
- emit_block_move(dst, src, type->fields.array, predicate);
- }
- return;
- }
-
- if (type->is_matrix()) {
- const struct glsl_type *vec_type;
-
- vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
- type->vector_elements, 1);
-
- for (int i = 0; i < type->matrix_columns; i++) {
- emit_block_move(dst, src, vec_type, predicate);
- }
- return;
- }
-
- assert(type->is_scalar() || type->is_vector());
-
- dst->type = brw_type_for_base_type(type);
- src->type = dst->type;
-
- dst->writemask = (1 << type->vector_elements) - 1;
-
- src->swizzle = brw_swizzle_for_size(type->vector_elements);
-
- vec4_instruction *inst = emit(MOV(*dst, *src));
- inst->predicate = predicate;
-
- dst->reg_offset++;
- src->reg_offset++;
-}
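/* Editorial sketch: emit_block_move() recurses through aggregates and
 * emits one MOV per scalar/vector leaf, bumping reg_offset as it goes;
 * a mat2 copy becomes:
 *
 *   MOV dst+0.xy, src+0.xyyy
 *   MOV dst+1.xy, src+1.xyyy
 */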
-
-
-/* If the RHS processing resulted in an instruction generating a
- * temporary value, and it would be easy to rewrite the instruction to
- * generate its result right into the LHS instead, do so. This ends
- * up reliably removing instructions where it can be tricky to do so
- * later without real UD chain information.
- */
-bool
-vec4_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir,
- dst_reg dst,
- src_reg src,
- vec4_instruction *pre_rhs_inst,
- vec4_instruction *last_rhs_inst)
-{
- /* This could be supported, but it would take more smarts. */
- if (ir->condition)
- return false;
-
- if (pre_rhs_inst == last_rhs_inst)
- return false; /* No instructions generated to work with. */
-
- /* Make sure the last instruction generated our source reg. */
- if (src.file != GRF ||
- src.file != last_rhs_inst->dst.file ||
- src.reg != last_rhs_inst->dst.reg ||
- src.reg_offset != last_rhs_inst->dst.reg_offset ||
- src.reladdr ||
- src.abs ||
- src.negate ||
- last_rhs_inst->predicate != BRW_PREDICATE_NONE)
- return false;
-
- /* Check that the last instruction fully initialized the channels
- * we want to use, in the order we want to use them. We could
- * potentially reswizzle the operands of many instructions so that
- * we could handle out of order channels, but don't yet.
- */
-
- for (unsigned i = 0; i < 4; i++) {
- if (dst.writemask & (1 << i)) {
- if (!(last_rhs_inst->dst.writemask & (1 << i)))
- return false;
-
- if (BRW_GET_SWZ(src.swizzle, i) != i)
- return false;
- }
- }
-
- /* Success! Rewrite the instruction. */
- last_rhs_inst->dst.file = dst.file;
- last_rhs_inst->dst.reg = dst.reg;
- last_rhs_inst->dst.reg_offset = dst.reg_offset;
- last_rhs_inst->dst.reladdr = dst.reladdr;
- last_rhs_inst->dst.writemask &= dst.writemask;
-
- return true;
-}
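/* Editorial sketch: for `v.xy = a + b;` the RHS lands in a temporary,
 *
 *   ADD tmp, a, b
 *   MOV v.xy, tmp        // the copy this pass tries to elide
 *
 * and when the checks above pass, the ADD's destination is rewritten to
 * v.xy directly and the trailing MOV is never emitted.
 */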
-
-void
-vec4_visitor::visit(ir_assignment *ir)
-{
- dst_reg dst = get_assignment_lhs(ir->lhs, this);
- enum brw_predicate predicate = BRW_PREDICATE_NONE;
-
- if (!ir->lhs->type->is_scalar() &&
- !ir->lhs->type->is_vector()) {
- ir->rhs->accept(this);
- src_reg src = this->result;
-
- if (ir->condition) {
- emit_bool_to_cond_code(ir->condition, &predicate);
- }
-
- /* emit_block_move doesn't account for swizzles in the source register.
- * This should be ok, since the source register is a structure or an
- * array, and those can't be swizzled. But double-check to be sure.
- */
- assert(src.swizzle ==
- (ir->rhs->type->is_matrix()
- ? brw_swizzle_for_size(ir->rhs->type->vector_elements)
- : BRW_SWIZZLE_NOOP));
-
- emit_block_move(&dst, &src, ir->rhs->type, predicate);
- return;
- }
-
- /* Now we're down to just a scalar/vector with writemasks. */
- int i;
-
- vec4_instruction *pre_rhs_inst, *last_rhs_inst;
- pre_rhs_inst = (vec4_instruction *)this->instructions.get_tail();
+void
+vec4_visitor::setup_vec4_uniform_value(unsigned param_offset,
+ const gl_constant_value *values,
+ unsigned n)
+{
+ static const gl_constant_value zero = { 0 };
- ir->rhs->accept(this);
+ assert(param_offset % 4 == 0);
- last_rhs_inst = (vec4_instruction *)this->instructions.get_tail();
+ for (unsigned i = 0; i < n; ++i)
+ stage_prog_data->param[param_offset + i] = &values[i];
- int swizzles[4];
- int src_chan = 0;
+ for (unsigned i = n; i < 4; ++i)
+ stage_prog_data->param[param_offset + i] = &zero;
- assert(ir->lhs->type->is_vector() ||
- ir->lhs->type->is_scalar());
- dst.writemask = ir->write_mask;
+ uniform_vector_size[param_offset / 4] = n;
+}
- /* Swizzle a small RHS vector into the channels being written.
- *
- * glsl ir treats write_mask as dictating how many channels are
- * present on the RHS, while in our instructions we need to make
- * those channels appear in the slots of the vec4 they're written to.
- */
- for (int i = 0; i < 4; i++)
- swizzles[i] = (ir->write_mask & (1 << i) ? src_chan++ : 0);
+vec4_instruction *
+vec4_visitor::emit_minmax(enum brw_conditional_mod conditionalmod, dst_reg dst,
+ src_reg src0, src_reg src1)
+{
+ vec4_instruction *inst;
- src_reg src = swizzle(this->result,
- BRW_SWIZZLE4(swizzles[0], swizzles[1],
- swizzles[2], swizzles[3]));
+ if (devinfo->gen >= 6) {
+ inst = emit(BRW_OPCODE_SEL, dst, src0, src1);
+ inst->conditional_mod = conditionalmod;
+ } else {
+ emit(CMP(dst, src0, src1, conditionalmod));
- if (try_rewrite_rhs_to_dst(ir, dst, src, pre_rhs_inst, last_rhs_inst)) {
- return;
+ inst = emit(BRW_OPCODE_SEL, dst, src0, src1);
+ inst->predicate = BRW_PREDICATE_NORMAL;
}
- if (ir->condition) {
- emit_bool_to_cond_code(ir->condition, &predicate);
- }
+ return inst;
+}
- for (i = 0; i < type_size_vec4(ir->lhs->type); i++) {
- vec4_instruction *inst = emit(MOV(dst, src));
- inst->predicate = predicate;
+vec4_instruction *
+vec4_visitor::emit_lrp(const dst_reg &dst,
+ const src_reg &x, const src_reg &y, const src_reg &a)
+{
+ if (devinfo->gen >= 6) {
+ /* Note that the instruction's argument order is reversed from GLSL
+ * and the IR.
+ */
+ return emit(LRP(dst, fix_3src_operand(a), fix_3src_operand(y),
+ fix_3src_operand(x)));
+ } else {
+ /* Earlier generations don't support three source operations, so we
+ * need to emit x*(1-a) + y*a.
+ */
+ dst_reg y_times_a = dst_reg(this, glsl_type::vec4_type);
+ dst_reg one_minus_a = dst_reg(this, glsl_type::vec4_type);
+ dst_reg x_times_one_minus_a = dst_reg(this, glsl_type::vec4_type);
+ y_times_a.writemask = dst.writemask;
+ one_minus_a.writemask = dst.writemask;
+ x_times_one_minus_a.writemask = dst.writemask;
- dst.reg_offset++;
- src.reg_offset++;
+ emit(MUL(y_times_a, y, a));
+ emit(ADD(one_minus_a, negate(a), src_reg(1.0f)));
+ emit(MUL(x_times_one_minus_a, x, src_reg(one_minus_a)));
+ return emit(ADD(dst, src_reg(x_times_one_minus_a), src_reg(y_times_a)));
}
}
+/**
+ * Emits the instructions needed to perform a pull constant load. before_block
+ * and before_inst can be NULL, in which case the instructions will be
+ * appended to the end of the instruction list.
+ */
void
-vec4_visitor::emit_constant_values(dst_reg *dst, ir_constant *ir)
+vec4_visitor::emit_pull_constant_load_reg(dst_reg dst,
+ src_reg surf_index,
+ src_reg offset_reg,
+ bblock_t *before_block,
+ vec4_instruction *before_inst)
{
- if (ir->type->base_type == GLSL_TYPE_STRUCT) {
- foreach_in_list(ir_constant, field_value, &ir->components) {
- emit_constant_values(dst, field_value);
- }
- return;
- }
-
- if (ir->type->is_array()) {
- for (unsigned int i = 0; i < ir->type->length; i++) {
- emit_constant_values(dst, ir->array_elements[i]);
- }
- return;
- }
-
- if (ir->type->is_matrix()) {
- for (int i = 0; i < ir->type->matrix_columns; i++) {
- float *vec = &ir->value.f[i * ir->type->vector_elements];
+ assert((before_inst == NULL && before_block == NULL) ||
+ (before_inst && before_block));
- for (int j = 0; j < ir->type->vector_elements; j++) {
- dst->writemask = 1 << j;
- dst->type = BRW_REGISTER_TYPE_F;
+ vec4_instruction *pull;
- emit(MOV(*dst, src_reg(vec[j])));
- }
- dst->reg_offset++;
- }
- return;
- }
+ if (devinfo->gen >= 9) {
+ /* Gen9+ needs a message header in order to use SIMD4x2 mode */
+ src_reg header(this, glsl_type::uvec4_type, 2);
- int remaining_writemask = (1 << ir->type->vector_elements) - 1;
+ pull = new(mem_ctx)
+ vec4_instruction(VS_OPCODE_SET_SIMD4X2_HEADER_GEN9,
+ dst_reg(header));
- for (int i = 0; i < ir->type->vector_elements; i++) {
- if (!(remaining_writemask & (1 << i)))
- continue;
+ if (before_inst)
+ emit_before(before_block, before_inst, pull);
+ else
+ emit(pull);
- dst->writemask = 1 << i;
- dst->type = brw_type_for_base_type(ir->type);
+ dst_reg index_reg = retype(offset(dst_reg(header), 1),
+ offset_reg.type);
+ pull = MOV(writemask(index_reg, WRITEMASK_X), offset_reg);
- /* Find other components that match the one we're about to
- * write. Emits fewer instructions for things like vec4(0.5,
- * 1.5, 1.5, 1.5).
- */
- for (int j = i + 1; j < ir->type->vector_elements; j++) {
- if (ir->type->base_type == GLSL_TYPE_BOOL) {
- if (ir->value.b[i] == ir->value.b[j])
- dst->writemask |= (1 << j);
- } else {
- /* u, i, and f storage all line up, so no need for a
- * switch case for comparing each type.
- */
- if (ir->value.u[i] == ir->value.u[j])
- dst->writemask |= (1 << j);
- }
- }
+ if (before_inst)
+ emit_before(before_block, before_inst, pull);
+ else
+ emit(pull);
- switch (ir->type->base_type) {
- case GLSL_TYPE_FLOAT:
- emit(MOV(*dst, src_reg(ir->value.f[i])));
- break;
- case GLSL_TYPE_INT:
- emit(MOV(*dst, src_reg(ir->value.i[i])));
- break;
- case GLSL_TYPE_UINT:
- emit(MOV(*dst, src_reg(ir->value.u[i])));
- break;
- case GLSL_TYPE_BOOL:
- emit(MOV(*dst, src_reg(ir->value.b[i] != 0 ? ~0 : 0)));
- break;
- default:
- unreachable("Non-float/uint/int/bool constant");
- }
+ pull = new(mem_ctx) vec4_instruction(VS_OPCODE_PULL_CONSTANT_LOAD_GEN7,
+ dst,
+ surf_index,
+ header);
+ pull->mlen = 2;
+ pull->header_size = 1;
+ } else if (devinfo->gen >= 7) {
+ dst_reg grf_offset = dst_reg(this, glsl_type::int_type);
- remaining_writemask &= ~dst->writemask;
- }
- dst->reg_offset++;
-}
+ grf_offset.type = offset_reg.type;
-void
-vec4_visitor::visit(ir_constant *ir)
-{
- dst_reg dst = dst_reg(this, ir->type);
- this->result = src_reg(dst);
+ pull = MOV(grf_offset, offset_reg);
- emit_constant_values(&dst, ir);
-}
+ if (before_inst)
+ emit_before(before_block, before_inst, pull);
+ else
+ emit(pull);
-void
-vec4_visitor::visit_atomic_counter_intrinsic(ir_call *ir)
-{
- ir_dereference *deref = static_cast<ir_dereference *>(
- ir->actual_parameters.get_head());
- ir_variable *location = deref->variable_referenced();
- unsigned surf_index = (prog_data->base.binding_table.abo_start +
- location->data.binding);
-
- /* Calculate the surface offset */
- src_reg offset(this, glsl_type::uint_type);
- ir_dereference_array *deref_array = deref->as_dereference_array();
- if (deref_array) {
- deref_array->array_index->accept(this);
-
- src_reg tmp(this, glsl_type::uint_type);
- emit(MUL(dst_reg(tmp), this->result, ATOMIC_COUNTER_SIZE));
- emit(ADD(dst_reg(offset), tmp, location->data.atomic.offset));
+ pull = new(mem_ctx) vec4_instruction(VS_OPCODE_PULL_CONSTANT_LOAD_GEN7,
+ dst,
+ surf_index,
+ src_reg(grf_offset));
+ pull->mlen = 1;
} else {
- offset = location->data.atomic.offset;
- }
-
- /* Emit the appropriate machine instruction */
- const char *callee = ir->callee->function_name();
- dst_reg dst = get_assignment_lhs(ir->return_deref, this);
-
- if (!strcmp("__intrinsic_atomic_read", callee)) {
- emit_untyped_surface_read(surf_index, dst, offset);
-
- } else if (!strcmp("__intrinsic_atomic_increment", callee)) {
- emit_untyped_atomic(BRW_AOP_INC, surf_index, dst, offset,
- src_reg(), src_reg());
-
- } else if (!strcmp("__intrinsic_atomic_predecrement", callee)) {
- emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dst, offset,
- src_reg(), src_reg());
+ pull = new(mem_ctx) vec4_instruction(VS_OPCODE_PULL_CONSTANT_LOAD,
+ dst,
+ surf_index,
+ offset_reg);
+ pull->base_mrf = FIRST_SPILL_MRF(devinfo->gen) + 1;
+ pull->mlen = 1;
}
- brw_mark_surface_used(stage_prog_data, surf_index);
+ if (before_inst)
+ emit_before(before_block, before_inst, pull);
+ else
+ emit(pull);
}
-void
-vec4_visitor::visit(ir_call *ir)
+src_reg
+vec4_visitor::emit_uniformize(const src_reg &src)
{
- const char *callee = ir->callee->function_name();
+ const src_reg chan_index(this, glsl_type::uint_type);
+ const dst_reg dst = retype(dst_reg(this, glsl_type::uint_type),
+ src.type);
- if (!strcmp("__intrinsic_atomic_read", callee) ||
- !strcmp("__intrinsic_atomic_increment", callee) ||
- !strcmp("__intrinsic_atomic_predecrement", callee)) {
- visit_atomic_counter_intrinsic(ir);
- } else {
- unreachable("Unsupported intrinsic.");
- }
+ emit(SHADER_OPCODE_FIND_LIVE_CHANNEL, dst_reg(chan_index))
+ ->force_writemask_all = true;
+ emit(SHADER_OPCODE_BROADCAST, dst, src, chan_index)
+ ->force_writemask_all = true;
+
+ return src_reg(dst);
}
src_reg
src_reg(inst->dst), sampler, dest_type);
}
-void
-vec4_visitor::visit(ir_texture *ir)
-{
- uint32_t sampler =
- _mesa_get_sampler_uniform_value(ir->sampler, shader_prog, prog);
-
- ir_rvalue *nonconst_sampler_index =
- _mesa_get_sampler_array_nonconst_index(ir->sampler);
-
- /* Handle non-constant sampler array indexing */
- src_reg sampler_reg;
- if (nonconst_sampler_index) {
- /* The highest sampler which may be used by this operation is
- * the last element of the array. Mark it here, because the generator
- * doesn't have enough information to determine the bound.
- */
- uint32_t array_size = ir->sampler->as_dereference_array()
- ->array->type->array_size();
-
- uint32_t max_used = sampler + array_size - 1;
- if (ir->op == ir_tg4 && devinfo->gen < 8) {
- max_used += prog_data->base.binding_table.gather_texture_start;
- } else {
- max_used += prog_data->base.binding_table.texture_start;
- }
-
- brw_mark_surface_used(&prog_data->base, max_used);
-
- /* Emit code to evaluate the actual indexing expression */
- nonconst_sampler_index->accept(this);
- src_reg temp(this, glsl_type::uint_type);
- emit(ADD(dst_reg(temp), this->result, src_reg(sampler)));
- sampler_reg = emit_uniformize(temp);
- } else {
- /* Single sampler, or constant array index; the indexing expression
- * is just an immediate.
- */
- sampler_reg = src_reg(sampler);
- }
-
- /* When tg4 is used with the degenerate ZERO/ONE swizzles, don't bother
- * emitting anything other than setting up the constant result.
- */
- if (ir->op == ir_tg4) {
- ir_constant *chan = ir->lod_info.component->as_constant();
- int swiz = GET_SWZ(key_tex->swizzles[sampler], chan->value.i[0]);
- if (swiz == SWIZZLE_ZERO || swiz == SWIZZLE_ONE) {
- dst_reg result(this, ir->type);
- this->result = src_reg(result);
- emit(MOV(result, src_reg(swiz == SWIZZLE_ONE ? 1.0f : 0.0f)));
- return;
- }
- }
-
- /* Should be lowered by do_lower_texture_projection */
- assert(!ir->projector);
-
- /* Should be lowered */
- assert(!ir->offset || !ir->offset->type->is_array());
-
- /* Generate code to compute all the subexpression trees. This has to be
- * done before loading any values into MRFs for the sampler message since
- * generating these values may involve SEND messages that need the MRFs.
- */
- src_reg coordinate;
- int coord_components = 0;
- if (ir->coordinate) {
- coord_components = ir->coordinate->type->vector_elements;
- ir->coordinate->accept(this);
- coordinate = this->result;
- }
-
- src_reg shadow_comparitor;
- if (ir->shadow_comparitor) {
- ir->shadow_comparitor->accept(this);
- shadow_comparitor = this->result;
- }
-
- bool has_nonconstant_offset = ir->offset && !ir->offset->as_constant();
- src_reg offset_value;
- if (has_nonconstant_offset) {
- ir->offset->accept(this);
- offset_value = src_reg(this->result);
- }
-
- src_reg lod, lod2, sample_index, mcs;
- switch (ir->op) {
- case ir_tex:
- lod = src_reg(0.0f);
- break;
- case ir_txf:
- case ir_txl:
- case ir_txs:
- ir->lod_info.lod->accept(this);
- lod = this->result;
- break;
- case ir_query_levels:
- lod = src_reg(0);
- break;
- case ir_txf_ms:
- ir->lod_info.sample_index->accept(this);
- sample_index = this->result;
-
- if (devinfo->gen >= 7 && key_tex->compressed_multisample_layout_mask & (1 << sampler))
- mcs = emit_mcs_fetch(ir->coordinate->type, coordinate, sampler_reg);
- else
- mcs = src_reg(0u);
- break;
- case ir_txd:
- ir->lod_info.grad.dPdx->accept(this);
- lod = this->result;
-
- ir->lod_info.grad.dPdy->accept(this);
- lod2 = this->result;
- break;
- case ir_txb:
- case ir_lod:
- case ir_tg4:
- case ir_texture_samples:
- break;
- }
-
- uint32_t constant_offset = 0;
- if (ir->offset != NULL && !has_nonconstant_offset) {
- constant_offset =
- brw_texture_offset(ir->offset->as_constant()->value.i,
- ir->offset->type->vector_elements);
- }
-
- /* Stuff the channel select bits in the top of the texture offset */
- if (ir->op == ir_tg4)
- constant_offset |=
- gather_channel( ir->lod_info.component->as_constant()->value.i[0],
- sampler) << 16;
-
- glsl_type const *type = ir->sampler->type;
- bool is_cube_array = type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE &&
- type->sampler_array;
-
- this->result = src_reg(this, ir->type);
- dst_reg dest = dst_reg(this->result);
-
- emit_texture(ir->op, dest, ir->type, coordinate, coord_components,
- shadow_comparitor,
- lod, lod2, sample_index,
- constant_offset, offset_value,
- mcs, is_cube_array, sampler, sampler_reg);
-}
-
/**
* Apply workarounds for Gen6 gather with UINT/SINT
*/
}
}
-void
-vec4_visitor::visit(ir_return *)
-{
- unreachable("not reached");
-}
-
-void
-vec4_visitor::visit(ir_discard *)
-{
- unreachable("not reached");
-}
-
-void
-vec4_visitor::visit(ir_if *ir)
-{
- /* Don't point the annotation at the if statement, because then it plus
- * the then and else blocks get printed.
- */
- this->base_ir = ir->condition;
-
- if (devinfo->gen == 6) {
- emit_if_gen6(ir);
- } else {
- enum brw_predicate predicate;
- emit_bool_to_cond_code(ir->condition, &predicate);
- emit(IF(predicate));
- }
-
- visit_instructions(&ir->then_instructions);
-
- if (!ir->else_instructions.is_empty()) {
- this->base_ir = ir->condition;
- emit(BRW_OPCODE_ELSE);
-
- visit_instructions(&ir->else_instructions);
- }
-
- this->base_ir = ir->condition;
- emit(BRW_OPCODE_ENDIF);
-}
-
void
vec4_visitor::gs_emit_vertex(int stream_id)
{
unreachable("not reached");
}
-void
-vec4_visitor::visit(ir_emit_vertex *)
-{
- unreachable("not reached");
-}
-
void
vec4_visitor::gs_end_primitive()
{
unreachable("not reached");
}
-
-void
-vec4_visitor::visit(ir_end_primitive *)
-{
- unreachable("not reached");
-}
-
-void
-vec4_visitor::visit(ir_barrier *)
-{
- unreachable("not reached");
-}
-
void
vec4_visitor::emit_untyped_atomic(unsigned atomic_op, unsigned surf_index,
dst_reg dst, src_reg offset,
*reg = temp;
}
-/**
- * Resolve the result of a Gen4-5 CMP instruction to a proper boolean.
- *
- * CMP on Gen4-5 only sets the LSB of the result; the rest are undefined.
- * If we need a proper boolean value, we have to fix it up to be 0 or ~0.
- */
-void
-vec4_visitor::resolve_bool_comparison(ir_rvalue *rvalue, src_reg *reg)
-{
- assert(devinfo->gen <= 5);
-
- if (!rvalue->type->is_boolean())
- return;
-
- src_reg and_result = src_reg(this, rvalue->type);
- src_reg neg_result = src_reg(this, rvalue->type);
- emit(AND(dst_reg(and_result), *reg, src_reg(1)));
- emit(MOV(dst_reg(neg_result), negate(and_result)));
- *reg = neg_result;
-}
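/* Editorial worked example: a Gen4-5 CMP may leave 0x00000001 for "true"
 * with the upper bits undefined. The fixup computes -(reg & 1):
 *
 *   AND tmp, reg, 1        // isolate the defined LSB
 *   MOV out, -tmp          // 1 -> ~0 (0xffffffff), 0 -> 0
 *
 * yielding the canonical 0 / ~0 boolean the rest of the backend expects.
 */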
-
vec4_visitor::vec4_visitor(const struct brw_compiler *compiler,
void *log_data,
struct gl_program *prog,
this->current_annotation = NULL;
memset(this->output_reg_annotation, 0, sizeof(this->output_reg_annotation));
- this->variable_ht = hash_table_ctor(0,
- hash_table_pointer_hash,
- hash_table_pointer_compare);
-
this->virtual_grf_start = NULL;
this->virtual_grf_end = NULL;
this->live_intervals = NULL;
vec4_visitor::~vec4_visitor()
{
- hash_table_dtor(this->variable_ht);
}