X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fdrivers%2Fdri%2Fi965%2Fbrw_vec4_visitor.cpp;h=5346fde950a9b2082e61cce583a515a9047658a0;hb=b38fcd0aea8d17919ecd9cc7afc518cfb2c01c27;hp=96816b6fbd06b7e2debd1df3b3f00ad3c83fc36a;hpb=169b6c1955deee7333d61f9ff149b7124bdea7d1;p=mesa.git diff --git a/src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp b/src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp index 96816b6fbd0..5346fde950a 100644 --- a/src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp +++ b/src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp @@ -24,14 +24,11 @@ #include "brw_vec4.h" #include "brw_cfg.h" #include "glsl/ir_uniform.h" -extern "C" { #include "program/sampler.h" -} namespace brw { -vec4_instruction::vec4_instruction(vec4_visitor *v, - enum opcode opcode, const dst_reg &dst, +vec4_instruction::vec4_instruction(enum opcode opcode, const dst_reg &dst, const src_reg &src0, const src_reg &src1, const src_reg &src2) { @@ -46,20 +43,27 @@ vec4_instruction::vec4_instruction(vec4_visitor *v, this->no_dd_check = false; this->writes_accumulator = false; this->conditional_mod = BRW_CONDITIONAL_NONE; + this->predicate = BRW_PREDICATE_NONE; + this->predicate_inverse = false; this->target = 0; + this->regs_written = (dst.file == BAD_FILE ? 0 : 1); this->shadow_compare = false; - this->ir = v->base_ir; + this->ir = NULL; this->urb_write_flags = BRW_URB_WRITE_NO_FLAGS; - this->header_present = false; + this->header_size = 0; + this->flag_subreg = 0; this->mlen = 0; this->base_mrf = 0; this->offset = 0; - this->annotation = v->current_annotation; + this->annotation = NULL; } vec4_instruction * vec4_visitor::emit(vec4_instruction *inst) { + inst->ir = this->base_ir; + inst->annotation = this->current_annotation; + this->instructions.push_tail(inst); return inst; @@ -81,8 +85,7 @@ vec4_instruction * vec4_visitor::emit(enum opcode opcode, const dst_reg &dst, const src_reg &src0, const src_reg &src1, const src_reg &src2) { - return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, - src0, src1, src2)); + return emit(new(mem_ctx) vec4_instruction(opcode, dst, src0, src1, src2)); } @@ -90,33 +93,32 @@ vec4_instruction * vec4_visitor::emit(enum opcode opcode, const dst_reg &dst, const src_reg &src0, const src_reg &src1) { - return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, src0, src1)); + return emit(new(mem_ctx) vec4_instruction(opcode, dst, src0, src1)); } vec4_instruction * vec4_visitor::emit(enum opcode opcode, const dst_reg &dst, const src_reg &src0) { - return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, src0)); + return emit(new(mem_ctx) vec4_instruction(opcode, dst, src0)); } vec4_instruction * vec4_visitor::emit(enum opcode opcode, const dst_reg &dst) { - return emit(new(mem_ctx) vec4_instruction(this, opcode, dst)); + return emit(new(mem_ctx) vec4_instruction(opcode, dst)); } vec4_instruction * vec4_visitor::emit(enum opcode opcode) { - return emit(new(mem_ctx) vec4_instruction(this, opcode, dst_reg())); + return emit(new(mem_ctx) vec4_instruction(opcode, dst_reg())); } #define ALU1(op) \ vec4_instruction * \ vec4_visitor::op(const dst_reg &dst, const src_reg &src0) \ { \ - return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst, \ - src0); \ + return new(mem_ctx) vec4_instruction(BRW_OPCODE_##op, dst, src0); \ } #define ALU2(op) \ @@ -124,8 +126,8 @@ vec4_visitor::emit(enum opcode opcode) vec4_visitor::op(const dst_reg &dst, const src_reg &src0, \ const src_reg &src1) \ { \ - return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst, \ - src0, 
src1); \ + return new(mem_ctx) vec4_instruction(BRW_OPCODE_##op, dst, \ + src0, src1); \ } #define ALU2_ACC(op) \ @@ -133,10 +135,10 @@ vec4_visitor::emit(enum opcode opcode) vec4_visitor::op(const dst_reg &dst, const src_reg &src0, \ const src_reg &src1) \ { \ - vec4_instruction *inst = new(mem_ctx) vec4_instruction(this, \ + vec4_instruction *inst = new(mem_ctx) vec4_instruction( \ BRW_OPCODE_##op, dst, src0, src1); \ - inst->writes_accumulator = true; \ - return inst; \ + inst->writes_accumulator = true; \ + return inst; \ } #define ALU3(op) \ @@ -144,8 +146,8 @@ vec4_visitor::emit(enum opcode opcode) vec4_visitor::op(const dst_reg &dst, const src_reg &src0, \ const src_reg &src1, const src_reg &src2) \ { \ - assert(brw->gen >= 6); \ - return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst, \ + assert(devinfo->gen >= 6); \ + return new(mem_ctx) vec4_instruction(BRW_OPCODE_##op, dst, \ src0, src1, src2); \ } @@ -188,7 +190,7 @@ vec4_visitor::IF(enum brw_predicate predicate) { vec4_instruction *inst; - inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_IF); + inst = new(mem_ctx) vec4_instruction(BRW_OPCODE_IF); inst->predicate = predicate; return inst; @@ -199,14 +201,14 @@ vec4_instruction * vec4_visitor::IF(src_reg src0, src_reg src1, enum brw_conditional_mod condition) { - assert(brw->gen == 6); + assert(devinfo->gen == 6); vec4_instruction *inst; resolve_ud_negate(&src0); resolve_ud_negate(&src1); - inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_IF, dst_null_d(), + inst = new(mem_ctx) vec4_instruction(BRW_OPCODE_IF, dst_null_d(), src0, src1); inst->conditional_mod = condition; @@ -224,20 +226,24 @@ vec4_visitor::CMP(dst_reg dst, src_reg src0, src_reg src1, { vec4_instruction *inst; - /* original gen4 does type conversion to the destination type - * before before comparison, producing garbage results for floating - * point comparisons. + /* Take the instruction: + * + * CMP null src0 src1 + * + * Original gen4 does type conversion to the destination type before + * comparison, producing garbage results for floating point comparisons. + * + * The destination type doesn't matter on newer generations, so we set the + * type to match src0 so we can compact the instruction. 
*/ - if (brw->gen == 4) { - dst.type = src0.type; - if (dst.file == HW_REG) - dst.fixed_hw_reg.type = dst.type; - } + dst.type = src0.type; + if (dst.file == HW_REG) + dst.fixed_hw_reg.type = dst.type; resolve_ud_negate(&src0); resolve_ud_negate(&src1); - inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_CMP, dst, src0, src1); + inst = new(mem_ctx) vec4_instruction(BRW_OPCODE_CMP, dst, src0, src1); inst->conditional_mod = condition; return inst; @@ -248,7 +254,7 @@ vec4_visitor::SCRATCH_READ(const dst_reg &dst, const src_reg &index) { vec4_instruction *inst; - inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_GEN4_SCRATCH_READ, + inst = new(mem_ctx) vec4_instruction(SHADER_OPCODE_GEN4_SCRATCH_READ, dst, index); inst->base_mrf = 14; inst->mlen = 2; @@ -262,7 +268,7 @@ vec4_visitor::SCRATCH_WRITE(const dst_reg &dst, const src_reg &src, { vec4_instruction *inst; - inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_GEN4_SCRATCH_WRITE, + inst = new(mem_ctx) vec4_instruction(SHADER_OPCODE_GEN4_SCRATCH_WRITE, dst, src, index); inst->base_mrf = 13; inst->mlen = 3; @@ -302,14 +308,14 @@ vec4_visitor::fix_3src_operand(src_reg src) dst_reg expanded = dst_reg(this, glsl_type::vec4_type); expanded.type = src.type; - emit(MOV(expanded, src)); + emit(VEC4_OPCODE_UNPACK_UNIFORM, expanded, src); return src_reg(expanded); } src_reg vec4_visitor::fix_math_operand(src_reg src) { - if (brw->gen < 6 || brw->gen >= 8 || src.file == BAD_FILE) + if (devinfo->gen < 6 || devinfo->gen >= 8 || src.file == BAD_FILE) return src; /* The gen6 math instruction ignores the source modifiers -- @@ -323,7 +329,7 @@ vec4_visitor::fix_math_operand(src_reg src) * can't use. */ - if (brw->gen == 7 && src.file != IMM) + if (devinfo->gen == 7 && src.file != IMM) return src; dst_reg expanded = dst_reg(this, glsl_type::vec4_type); @@ -332,7 +338,7 @@ vec4_visitor::fix_math_operand(src_reg src) return src_reg(expanded); } -void +vec4_instruction * vec4_visitor::emit_math(enum opcode opcode, const dst_reg &dst, const src_reg &src0, const src_reg &src1) @@ -340,21 +346,23 @@ vec4_visitor::emit_math(enum opcode opcode, vec4_instruction *math = emit(opcode, dst, fix_math_operand(src0), fix_math_operand(src1)); - if (brw->gen == 6 && dst.writemask != WRITEMASK_XYZW) { + if (devinfo->gen == 6 && dst.writemask != WRITEMASK_XYZW) { /* MATH on Gen6 must be align1, so we can't do writemasks. */ math->dst = dst_reg(this, glsl_type::vec4_type); math->dst.type = dst.type; - emit(MOV(dst, src_reg(math->dst))); - } else if (brw->gen < 6) { + math = emit(MOV(dst, src_reg(math->dst))); + } else if (devinfo->gen < 6) { math->base_mrf = 1; math->mlen = src1.file == BAD_FILE ? 
1 : 2; } + + return math; } void vec4_visitor::emit_pack_half_2x16(dst_reg dst, src_reg src0) { - if (brw->gen < 7) { + if (devinfo->gen < 7) { unreachable("ir_unop_pack_half_2x16 should be lowered"); } @@ -431,7 +439,7 @@ vec4_visitor::emit_pack_half_2x16(dst_reg dst, src_reg src0) void vec4_visitor::emit_unpack_half_2x16(dst_reg dst, src_reg src0) { - if (brw->gen < 7) { + if (devinfo->gen < 7) { unreachable("ir_unop_unpack_half_2x16 should be lowered"); } @@ -483,7 +491,7 @@ vec4_visitor::emit_unpack_unorm_4x8(const dst_reg &dst, src_reg src0) shifted.type = BRW_REGISTER_TYPE_UB; dst_reg f(this, glsl_type::vec4_type); - emit(MOV(f, src_reg(shifted))); + emit(VEC4_OPCODE_MOV_BYTES, f, src_reg(shifted)); emit(MUL(dst, src_reg(f), src_reg(1.0f / 255.0f))); } @@ -505,13 +513,13 @@ vec4_visitor::emit_unpack_snorm_4x8(const dst_reg &dst, src_reg src0) shifted.type = BRW_REGISTER_TYPE_B; dst_reg f(this, glsl_type::vec4_type); - emit(MOV(f, src_reg(shifted))); + emit(VEC4_OPCODE_MOV_BYTES, f, src_reg(shifted)); dst_reg scaled(this, glsl_type::vec4_type); emit(MUL(scaled, src_reg(f), src_reg(1.0f / 127.0f))); dst_reg max(this, glsl_type::vec4_type); - emit_minmax(BRW_CONDITIONAL_G, max, src_reg(scaled), src_reg(-1.0f)); + emit_minmax(BRW_CONDITIONAL_GE, max, src_reg(scaled), src_reg(-1.0f)); emit_minmax(BRW_CONDITIONAL_L, dst, src_reg(max), src_reg(1.0f)); } @@ -539,7 +547,7 @@ void vec4_visitor::emit_pack_snorm_4x8(const dst_reg &dst, const src_reg &src0) { dst_reg max(this, glsl_type::vec4_type); - emit_minmax(BRW_CONDITIONAL_G, max, src0, src_reg(-1.0f)); + emit_minmax(BRW_CONDITIONAL_GE, max, src0, src_reg(-1.0f)); dst_reg min(this, glsl_type::vec4_type); emit_minmax(BRW_CONDITIONAL_L, min, src_reg(max), src_reg(1.0f)); @@ -566,9 +574,18 @@ vec4_visitor::visit_instructions(const exec_list *list) } } - -static int -type_size(const struct glsl_type *type) +/** + * Returns the minimum number of vec4 elements needed to pack a type. + * + * For simple types, it will return 1 (a single vec4); for matrices, the + * number of columns; for array and struct, the sum of the vec4_size of + * each of its elements; and for sampler and atomic, zero. + * + * This method is useful to calculate how much register space is needed to + * store a particular type. + */ +int +vec4_visitor::type_size(const struct glsl_type *type) { unsigned int i; int size; @@ -597,6 +614,9 @@ type_size(const struct glsl_type *type) size += type_size(type->fields.structure[i].type); } return size; + case GLSL_TYPE_SUBROUTINE: + return 1; + case GLSL_TYPE_SAMPLER: /* Samplers take up no register space, since they're baked in at * link time. 
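
The counting rule documented on vec4_visitor::type_size() above can be made
concrete with a small standalone sketch. This is illustrative rather than part
of the patch; the Type struct is a hypothetical stand-in for glsl_type:

    #include <vector>

    /* Hypothetical stand-in for glsl_type, just enough to show the rule. */
    struct Type {
       enum Kind { SCALAR_OR_VECTOR, MATRIX, ARRAY, STRUCT, SAMPLER } kind;
       int matrix_columns;         /* MATRIX: number of columns */
       int array_length;           /* ARRAY: number of elements */
       const Type *element;        /* ARRAY: element type */
       std::vector<Type> fields;   /* STRUCT: member types */
    };

    int vec4_size(const Type &t)
    {
       switch (t.kind) {
       case Type::SCALAR_OR_VECTOR:
          return 1;                             /* one full vec4 slot */
       case Type::MATRIX:
          return t.matrix_columns;              /* one slot per column */
       case Type::ARRAY:
          return t.array_length * vec4_size(*t.element);
       case Type::STRUCT: {
          int sum = 0;
          for (const Type &f : t.fields)
             sum += vec4_size(f);               /* sum over the members */
          return sum;
       }
       case Type::SAMPLER:
          return 0;                             /* baked in at link time */
       }
       return 0;
    }

    /* e.g. mat3 -> 3, vec4[2] -> 2, struct { vec3 a; float b; } -> 2 */
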
@@ -606,6 +626,7 @@ type_size(const struct glsl_type *type) return 0; case GLSL_TYPE_IMAGE: case GLSL_TYPE_VOID: + case GLSL_TYPE_DOUBLE: case GLSL_TYPE_ERROR: case GLSL_TYPE_INTERFACE: unreachable("not reached"); @@ -614,36 +635,17 @@ type_size(const struct glsl_type *type) return 0; } -int -vec4_visitor::virtual_grf_alloc(int size) -{ - if (virtual_grf_array_size <= virtual_grf_count) { - if (virtual_grf_array_size == 0) - virtual_grf_array_size = 16; - else - virtual_grf_array_size *= 2; - virtual_grf_sizes = reralloc(mem_ctx, virtual_grf_sizes, int, - virtual_grf_array_size); - virtual_grf_reg_map = reralloc(mem_ctx, virtual_grf_reg_map, int, - virtual_grf_array_size); - } - virtual_grf_reg_map[virtual_grf_count] = virtual_grf_reg_count; - virtual_grf_reg_count += size; - virtual_grf_sizes[virtual_grf_count] = size; - return virtual_grf_count++; -} - src_reg::src_reg(class vec4_visitor *v, const struct glsl_type *type) { init(); this->file = GRF; - this->reg = v->virtual_grf_alloc(type_size(type)); + this->reg = v->alloc.allocate(v->type_size(type)); if (type->is_array() || type->is_record()) { this->swizzle = BRW_SWIZZLE_NOOP; } else { - this->swizzle = swizzle_for_size(type->vector_elements); + this->swizzle = brw_swizzle_for_size(type->vector_elements); } this->type = brw_type_for_base_type(type); @@ -656,7 +658,7 @@ src_reg::src_reg(class vec4_visitor *v, const struct glsl_type *type, int size) init(); this->file = GRF; - this->reg = v->virtual_grf_alloc(type_size(type) * size); + this->reg = v->alloc.allocate(v->type_size(type) * size); this->swizzle = BRW_SWIZZLE_NOOP; @@ -668,7 +670,7 @@ dst_reg::dst_reg(class vec4_visitor *v, const struct glsl_type *type) init(); this->file = GRF; - this->reg = v->virtual_grf_alloc(type_size(type)); + this->reg = v->alloc.allocate(v->type_size(type)); if (type->is_array() || type->is_record()) { this->writemask = WRITEMASK_XYZW; @@ -679,6 +681,21 @@ dst_reg::dst_reg(class vec4_visitor *v, const struct glsl_type *type) this->type = brw_type_for_base_type(type); } +void +vec4_visitor::setup_vector_uniform_values(const gl_constant_value *values, + unsigned n) +{ + static const gl_constant_value zero = { 0 }; + + for (unsigned i = 0; i < n; ++i) + stage_prog_data->param[4 * uniforms + i] = &values[i]; + + for (unsigned i = n; i < 4; ++i) + stage_prog_data->param[4 * uniforms + i] = &zero; + + uniform_vector_size[uniforms++] = n; +} + /* Our support for uniforms is piggy-backed on the struct * gl_fragment_program, because that's where the values actually * get stored, rather than in some global gl_shader_program uniform @@ -695,9 +712,12 @@ vec4_visitor::setup_uniform_values(ir_variable *ir) * order we'd walk the type, so walk the list of storage and find anything * with our name, or the prefix of a component that starts with our name. */ - for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) { + for (unsigned u = 0; u < shader_prog->NumUniformStorage; u++) { struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u]; + if (storage->builtin) + continue; + if (strncmp(ir->name, storage->name, namelen) != 0 || (storage->name[namelen] != 0 && storage->name[namelen] != '.' 
&& @@ -705,34 +725,19 @@ vec4_visitor::setup_uniform_values(ir_variable *ir) continue; } - gl_constant_value *components = storage->storage; - unsigned vector_count = (MAX2(storage->array_elements, 1) * - storage->type->matrix_columns); - - for (unsigned s = 0; s < vector_count; s++) { - assert(uniforms < uniform_array_size); - uniform_vector_size[uniforms] = storage->type->vector_elements; - - int i; - for (i = 0; i < uniform_vector_size[uniforms]; i++) { - stage_prog_data->param[uniforms * 4 + i] = components; - components++; - } - for (; i < 4; i++) { - static gl_constant_value zero = { 0.0 }; - stage_prog_data->param[uniforms * 4 + i] = &zero; - } + const unsigned vector_count = (MAX2(storage->array_elements, 1) * + storage->type->matrix_columns); + const unsigned vector_size = storage->type->vector_elements; - uniforms++; - } + for (unsigned s = 0; s < vector_count; s++) + setup_vector_uniform_values(&storage->storage[s * vector_size], + vector_size); } } void -vec4_visitor::setup_uniform_clipplane_values() +vec4_visitor::setup_uniform_clipplane_values(gl_clip_plane *clip_planes) { - gl_clip_plane *clip_planes = brw_select_clip_planes(ctx); - for (int i = 0; i < key->nr_userclip_plane_consts; ++i) { assert(this->uniforms < uniform_array_size); this->uniform_vector_size[this->uniforms] = 4; @@ -768,20 +773,15 @@ vec4_visitor::setup_builtin_uniform_values(ir_variable *ir) &this->prog->Parameters->ParameterValues[index][0]; assert(this->uniforms < uniform_array_size); - this->uniform_vector_size[this->uniforms] = 0; - /* Add each of the unique swizzled channels of the element. - * This will end up matching the size of the glsl_type of this field. - */ - int last_swiz = -1; - for (unsigned int j = 0; j < 4; j++) { - int swiz = GET_SWZ(slots[i].swizzle, j); - last_swiz = swiz; - - stage_prog_data->param[this->uniforms * 4 + j] = &values[swiz]; - assert(this->uniforms < uniform_array_size); - if (swiz <= last_swiz) - this->uniform_vector_size[this->uniforms]++; - } + + for (unsigned j = 0; j < 4; j++) + stage_prog_data->param[this->uniforms * 4 + j] = + &values[GET_SWZ(slots[i].swizzle, j)]; + + this->uniform_vector_size[this->uniforms] = + (ir->type->is_scalar() || ir->type->is_vector() || + ir->type->is_matrix() ? 
ir->type->vector_elements : 4); + this->uniforms++; } } @@ -819,22 +819,40 @@ vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, break; case ir_binop_logic_xor: - inst = emit(XOR(dst_null_d(), op[0], op[1])); - inst->conditional_mod = BRW_CONDITIONAL_NZ; + if (devinfo->gen <= 5) { + src_reg temp = src_reg(this, ir->type); + emit(XOR(dst_reg(temp), op[0], op[1])); + inst = emit(AND(dst_null_d(), temp, src_reg(1))); + } else { + inst = emit(XOR(dst_null_d(), op[0], op[1])); + } + inst->conditional_mod = BRW_CONDITIONAL_NZ; break; case ir_binop_logic_or: - inst = emit(OR(dst_null_d(), op[0], op[1])); - inst->conditional_mod = BRW_CONDITIONAL_NZ; + if (devinfo->gen <= 5) { + src_reg temp = src_reg(this, ir->type); + emit(OR(dst_reg(temp), op[0], op[1])); + inst = emit(AND(dst_null_d(), temp, src_reg(1))); + } else { + inst = emit(OR(dst_null_d(), op[0], op[1])); + } + inst->conditional_mod = BRW_CONDITIONAL_NZ; break; case ir_binop_logic_and: - inst = emit(AND(dst_null_d(), op[0], op[1])); - inst->conditional_mod = BRW_CONDITIONAL_NZ; + if (devinfo->gen <= 5) { + src_reg temp = src_reg(this, ir->type); + emit(AND(dst_reg(temp), op[0], op[1])); + inst = emit(AND(dst_null_d(), temp, src_reg(1))); + } else { + inst = emit(AND(dst_null_d(), op[0], op[1])); + } + inst->conditional_mod = BRW_CONDITIONAL_NZ; break; case ir_unop_f2b: - if (brw->gen >= 6) { + if (devinfo->gen >= 6) { emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ)); } else { inst = emit(MOV(dst_null_f(), op[0])); @@ -843,7 +861,7 @@ vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, break; case ir_unop_i2b: - if (brw->gen >= 6) { + if (devinfo->gen >= 6) { emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ)); } else { inst = emit(MOV(dst_null_d(), op[0])); @@ -852,16 +870,27 @@ vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, break; case ir_binop_all_equal: + if (devinfo->gen <= 5) { + resolve_bool_comparison(expr->operands[0], &op[0]); + resolve_bool_comparison(expr->operands[1], &op[1]); + } inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z)); *predicate = BRW_PREDICATE_ALIGN16_ALL4H; break; case ir_binop_any_nequal: + if (devinfo->gen <= 5) { + resolve_bool_comparison(expr->operands[0], &op[0]); + resolve_bool_comparison(expr->operands[1], &op[1]); + } inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ)); *predicate = BRW_PREDICATE_ALIGN16_ANY4H; break; case ir_unop_any: + if (devinfo->gen <= 5) { + resolve_bool_comparison(expr->operands[0], &op[0]); + } inst = emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ)); *predicate = BRW_PREDICATE_ALIGN16_ANY4H; break; @@ -872,6 +901,10 @@ vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, case ir_binop_lequal: case ir_binop_equal: case ir_binop_nequal: + if (devinfo->gen <= 5) { + resolve_bool_comparison(expr->operands[0], &op[0]); + resolve_bool_comparison(expr->operands[1], &op[1]); + } emit(CMP(dst_null_d(), op[0], op[1], brw_conditional_for_comparison(expr->operation))); break; @@ -902,14 +935,8 @@ vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, resolve_ud_negate(&this->result); - if (brw->gen >= 6) { - vec4_instruction *inst = emit(AND(dst_null_d(), - this->result, src_reg(1))); - inst->conditional_mod = BRW_CONDITIONAL_NZ; - } else { - vec4_instruction *inst = emit(MOV(dst_null_d(), this->result)); - inst->conditional_mod = BRW_CONDITIONAL_NZ; - } + vec4_instruction *inst = emit(AND(dst_null_d(), this->result, src_reg(1))); + inst->conditional_mod = BRW_CONDITIONAL_NZ; } /** @@ -1020,17 +1047,17 @@ 
vec4_visitor::visit(ir_variable *ir) switch (ir->data.mode) { case ir_var_shader_in: + assert(ir->data.location != -1); reg = new(mem_ctx) dst_reg(ATTR, ir->data.location); break; case ir_var_shader_out: + assert(ir->data.location != -1); reg = new(mem_ctx) dst_reg(this, ir->type); for (int i = 0; i < type_size(ir->type); i++) { output_reg[ir->data.location + i] = *reg; output_reg[ir->data.location + i].reg_offset = i; - output_reg[ir->data.location + i].type = - brw_type_for_base_type(ir->type->get_scalar_type()); output_reg_annotation[ir->data.location + i] = ir->name; } break; @@ -1050,7 +1077,7 @@ vec4_visitor::visit(ir_variable *ir) * Some uniforms, such as samplers and atomic counters, have no actual * storage, so we should ignore them. */ - if (ir->is_in_uniform_block() || type_size(ir->type) == 0) + if (ir->is_in_buffer_block() || type_size(ir->type) == 0) return; /* Track how big the whole uniform variable is, in case we need to put a @@ -1067,7 +1094,7 @@ vec4_visitor::visit(ir_variable *ir) break; case ir_var_system_value: - reg = make_reg_for_system_value(ir); + reg = make_reg_for_system_value(ir->data.location, ir->type); break; default: @@ -1135,32 +1162,54 @@ bool vec4_visitor::try_emit_mad(ir_expression *ir) { /* 3-src instructions were introduced in gen6. */ - if (brw->gen < 6) + if (devinfo->gen < 6) return false; /* MAD can only handle floating-point data. */ if (ir->type->base_type != GLSL_TYPE_FLOAT) return false; - ir_rvalue *nonmul = ir->operands[1]; - ir_expression *mul = ir->operands[0]->as_expression(); + ir_rvalue *nonmul; + ir_expression *mul; + bool mul_negate, mul_abs; - if (!mul || mul->operation != ir_binop_mul) { - nonmul = ir->operands[0]; - mul = ir->operands[1]->as_expression(); + for (int i = 0; i < 2; i++) { + mul_negate = false; + mul_abs = false; - if (!mul || mul->operation != ir_binop_mul) - return false; + mul = ir->operands[i]->as_expression(); + nonmul = ir->operands[1 - i]; + + if (mul && mul->operation == ir_unop_abs) { + mul = mul->operands[0]->as_expression(); + mul_abs = true; + } else if (mul && mul->operation == ir_unop_neg) { + mul = mul->operands[0]->as_expression(); + mul_negate = true; + } + + if (mul && mul->operation == ir_binop_mul) + break; } + if (!mul || mul->operation != ir_binop_mul) + return false; + nonmul->accept(this); src_reg src0 = fix_3src_operand(this->result); mul->operands[0]->accept(this); src_reg src1 = fix_3src_operand(this->result); + src1.negate ^= mul_negate; + src1.abs = mul_abs; + if (mul_abs) + src1.negate = false; mul->operands[1]->accept(this); src_reg src2 = fix_3src_operand(this->result); + src2.abs = mul_abs; + if (mul_abs) + src2.negate = false; this->result = src_reg(this, ir->type); emit(BRW_OPCODE_MAD, dst_reg(this->result), src0, src1, src2); @@ -1175,7 +1224,7 @@ vec4_visitor::try_emit_b2f_of_compare(ir_expression *ir) * false. Early hardware only sets the least significant bit, and * leaves the other bits undefined. So we can't use it. 
*/ - if (brw->gen < 6) + if (devinfo->gen < 6) return false; ir_expression *const cmp = ir->operands[0]->as_expression(); @@ -1217,13 +1266,13 @@ vec4_visitor::try_emit_b2f_of_compare(ir_expression *ir) return true; } -void +vec4_instruction * vec4_visitor::emit_minmax(enum brw_conditional_mod conditionalmod, dst_reg dst, src_reg src0, src_reg src1) { vec4_instruction *inst; - if (brw->gen >= 6) { + if (devinfo->gen >= 6) { inst = emit(BRW_OPCODE_SEL, dst, src0, src1); inst->conditional_mod = conditionalmod; } else { @@ -1232,18 +1281,20 @@ vec4_visitor::emit_minmax(enum brw_conditional_mod conditionalmod, dst_reg dst, inst = emit(BRW_OPCODE_SEL, dst, src0, src1); inst->predicate = BRW_PREDICATE_NORMAL; } + + return inst; } -void +vec4_instruction * vec4_visitor::emit_lrp(const dst_reg &dst, const src_reg &x, const src_reg &y, const src_reg &a) { - if (brw->gen >= 6) { + if (devinfo->gen >= 6) { /* Note that the instruction's argument order is reversed from GLSL * and the IR. */ - emit(LRP(dst, - fix_3src_operand(a), fix_3src_operand(y), fix_3src_operand(x))); + return emit(LRP(dst, fix_3src_operand(a), fix_3src_operand(y), + fix_3src_operand(x))); } else { /* Earlier generations don't support three source operations, so we * need to emit x*(1-a) + y*a. @@ -1258,15 +1309,107 @@ vec4_visitor::emit_lrp(const dst_reg &dst, emit(MUL(y_times_a, y, a)); emit(ADD(one_minus_a, negate(a), src_reg(1.0f))); emit(MUL(x_times_one_minus_a, x, src_reg(one_minus_a))); - emit(ADD(dst, src_reg(x_times_one_minus_a), src_reg(y_times_a))); + return emit(ADD(dst, src_reg(x_times_one_minus_a), src_reg(y_times_a))); + } +} + +/** + * Emits the instructions needed to perform a pull constant load. before_block + * and before_inst can be NULL in which case the instruction will be appended + * to the end of the instruction list. 
+ */ +void +vec4_visitor::emit_pull_constant_load_reg(dst_reg dst, + src_reg surf_index, + src_reg offset_reg, + bblock_t *before_block, + vec4_instruction *before_inst) +{ + assert((before_inst == NULL && before_block == NULL) || + (before_inst && before_block)); + + vec4_instruction *pull; + + if (devinfo->gen >= 9) { + /* Gen9+ needs a message header in order to use SIMD4x2 mode */ + src_reg header(this, glsl_type::uvec4_type, 2); + + pull = new(mem_ctx) + vec4_instruction(VS_OPCODE_SET_SIMD4X2_HEADER_GEN9, + dst_reg(header)); + + if (before_inst) + emit_before(before_block, before_inst, pull); + else + emit(pull); + + dst_reg index_reg = retype(offset(dst_reg(header), 1), + offset_reg.type); + pull = MOV(writemask(index_reg, WRITEMASK_X), offset_reg); + + if (before_inst) + emit_before(before_block, before_inst, pull); + else + emit(pull); + + pull = new(mem_ctx) vec4_instruction(VS_OPCODE_PULL_CONSTANT_LOAD_GEN7, + dst, + surf_index, + header); + pull->mlen = 2; + pull->header_size = 1; + } else if (devinfo->gen >= 7) { + dst_reg grf_offset = dst_reg(this, glsl_type::int_type); + + grf_offset.type = offset_reg.type; + + pull = MOV(grf_offset, offset_reg); + + if (before_inst) + emit_before(before_block, before_inst, pull); + else + emit(pull); + + pull = new(mem_ctx) vec4_instruction(VS_OPCODE_PULL_CONSTANT_LOAD_GEN7, + dst, + surf_index, + src_reg(grf_offset)); + pull->mlen = 1; + } else { + pull = new(mem_ctx) vec4_instruction(VS_OPCODE_PULL_CONSTANT_LOAD, + dst, + surf_index, + offset_reg); + pull->base_mrf = 14; + pull->mlen = 1; } + + if (before_inst) + emit_before(before_block, before_inst, pull); + else + emit(pull); +} + +src_reg +vec4_visitor::emit_uniformize(const src_reg &src) +{ + const src_reg chan_index(this, glsl_type::uint_type); + const dst_reg dst = retype(dst_reg(this, glsl_type::uint_type), + src.type); + + emit(SHADER_OPCODE_FIND_LIVE_CHANNEL, dst_reg(chan_index)) + ->force_writemask_all = true; + emit(SHADER_OPCODE_BROADCAST, dst, src, chan_index) + ->force_writemask_all = true; + + return src_reg(dst); } void vec4_visitor::visit(ir_expression *ir) { unsigned int operand; - src_reg op[Elements(ir->operands)]; + src_reg op[ARRAY_SIZE(ir->operands)]; vec4_instruction *inst; if (ir->operation == ir_binop_add) { @@ -1320,11 +1463,7 @@ vec4_visitor::visit(ir_expression *ir) switch (ir->operation) { case ir_unop_logic_not: - if (ctx->Const.UniformBooleanTrue != 1) { - emit(NOT(result_dst, op[0])); - } else { - emit(XOR(result_dst, op[0], src_reg(1u))); - } + emit(NOT(result_dst, op[0])); break; case ir_unop_neg: op[0].negate = !op[0].negate; @@ -1381,11 +1520,9 @@ vec4_visitor::visit(ir_expression *ir) case ir_unop_log: unreachable("not reached: should be handled by ir_explog_to_explog2"); case ir_unop_sin: - case ir_unop_sin_reduced: emit_math(SHADER_OPCODE_SIN, result_dst, op[0]); break; case ir_unop_cos: - case ir_unop_cos_reduced: emit_math(SHADER_OPCODE_COS, result_dst, op[0]); break; @@ -1437,6 +1574,10 @@ vec4_visitor::visit(ir_expression *ir) case ir_unop_noise: unreachable("not reached: should be handled by lower_noise"); + case ir_unop_subroutine_to_int: + emit(MOV(result_dst, op[0])); + break; + case ir_binop_add: emit(ADD(result_dst, op[0], op[1])); break; @@ -1444,7 +1585,7 @@ vec4_visitor::visit(ir_expression *ir) unreachable("not reached: should be handled by ir_sub_to_add_neg"); case ir_binop_mul: - if (brw->gen < 8 && ir->type->is_integer()) { + if (devinfo->gen < 8 && ir->type->is_integer()) { /* For integer multiplication, the MUL uses the low 16 
bits of one of * the operands (src0 through SNB, src1 on IVB and later). The MACH * accumulates in the contribution of the upper 16 bits of that @@ -1452,12 +1593,12 @@ vec4_visitor::visit(ir_expression *ir) * 16 bits, though, we can just emit a single MUL. */ if (ir->operands[0]->is_uint16_constant()) { - if (brw->gen < 7) + if (devinfo->gen < 7) emit(MUL(result_dst, op[0], op[1])); else emit(MUL(result_dst, op[1], op[0])); } else if (ir->operands[1]->is_uint16_constant()) { - if (brw->gen < 7) + if (devinfo->gen < 7) emit(MUL(result_dst, op[1], op[0])); else emit(MUL(result_dst, op[0], op[1])); @@ -1484,22 +1625,15 @@ vec4_visitor::visit(ir_expression *ir) assert(ir->type->is_integer()); emit_math(SHADER_OPCODE_INT_QUOTIENT, result_dst, op[0], op[1]); break; - case ir_binop_carry: { - struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD); - emit(ADDC(dst_null_ud(), op[0], op[1])); - emit(MOV(result_dst, src_reg(acc))); - break; - } - case ir_binop_borrow: { - struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD); + case ir_binop_carry: + unreachable("Should have been lowered by carry_to_arith()."); + + case ir_binop_borrow: + unreachable("Should have been lowered by borrow_to_arith()."); - emit(SUBB(dst_null_ud(), op[0], op[1])); - emit(MOV(result_dst, src_reg(acc))); - break; - } case ir_binop_mod: - /* Floating point should be lowered by MOD_TO_FRACT in the compiler. */ + /* Floating point should be lowered by MOD_TO_FLOOR in the compiler. */ assert(ir->type->is_integer()); emit_math(SHADER_OPCODE_INT_REMAINDER, result_dst, op[0], op[1]); break; @@ -1510,51 +1644,59 @@ vec4_visitor::visit(ir_expression *ir) case ir_binop_gequal: case ir_binop_equal: case ir_binop_nequal: { + if (devinfo->gen <= 5) { + resolve_bool_comparison(ir->operands[0], &op[0]); + resolve_bool_comparison(ir->operands[1], &op[1]); + } emit(CMP(result_dst, op[0], op[1], brw_conditional_for_comparison(ir->operation))); - if (ctx->Const.UniformBooleanTrue == 1) { - emit(AND(result_dst, result_src, src_reg(1u))); - } break; } case ir_binop_all_equal: + if (devinfo->gen <= 5) { + resolve_bool_comparison(ir->operands[0], &op[0]); + resolve_bool_comparison(ir->operands[1], &op[1]); + } + /* "==" operator producing a scalar boolean. */ if (ir->operands[0]->type->is_vector() || ir->operands[1]->type->is_vector()) { emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z)); emit(MOV(result_dst, src_reg(0))); - inst = emit(MOV(result_dst, src_reg(ctx->Const.UniformBooleanTrue))); + inst = emit(MOV(result_dst, src_reg(~0))); inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H; } else { emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_Z)); - if (ctx->Const.UniformBooleanTrue == 1) { - emit(AND(result_dst, result_src, src_reg(1u))); - } } break; case ir_binop_any_nequal: + if (devinfo->gen <= 5) { + resolve_bool_comparison(ir->operands[0], &op[0]); + resolve_bool_comparison(ir->operands[1], &op[1]); + } + /* "!=" operator producing a scalar boolean. 
*/ if (ir->operands[0]->type->is_vector() || ir->operands[1]->type->is_vector()) { emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ)); emit(MOV(result_dst, src_reg(0))); - inst = emit(MOV(result_dst, src_reg(ctx->Const.UniformBooleanTrue))); + inst = emit(MOV(result_dst, src_reg(~0))); inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H; } else { emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_NZ)); - if (ctx->Const.UniformBooleanTrue == 1) { - emit(AND(result_dst, result_src, src_reg(1u))); - } } break; case ir_unop_any: + if (devinfo->gen <= 5) { + resolve_bool_comparison(ir->operands[0], &op[0]); + } emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ)); emit(MOV(result_dst, src_reg(0))); - inst = emit(MOV(result_dst, src_reg(ctx->Const.UniformBooleanTrue))); + inst = emit(MOV(result_dst, src_reg(~0))); inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H; break; @@ -1608,37 +1750,29 @@ vec4_visitor::visit(ir_expression *ir) emit(MOV(result_dst, op[0])); break; case ir_unop_b2i: - if (ctx->Const.UniformBooleanTrue != 1) { - emit(AND(result_dst, op[0], src_reg(1u))); - } else { - emit(MOV(result_dst, op[0])); - } - break; case ir_unop_b2f: - if (ctx->Const.UniformBooleanTrue != 1) { - op[0].type = BRW_REGISTER_TYPE_UD; - result_dst.type = BRW_REGISTER_TYPE_UD; - emit(AND(result_dst, op[0], src_reg(0x3f800000u))); - result_dst.type = BRW_REGISTER_TYPE_F; - } else { - emit(MOV(result_dst, op[0])); + if (devinfo->gen <= 5) { + resolve_bool_comparison(ir->operands[0], &op[0]); } + emit(MOV(result_dst, negate(op[0]))); break; case ir_unop_f2b: - case ir_unop_i2b: emit(CMP(result_dst, op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ)); - if (ctx->Const.UniformBooleanTrue == 1) { - emit(AND(result_dst, result_src, src_reg(1u))); - } + break; + case ir_unop_i2b: + emit(CMP(result_dst, op[0], src_reg(0), BRW_CONDITIONAL_NZ)); break; case ir_unop_trunc: emit(RNDZ(result_dst, op[0])); break; - case ir_unop_ceil: - op[0].negate = !op[0].negate; - inst = emit(RNDD(result_dst, op[0])); - this->result.negate = true; + case ir_unop_ceil: { + src_reg tmp = src_reg(this, ir->type); + op[0].negate = !op[0].negate; + emit(RNDD(dst_reg(tmp), op[0])); + tmp.negate = true; + emit(MOV(result_dst, tmp)); + } break; case ir_unop_floor: inst = emit(RNDD(result_dst, op[0])); @@ -1654,7 +1788,7 @@ vec4_visitor::visit(ir_expression *ir) emit_minmax(BRW_CONDITIONAL_L, result_dst, op[0], op[1]); break; case ir_binop_max: - emit_minmax(BRW_CONDITIONAL_G, result_dst, op[0], op[1]); + emit_minmax(BRW_CONDITIONAL_GE, result_dst, op[0], op[1]); break; case ir_binop_pow: @@ -1710,12 +1844,13 @@ vec4_visitor::visit(ir_expression *ir) const_uniform_block->value.u[0]); } else { /* The block index is not a constant. Evaluate the index expression - * per-channel and add the base UBO index; the generator will select - * a value from any live channel. + * per-channel and add the base UBO index; we have to select a value + * from any live channel. */ surf_index = src_reg(this, glsl_type::uint_type); emit(ADD(dst_reg(surf_index), op[0], src_reg(prog_data->base.binding_table.ubo_start))); + surf_index = emit_uniformize(surf_index); /* Assume this may touch any UBO. It would be nice to provide * a tighter bound, but the array information is already lowered away. @@ -1726,7 +1861,7 @@ vec4_visitor::visit(ir_expression *ir) } if (const_offset_ir) { - if (brw->gen >= 8) { + if (devinfo->gen >= 8) { /* Store the offset in a GRF so we can send-from-GRF. 
*/ offset = src_reg(this, glsl_type::int_type); emit(MOV(dst_reg(offset), src_reg(const_offset / 16))); @@ -1738,46 +1873,24 @@ vec4_visitor::visit(ir_expression *ir) } } else { offset = src_reg(this, glsl_type::uint_type); - emit(SHR(dst_reg(offset), op[1], src_reg(4))); + emit(SHR(dst_reg(offset), op[1], src_reg(4u))); } - if (brw->gen >= 7) { - dst_reg grf_offset = dst_reg(this, glsl_type::int_type); - grf_offset.type = offset.type; - - emit(MOV(grf_offset, offset)); + emit_pull_constant_load_reg(dst_reg(packed_consts), + surf_index, + offset, + NULL, NULL /* before_block/inst */); - emit(new(mem_ctx) vec4_instruction(this, - VS_OPCODE_PULL_CONSTANT_LOAD_GEN7, - dst_reg(packed_consts), - surf_index, - src_reg(grf_offset))); - } else { - vec4_instruction *pull = - emit(new(mem_ctx) vec4_instruction(this, - VS_OPCODE_PULL_CONSTANT_LOAD, - dst_reg(packed_consts), - surf_index, - offset)); - pull->base_mrf = 14; - pull->mlen = 1; - } - - packed_consts.swizzle = swizzle_for_size(ir->type->vector_elements); + packed_consts.swizzle = brw_swizzle_for_size(ir->type->vector_elements); packed_consts.swizzle += BRW_SWIZZLE4(const_offset % 16 / 4, const_offset % 16 / 4, const_offset % 16 / 4, const_offset % 16 / 4); - /* UBO bools are any nonzero int. We need to convert them to use the - * value of true stored in ctx->Const.UniformBooleanTrue. - */ + /* UBO bools are any nonzero int. We need to convert them to 0/~0. */ if (ir->type->base_type == GLSL_TYPE_BOOL) { emit(CMP(result_dst, packed_consts, src_reg(0u), BRW_CONDITIONAL_NZ)); - if (ctx->Const.UniformBooleanTrue == 1) { - emit(AND(result_dst, result, src_reg(1u))); - } } else { emit(MOV(result_dst, packed_consts)); } @@ -1864,6 +1977,18 @@ vec4_visitor::visit(ir_expression *ir) unreachable("not reached: should not occur in vertex shader"); case ir_binop_ldexp: unreachable("not reached: should be handled by ldexp_to_arith()"); + case ir_unop_d2f: + case ir_unop_f2d: + case ir_unop_d2i: + case ir_unop_i2d: + case ir_unop_d2u: + case ir_unop_u2d: + case ir_unop_d2b: + case ir_unop_pack_double_2x32: + case ir_unop_unpack_double_2x32: + case ir_unop_frexp_sig: + case ir_unop_frexp_exp: + unreachable("fp64 todo"); } } @@ -1871,43 +1996,16 @@ vec4_visitor::visit(ir_expression *ir) void vec4_visitor::visit(ir_swizzle *ir) { - src_reg src; - int i = 0; - int swizzle[4]; - /* Note that this is only swizzles in expressions, not those on the left * hand side of an assignment, which do write masking. See ir_assignment * for that. */ + const unsigned swz = brw_compose_swizzle( + brw_swizzle_for_size(ir->type->vector_elements), + BRW_SWIZZLE4(ir->mask.x, ir->mask.y, ir->mask.z, ir->mask.w)); ir->val->accept(this); - src = this->result; - assert(src.file != BAD_FILE); - - for (i = 0; i < ir->type->vector_elements; i++) { - switch (i) { - case 0: - swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.x); - break; - case 1: - swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.y); - break; - case 2: - swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.z); - break; - case 3: - swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.w); - break; - } - } - for (; i < 4; i++) { - /* Replicate the last channel out. 
*/ - swizzle[i] = swizzle[ir->type->vector_elements - 1]; - } - - src.swizzle = BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]); - - this->result = src; + this->result = swizzle(this->result, swz); } void @@ -1929,7 +2027,7 @@ vec4_visitor::visit(ir_dereference_variable *ir) return; if (type->is_scalar() || type->is_vector() || type->is_matrix()) - this->result.swizzle = swizzle_for_size(type->vector_elements); + this->result.swizzle = brw_swizzle_for_size(type->vector_elements); } @@ -1988,7 +2086,7 @@ vec4_visitor::visit(ir_dereference_array *ir) /* If the type is smaller than a vec4, replicate the last channel out. */ if (ir->type->is_scalar() || ir->type->is_vector() || ir->type->is_matrix()) - src.swizzle = swizzle_for_size(ir->type->vector_elements); + src.swizzle = brw_swizzle_for_size(ir->type->vector_elements); else src.swizzle = BRW_SWIZZLE_NOOP; src.type = brw_type_for_base_type(ir->type); @@ -2013,7 +2111,7 @@ vec4_visitor::visit(ir_dereference_record *ir) /* If the type is smaller than a vec4, replicate the last channel out. */ if (ir->type->is_scalar() || ir->type->is_vector() || ir->type->is_matrix()) - this->result.swizzle = swizzle_for_size(ir->type->vector_elements); + this->result.swizzle = brw_swizzle_for_size(ir->type->vector_elements); else this->result.swizzle = BRW_SWIZZLE_NOOP; this->result.type = brw_type_for_base_type(ir->type); @@ -2084,7 +2182,7 @@ vec4_visitor::emit_block_move(dst_reg *dst, src_reg *src, dst->writemask = (1 << type->vector_elements) - 1; - src->swizzle = swizzle_for_size(type->vector_elements); + src->swizzle = brw_swizzle_for_size(type->vector_elements); vec4_instruction *inst = emit(MOV(*dst, *src)); inst->predicate = predicate; @@ -2172,7 +2270,7 @@ vec4_visitor::visit(ir_assignment *ir) */ assert(src.swizzle == (ir->rhs->type->is_matrix() - ? swizzle_for_size(ir->rhs->type->vector_elements) + ? brw_swizzle_for_size(ir->rhs->type->vector_elements) : BRW_SWIZZLE_NOOP)); emit_block_move(&dst, &src, ir->rhs->type, predicate); @@ -2189,37 +2287,25 @@ vec4_visitor::visit(ir_assignment *ir) last_rhs_inst = (vec4_instruction *)this->instructions.get_tail(); - src_reg src = this->result; - int swizzles[4]; - int first_enabled_chan = 0; int src_chan = 0; assert(ir->lhs->type->is_vector() || ir->lhs->type->is_scalar()); dst.writemask = ir->write_mask; - for (int i = 0; i < 4; i++) { - if (dst.writemask & (1 << i)) { - first_enabled_chan = BRW_GET_SWZ(src.swizzle, i); - break; - } - } - /* Swizzle a small RHS vector into the channels being written. * * glsl ir treats write_mask as dictating how many channels are * present on the RHS while in our instructions we need to make * those channels appear in the slots of the vec4 they're written to. */ - for (int i = 0; i < 4; i++) { - if (dst.writemask & (1 << i)) - swizzles[i] = BRW_GET_SWZ(src.swizzle, src_chan++); - else - swizzles[i] = first_enabled_chan; - } - src.swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1], - swizzles[2], swizzles[3]); + for (int i = 0; i < 4; i++) + swizzles[i] = (ir->write_mask & (1 << i) ? src_chan++ : 0); + + src_reg src = swizzle(this->result, + BRW_SWIZZLE4(swizzles[0], swizzles[1], + swizzles[2], swizzles[3])); if (try_rewrite_rhs_to_dst(ir, dst, src, pre_rhs_inst, last_rhs_inst)) { return; @@ -2307,9 +2393,7 @@ vec4_visitor::emit_constant_values(dst_reg *dst, ir_constant *ir) emit(MOV(*dst, src_reg(ir->value.u[i]))); break; case GLSL_TYPE_BOOL: - emit(MOV(*dst, - src_reg(ir->value.b[i] != 0 ? 
ctx->Const.UniformBooleanTrue - : 0u))); + emit(MOV(*dst, src_reg(ir->value.b[i] != 0 ? ~0 : 0))); break; default: unreachable("Non-float/uint/int/bool constant"); @@ -2366,6 +2450,8 @@ vec4_visitor::visit_atomic_counter_intrinsic(ir_call *ir) emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dst, offset, src_reg(), src_reg()); } + + brw_mark_surface_used(stage_prog_data, surf_index); } void @@ -2385,16 +2471,31 @@ vec4_visitor::visit(ir_call *ir) src_reg vec4_visitor::emit_mcs_fetch(ir_texture *ir, src_reg coordinate, src_reg sampler) { - vec4_instruction *inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXF_MCS); + vec4_instruction *inst = + new(mem_ctx) vec4_instruction(SHADER_OPCODE_TXF_MCS, + dst_reg(this, glsl_type::uvec4_type)); inst->base_mrf = 2; - inst->mlen = 1; - inst->dst = dst_reg(this, glsl_type::uvec4_type); - inst->dst.writemask = WRITEMASK_XYZW; - inst->src[1] = sampler; + int param_base; + + if (devinfo->gen >= 9) { + /* Gen9+ needs a message header in order to use SIMD4x2 mode */ + vec4_instruction *header_inst = new(mem_ctx) + vec4_instruction(VS_OPCODE_SET_SIMD4X2_HEADER_GEN9, + dst_reg(MRF, inst->base_mrf)); + + emit(header_inst); + + inst->mlen = 2; + inst->header_size = 1; + param_base = inst->base_mrf + 1; + } else { + inst->mlen = 1; + param_base = inst->base_mrf; + } + /* parameters are: u, v, r, lod; lod will always be zero due to api restrictions */ - int param_base = inst->base_mrf; int coord_mask = (1 << ir->coordinate->type->vector_elements) - 1; int zero_mask = 0xf & ~coord_mask; @@ -2409,9 +2510,9 @@ vec4_visitor::emit_mcs_fetch(ir_texture *ir, src_reg coordinate, src_reg sampler } static bool -is_high_sampler(struct brw_context *brw, src_reg sampler) +is_high_sampler(const struct brw_device_info *devinfo, src_reg sampler) { - if (brw->gen < 8 && !brw->is_haswell) + if (devinfo->gen < 8 && !devinfo->is_haswell) return false; return sampler.file != IMM || sampler.fixed_hw_reg.dw1.ud >= 16; @@ -2437,7 +2538,7 @@ vec4_visitor::visit(ir_texture *ir) ->array->type->array_size(); uint32_t max_used = sampler + array_size - 1; - if (ir->op == ir_tg4 && brw->gen < 8) { + if (ir->op == ir_tg4 && devinfo->gen < 8) { max_used += prog_data->base.binding_table.gather_texture_start; } else { max_used += prog_data->base.binding_table.texture_start; @@ -2447,10 +2548,9 @@ vec4_visitor::visit(ir_texture *ir) /* Emit code to evaluate the actual indexing expression */ nonconst_sampler_index->accept(this); - dst_reg temp(this, glsl_type::uint_type); - emit(ADD(temp, this->result, src_reg(sampler))) - ->force_writemask_all = true; - sampler_reg = src_reg(temp); + src_reg temp(this, glsl_type::uint_type); + emit(ADD(dst_reg(temp), this->result, src_reg(sampler))); + sampler_reg = emit_uniformize(temp); } else { /* Single sampler, or constant array index; the indexing expression * is just an immediate. 
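
SHADER_OPCODE_FIND_LIVE_CHANNEL and SHADER_OPCODE_BROADCAST, which
emit_uniformize() pairs up for the non-constant UBO and sampler indices in
this patch, collapse a possibly divergent per-channel value into one value
that is safe to use in a scalar message descriptor. A rough CPU model of the
effect (exec_mask and values[] are hypothetical stand-ins for hardware state):

    #include <cassert>
    #include <cstdint>

    uint32_t uniformize(uint32_t exec_mask, const uint32_t values[], int width)
    {
       assert(exec_mask != 0);              /* at least one live channel */
       for (int ch = 0; ch < width; ch++) {
          if (exec_mask & (1u << ch))       /* FIND_LIVE_CHANNEL: lowest live */
             return values[ch];             /* BROADCAST: replicate its value */
       }
       return 0;                            /* unreachable given the assert */
    }
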
@@ -2524,7 +2624,7 @@ 
       sample_index = this->result;
       sample_index_type = ir->lod_info.sample_index->type;
 
-      if (brw->gen >= 7 && key->tex.compressed_multisample_layout_mask & (1<<sampler))
+      if (devinfo->gen >= 7 && key->tex.compressed_multisample_layout_mask & (1<<sampler))
          mcs = emit_mcs_fetch(ir, coordinate, sampler_reg);
       else
          mcs = src_reg(0u);
 
@@ ... @@ 
-   vec4_instruction *inst = new(mem_ctx) vec4_instruction(this, opcode);
+   vec4_instruction *inst = new(mem_ctx) vec4_instruction(opcode, dst_reg(this, ir->type));
 
    if (ir->offset != NULL && !has_nonconstant_offset) {
       inst->offset =
-         brw_texture_offset(ctx, ir->offset->as_constant()->value.i,
+         brw_texture_offset(ir->offset->as_constant()->value.i,
                             ir->offset->type->vector_elements);
    }
 
@@ -2577,26 +2678,27 @@ 
    /* The message header is necessary for:
     * - Gen4 (always)
+    * - Gen9+ for selecting SIMD4x2
     * - Texel offsets
     * - Gather channel selection
     * - Sampler indices too large to fit in a 4-bit value.
     */
-   inst->header_present =
-      brw->gen < 5 || inst->offset != 0 || ir->op == ir_tg4 ||
-      is_high_sampler(brw, sampler_reg);
+   inst->header_size =
+      (devinfo->gen < 5 || devinfo->gen >= 9 ||
+       inst->offset != 0 || ir->op == ir_tg4 ||
+       is_high_sampler(devinfo, sampler_reg)) ? 1 : 0;
    inst->base_mrf = 2;
-   inst->mlen = inst->header_present + 1; /* always at least one */
-   inst->dst = dst_reg(this, ir->type);
+   inst->mlen = inst->header_size + 1; /* always at least one */
    inst->dst.writemask = WRITEMASK_XYZW;
    inst->shadow_compare = ir->shadow_comparitor != NULL;
 
    inst->src[1] = sampler_reg;
 
    /* MRF for the first parameter */
-   int param_base = inst->base_mrf + inst->header_present;
+   int param_base = inst->base_mrf + inst->header_size;
 
    if (ir->op == ir_txs || ir->op == ir_query_levels) {
-      int writemask = brw->gen == 4 ? WRITEMASK_W : WRITEMASK_X;
+      int writemask = devinfo->gen == 4 ? WRITEMASK_W : WRITEMASK_X;
       emit(MOV(dst_reg(MRF, param_base, lod_type, writemask), lod));
    } else {
       /* Load the coordinate */
@@ -2622,7 +2724,7 @@ 
       /* Load the LOD info */
       if (ir->op == ir_tex || ir->op == ir_txl) {
          int mrf, writemask;
-         if (brw->gen >= 5) {
+         if (devinfo->gen >= 5) {
             mrf = param_base + 1;
             if (ir->shadow_comparitor) {
                writemask = WRITEMASK_Y;
@@ -2631,7 +2733,7 @@ 
             writemask = WRITEMASK_X;
             inst->mlen++;
          }
-      } else /* brw->gen == 4 */ {
+      } else /* devinfo->gen == 4 */ {
          mrf = param_base;
          writemask = WRITEMASK_W;
       }
       emit(MOV(dst_reg(MRF, mrf, lod_type, writemask), lod));
    } else if (ir->op == ir_txf_ms) {
       emit(MOV(dst_reg(MRF, param_base + 1, sample_index_type, WRITEMASK_X),
                sample_index));
-      if (brw->gen >= 7) {
+      if (devinfo->gen >= 7) {
          /* MCS data is in the first channel of `mcs`, but we need to get it into
          * the .y channel of the second vec4 of params, so replicate .x across
          * the whole vec4 and then mask off everything except .y
          */
@@ -2654,7 +2756,7 @@ 
    } else if (ir->op == ir_txd) {
       const glsl_type *type = lod_type;
 
-      if (brw->gen >= 5) {
+      if (devinfo->gen >= 5) {
         dPdx.swizzle = BRW_SWIZZLE4(SWIZZLE_X,SWIZZLE_X,SWIZZLE_Y,SWIZZLE_Y);
         dPdy.swizzle = BRW_SWIZZLE4(SWIZZLE_X,SWIZZLE_X,SWIZZLE_Y,SWIZZLE_Y);
         emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XZ), dPdx));
@@ -2674,7 +2776,7 @@ 
            shadow_comparitor));
        }
      }
-   } else /* brw->gen == 4 */ {
+   } else /* devinfo->gen == 4 */ {
      emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XYZ), dPdx));
      emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_XYZ), dPdy));
      inst->mlen += 2;
   }
@@ -2706,7 +2808,7 @@ 
   }
 }
 
-   if (brw->gen == 6 && ir->op == ir_tg4) {
+   if (devinfo->gen == 6 && ir->op == ir_tg4) { 
emit_gen6_gather_wa(key->tex.gen6_gather_wa[sampler], inst->dst); } @@ -2841,7 +2943,7 @@ vec4_visitor::visit(ir_if *ir) */ this->base_ir = ir->condition; - if (brw->gen == 6) { + if (devinfo->gen == 6) { emit_if_gen6(ir); } else { enum brw_predicate predicate; @@ -2874,6 +2976,12 @@ vec4_visitor::visit(ir_end_primitive *) unreachable("not reached"); } +void +vec4_visitor::visit(ir_barrier *) +{ + unreachable("not reached"); +} + void vec4_visitor::emit_untyped_atomic(unsigned atomic_op, unsigned surf_index, dst_reg dst, src_reg offset, @@ -2901,8 +3009,8 @@ vec4_visitor::emit_untyped_atomic(unsigned atomic_op, unsigned surf_index, * unused channels will be masked out. */ vec4_instruction *inst = emit(SHADER_OPCODE_UNTYPED_ATOMIC, dst, - src_reg(atomic_op), src_reg(surf_index)); - inst->base_mrf = 0; + brw_message_reg(0), + src_reg(surf_index), src_reg(atomic_op)); inst->mlen = mlen; } @@ -2917,9 +3025,9 @@ vec4_visitor::emit_untyped_surface_read(unsigned surf_index, dst_reg dst, * untyped surface read message, but that's OK because unused * channels will be masked out. */ - vec4_instruction *inst = emit(SHADER_OPCODE_UNTYPED_SURFACE_READ, - dst, src_reg(surf_index)); - inst->base_mrf = 0; + vec4_instruction *inst = emit(SHADER_OPCODE_UNTYPED_SURFACE_READ, dst, + brw_message_reg(0), + src_reg(surf_index), src_reg(1)); inst->mlen = 1; } @@ -2949,9 +3057,9 @@ vec4_visitor::emit_ndc_computation() void vec4_visitor::emit_psiz_and_flags(dst_reg reg) { - if (brw->gen < 6 && + if (devinfo->gen < 6 && ((prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) || - key->userclip_active || brw->has_negative_rhw_bug)) { + key->userclip_active || devinfo->has_negative_rhw_bug)) { dst_reg header1 = dst_reg(this, glsl_type::uvec4_type); dst_reg header1_w = header1; header1_w.writemask = WRITEMASK_W; @@ -2990,37 +3098,43 @@ vec4_visitor::emit_psiz_and_flags(dst_reg reg) * Later, clipping will detect ucp[6] and ensure the primitive is * clipped against all fixed planes. 
*/ - if (brw->has_negative_rhw_bug) { + if (devinfo->has_negative_rhw_bug) { src_reg ndc_w = src_reg(output_reg[BRW_VARYING_SLOT_NDC]); ndc_w.swizzle = BRW_SWIZZLE_WWWW; emit(CMP(dst_null_f(), ndc_w, src_reg(0.0f), BRW_CONDITIONAL_L)); vec4_instruction *inst; inst = emit(OR(header1_w, src_reg(header1_w), src_reg(1u << 6))); inst->predicate = BRW_PREDICATE_NORMAL; + output_reg[BRW_VARYING_SLOT_NDC].type = BRW_REGISTER_TYPE_F; inst = emit(MOV(output_reg[BRW_VARYING_SLOT_NDC], src_reg(0.0f))); inst->predicate = BRW_PREDICATE_NORMAL; } emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), src_reg(header1))); - } else if (brw->gen < 6) { + } else if (devinfo->gen < 6) { emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), 0u)); } else { emit(MOV(retype(reg, BRW_REGISTER_TYPE_D), src_reg(0))); if (prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) { dst_reg reg_w = reg; reg_w.writemask = WRITEMASK_W; - emit(MOV(reg_w, src_reg(output_reg[VARYING_SLOT_PSIZ]))); + src_reg reg_as_src = src_reg(output_reg[VARYING_SLOT_PSIZ]); + reg_as_src.type = reg_w.type; + reg_as_src.swizzle = brw_swizzle_for_size(1); + emit(MOV(reg_w, reg_as_src)); } if (prog_data->vue_map.slots_valid & VARYING_BIT_LAYER) { dst_reg reg_y = reg; reg_y.writemask = WRITEMASK_Y; reg_y.type = BRW_REGISTER_TYPE_D; + output_reg[VARYING_SLOT_LAYER].type = reg_y.type; emit(MOV(reg_y, src_reg(output_reg[VARYING_SLOT_LAYER]))); } if (prog_data->vue_map.slots_valid & VARYING_BIT_VIEWPORT) { dst_reg reg_z = reg; reg_z.writemask = WRITEMASK_Z; reg_z.type = BRW_REGISTER_TYPE_D; + output_reg[VARYING_SLOT_VIEWPORT].type = reg_z.type; emit(MOV(reg_z, src_reg(output_reg[VARYING_SLOT_VIEWPORT]))); } } @@ -3058,8 +3172,8 @@ vec4_visitor::emit_clip_distances(dst_reg reg, int offset) vec4_instruction * vec4_visitor::emit_generic_urb_slot(dst_reg reg, int varying) { - assert (varying < VARYING_SLOT_MAX); - reg.type = output_reg[varying].type; + assert(varying < VARYING_SLOT_MAX); + assert(output_reg[varying].type == reg.type); current_annotation = output_reg_annotation[varying]; /* Copy the register, saturating if necessary */ return emit(MOV(reg, src_reg(output_reg[varying]))); @@ -3069,6 +3183,7 @@ void vec4_visitor::emit_urb_slot(dst_reg reg, int varying) { reg.type = BRW_REGISTER_TYPE_F; + output_reg[varying].type = reg.type; switch (varying) { case VARYING_SLOT_PSIZ: @@ -3104,8 +3219,13 @@ vec4_visitor::emit_urb_slot(dst_reg reg, int varying) case VARYING_SLOT_COL1: case VARYING_SLOT_BFC0: case VARYING_SLOT_BFC1: { + /* These built-in varyings are only supported in compatibility mode, + * and we only support GS in core profile. So, this must be a vertex + * shader. + */ + assert(stage == MESA_SHADER_VERTEX); vec4_instruction *inst = emit_generic_urb_slot(reg, varying); - if (key->clamp_vertex_color) + if (((struct brw_vs_prog_key *) key)->clamp_vertex_color) inst->saturate = true; break; } @@ -3117,9 +3237,9 @@ vec4_visitor::emit_urb_slot(dst_reg reg, int varying) } static int -align_interleaved_urb_mlen(struct brw_context *brw, int mlen) +align_interleaved_urb_mlen(const struct brw_device_info *devinfo, int mlen) { - if (brw->gen >= 6) { + if (devinfo->gen >= 6) { /* URB data written (does not include the message header reg) must * be a multiple of 256 bits, or 2 VS registers. See vol5c.5, * section 5.4.3.2.2: URB_INTERLEAVED. 
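
The body of align_interleaved_urb_mlen() falls outside this hunk, but the
rounding its comment describes can be sketched as follows, assuming mlen
counts one message header register plus the data registers, and only the data
portion must align to 256 bits (2 registers):

    /* Illustrative only; the real function also takes devinfo and leaves
     * mlen unchanged before Gen6.
     */
    int align_urb_mlen(int mlen)
    {
       int data_regs = mlen - 1;            /* header reg is not counted */
       data_regs = (data_regs + 1) & ~1;    /* round up to a multiple of 2 */
       return data_regs + 1;                /* put the header back */
    }

    /* e.g. header + 3 data regs (mlen 4) pads to header + 4 (mlen 5) */
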
@@ -3167,7 +3287,7 @@ vec4_visitor::emit_vertex() */ emit_urb_write_header(mrf++); - if (brw->gen < 6) { + if (devinfo->gen < 6) { emit_ndc_computation(); } @@ -3211,7 +3331,7 @@ vec4_visitor::emit_vertex() current_annotation = "URB write"; vec4_instruction *inst = emit_urb_write_opcode(complete); inst->base_mrf = base_mrf; - inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf); + inst->mlen = align_interleaved_urb_mlen(devinfo, mrf - base_mrf); inst->offset += offset; } while(!complete); } @@ -3229,7 +3349,7 @@ vec4_visitor::get_scratch_offset(bblock_t *block, vec4_instruction *inst, /* Pre-gen6, the message header uses byte offsets instead of vec4 * (16-byte) offset units. */ - if (brw->gen < 6) + if (devinfo->gen < 6) message_header_scale *= 16; if (reladdr) { @@ -3259,18 +3379,18 @@ vec4_visitor::get_pull_constant_offset(bblock_t * block, vec4_instruction *inst, /* Pre-gen6, the message header uses byte offsets instead of vec4 * (16-byte) offset units. */ - if (brw->gen < 6) { + if (devinfo->gen < 6) { emit_before(block, inst, MUL(dst_reg(index), index, src_reg(16))); } return index; - } else if (brw->gen >= 8) { + } else if (devinfo->gen >= 8) { /* Store the offset in a GRF so we can send-from-GRF. */ src_reg offset = src_reg(this, glsl_type::int_type); emit_before(block, inst, MOV(dst_reg(offset), src_reg(reg_offset))); return offset; } else { - int message_header_scale = brw->gen < 6 ? 16 : 1; + int message_header_scale = devinfo->gen < 6 ? 16 : 1; return src_reg(reg_offset * message_header_scale); } } @@ -3314,18 +3434,9 @@ vec4_visitor::emit_scratch_write(bblock_t *block, vec4_instruction *inst, * weren't initialized, it will confuse live interval analysis, which will * make spilling fail to make progress. */ - src_reg temp = src_reg(this, glsl_type::vec4_type); - temp.type = inst->dst.type; - int first_writemask_chan = ffs(inst->dst.writemask) - 1; - int swizzles[4]; - for (int i = 0; i < 4; i++) - if (inst->dst.writemask & (1 << i)) - swizzles[i] = i; - else - swizzles[i] = first_writemask_chan; - temp.swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1], - swizzles[2], swizzles[3]); - + const src_reg temp = swizzle(retype(src_reg(this, glsl_type::vec4_type), + inst->dst.type), + brw_swizzle_for_mask(inst->dst.writemask)); dst_reg dst = dst_reg(brw_writemask(brw_vec8_grf(0, 0), inst->dst.writemask)); vec4_instruction *write = SCRATCH_WRITE(dst, temp, index); @@ -3340,6 +3451,39 @@ vec4_visitor::emit_scratch_write(bblock_t *block, vec4_instruction *inst, inst->dst.reladdr = NULL; } +/** + * Checks if \p src and/or \p src.reladdr require a scratch read, and if so, + * adds the scratch read(s) before \p inst. The function also checks for + * recursive reladdr scratch accesses, issuing the corresponding scratch + * loads and rewriting reladdr references accordingly. + * + * \return \p src if it did not require a scratch load, otherwise, the + * register holding the result of the scratch load that the caller should + * use to rewrite src. 
+ */ +src_reg +vec4_visitor::emit_resolve_reladdr(int scratch_loc[], bblock_t *block, + vec4_instruction *inst, src_reg src) +{ + /* Resolve recursive reladdr scratch access by calling ourselves + * with src.reladdr + */ + if (src.reladdr) + *src.reladdr = emit_resolve_reladdr(scratch_loc, block, inst, + *src.reladdr); + + /* Now handle scratch access on src */ + if (src.file == GRF && scratch_loc[src.reg] != -1) { + dst_reg temp = dst_reg(this, glsl_type::vec4_type); + emit_scratch_read(block, inst, temp, src, scratch_loc[src.reg]); + src.reg = temp.reg; + src.reg_offset = temp.reg_offset; + src.reladdr = NULL; + } + + return src; +} + /** * We can't generally support array access in GRF space, because a * single instruction's destination can only span 2 contiguous @@ -3349,7 +3493,7 @@ vec4_visitor::emit_scratch_write(bblock_t *block, vec4_instruction *inst, void vec4_visitor::move_grf_array_access_to_scratch() { - int scratch_loc[this->virtual_grf_count]; + int scratch_loc[this->alloc.count]; memset(scratch_loc, -1, sizeof(scratch_loc)); /* First, calculate the set of virtual GRFs that need to be punted @@ -3357,20 +3501,31 @@ vec4_visitor::move_grf_array_access_to_scratch() * scratch. */ foreach_block_and_inst(block, vec4_instruction, inst, cfg) { - if (inst->dst.file == GRF && inst->dst.reladdr && - scratch_loc[inst->dst.reg] == -1) { - scratch_loc[inst->dst.reg] = c->last_scratch; - c->last_scratch += this->virtual_grf_sizes[inst->dst.reg]; + if (inst->dst.file == GRF && inst->dst.reladdr) { + if (scratch_loc[inst->dst.reg] == -1) { + scratch_loc[inst->dst.reg] = last_scratch; + last_scratch += this->alloc.sizes[inst->dst.reg]; + } + + for (src_reg *iter = inst->dst.reladdr; + iter->reladdr; + iter = iter->reladdr) { + if (iter->file == GRF && scratch_loc[iter->reg] == -1) { + scratch_loc[iter->reg] = last_scratch; + last_scratch += this->alloc.sizes[iter->reg]; + } + } } for (int i = 0 ; i < 3; i++) { - src_reg *src = &inst->src[i]; - - if (src->file == GRF && src->reladdr && - scratch_loc[src->reg] == -1) { - scratch_loc[src->reg] = c->last_scratch; - c->last_scratch += this->virtual_grf_sizes[src->reg]; - } + for (src_reg *iter = &inst->src[i]; + iter->reladdr; + iter = iter->reladdr) { + if (iter->file == GRF && scratch_loc[iter->reg] == -1) { + scratch_loc[iter->reg] = last_scratch; + last_scratch += this->alloc.sizes[iter->reg]; + } + } } } @@ -3384,23 +3539,27 @@ vec4_visitor::move_grf_array_access_to_scratch() base_ir = inst->ir; current_annotation = inst->annotation; - if (inst->dst.file == GRF && scratch_loc[inst->dst.reg] != -1) { - emit_scratch_write(block, inst, scratch_loc[inst->dst.reg]); - } - - for (int i = 0 ; i < 3; i++) { - if (inst->src[i].file != GRF || scratch_loc[inst->src[i].reg] == -1) - continue; - - dst_reg temp = dst_reg(this, glsl_type::vec4_type); + /* First handle scratch access on the dst. Notice we have to handle + * the case where the dst's reladdr also points to scratch space. 
+ */
+ if (inst->dst.reladdr)
+ *inst->dst.reladdr = emit_resolve_reladdr(scratch_loc, block, inst,
+ *inst->dst.reladdr);
 
- emit_scratch_read(block, inst, temp, inst->src[i],
- scratch_loc[inst->src[i].reg]);
+ /* Now that we have handled any (possibly recursive) reladdr scratch
+ * accesses for dst, we can safely do the scratch write for dst itself.
+ */
+ if (inst->dst.file == GRF && scratch_loc[inst->dst.reg] != -1)
+ emit_scratch_write(block, inst, scratch_loc[inst->dst.reg]);
 
- inst->src[i].file = temp.file;
- inst->src[i].reg = temp.reg;
- inst->src[i].reg_offset = temp.reg_offset;
- inst->src[i].reladdr = NULL;
+ /* Now handle scratch access on any src. In this case, since inst->src[i]
+ * is already a src_reg, we can just call emit_resolve_reladdr with
+ * inst->src[i] and it will take care of handling scratch loads for
+ * both src and src.reladdr (recursively).
+ */
+ for (int i = 0 ; i < 3; i++) {
+ inst->src[i] = emit_resolve_reladdr(scratch_loc, block, inst,
+ inst->src[i]);
+ }
 }
 }
 
@@ -3418,23 +3577,11 @@ vec4_visitor::emit_pull_constant_load(bblock_t *block, vec4_instruction *inst,
 src_reg index = src_reg(prog_data->base.binding_table.pull_constants_start);
 src_reg offset = get_pull_constant_offset(block, inst, orig_src.reladdr,
 reg_offset);
- vec4_instruction *load;
- if (brw->gen >= 7) {
- dst_reg grf_offset = dst_reg(this, glsl_type::int_type);
- grf_offset.type = offset.type;
- emit_before(block, inst, MOV(grf_offset, offset));
-
- load = new(mem_ctx) vec4_instruction(this,
- VS_OPCODE_PULL_CONSTANT_LOAD_GEN7,
- temp, index, src_reg(grf_offset));
- } else {
- load = new(mem_ctx) vec4_instruction(this, VS_OPCODE_PULL_CONSTANT_LOAD,
- temp, index, offset);
- load->base_mrf = 14;
- load->mlen = 1;
- }
- emit_before(block, inst, load);
+ emit_pull_constant_load_reg(temp,
+ index,
+ offset,
+ block, inst);
 }
 
 /**
@@ -3528,34 +3675,49 @@ vec4_visitor::resolve_ud_negate(src_reg *reg)
 *reg = temp;
 }
 
-vec4_visitor::vec4_visitor(struct brw_context *brw,
- struct brw_vec4_compile *c,
+/**
+ * Resolve the result of a Gen4-5 CMP instruction to a proper boolean.
+ *
+ * CMP on Gen4-5 only sets the LSB of the result; the rest are undefined.
+ * If we need a proper boolean value, we have to fix it up to be 0 or ~0.
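+ *
+ * The fixup below ANDs the result with 1, leaving 0 or 1 in each channel,
+ * and then negates it, so an integer 1 becomes -1, i.e. ~0.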
+ */ +void +vec4_visitor::resolve_bool_comparison(ir_rvalue *rvalue, src_reg *reg) +{ + assert(devinfo->gen <= 5); + + if (!rvalue->type->is_boolean()) + return; + + src_reg and_result = src_reg(this, rvalue->type); + src_reg neg_result = src_reg(this, rvalue->type); + emit(AND(dst_reg(and_result), *reg, src_reg(1))); + emit(MOV(dst_reg(neg_result), negate(and_result))); + *reg = neg_result; +} + +vec4_visitor::vec4_visitor(const struct brw_compiler *compiler, + void *log_data, struct gl_program *prog, - const struct brw_vec4_prog_key *key, - struct brw_vec4_prog_data *prog_data, + const struct brw_vue_prog_key *key, + struct brw_vue_prog_data *prog_data, struct gl_shader_program *shader_prog, gl_shader_stage stage, void *mem_ctx, - bool debug_flag, bool no_spills, - shader_time_shader_type st_base, - shader_time_shader_type st_written, - shader_time_shader_type st_reset) - : backend_visitor(brw, shader_prog, prog, &prog_data->base, stage), - c(c), + int shader_time_index) + : backend_shader(compiler, log_data, mem_ctx, + shader_prog, prog, &prog_data->base, stage), key(key), prog_data(prog_data), sanity_param_count(0), fail_msg(NULL), first_non_payload_grf(0), need_all_constants_in_pull_buffer(false), - debug_flag(debug_flag), no_spills(no_spills), - st_base(st_base), - st_written(st_written), - st_reset(st_reset) + shader_time_index(shader_time_index), + last_scratch(0) { - this->mem_ctx = mem_ctx; this->failed = false; this->base_ir = NULL; @@ -3568,14 +3730,9 @@ vec4_visitor::vec4_visitor(struct brw_context *brw, this->virtual_grf_start = NULL; this->virtual_grf_end = NULL; - this->virtual_grf_sizes = NULL; - this->virtual_grf_count = 0; - this->virtual_grf_reg_map = NULL; - this->virtual_grf_reg_count = 0; - this->virtual_grf_array_size = 0; this->live_intervals = NULL; - this->max_grf = brw->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF; + this->max_grf = devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF; this->uniforms = 0; @@ -3584,7 +3741,8 @@ vec4_visitor::vec4_visitor(struct brw_context *brw, */ this->uniform_array_size = 1; if (prog_data) { - this->uniform_array_size = MAX2(stage_prog_data->nr_params, 1); + this->uniform_array_size = + MAX2(DIV_ROUND_UP(stage_prog_data->nr_params, 4), 1); } this->uniform_size = rzalloc_array(mem_ctx, int, this->uniform_array_size); @@ -3611,11 +3769,11 @@ vec4_visitor::fail(const char *format, ...) va_start(va, format); msg = ralloc_vasprintf(mem_ctx, format, va); va_end(va); - msg = ralloc_asprintf(mem_ctx, "vec4 compile failed: %s\n", msg); + msg = ralloc_asprintf(mem_ctx, "%s compile failed: %s\n", stage_abbrev, msg); this->fail_msg = msg; - if (debug_flag) { + if (debug_enabled) { fprintf(stderr, "%s", msg); } }
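
The brw_swizzle_for_mask() helper used by the new emit_scratch_write() code
replaces the open-coded swizzle computation removed above. Below is a minimal
sketch of the equivalent logic, reconstructed from the removed lines only;
the helper's actual definition lives elsewhere in the tree and may differ in
detail, and the name swizzle_for_mask_sketch is ours:

/* Sketch only: channels enabled in the writemask keep their own index,
 * while disabled channels repeat the first enabled channel, so every
 * component of the scratch-write temporary reads initialized data.
 * ffs() comes from <strings.h>; BRW_SWIZZLE4 is the existing brw_reg.h
 * macro used by the removed code.
 */
static unsigned
swizzle_for_mask_sketch(unsigned writemask)
{
   const int first_chan = ffs(writemask) - 1;
   int swizzles[4];

   for (int i = 0; i < 4; i++)
      swizzles[i] = (writemask & (1 << i)) ? i : first_chan;

   return BRW_SWIZZLE4(swizzles[0], swizzles[1],
                       swizzles[2], swizzles[3]);
}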