#include "glsl/nir/glsl_to_nir.h"
#include "brw_fs.h"
-void
-fs_visitor::emit_nir_code()
+static void
+nir_optimize(nir_shader *nir)
{
- /* first, lower the GLSL IR shader to NIR */
- lower_output_reads(shader->base.ir);
- nir_shader *nir = glsl_to_nir(shader->base.ir, NULL, true);
- nir_validate_shader(nir);
-
- nir_lower_global_vars_to_local(nir);
- nir_validate_shader(nir);
-
- nir_split_var_copies(nir);
- nir_validate_shader(nir);
-
bool progress;
do {
progress = false;
- nir_lower_variables(nir);
+ nir_lower_vars_to_ssa(nir);
+ nir_validate_shader(nir);
+ nir_lower_alu_to_scalar(nir);
+ nir_validate_shader(nir);
+ progress |= nir_copy_prop(nir);
+ nir_validate_shader(nir);
+ nir_lower_phis_to_scalar(nir);
nir_validate_shader(nir);
progress |= nir_copy_prop(nir);
nir_validate_shader(nir);
progress |= nir_opt_constant_folding(nir);
nir_validate_shader(nir);
+ progress |= nir_opt_remove_phis(nir);
+ nir_validate_shader(nir);
} while (progress);
+}
- /* Lower a bunch of stuff */
- nir_lower_io(nir);
- nir_validate_shader(nir);
+static bool
+count_nir_instrs_in_block(nir_block *block, void *state)
+{
+ int *count = (int *) state;
+ nir_foreach_instr(block, instr) {
+ *count = *count + 1;
+ }
+ return true;
+}
- nir_lower_locals_to_regs(nir);
+static int
+count_nir_instrs(nir_shader *nir)
+{
+ int count = 0;
+ nir_foreach_overload(nir, overload) {
+ if (!overload->impl)
+ continue;
+ nir_foreach_block(overload->impl, count_nir_instrs_in_block, &count);
+ }
+ return count;
+}
+
+void
+fs_visitor::emit_nir_code()
+{
+ const nir_shader_compiler_options *options =
+ ctx->Const.ShaderCompilerOptions[stage].NirOptions;
+
+ /* first, lower the GLSL IR shader to NIR */
+ lower_output_reads(shader->base.ir);
+ nir_shader *nir = glsl_to_nir(&shader->base, options);
nir_validate_shader(nir);
- nir_remove_dead_variables(nir);
+ nir_lower_global_vars_to_local(nir);
nir_validate_shader(nir);
- nir_lower_to_source_mods(nir);
+ nir_split_var_copies(nir);
nir_validate_shader(nir);
- nir_copy_prop(nir);
+
+ nir_optimize(nir);
+
+ /* Lower a bunch of stuff */
+ nir_lower_var_copies(nir);
nir_validate_shader(nir);
- nir_convert_from_ssa(nir);
+
+ /* Get rid of split copies */
+ nir_optimize(nir);
+
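+ /* Assign packed locations: uniforms are laid out so the directly-addressed
+ * ones come first (their count lands in num_direct_uniforms, which
+ * nir_setup_uniforms uses to split param_size), then inputs and outputs get
+ * contiguous driver_locations for the flat nir_inputs/nir_outputs arrays
+ * set up below.
+ */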
+ nir_assign_var_locations_scalar_direct_first(nir, &nir->uniforms,
+ &num_direct_uniforms,
+ &nir->num_uniforms);
+ nir_assign_var_locations_scalar(&nir->inputs, &nir->num_inputs);
+ nir_assign_var_locations_scalar(&nir->outputs, &nir->num_outputs);
+
+ nir_lower_io(nir);
nir_validate_shader(nir);
- nir_lower_vec_to_movs(nir);
+
+ nir_remove_dead_variables(nir);
nir_validate_shader(nir);
nir_lower_samplers(nir, shader_prog, shader->base.Program);
nir_lower_atomics(nir);
nir_validate_shader(nir);
+ nir_optimize(nir);
+
+ nir_lower_locals_to_regs(nir);
+ nir_validate_shader(nir);
+
+ nir_lower_to_source_mods(nir);
+ nir_validate_shader(nir);
+ nir_copy_prop(nir);
+ nir_validate_shader(nir);
+
+ if (unlikely(debug_enabled)) {
+ fprintf(stderr, "NIR (SSA form) for %s shader:\n", stage_name);
+ nir_print_shader(nir, stderr);
+ }
+
+ if (dispatch_width == 8) {
+ static GLuint msg_id = 0;
+ _mesa_gl_debug(&brw->ctx, &msg_id,
+ MESA_DEBUG_SOURCE_SHADER_COMPILER,
+ MESA_DEBUG_TYPE_OTHER,
+ MESA_DEBUG_SEVERITY_NOTIFICATION,
+ "%s NIR shader: %d inst\n",
+ stage_abbrev,
+ count_nir_instrs(nir));
+ }
+
+ nir_convert_from_ssa(nir);
+ nir_validate_shader(nir);
+
/* emit the arrays used for inputs and outputs - load/store intrinsics will
* be converted to reads/writes of these arrays
*/
if (nir->num_inputs > 0) {
- nir_inputs = fs_reg(GRF, virtual_grf_alloc(nir->num_inputs));
+ nir_inputs = vgrf(nir->num_inputs);
nir_setup_inputs(nir);
}
if (nir->num_outputs > 0) {
- nir_outputs = fs_reg(GRF, virtual_grf_alloc(nir->num_outputs));
+ nir_outputs = vgrf(nir->num_outputs);
nir_setup_outputs(nir);
}
if (nir->num_uniforms > 0) {
- nir_uniforms = fs_reg(UNIFORM, 0);
nir_setup_uniforms(nir);
}
+ nir_emit_system_values(nir);
+
nir_globals = ralloc_array(mem_ctx, fs_reg, nir->reg_alloc);
foreach_list_typed(nir_register, reg, node, &nir->registers) {
unsigned array_elems =
reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
unsigned size = array_elems * reg->num_components;
- nir_globals[reg->index] = fs_reg(GRF, virtual_grf_alloc(size));
+ nir_globals[reg->index] = vgrf(size);
}
/* get the main function and emit it */
nir_emit_impl(overload->impl);
}
+ if (unlikely(debug_enabled)) {
+ fprintf(stderr, "NIR (final form) for %s shader:\n", stage_name);
+ nir_print_shader(nir, stderr);
+ }
+
ralloc_free(nir);
}
void
fs_visitor::nir_setup_inputs(nir_shader *shader)
{
- fs_reg varying = nir_inputs;
-
- struct hash_entry *entry;
- hash_table_foreach(shader->inputs, entry) {
- nir_variable *var = (nir_variable *) entry->data;
- varying.reg_offset = var->data.driver_location;
+ foreach_list_typed(nir_variable, var, node, &shader->inputs) {
+ enum brw_reg_type type = brw_type_for_base_type(var->type);
+ fs_reg input = offset(nir_inputs, var->data.driver_location);
fs_reg reg;
- if (!strcmp(var->name, "gl_FragCoord")) {
- reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
- var->data.origin_upper_left);
- emit_percomp(MOV(varying, reg), 0xF);
- } else if (!strcmp(var->name, "gl_FrontFacing")) {
- reg = *emit_frontfacing_interpolation();
- emit(MOV(retype(varying, BRW_REGISTER_TYPE_UD), reg));
- } else {
- emit_general_interpolation(varying, var->name, var->type,
- (glsl_interp_qualifier) var->data.interpolation,
- var->data.location, var->data.centroid,
- var->data.sample);
+ switch (stage) {
+ case MESA_SHADER_VERTEX: {
+ /* Our ATTR file is indexed by VERT_ATTRIB_*, which is the value
+ * stored in nir_variable::data.location.
+ *
+ * However, NIR's load_input intrinsics use a different index - an
+ * offset into a single contiguous array containing all inputs.
+ * This index corresponds to the nir_variable::data.driver_location field.
+ *
+ * So, we need to copy from fs_reg(ATTR, var->data.location) to
+ * offset(nir_inputs, var->data.driver_location).
+ */
+ unsigned components = var->type->without_array()->components();
+ unsigned array_length = var->type->is_array() ? var->type->length : 1;
+ for (unsigned i = 0; i < array_length; i++) {
+ for (unsigned j = 0; j < components; j++) {
+ emit(MOV(retype(offset(input, components * i + j), type),
+ offset(fs_reg(ATTR, var->data.location + i, type), j)));
+ }
+ }
+ break;
+ }
+ case MESA_SHADER_GEOMETRY:
+ case MESA_SHADER_COMPUTE:
+ unreachable("fs_visitor not used for these stages yet.");
+ break;
+ case MESA_SHADER_FRAGMENT:
+ if (var->data.location == VARYING_SLOT_POS) {
+ reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
+ var->data.origin_upper_left);
+ emit_percomp(MOV(input, reg), 0xF);
+ } else {
+ emit_general_interpolation(input, var->name, var->type,
+ (glsl_interp_qualifier) var->data.interpolation,
+ var->data.location, var->data.centroid,
+ var->data.sample);
+ }
+ break;
}
}
}
fs_visitor::nir_setup_outputs(nir_shader *shader)
{
brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
- fs_reg reg = nir_outputs;
- struct hash_entry *entry;
- hash_table_foreach(shader->outputs, entry) {
- nir_variable *var = (nir_variable *) entry->data;
- reg.reg_offset = var->data.driver_location;
+ foreach_list_typed(nir_variable, var, node, &shader->outputs) {
+ fs_reg reg = offset(nir_outputs, var->data.driver_location);
- if (var->data.index > 0) {
+ int vector_elements =
+ var->type->is_array() ? var->type->fields.array->vector_elements
+ : var->type->vector_elements;
+
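+ /* Vertex outputs occupy vec4-sized slots: record one outputs[] entry per
+ * slot, each pointing four components further into the flat nir_outputs
+ * storage.
+ */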
+ if (stage == MESA_SHADER_VERTEX) {
+ for (int i = 0; i < ALIGN(type_size(var->type), 4) / 4; i++) {
+ int output = var->data.location + i;
+ this->outputs[output] = offset(reg, 4 * i);
+ this->output_components[output] = vector_elements;
+ }
+ } else if (var->data.index > 0) {
assert(var->data.location == FRAG_RESULT_DATA0);
assert(var->data.index == 1);
this->dual_src_output = reg;
assert(var->data.location >= FRAG_RESULT_DATA0 &&
var->data.location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);
- int vector_elements =
- var->type->is_array() ? var->type->fields.array->vector_elements
- : var->type->vector_elements;
-
/* General color output. */
for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
int output = var->data.location - FRAG_RESULT_DATA0 + i;
- this->outputs[output] = reg;
- this->outputs[output].reg_offset += vector_elements * i;
+ this->outputs[output] = offset(reg, vector_elements * i);
this->output_components[output] = vector_elements;
}
}
fs_visitor::nir_setup_uniforms(nir_shader *shader)
{
uniforms = shader->num_uniforms;
- param_size[0] = shader->num_uniforms;
+
+ /* We split the uniform register file in half. The first half is
+ * entirely direct uniforms. The second half is indirect.
+ */
+ param_size[0] = num_direct_uniforms;
+ if (shader->num_uniforms > num_direct_uniforms)
+ param_size[num_direct_uniforms] = shader->num_uniforms - num_direct_uniforms;
if (dispatch_width != 8)
return;
- struct hash_entry *entry;
- hash_table_foreach(shader->uniforms, entry) {
- nir_variable *var = (nir_variable *) entry->data;
-
+ foreach_list_typed(nir_variable, var, node, &shader->uniforms) {
/* UBOs and atomics don't take up space in the uniform file */
if (var->interface_type != NULL || var->type->contains_atomic())
}
}
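+/* Walk one basic block looking for system-value intrinsics (vertex id,
+ * instance id, sample id/pos/mask) and lazily allocate the corresponding
+ * setup register in nir_system_values[], so the per-intrinsic emission
+ * later can simply MOV from it.
+ */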
+static bool
+emit_system_values_block(nir_block *block, void *void_visitor)
+{
+ fs_visitor *v = (fs_visitor *)void_visitor;
+ fs_reg *reg;
+
+ nir_foreach_instr(block, instr) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+ switch (intrin->intrinsic) {
+ case nir_intrinsic_load_vertex_id:
+ unreachable("should be lowered by lower_vertex_id().");
+
+ case nir_intrinsic_load_vertex_id_zero_base:
+ assert(v->stage == MESA_SHADER_VERTEX);
+ reg = &v->nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
+ if (reg->file == BAD_FILE)
+ *reg = *v->emit_vs_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
+ break;
+
+ case nir_intrinsic_load_base_vertex:
+ assert(v->stage == MESA_SHADER_VERTEX);
+ reg = &v->nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
+ if (reg->file == BAD_FILE)
+ *reg = *v->emit_vs_system_value(SYSTEM_VALUE_BASE_VERTEX);
+ break;
+
+ case nir_intrinsic_load_instance_id:
+ assert(v->stage == MESA_SHADER_VERTEX);
+ reg = &v->nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
+ if (reg->file == BAD_FILE)
+ *reg = *v->emit_vs_system_value(SYSTEM_VALUE_INSTANCE_ID);
+ break;
+
+ case nir_intrinsic_load_sample_pos:
+ assert(v->stage == MESA_SHADER_FRAGMENT);
+ reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
+ if (reg->file == BAD_FILE)
+ *reg = *v->emit_samplepos_setup();
+ break;
+
+ case nir_intrinsic_load_sample_id:
+ assert(v->stage == MESA_SHADER_FRAGMENT);
+ reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
+ if (reg->file == BAD_FILE)
+ *reg = *v->emit_sampleid_setup();
+ break;
+
+ case nir_intrinsic_load_sample_mask_in:
+ assert(v->stage == MESA_SHADER_FRAGMENT);
+ assert(v->brw->gen >= 7);
+ reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
+ if (reg->file == BAD_FILE)
+ *reg = fs_reg(retype(brw_vec8_grf(v->payload.sample_mask_in_reg, 0),
+ BRW_REGISTER_TYPE_D));
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ return true;
+}
+
+void
+fs_visitor::nir_emit_system_values(nir_shader *shader)
+{
+ nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
+ nir_foreach_overload(shader, overload) {
+ assert(strcmp(overload->function->name, "main") == 0);
+ assert(overload->impl);
+ nir_foreach_block(overload->impl, emit_system_values_block, this);
+ }
+}
+
void
fs_visitor::nir_emit_impl(nir_function_impl *impl)
{
unsigned array_elems =
reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
unsigned size = array_elems * reg->num_components;
- nir_locals[reg->index] = fs_reg(GRF, virtual_grf_alloc(size));
+ nir_locals[reg->index] = vgrf(size);
}
nir_emit_cf_list(&impl->body);
void
fs_visitor::nir_emit_cf_list(exec_list *list)
{
+ exec_list_validate(list);
foreach_list_typed(nir_cf_node, node, node, list) {
switch (node->type) {
case nir_cf_node_if:
void
fs_visitor::nir_emit_if(nir_if *if_stmt)
{
- if (brw->gen < 6) {
- no16("Can't support (non-uniform) control flow on SIMD16\n");
- }
-
/* first, put the condition into f0 */
fs_inst *inst = emit(MOV(reg_null_d,
retype(get_nir_src(if_stmt->condition),
emit(BRW_OPCODE_ENDIF);
- try_replace_with_sel();
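+ /* If the IF/ELSE could be collapsed into a SEL we can still run SIMD16 on
+ * Gen4/5; only fall back to SIMD8 when real control flow remains.
+ */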
+ if (!try_replace_with_sel() && brw->gen < 6) {
+ no16("Can't support (non-uniform) control flow on SIMD16\n");
+ }
}
void
break;
case nir_instr_type_load_const:
- nir_emit_load_const(nir_instr_as_load_const(instr));
+ /* We can hit these, but we do nothing now and use them as
+ * immediates later.
+ */
break;
case nir_instr_type_jump:
return BRW_REGISTER_TYPE_F;
}
+bool
+fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
+ const fs_reg &result)
+{
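+ /* Try to fold a (gl_FrontFacing ? 1.0 : -1.0)-style select into bit
+ * operations on the payload bit that encodes facing; the gen-specific
+ * instruction sequences are described in the comments below.
+ */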
+ if (instr->src[0].src.is_ssa ||
+ !instr->src[0].src.reg.reg ||
+ !instr->src[0].src.reg.reg->parent_instr)
+ return false;
+
+ if (instr->src[0].src.reg.reg->parent_instr->type !=
+ nir_instr_type_intrinsic)
+ return false;
+
+ nir_intrinsic_instr *src0 =
+ nir_instr_as_intrinsic(instr->src[0].src.reg.reg->parent_instr);
+
+ if (src0->intrinsic != nir_intrinsic_load_front_face)
+ return false;
+
+ nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
+ if (!value1 || fabsf(value1->f[0]) != 1.0f)
+ return false;
+
+ nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
+ if (!value2 || fabsf(value2->f[0]) != 1.0f)
+ return false;
+
+ fs_reg tmp = vgrf(glsl_type::int_type);
+
+ if (brw->gen >= 6) {
+ /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
+ fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
+
+ /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
+ *
+ * or(8) tmp.1<2>W g0.0<0,1,0>W 0x00003f80W
+ * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
+ *
+ * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
+ *
+ * This negation looks like it's safe in practice, because bits 0:4 will
+ * surely be TRIANGLES
+ */
+
+ if (value1->f[0] == -1.0f) {
+ g0.negate = true;
+ }
+
+ tmp.type = BRW_REGISTER_TYPE_W;
+ tmp.subreg_offset = 2;
+ tmp.stride = 2;
+
+ fs_inst *or_inst = emit(OR(tmp, g0, fs_reg(0x3f80)));
+ or_inst->src[1].type = BRW_REGISTER_TYPE_UW;
+
+ tmp.type = BRW_REGISTER_TYPE_D;
+ tmp.subreg_offset = 0;
+ tmp.stride = 1;
+ } else {
+ /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
+ fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
+
+ /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
+ *
+ * or(8) tmp<1>D g1.6<0,1,0>D 0x3f800000D
+ * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
+ *
+ * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
+ *
+ * This negation looks like it's safe in practice, because bits 0:4 will
+ * surely be TRIANGLES
+ */
+
+ if (value1->f[0] == -1.0f) {
+ g1_6.negate = true;
+ }
+
+ emit(OR(tmp, g1_6, fs_reg(0x3f800000)));
+ }
+ emit(AND(retype(result, BRW_REGISTER_TYPE_D), tmp, fs_reg(0xbf800000)));
+
+ return true;
+}
+
void
fs_visitor::nir_emit_alu(nir_alu_instr *instr)
{
struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
+ fs_inst *inst;
- fs_reg op[3];
- fs_reg dest = get_nir_dest(instr->dest.dest);
- dest.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);
+ fs_reg result = get_nir_dest(instr->dest.dest);
+ result.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);
- fs_reg result;
- if (instr->has_predicate) {
- result = fs_reg(GRF, virtual_grf_alloc(4));
- result.type = dest.type;
- } else {
- result = dest;
+ fs_reg op[4];
+ for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
+ op[i] = get_nir_src(instr->src[i].src);
+ op[i].type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[i]);
+ op[i].abs = instr->src[i].abs;
+ op[i].negate = instr->src[i].negate;
+ }
+
+ /* We get a bunch of MOVs out of the from_ssa pass and they may still
+ * be vectorized. We'll handle them as a special case. We'll also
+ * handle vecN here because it's basically the same thing.
+ */
+ switch (instr->op) {
+ case nir_op_imov:
+ case nir_op_fmov:
+ case nir_op_vec2:
+ case nir_op_vec3:
+ case nir_op_vec4: {
+ fs_reg temp = result;
+ bool need_extra_copy = false;
+ for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
+ if (!instr->src[i].src.is_ssa &&
+ instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
+ need_extra_copy = true;
+ temp = retype(vgrf(4), result.type);
+ break;
+ }
+ }
+
+ for (unsigned i = 0; i < 4; i++) {
+ if (!(instr->dest.write_mask & (1 << i)))
+ continue;
+
+ if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
+ inst = emit(MOV(offset(temp, i),
+ offset(op[0], instr->src[0].swizzle[i])));
+ } else {
+ inst = emit(MOV(offset(temp, i),
+ offset(op[i], instr->src[i].swizzle[0])));
+ }
+ inst->saturate = instr->dest.saturate;
+ }
+
+ /* In this case the source and destination registers were the same,
+ * so we need to insert an extra set of moves in order to deal with
+ * any swizzling.
+ */
+ if (need_extra_copy) {
+ for (unsigned i = 0; i < 4; i++) {
+ if (!(instr->dest.write_mask & (1 << i)))
+ continue;
+
+ emit(MOV(offset(result, i), offset(temp, i)));
+ }
+ }
+ return;
}
+ default:
+ break;
+ }
+
+ /* At this point, we have dealt with any instruction that operates on
+ * more than a single channel. Therefore, we can just adjust the source
+ * and destination registers for that channel and emit the instruction.
+ */
+ unsigned channel = 0;
+ if (nir_op_infos[instr->op].output_size == 0) {
+ /* Since NIR is doing the scalarizing for us, we should only ever see
+ * vectorized operations with a single channel.
+ */
+ assert(_mesa_bitcount(instr->dest.write_mask) == 1);
+ channel = ffs(instr->dest.write_mask) - 1;
+ result = offset(result, channel);
+ }
- for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
- op[i] = get_nir_alu_src(instr, i);
+ for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
+ assert(nir_op_infos[instr->op].input_sizes[i] < 2);
+ op[i] = offset(op[i], instr->src[i].swizzle[channel]);
+ }
switch (instr->op) {
- case nir_op_fmov:
case nir_op_i2f:
- case nir_op_u2f: {
- fs_inst *inst = MOV(result, op[0]);
+ case nir_op_u2f:
+ inst = emit(MOV(result, op[0]));
inst->saturate = instr->dest.saturate;
- emit_percomp(inst, instr->dest.write_mask);
- }
break;
- case nir_op_imov:
case nir_op_f2i:
case nir_op_f2u:
- emit_percomp(MOV(result, op[0]), instr->dest.write_mask);
+ emit(MOV(result, op[0]));
break;
case nir_op_fsign: {
* Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
* zero.
*/
- emit_percomp(CMP(reg_null_f, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ),
- instr->dest.write_mask);
+ emit(CMP(reg_null_f, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ));
fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
op[0].type = BRW_REGISTER_TYPE_UD;
result.type = BRW_REGISTER_TYPE_UD;
- emit_percomp(AND(result_int, op[0], fs_reg(0x80000000u)),
- instr->dest.write_mask);
+ emit(AND(result_int, op[0], fs_reg(0x80000000u)));
- fs_inst *inst = OR(result_int, result_int, fs_reg(0x3f800000u));
+ inst = emit(OR(result_int, result_int, fs_reg(0x3f800000u)));
inst->predicate = BRW_PREDICATE_NORMAL;
- emit_percomp(inst, instr->dest.write_mask);
if (instr->dest.saturate) {
- fs_inst *inst = MOV(result, result);
+ inst = emit(MOV(result, result));
inst->saturate = true;
- emit_percomp(inst, instr->dest.write_mask);
}
break;
}
- case nir_op_isign: {
+ case nir_op_isign:
/* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
- * -> non-negative val generates 0x00000000.
- * Predicated OR sets 1 if val is positive.
- */
- emit_percomp(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_G),
- instr->dest.write_mask);
-
- emit_percomp(ASR(result, op[0], fs_reg(31)), instr->dest.write_mask);
-
- fs_inst *inst = OR(result, result, fs_reg(1));
+ * -> non-negative val generates 0x00000000.
+ * Predicated OR sets 1 if val is positive.
+ */
+ emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_G));
+ emit(ASR(result, op[0], fs_reg(31)));
+ inst = emit(OR(result, result, fs_reg(1)));
inst->predicate = BRW_PREDICATE_NORMAL;
- emit_percomp(inst, instr->dest.write_mask);
break;
- }
case nir_op_frcp:
- emit_math_percomp(SHADER_OPCODE_RCP, result, op[0],
- instr->dest.write_mask, instr->dest.saturate);
+ inst = emit_math(SHADER_OPCODE_RCP, result, op[0]);
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_fexp2:
- emit_math_percomp(SHADER_OPCODE_EXP2, result, op[0],
- instr->dest.write_mask, instr->dest.saturate);
+ inst = emit_math(SHADER_OPCODE_EXP2, result, op[0]);
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_flog2:
- emit_math_percomp(SHADER_OPCODE_LOG2, result, op[0],
- instr->dest.write_mask, instr->dest.saturate);
+ inst = emit_math(SHADER_OPCODE_LOG2, result, op[0]);
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_fexp:
case nir_op_fsin:
case nir_op_fsin_reduced:
- emit_math_percomp(SHADER_OPCODE_SIN, result, op[0],
- instr->dest.write_mask, instr->dest.saturate);
+ inst = emit_math(SHADER_OPCODE_SIN, result, op[0]);
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_fcos:
case nir_op_fcos_reduced:
- emit_math_percomp(SHADER_OPCODE_COS, result, op[0],
- instr->dest.write_mask, instr->dest.saturate);
+ inst = emit_math(SHADER_OPCODE_COS, result, op[0]);
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_fddx:
- if (fs_key->high_quality_derivatives)
- emit_percomp(FS_OPCODE_DDX_FINE, result, op[0],
- instr->dest.write_mask, instr->dest.saturate);
- else
- emit_percomp(FS_OPCODE_DDX_COARSE, result, op[0],
- instr->dest.write_mask, instr->dest.saturate);
+ if (fs_key->high_quality_derivatives) {
+ inst = emit(FS_OPCODE_DDX_FINE, result, op[0]);
+ } else {
+ inst = emit(FS_OPCODE_DDX_COARSE, result, op[0]);
+ }
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_fddx_fine:
- emit_percomp(FS_OPCODE_DDX_FINE, result, op[0],
- instr->dest.write_mask, instr->dest.saturate);
+ inst = emit(FS_OPCODE_DDX_FINE, result, op[0]);
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_fddx_coarse:
- emit_percomp(FS_OPCODE_DDX_COARSE, result, op[0],
- instr->dest.write_mask, instr->dest.saturate);
+ inst = emit(FS_OPCODE_DDX_COARSE, result, op[0]);
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_fddy:
- if (fs_key->high_quality_derivatives)
- emit_percomp(FS_OPCODE_DDY_FINE, result, op[0],
- fs_reg(fs_key->render_to_fbo),
- instr->dest.write_mask, instr->dest.saturate);
- else
- emit_percomp(FS_OPCODE_DDY_COARSE, result, op[0],
- fs_reg(fs_key->render_to_fbo),
- instr->dest.write_mask, instr->dest.saturate);
+ if (fs_key->high_quality_derivatives) {
+ inst = emit(FS_OPCODE_DDY_FINE, result, op[0],
+ fs_reg(fs_key->render_to_fbo));
+ } else {
+ inst = emit(FS_OPCODE_DDY_COARSE, result, op[0],
+ fs_reg(fs_key->render_to_fbo));
+ }
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_fddy_fine:
- emit_percomp(FS_OPCODE_DDY_FINE, result, op[0],
- fs_reg(fs_key->render_to_fbo),
- instr->dest.write_mask, instr->dest.saturate);
+ inst = emit(FS_OPCODE_DDY_FINE, result, op[0],
+ fs_reg(fs_key->render_to_fbo));
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_fddy_coarse:
- emit_percomp(FS_OPCODE_DDY_COARSE, result, op[0],
- fs_reg(fs_key->render_to_fbo),
- instr->dest.write_mask, instr->dest.saturate);
+ inst = emit(FS_OPCODE_DDY_COARSE, result, op[0],
+ fs_reg(fs_key->render_to_fbo));
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_fadd:
- case nir_op_iadd: {
- fs_inst *inst = ADD(result, op[0], op[1]);
+ case nir_op_iadd:
+ inst = emit(ADD(result, op[0], op[1]));
inst->saturate = instr->dest.saturate;
- emit_percomp(inst, instr->dest.write_mask);
break;
- }
- case nir_op_fmul: {
- fs_inst *inst = MUL(result, op[0], op[1]);
+ case nir_op_fmul:
+ inst = emit(MUL(result, op[0], op[1]));
inst->saturate = instr->dest.saturate;
- emit_percomp(inst, instr->dest.write_mask);
break;
- }
case nir_op_imul: {
- /* TODO put in the 16-bit constant optimization once we have SSA */
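+ /* On Gen8+ a single MUL handles the 32x32 integer multiply directly.
+ * Before Gen8 we fall back to the MUL/MACH/MOV accumulator sequence below
+ * unless one operand is a constant that fits in 16 bits, in which case a
+ * single MUL suffices; which source slot the 16-bit operand has to occupy
+ * differs between Gen < 7 and Gen7, hence the operand swaps.
+ */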
+ if (brw->gen >= 8) {
+ emit(MUL(result, op[0], op[1]));
+ break;
+ } else {
+ nir_const_value *value0 = nir_src_as_const_value(instr->src[0].src);
+ nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
+
+ if (value0 && value0->u[0] < (1 << 16)) {
+ if (brw->gen < 7) {
+ emit(MUL(result, op[0], op[1]));
+ } else {
+ emit(MUL(result, op[1], op[0]));
+ }
+ break;
+ } else if (value1 && value1->u[0] < (1 << 16)) {
+ if (brw->gen < 7) {
+ emit(MUL(result, op[1], op[0]));
+ } else {
+ emit(MUL(result, op[0], op[1]));
+ }
+ break;
+ }
+ }
if (brw->gen >= 7)
no16("SIMD16 explicit accumulator operands unsupported\n");
struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);
- emit_percomp(MUL(acc, op[0], op[1]), instr->dest.write_mask);
- emit_percomp(MACH(reg_null_d, op[0], op[1]), instr->dest.write_mask);
- emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
+ emit(MUL(acc, op[0], op[1]));
+ emit(MACH(reg_null_d, op[0], op[1]));
+ emit(MOV(result, fs_reg(acc)));
break;
}
struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);
- emit_percomp(MUL(acc, op[0], op[1]), instr->dest.write_mask);
- emit_percomp(MACH(result, op[0], op[1]), instr->dest.write_mask);
+ emit(MUL(acc, op[0], op[1]));
+ emit(MACH(result, op[0], op[1]));
break;
}
case nir_op_idiv:
case nir_op_udiv:
- emit_math_percomp(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1],
- instr->dest.write_mask);
+ emit_math(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
break;
case nir_op_uadd_carry: {
struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
BRW_REGISTER_TYPE_UD);
- emit_percomp(ADDC(reg_null_ud, op[0], op[1]), instr->dest.write_mask);
- emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
+ emit(ADDC(reg_null_ud, op[0], op[1]));
+ emit(MOV(result, fs_reg(acc)));
break;
}
struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
BRW_REGISTER_TYPE_UD);
- emit_percomp(SUBB(reg_null_ud, op[0], op[1]), instr->dest.write_mask);
- emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
+ emit(SUBB(reg_null_ud, op[0], op[1]));
+ emit(MOV(result, fs_reg(acc)));
break;
}
case nir_op_umod:
- emit_math_percomp(SHADER_OPCODE_INT_REMAINDER, result, op[0],
- op[1], instr->dest.write_mask);
+ emit_math(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
break;
case nir_op_flt:
case nir_op_ilt:
case nir_op_ult:
- emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_L),
- instr->dest.write_mask);
+ emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_L));
break;
case nir_op_fge:
case nir_op_ige:
case nir_op_uge:
- emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_GE),
- instr->dest.write_mask);
+ emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_GE));
break;
case nir_op_feq:
case nir_op_ieq:
- emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_Z),
- instr->dest.write_mask);
+ emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_Z));
break;
case nir_op_fne:
case nir_op_ine:
- emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ),
- instr->dest.write_mask);
- break;
-
- case nir_op_ball_fequal2:
- case nir_op_ball_iequal2:
- case nir_op_ball_fequal3:
- case nir_op_ball_iequal3:
- case nir_op_ball_fequal4:
- case nir_op_ball_iequal4: {
- unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
- fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
- emit_percomp(CMP(temp, op[0], op[1], BRW_CONDITIONAL_Z),
- (1 << num_components) - 1);
- emit_reduction(BRW_OPCODE_AND, result, temp, num_components);
+ emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ));
break;
- }
-
- case nir_op_bany_fnequal2:
- case nir_op_bany_inequal2:
- case nir_op_bany_fnequal3:
- case nir_op_bany_inequal3:
- case nir_op_bany_fnequal4:
- case nir_op_bany_inequal4: {
- unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
- fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
- temp.type = BRW_REGISTER_TYPE_UD;
- emit_percomp(CMP(temp, op[0], op[1], BRW_CONDITIONAL_NZ),
- (1 << num_components) - 1);
- emit_reduction(BRW_OPCODE_OR, result, temp, num_components);
- break;
- }
case nir_op_inot:
- emit_percomp(NOT(result, op[0]), instr->dest.write_mask);
+ if (brw->gen >= 8) {
+ resolve_source_modifiers(&op[0]);
+ }
+ emit(NOT(result, op[0]));
break;
case nir_op_ixor:
- emit_percomp(XOR(result, op[0], op[1]), instr->dest.write_mask);
+ if (brw->gen >= 8) {
+ resolve_source_modifiers(&op[0]);
+ resolve_source_modifiers(&op[1]);
+ }
+ emit(XOR(result, op[0], op[1]));
break;
case nir_op_ior:
- emit_percomp(OR(result, op[0], op[1]), instr->dest.write_mask);
+ if (brw->gen >= 8) {
+ resolve_source_modifiers(&op[0]);
+ resolve_source_modifiers(&op[1]);
+ }
+ emit(OR(result, op[0], op[1]));
break;
case nir_op_iand:
- emit_percomp(AND(result, op[0], op[1]), instr->dest.write_mask);
+ if (brw->gen >= 8) {
+ resolve_source_modifiers(&op[0]);
+ resolve_source_modifiers(&op[1]);
+ }
+ emit(AND(result, op[0], op[1]));
break;
case nir_op_fdot2:
case nir_op_fdot3:
- case nir_op_fdot4: {
- unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
- fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
- emit_percomp(MUL(temp, op[0], op[1]), (1 << num_components) - 1);
- emit_reduction(BRW_OPCODE_ADD, result, temp, num_components);
- if (instr->dest.saturate) {
- fs_inst *inst = emit(MOV(result, result));
- inst->saturate = true;
- }
- break;
- }
-
+ case nir_op_fdot4:
case nir_op_bany2:
case nir_op_bany3:
- case nir_op_bany4: {
- unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
- emit_reduction(BRW_OPCODE_OR, result, op[0], num_components);
- break;
- }
-
+ case nir_op_bany4:
case nir_op_ball2:
case nir_op_ball3:
- case nir_op_ball4: {
- unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
- emit_reduction(BRW_OPCODE_AND, result, op[0], num_components);
- break;
- }
+ case nir_op_ball4:
+ case nir_op_ball_fequal2:
+ case nir_op_ball_iequal2:
+ case nir_op_ball_fequal3:
+ case nir_op_ball_iequal3:
+ case nir_op_ball_fequal4:
+ case nir_op_ball_iequal4:
+ case nir_op_bany_fnequal2:
+ case nir_op_bany_inequal2:
+ case nir_op_bany_fnequal3:
+ case nir_op_bany_inequal3:
+ case nir_op_bany_fnequal4:
+ case nir_op_bany_inequal4:
+ unreachable("Lowered by nir_lower_alu_reductions");
case nir_op_fnoise1_1:
case nir_op_fnoise1_2:
case nir_op_fnoise4_4:
unreachable("not reached: should be handled by lower_noise");
- case nir_op_vec2:
- case nir_op_vec3:
- case nir_op_vec4:
- unreachable("not reached: should be handled by lower_quadop_vector");
-
case nir_op_ldexp:
unreachable("not reached: should be handled by ldexp_to_arith()");
case nir_op_fsqrt:
- emit_math_percomp(SHADER_OPCODE_SQRT, result, op[0],
- instr->dest.write_mask, instr->dest.saturate);
+ inst = emit_math(SHADER_OPCODE_SQRT, result, op[0]);
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_frsq:
- emit_math_percomp(SHADER_OPCODE_RSQ, result, op[0],
- instr->dest.write_mask, instr->dest.saturate);
+ inst = emit_math(SHADER_OPCODE_RSQ, result, op[0]);
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_b2i:
- emit_percomp(AND(result, op[0], fs_reg(1)), instr->dest.write_mask);
+ emit(AND(result, op[0], fs_reg(1)));
break;
- case nir_op_b2f: {
- emit_percomp(AND(retype(result, BRW_REGISTER_TYPE_UD), op[0],
- fs_reg(0x3f800000u)),
- instr->dest.write_mask);
+ case nir_op_b2f:
+ emit(AND(retype(result, BRW_REGISTER_TYPE_UD), op[0], fs_reg(0x3f800000u)));
break;
- }
case nir_op_f2b:
- emit_percomp(CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ),
- instr->dest.write_mask);
+ emit(CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ));
break;
case nir_op_i2b:
- emit_percomp(CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ),
- instr->dest.write_mask);
+ emit(CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
break;
- case nir_op_ftrunc: {
- fs_inst *inst = RNDZ(result, op[0]);
+ case nir_op_ftrunc:
+ inst = emit(RNDZ(result, op[0]));
inst->saturate = instr->dest.saturate;
- emit_percomp(inst, instr->dest.write_mask);
break;
- }
+
case nir_op_fceil: {
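+ /* ceil(x) is computed as -floor(-x): negate the source, round toward
+ * negative infinity with RNDD, then negate the rounded value back.
+ */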
op[0].negate = !op[0].negate;
- fs_reg temp = fs_reg(this, glsl_type::vec4_type);
- emit_percomp(RNDD(temp, op[0]), instr->dest.write_mask);
+ fs_reg temp = vgrf(glsl_type::float_type);
+ emit(RNDD(temp, op[0]));
temp.negate = true;
- fs_inst *inst = MOV(result, temp);
+ inst = emit(MOV(result, temp));
inst->saturate = instr->dest.saturate;
- emit_percomp(inst, instr->dest.write_mask);
break;
}
- case nir_op_ffloor: {
- fs_inst *inst = RNDD(result, op[0]);
+ case nir_op_ffloor:
+ inst = emit(RNDD(result, op[0]));
inst->saturate = instr->dest.saturate;
- emit_percomp(inst, instr->dest.write_mask);
break;
- }
- case nir_op_ffract: {
- fs_inst *inst = FRC(result, op[0]);
+ case nir_op_ffract:
+ inst = emit(FRC(result, op[0]));
inst->saturate = instr->dest.saturate;
- emit_percomp(inst, instr->dest.write_mask);
break;
- }
- case nir_op_fround_even: {
- fs_inst *inst = RNDE(result, op[0]);
+ case nir_op_fround_even:
+ inst = emit(RNDE(result, op[0]));
inst->saturate = instr->dest.saturate;
- emit_percomp(inst, instr->dest.write_mask);
break;
- }
case nir_op_fmin:
case nir_op_imin:
case nir_op_umin:
if (brw->gen >= 6) {
- emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
- instr->dest.write_mask, instr->dest.saturate,
- BRW_PREDICATE_NONE, BRW_CONDITIONAL_L);
+ inst = emit(BRW_OPCODE_SEL, result, op[0], op[1]);
+ inst->conditional_mod = BRW_CONDITIONAL_L;
} else {
- emit_percomp(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_L),
- instr->dest.write_mask);
-
- emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
- instr->dest.write_mask, instr->dest.saturate,
- BRW_PREDICATE_NORMAL);
+ emit(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_L));
+ inst = emit(SEL(result, op[0], op[1]));
}
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_fmax:
case nir_op_imax:
case nir_op_umax:
if (brw->gen >= 6) {
- emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
- instr->dest.write_mask, instr->dest.saturate,
- BRW_PREDICATE_NONE, BRW_CONDITIONAL_GE);
+ inst = emit(BRW_OPCODE_SEL, result, op[0], op[1]);
+ inst->conditional_mod = BRW_CONDITIONAL_GE;
} else {
- emit_percomp(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_GE),
- instr->dest.write_mask);
-
- emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
- instr->dest.write_mask, instr->dest.saturate,
- BRW_PREDICATE_NORMAL);
+ emit(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_GE));
+ inst = emit(SEL(result, op[0], op[1]));
}
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_pack_snorm_2x16:
unreachable("not reached: should be handled by lower_packing_builtins");
case nir_op_unpack_half_2x16_split_x:
- emit_percomp(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0],
- instr->dest.write_mask, instr->dest.saturate);
+ inst = emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_unpack_half_2x16_split_y:
- emit_percomp(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0],
- instr->dest.write_mask, instr->dest.saturate);
+ inst = emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_fpow:
- emit_percomp(SHADER_OPCODE_POW, result, op[0], op[1],
- instr->dest.write_mask, instr->dest.saturate);
+ inst = emit_math(SHADER_OPCODE_POW, result, op[0], op[1]);
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_bitfield_reverse:
- emit_percomp(BFREV(result, op[0]), instr->dest.write_mask);
+ emit(BFREV(result, op[0]));
break;
case nir_op_bit_count:
- emit_percomp(CBIT(result, op[0]), instr->dest.write_mask);
+ emit(CBIT(result, op[0]));
break;
case nir_op_ufind_msb:
case nir_op_ifind_msb: {
- emit_percomp(FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]),
- instr->dest.write_mask);
+ emit(FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]));
/* FBH counts from the MSB side, while GLSL's findMSB() wants the count
* from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
* subtract the result from 31 to convert the MSB count into an LSB count.
*/
- emit_percomp(CMP(reg_null_d, result, fs_reg(-1), BRW_CONDITIONAL_NZ),
- instr->dest.write_mask);
+ emit(CMP(reg_null_d, result, fs_reg(-1), BRW_CONDITIONAL_NZ));
fs_reg neg_result(result);
neg_result.negate = true;
- fs_inst *inst = ADD(result, neg_result, fs_reg(31));
+ inst = emit(ADD(result, neg_result, fs_reg(31)));
inst->predicate = BRW_PREDICATE_NORMAL;
- emit_percomp(inst, instr->dest.write_mask);
break;
}
case nir_op_find_lsb:
- emit_percomp(FBL(result, op[0]), instr->dest.write_mask);
+ emit(FBL(result, op[0]));
break;
case nir_op_ubitfield_extract:
case nir_op_ibitfield_extract:
- emit_percomp(BFE(result, op[2], op[1], op[0]), instr->dest.write_mask);
+ emit(BFE(result, op[2], op[1], op[0]));
break;
case nir_op_bfm:
- emit_percomp(BFI1(result, op[0], op[1]), instr->dest.write_mask);
+ emit(BFI1(result, op[0], op[1]));
break;
case nir_op_bfi:
- emit_percomp(BFI2(result, op[0], op[1], op[2]), instr->dest.write_mask);
+ emit(BFI2(result, op[0], op[1], op[2]));
break;
case nir_op_bitfield_insert:
"lower_instructions::bitfield_insert_to_bfm_bfi");
case nir_op_ishl:
- emit_percomp(SHL(result, op[0], op[1]), instr->dest.write_mask);
+ emit(SHL(result, op[0], op[1]));
break;
case nir_op_ishr:
- emit_percomp(ASR(result, op[0], op[1]), instr->dest.write_mask);
+ emit(ASR(result, op[0], op[1]));
break;
case nir_op_ushr:
- emit_percomp(SHR(result, op[0], op[1]), instr->dest.write_mask);
+ emit(SHR(result, op[0], op[1]));
break;
case nir_op_pack_half_2x16_split:
- emit_percomp(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1],
- instr->dest.write_mask);
+ emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
break;
case nir_op_ffma:
- emit_percomp(MAD(result, op[2], op[1], op[0]), instr->dest.write_mask);
+ inst = emit(MAD(result, op[2], op[1], op[0]));
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_flrp:
- /* TODO emulate for gen < 6 */
- emit_percomp(LRP(result, op[2], op[1], op[0]), instr->dest.write_mask);
+ inst = emit_lrp(result, op[0], op[1], op[2]);
+ inst->saturate = instr->dest.saturate;
break;
case nir_op_bcsel:
+ if (optimize_frontfacing_ternary(instr, result))
+ return;
+
emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
- emit_percomp(BRW_OPCODE_SEL, result, op[1], op[2],
- instr->dest.write_mask, false, BRW_PREDICATE_NORMAL);
+ inst = emit(SEL(result, op[1], op[2]));
+ inst->predicate = BRW_PREDICATE_NORMAL;
break;
default:
unreachable("unhandled instruction");
}
-
- /* emit a predicated move if there was predication */
- if (instr->has_predicate) {
- fs_inst *inst = emit(MOV(reg_null_d,
- retype(get_nir_src(instr->predicate),
- BRW_REGISTER_TYPE_UD)));
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
- inst = MOV(dest, result);
- inst->predicate = BRW_PREDICATE_NORMAL;
- emit_percomp(inst, instr->dest.write_mask);
- }
}
fs_reg
if (src.is_ssa) {
assert(src.ssa->parent_instr->type == nir_instr_type_load_const);
nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
- fs_reg reg(GRF, virtual_grf_alloc(src.ssa->num_components),
- BRW_REGISTER_TYPE_D);
+ fs_reg reg = vgrf(src.ssa->num_components);
+ reg.type = BRW_REGISTER_TYPE_D;
for (unsigned i = 0; i < src.ssa->num_components; ++i)
emit(MOV(offset(reg, i), fs_reg(load->value.i[i])));
* default to D - instructions that need floating point semantics will set
* this to F if they need to
*/
- reg.type = BRW_REGISTER_TYPE_D;
- reg.reg_offset = src.reg.base_offset;
+ reg = retype(offset(reg, src.reg.base_offset), BRW_REGISTER_TYPE_D);
if (src.reg.indirect) {
reg.reladdr = new(mem_ctx) fs_reg();
*reg.reladdr = retype(get_nir_src(*src.reg.indirect),
}
}
-fs_reg
-fs_visitor::get_nir_alu_src(nir_alu_instr *instr, unsigned src)
-{
- fs_reg reg = get_nir_src(instr->src[src].src);
-
- reg.type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[src]);
- reg.abs = instr->src[src].abs;
- reg.negate = instr->src[src].negate;
-
- bool needs_swizzle = false;
- unsigned num_components = 0;
- for (unsigned i = 0; i < 4; i++) {
- if (!nir_alu_instr_channel_used(instr, src, i))
- continue;
-
- if (instr->src[src].swizzle[i] != i)
- needs_swizzle = true;
-
- num_components = i + 1;
- }
-
- if (needs_swizzle) {
- /* resolve the swizzle through MOV's */
- fs_reg new_reg = fs_reg(GRF, virtual_grf_alloc(num_components), reg.type);
-
- for (unsigned i = 0; i < 4; i++) {
- if (!nir_alu_instr_channel_used(instr, src, i))
- continue;
-
- emit(MOV(offset(new_reg, i),
- offset(reg, instr->src[src].swizzle[i])));
- }
-
- return new_reg;
- }
-
- return reg;
-}
-
fs_reg
fs_visitor::get_nir_dest(nir_dest dest)
{
else
reg = nir_locals[dest.reg.reg->index];
- reg.reg_offset = dest.reg.base_offset;
+ reg = offset(reg, dest.reg.base_offset);
if (dest.reg.indirect) {
reg.reladdr = new(mem_ctx) fs_reg();
*reg.reladdr = retype(get_nir_src(*dest.reg.indirect),
continue;
fs_inst *new_inst = new(mem_ctx) fs_inst(*inst);
- new_inst->dst.reg_offset += i;
+ new_inst->dst = offset(new_inst->dst, i);
for (unsigned j = 0; j < new_inst->sources; j++)
if (inst->src[j].file == GRF)
- new_inst->src[j].reg_offset += i;
-
- emit(new_inst);
- }
-}
-
-void
-fs_visitor::emit_percomp(enum opcode op, fs_reg dest, fs_reg src0,
- unsigned wr_mask, bool saturate,
- enum brw_predicate predicate,
- enum brw_conditional_mod mod)
-{
- for (unsigned i = 0; i < 4; i++) {
- if (!((wr_mask >> i) & 1))
- continue;
-
- fs_inst *new_inst = new(mem_ctx) fs_inst(op, dest, src0);
- new_inst->dst.reg_offset += i;
- for (unsigned j = 0; j < new_inst->sources; j++)
- if (new_inst->src[j].file == GRF)
- new_inst->src[j].reg_offset += i;
-
- new_inst->predicate = predicate;
- new_inst->conditional_mod = mod;
- new_inst->saturate = saturate;
- emit(new_inst);
- }
-}
-
-void
-fs_visitor::emit_percomp(enum opcode op, fs_reg dest, fs_reg src0, fs_reg src1,
- unsigned wr_mask, bool saturate,
- enum brw_predicate predicate,
- enum brw_conditional_mod mod)
-{
- for (unsigned i = 0; i < 4; i++) {
- if (!((wr_mask >> i) & 1))
- continue;
-
- fs_inst *new_inst = new(mem_ctx) fs_inst(op, dest, src0, src1);
- new_inst->dst.reg_offset += i;
- for (unsigned j = 0; j < new_inst->sources; j++)
- if (new_inst->src[j].file == GRF)
- new_inst->src[j].reg_offset += i;
+ new_inst->src[j] = offset(new_inst->src[j], i);
- new_inst->predicate = predicate;
- new_inst->conditional_mod = mod;
- new_inst->saturate = saturate;
emit(new_inst);
}
}
-void
-fs_visitor::emit_math_percomp(enum opcode op, fs_reg dest, fs_reg src0,
- unsigned wr_mask, bool saturate)
-{
- for (unsigned i = 0; i < 4; i++) {
- if (!((wr_mask >> i) & 1))
- continue;
-
- fs_reg new_dest = dest;
- new_dest.reg_offset += i;
- fs_reg new_src0 = src0;
- if (src0.file == GRF)
- new_src0.reg_offset += i;
-
- fs_inst *new_inst = emit_math(op, new_dest, new_src0);
- new_inst->saturate = saturate;
- }
-}
-
-void
-fs_visitor::emit_math_percomp(enum opcode op, fs_reg dest, fs_reg src0,
- fs_reg src1, unsigned wr_mask,
- bool saturate)
-{
- for (unsigned i = 0; i < 4; i++) {
- if (!((wr_mask >> i) & 1))
- continue;
-
- fs_reg new_dest = dest;
- new_dest.reg_offset += i;
- fs_reg new_src0 = src0;
- if (src0.file == GRF)
- new_src0.reg_offset += i;
- fs_reg new_src1 = src1;
- if (src1.file == GRF)
- new_src1.reg_offset += i;
-
- fs_inst *new_inst = emit_math(op, new_dest, new_src0, new_src1);
- new_inst->saturate = saturate;
- }
-}
-
-void
-fs_visitor::emit_reduction(enum opcode op, fs_reg dest, fs_reg src,
- unsigned num_components)
-{
- fs_reg src0 = src;
- fs_reg src1 = src;
- src1.reg_offset++;
-
- if (num_components == 2) {
- emit(op, dest, src0, src1);
- return;
- }
-
- fs_reg temp1 = fs_reg(GRF, virtual_grf_alloc(1));
- temp1.type = src.type;
- emit(op, temp1, src0, src1);
-
- fs_reg src2 = src;
- src2.reg_offset += 2;
-
- if (num_components == 3) {
- emit(op, dest, temp1, src2);
- return;
- }
-
- assert(num_components == 4);
-
- fs_reg src3 = src;
- src3.reg_offset += 3;
- fs_reg temp2 = fs_reg(GRF, virtual_grf_alloc(1));
- temp2.type = src.type;
-
- emit(op, temp2, src2, src3);
- emit(op, dest, temp1, temp2);
-}
-
void
fs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
fs_reg dest;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
dest = get_nir_dest(instr->dest);
- if (instr->has_predicate) {
- fs_inst *inst = emit(MOV(reg_null_d,
- retype(get_nir_src(instr->predicate),
- BRW_REGISTER_TYPE_UD)));
- inst->conditional_mod = BRW_CONDITIONAL_NZ;
- }
bool has_indirect = false;
switch (instr->intrinsic) {
- case nir_intrinsic_discard: {
+ case nir_intrinsic_discard:
+ case nir_intrinsic_discard_if: {
/* We track our discarded pixels in f0.1. By predicating on it, we can
- * update just the flag bits that aren't yet discarded. By emitting a
- * CMP of g0 != g0, all our currently executing channels will get turned
- * off.
+ * update just the flag bits that aren't yet discarded. If there's no
+ * condition, we emit a CMP of g0 != g0, so all currently executing
+ * channels will get turned off.
*/
- fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
- BRW_REGISTER_TYPE_UW));
- fs_inst *cmp = emit(CMP(reg_null_f, some_reg, some_reg,
- BRW_CONDITIONAL_NZ));
+ fs_inst *cmp;
+ if (instr->intrinsic == nir_intrinsic_discard_if) {
+ cmp = emit(CMP(reg_null_f, get_nir_src(instr->src[0]),
+ fs_reg(0), BRW_CONDITIONAL_Z));
+ } else {
+ fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
+ BRW_REGISTER_TYPE_UW));
+ cmp = emit(CMP(reg_null_f, some_reg, some_reg, BRW_CONDITIONAL_NZ));
+ }
cmp->predicate = BRW_PREDICATE_NORMAL;
cmp->flag_subreg = 1;
if (brw->gen >= 6) {
- /* For performance, after a discard, jump to the end of the shader.
- * Only jump if all relevant channels have been discarded.
- */
- fs_inst *discard_jump = emit(FS_OPCODE_DISCARD_JUMP);
- discard_jump->flag_subreg = 1;
-
- discard_jump->predicate = (dispatch_width == 8)
- ? BRW_PREDICATE_ALIGN1_ANY8H
- : BRW_PREDICATE_ALIGN1_ANY16H;
- discard_jump->predicate_inverse = true;
+ emit_discard_jump();
}
-
break;
}
}
case nir_intrinsic_load_front_face:
- assert(!"TODO");
+ emit(MOV(retype(dest, BRW_REGISTER_TYPE_D),
+ *emit_frontfacing_interpolation()));
+ break;
+
+ case nir_intrinsic_load_vertex_id:
+ unreachable("should be lowered by lower_vertex_id()");
+
+ case nir_intrinsic_load_vertex_id_zero_base: {
+ fs_reg vertex_id = nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
+ assert(vertex_id.file != BAD_FILE);
+ dest.type = vertex_id.type;
+ emit(MOV(dest, vertex_id));
+ break;
+ }
+
+ case nir_intrinsic_load_base_vertex: {
+ fs_reg base_vertex = nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
+ assert(base_vertex.file != BAD_FILE);
+ dest.type = base_vertex.type;
+ emit(MOV(dest, base_vertex));
+ break;
+ }
+
+ case nir_intrinsic_load_instance_id: {
+ fs_reg instance_id = nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
+ assert(instance_id.file != BAD_FILE);
+ dest.type = instance_id.type;
+ emit(MOV(dest, instance_id));
+ break;
+ }
case nir_intrinsic_load_sample_mask_in: {
- assert(brw->gen >= 7);
- fs_reg reg = fs_reg(retype(brw_vec8_grf(payload.sample_mask_in_reg, 0),
- BRW_REGISTER_TYPE_D));
- dest.type = reg.type;
- fs_inst *inst = MOV(dest, reg);
- if (instr->has_predicate)
- inst->predicate = BRW_PREDICATE_NORMAL;
- emit(inst);
+ fs_reg sample_mask_in = nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
+ assert(sample_mask_in.file != BAD_FILE);
+ dest.type = sample_mask_in.type;
+ emit(MOV(dest, sample_mask_in));
break;
}
case nir_intrinsic_load_sample_pos: {
- fs_reg *reg = emit_samplepos_setup();
- dest.type = reg->type;
- emit(MOV(dest, *reg));
- emit(MOV(offset(dest, 1), offset(*reg, 1)));
+ fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
+ assert(sample_pos.file != BAD_FILE);
+ dest.type = sample_pos.type;
+ emit(MOV(dest, sample_pos));
+ emit(MOV(offset(dest, 1), offset(sample_pos, 1)));
break;
}
case nir_intrinsic_load_sample_id: {
- fs_reg *reg = emit_sampleid_setup();
- dest.type = reg->type;
- emit(MOV(dest, *reg));
+ fs_reg sample_id = nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
+ assert(sample_id.file != BAD_FILE);
+ dest.type = sample_id.type;
+ emit(MOV(dest, sample_id));
break;
}
case nir_intrinsic_load_uniform_indirect:
has_indirect = true;
case nir_intrinsic_load_uniform: {
- unsigned index = 0;
+ unsigned index = instr->const_index[0];
+
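+ /* Uniform slots below num_direct_uniforms live in the directly-addressed
+ * half of the uniform file set up in nir_setup_uniforms; anything past that
+ * is addressed relative to the second, indirect half.
+ */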
+ fs_reg uniform_reg;
+ if (index < num_direct_uniforms) {
+ uniform_reg = fs_reg(UNIFORM, 0);
+ } else {
+ uniform_reg = fs_reg(UNIFORM, num_direct_uniforms);
+ index -= num_direct_uniforms;
+ }
+
for (int i = 0; i < instr->const_index[1]; i++) {
for (unsigned j = 0; j < instr->num_components; j++) {
- fs_reg src = nir_uniforms;
- src.reg_offset = instr->const_index[0] + index;
+ fs_reg src = offset(retype(uniform_reg, dest.type), index);
if (has_indirect)
src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
- src.type = dest.type;
index++;
- fs_inst *inst = MOV(dest, src);
- if (instr->has_predicate)
- inst->predicate = BRW_PREDICATE_NORMAL;
- emit(inst);
- dest.reg_offset++;
+ emit(MOV(dest, src));
+ dest = offset(dest, 1);
}
}
break;
case nir_intrinsic_load_ubo_indirect:
has_indirect = true;
+ /* fallthrough */
case nir_intrinsic_load_ubo: {
nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
fs_reg surf_index;
* per-channel and add the base UBO index; the generator will select
* a value from any live channel.
*/
- surf_index = fs_reg(this, glsl_type::uint_type);
+ surf_index = vgrf(glsl_type::uint_type);
emit(ADD(surf_index, get_nir_src(instr->src[0]),
fs_reg(stage_prog_data->binding_table.ubo_start)))
->force_writemask_all = true;
if (has_indirect) {
/* Turn the byte offset into a dword offset. */
- fs_reg base_offset = fs_reg(this, glsl_type::int_type);
+ fs_reg base_offset = vgrf(glsl_type::int_type);
emit(SHR(base_offset, retype(get_nir_src(instr->src[1]),
BRW_REGISTER_TYPE_D),
fs_reg(2)));
unsigned vec4_offset = instr->const_index[0] / 4;
- for (int i = 0; i < instr->num_components; i++) {
- exec_list list = VARYING_PULL_CONSTANT_LOAD(offset(dest, i),
- surf_index, base_offset,
- vec4_offset + i);
-
- fs_inst *last_inst = (fs_inst *) list.get_tail();
- if (instr->has_predicate)
- last_inst->predicate = BRW_PREDICATE_NORMAL;
- emit(list);
- }
+ for (int i = 0; i < instr->num_components; i++)
+ emit(VARYING_PULL_CONSTANT_LOAD(offset(dest, i), surf_index,
+ base_offset, vec4_offset + i));
} else {
- fs_reg packed_consts = fs_reg(this, glsl_type::float_type);
+ fs_reg packed_consts = vgrf(glsl_type::float_type);
packed_consts.type = dest.type;
fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
*/
assert(packed_consts.subreg_offset < 32);
- fs_inst *inst = MOV(dest, packed_consts);
- if (instr->has_predicate)
- inst->predicate = BRW_PREDICATE_NORMAL;
- emit(inst);
-
- dest.reg_offset++;
+ emit(MOV(dest, packed_consts));
+ dest = offset(dest, 1);
}
}
break;
case nir_intrinsic_load_input_indirect:
has_indirect = true;
+ /* fallthrough */
case nir_intrinsic_load_input: {
unsigned index = 0;
for (int i = 0; i < instr->const_index[1]; i++) {
for (unsigned j = 0; j < instr->num_components; j++) {
- fs_reg src = nir_inputs;
- src.reg_offset = instr->const_index[0] + index;
+ fs_reg src = offset(retype(nir_inputs, dest.type),
+ instr->const_index[0] + index);
if (has_indirect)
src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
- src.type = dest.type;
index++;
- fs_inst *inst = MOV(dest, src);
- if (instr->has_predicate)
- inst->predicate = BRW_PREDICATE_NORMAL;
- emit(inst);
- dest.reg_offset++;
+ emit(MOV(dest, src));
+ dest = offset(dest, 1);
}
}
break;
*/
no16("interpolate_at_* not yet supported in SIMD16 mode.");
- fs_reg dst_x(GRF, virtual_grf_alloc(2), BRW_REGISTER_TYPE_F);
+ fs_reg dst_x = vgrf(2);
fs_reg dst_y = offset(dst_x, 1);
/* For most messages, we need one reg of ignored data; the hardware
* requires mlen==1 even when there is no payload. In the per-slot
* offset case, we'll replace this with the proper source data.
*/
- fs_reg src(this, glsl_type::float_type);
+ fs_reg src = vgrf(glsl_type::float_type);
int mlen = 1; /* one reg unless overridden */
fs_inst *inst;
inst = emit(FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, dst_x, src,
fs_reg(off_x | (off_y << 4)));
} else {
- src = fs_reg(this, glsl_type::ivec2_type);
+ src = vgrf(glsl_type::ivec2_type);
fs_reg offset_src = retype(get_nir_src(instr->src[0]),
BRW_REGISTER_TYPE_F);
for (int i = 0; i < 2; i++) {
- fs_reg temp(this, glsl_type::float_type);
+ fs_reg temp = vgrf(glsl_type::float_type);
emit(MUL(temp, offset(offset_src, i), fs_reg(16.0f)));
- fs_reg itemp(this, glsl_type::int_type);
+ fs_reg itemp = vgrf(glsl_type::int_type);
emit(MOV(itemp, temp)); /* float to int */
/* Clamp the upper end of the range to +7/16.
fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
src.type = dest.type;
- fs_inst *inst = emit(FS_OPCODE_LINTERP, dest, dst_x, dst_y, src);
- if (instr->has_predicate)
- inst->predicate = BRW_PREDICATE_NORMAL;
- dest.reg_offset++;
+ emit(FS_OPCODE_LINTERP, dest, dst_x, dst_y, src);
+ dest = offset(dest, 1);
}
break;
}
unsigned index = 0;
for (int i = 0; i < instr->const_index[1]; i++) {
for (unsigned j = 0; j < instr->num_components; j++) {
- fs_reg new_dest = nir_outputs;
- new_dest.reg_offset = instr->const_index[0] + index;
+ fs_reg new_dest = offset(retype(nir_outputs, src.type),
+ instr->const_index[0] + index);
if (has_indirect)
src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
- new_dest.type = src.type;
index++;
- fs_inst *inst = MOV(new_dest, src);
- if (instr->has_predicate)
- inst->predicate = BRW_PREDICATE_NORMAL;
- emit(inst);
- src.reg_offset++;
+ emit(MOV(new_dest, src));
+ src = offset(src, 1);
}
}
break;
void
fs_visitor::nir_emit_texture(nir_tex_instr *instr)
{
- brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
unsigned sampler = instr->sampler_index;
fs_reg sampler_reg(sampler);
bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
instr->is_array;
- int lod_components, offset_components = 0;
+ int lod_components = 0, offset_components = 0;
fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, offset;
for (unsigned i = 0; i < instr->num_srcs; i++) {
- fs_reg src = get_nir_src(instr->src[i]);
- switch (instr->src_type[i]) {
+ fs_reg src = get_nir_src(instr->src[i].src);
+ switch (instr->src[i].src_type) {
case nir_tex_src_bias:
lod = retype(src, BRW_REGISTER_TYPE_F);
break;
brw_mark_surface_used(prog_data, max_used);
/* Emit code to evaluate the actual indexing expression */
- sampler_reg = fs_reg(this, glsl_type::uint_type);
+ sampler_reg = vgrf(glsl_type::uint_type);
emit(ADD(sampler_reg, src, fs_reg(sampler)))
->force_writemask_all = true;
break;
}
if (instr->op == nir_texop_txf_ms) {
- if (brw->gen >= 7 && key->tex.compressed_multisample_layout_mask & (1<<sampler))
+ if (brw->gen >= 7 &&
+ key_tex->compressed_multisample_layout_mask & (1 << sampler)) {
mcs = emit_mcs_fetch(coordinate, instr->coord_components, sampler_reg);
- else
+ } else {
mcs = fs_reg(0u);
+ }
}
for (unsigned i = 0; i < 3; i++) {
emit_texture(op, dest_type, coordinate, instr->coord_components,
shadow_comparitor, lod, lod2, lod_components, sample_index,
- offset, offset_components, mcs, gather_component,
+ offset, mcs, gather_component,
is_cube_array, is_rect, sampler, sampler_reg, texunit);
fs_reg dest = get_nir_dest(instr->dest);
emit_percomp(MOV(dest, this->result), (1 << num_components) - 1);
}
-void
-fs_visitor::nir_emit_load_const(nir_load_const_instr *instr)
-{
- /* Bail on SSA constant loads. These are used for immediates. */
- if (instr->dest.is_ssa)
- return;
-
- fs_reg dest = get_nir_dest(instr->dest);
- dest.type = BRW_REGISTER_TYPE_UD;
- if (instr->array_elems == 0) {
- for (unsigned i = 0; i < instr->num_components; i++) {
- emit(MOV(dest, fs_reg(instr->value.u[i])));
- dest.reg_offset++;
- }
- } else {
- for (unsigned i = 0; i < instr->array_elems; i++) {
- for (unsigned j = 0; j < instr->num_components; j++) {
- emit(MOV(dest, fs_reg(instr->array[i].u[j])));
- dest.reg_offset++;
- }
- }
- }
-}
-
void
fs_visitor::nir_emit_jump(nir_jump_instr *instr)
{