X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fdrivers%2Fdri%2Fi965%2Fbrw_vec4_nir.cpp;h=ba3bbdfaf16523f7fdfd45e8089d6ed270d39254;hb=31a40202b8bdf8bb65d33862144a03610fd57e3f;hp=cfb66a53fe91244c40547024c780773b5a41e492;hpb=fd944197f27ff428f2599eb03bc0c4085c9fbc6a;p=mesa.git diff --git a/src/mesa/drivers/dri/i965/brw_vec4_nir.cpp b/src/mesa/drivers/dri/i965/brw_vec4_nir.cpp index cfb66a53fe9..ba3bbdfaf16 100644 --- a/src/mesa/drivers/dri/i965/brw_vec4_nir.cpp +++ b/src/mesa/drivers/dri/i965/brw_vec4_nir.cpp @@ -41,10 +41,10 @@ vec4_visitor::emit_nir_code() nir_setup_system_values(); /* get the main function and emit it */ - nir_foreach_overload(nir, overload) { - assert(strcmp(overload->function->name, "main") == 0); - assert(overload->impl); - nir_emit_impl(overload->impl); + nir_foreach_function(function, nir) { + assert(strcmp(function->name, "main") == 0); + assert(function->impl); + nir_emit_impl(function->impl); } } @@ -60,22 +60,31 @@ vec4_visitor::nir_setup_system_value_intrinsic(nir_intrinsic_instr *instr) case nir_intrinsic_load_vertex_id_zero_base: reg = &nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE]; if (reg->file == BAD_FILE) - *reg = *make_reg_for_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE, - glsl_type::int_type); + *reg = *make_reg_for_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE); break; case nir_intrinsic_load_base_vertex: reg = &nir_system_values[SYSTEM_VALUE_BASE_VERTEX]; if (reg->file == BAD_FILE) - *reg = *make_reg_for_system_value(SYSTEM_VALUE_BASE_VERTEX, - glsl_type::int_type); + *reg = *make_reg_for_system_value(SYSTEM_VALUE_BASE_VERTEX); break; case nir_intrinsic_load_instance_id: reg = &nir_system_values[SYSTEM_VALUE_INSTANCE_ID]; if (reg->file == BAD_FILE) - *reg = *make_reg_for_system_value(SYSTEM_VALUE_INSTANCE_ID, - glsl_type::int_type); + *reg = *make_reg_for_system_value(SYSTEM_VALUE_INSTANCE_ID); + break; + + case nir_intrinsic_load_base_instance: + reg = &nir_system_values[SYSTEM_VALUE_BASE_INSTANCE]; + if (reg->file == BAD_FILE) + *reg = *make_reg_for_system_value(SYSTEM_VALUE_BASE_INSTANCE); + break; + + case nir_intrinsic_load_draw_id: + reg = &nir_system_values[SYSTEM_VALUE_DRAW_ID]; + if (reg->file == BAD_FILE) + *reg = *make_reg_for_system_value(SYSTEM_VALUE_DRAW_ID); break; default: @@ -84,11 +93,9 @@ vec4_visitor::nir_setup_system_value_intrinsic(nir_intrinsic_instr *instr) } static bool -setup_system_values_block(nir_block *block, void *void_visitor) +setup_system_values_block(nir_block *block, vec4_visitor *v) { - vec4_visitor *v = (vec4_visitor *)void_visitor; - - nir_foreach_instr(block, instr) { + nir_foreach_instr(instr, block) { if (instr->type != nir_instr_type_intrinsic) continue; @@ -107,10 +114,12 @@ vec4_visitor::nir_setup_system_values() nir_system_values[i] = dst_reg(); } - nir_foreach_overload(nir, overload) { - assert(strcmp(overload->function->name, "main") == 0); - assert(overload->impl); - nir_foreach_block(overload->impl, setup_system_values_block, this); + nir_foreach_function(function, nir) { + assert(strcmp(function->name, "main") == 0); + assert(function->impl); + nir_foreach_block(block, function->impl) { + setup_system_values_block(block, this); + } } } @@ -118,15 +127,6 @@ void vec4_visitor::nir_setup_uniforms() { uniforms = nir->num_uniforms / 16; - - nir_foreach_variable(var, &nir->uniforms) { - /* UBO's and atomics don't take up space in the uniform file */ - if (var->interface_type != NULL || var->type->contains_atomic()) - continue; - - if (type_size_vec4(var->type) > 0) - 
uniform_size[var->data.driver_location / 16] = type_size_vec4(var->type); - } } void @@ -208,7 +208,7 @@ vec4_visitor::nir_emit_loop(nir_loop *loop) void vec4_visitor::nir_emit_block(nir_block *block) { - nir_foreach_instr(block, instr) { + nir_foreach_instr(instr, block) { nir_emit_instr(instr); } } @@ -267,7 +267,7 @@ dst_reg_for_nir_reg(vec4_visitor *v, nir_register *nir_reg, } dst_reg -vec4_visitor::get_nir_dest(nir_dest dest) +vec4_visitor::get_nir_dest(const nir_dest &dest) { if (dest.is_ssa) { dst_reg dst = dst_reg(VGRF, alloc.allocate(1)); @@ -280,19 +280,19 @@ vec4_visitor::get_nir_dest(nir_dest dest) } dst_reg -vec4_visitor::get_nir_dest(nir_dest dest, enum brw_reg_type type) +vec4_visitor::get_nir_dest(const nir_dest &dest, enum brw_reg_type type) { return retype(get_nir_dest(dest), type); } dst_reg -vec4_visitor::get_nir_dest(nir_dest dest, nir_alu_type type) +vec4_visitor::get_nir_dest(const nir_dest &dest, nir_alu_type type) { return get_nir_dest(dest, brw_type_for_nir_type(type)); } src_reg -vec4_visitor::get_nir_src(nir_src src, enum brw_reg_type type, +vec4_visitor::get_nir_src(const nir_src &src, enum brw_reg_type type, unsigned num_components) { dst_reg reg; @@ -314,19 +314,37 @@ vec4_visitor::get_nir_src(nir_src src, enum brw_reg_type type, } src_reg -vec4_visitor::get_nir_src(nir_src src, nir_alu_type type, +vec4_visitor::get_nir_src(const nir_src &src, nir_alu_type type, unsigned num_components) { return get_nir_src(src, brw_type_for_nir_type(type), num_components); } src_reg -vec4_visitor::get_nir_src(nir_src src, unsigned num_components) +vec4_visitor::get_nir_src(const nir_src &src, unsigned num_components) { /* if type is not specified, default to signed int */ return get_nir_src(src, nir_type_int, num_components); } +src_reg +vec4_visitor::get_indirect_offset(nir_intrinsic_instr *instr) +{ + nir_src *offset_src = nir_get_io_offset_src(instr); + nir_const_value *const_value = nir_src_as_const_value(*offset_src); + + if (const_value) { + /* The only constant offset we should find is 0. brw_nir.c's + * add_const_offset_to_base() will fold other constant offsets + * into instr->const_index[0]. 
+ */ + assert(const_value->u32[0] == 0); + return src_reg(); + } + + return get_nir_src(*offset_src, BRW_REGISTER_TYPE_UD, 1); +} + void vec4_visitor::nir_emit_load_const(nir_load_const_instr *instr) { @@ -346,13 +364,13 @@ vec4_visitor::nir_emit_load_const(nir_load_const_instr *instr) continue; for (unsigned j = i; j < instr->def.num_components; j++) { - if (instr->value.u[i] == instr->value.u[j]) { + if (instr->value.u32[i] == instr->value.u32[j]) { writemask |= 1 << j; } } reg.writemask = writemask; - emit(MOV(reg, brw_imm_d(instr->value.i[i]))); + emit(MOV(reg, brw_imm_d(instr->value.i32[i]))); remaining &= ~writemask; } @@ -377,8 +395,10 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) /* We set EmitNoIndirectInput for VS */ assert(const_offset); - src = src_reg(ATTR, instr->const_index[0] + const_offset->u[0], + src = src_reg(ATTR, instr->const_index[0] + const_offset->u32[0], glsl_type::uvec4_type); + /* Swizzle source based on component layout qualifier */ + src.swizzle = BRW_SWZ_COMP_INPUT(nir_intrinsic_component(instr)); dest = get_nir_dest(instr->dest, src.type); dest.writemask = brw_writemask_for_size(instr->num_components); @@ -391,18 +411,25 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]); assert(const_offset); - int varying = instr->const_index[0] + const_offset->u[0]; + int varying = instr->const_index[0] + const_offset->u32[0]; src = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F, instr->num_components); - output_reg[varying] = dst_reg(src); + if (varying >= VARYING_SLOT_VAR0) { + unsigned c = nir_intrinsic_component(instr); + unsigned v = varying - VARYING_SLOT_VAR0; + output_generic_reg[v][c] = dst_reg(src); + output_generic_num_components[v][c] = instr->num_components; + } else { + output_reg[varying] = dst_reg(src); + } break; } case nir_intrinsic_get_buffer_size: { nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]); - unsigned ssbo_index = const_uniform_block ? const_uniform_block->u[0] : 0; + unsigned ssbo_index = const_uniform_block ? 
const_uniform_block->u32[0] : 0; const unsigned index = prog_data->base.binding_table.ssbo_start + ssbo_index; @@ -435,7 +462,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) nir_src_as_const_value(instr->src[1]); if (const_uniform_block) { unsigned index = prog_data->base.binding_table.ssbo_start + - const_uniform_block->u[0]; + const_uniform_block->u32[0]; surf_index = brw_imm_ud(index); brw_mark_surface_used(&prog_data->base, index); } else { @@ -453,7 +480,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) src_reg offset_reg; nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]); if (const_offset) { - offset_reg = brw_imm_ud(const_offset->u[0]); + offset_reg = brw_imm_ud(const_offset->u32[0]); } else { offset_reg = get_nir_src(instr->src[2], 1); } @@ -573,7 +600,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) src_reg surf_index; if (const_uniform_block) { unsigned index = prog_data->base.binding_table.ssbo_start + - const_uniform_block->u[0]; + const_uniform_block->u32[0]; surf_index = brw_imm_ud(index); brw_mark_surface_used(&prog_data->base, index); @@ -594,7 +621,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) src_reg offset_reg; nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]); if (const_offset) { - offset_reg = brw_imm_ud(const_offset->u[0]); + offset_reg = brw_imm_ud(const_offset->u32[0]); } else { offset_reg = get_nir_src(instr->src[1], 1); } @@ -650,7 +677,10 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) case nir_intrinsic_load_vertex_id_zero_base: case nir_intrinsic_load_base_vertex: - case nir_intrinsic_load_instance_id: { + case nir_intrinsic_load_instance_id: + case nir_intrinsic_load_base_instance: + case nir_intrinsic_load_draw_id: + case nir_intrinsic_load_invocation_id: { gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic); src_reg val = src_reg(nir_system_values[sv]); assert(val.file != BAD_FILE); @@ -660,25 +690,47 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) } case nir_intrinsic_load_uniform: { - /* Offsets are in bytes but they should always be multiples of 16 */ - assert(instr->const_index[0] % 16 == 0); + /* Offsets are in bytes but they should always be multiples of 4 */ + assert(nir_intrinsic_base(instr) % 4 == 0); dest = get_nir_dest(instr->dest); - src = src_reg(dst_reg(UNIFORM, instr->const_index[0] / 16)); + src = src_reg(dst_reg(UNIFORM, nir_intrinsic_base(instr) / 16)); src.type = dest.type; + /* Uniforms don't actually have to be vec4 aligned. In the case that + * it isn't, we have to use a swizzle to shift things around. They + * do still have the std140 alignment requirement that vec2's have to + * be vec2-aligned and vec3's and vec4's have to be vec4-aligned. + * + * The swizzle also works in the indirect case as the generator adds + * the swizzle to the offset for us. 
+ */ + unsigned shift = (nir_intrinsic_base(instr) % 16) / 4; + assert(shift + instr->num_components <= 4); + nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]); if (const_offset) { - /* Offsets are in bytes but they should always be multiples of 16 */ - assert(const_offset->u[0] % 16 == 0); - src.reg_offset = const_offset->u[0] / 16; + /* Offsets are in bytes but they should always be multiples of 4 */ + assert(const_offset->u32[0] % 4 == 0); + + unsigned offset = const_offset->u32[0] + shift * 4; + src.offset = ROUND_DOWN_TO(offset, 16); + shift = (offset % 16) / 4; + src.swizzle += BRW_SWIZZLE4(shift, shift, shift, shift); + + emit(MOV(dest, src)); } else { - src_reg tmp = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_D, 1); - src.reladdr = new(mem_ctx) src_reg(tmp); - } + src.swizzle += BRW_SWIZZLE4(shift, shift, shift, shift); - emit(MOV(dest, src)); + src_reg indirect = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_UD, 1); + + /* MOV_INDIRECT is going to stomp the whole thing anyway */ + dest.writemask = WRITEMASK_XYZW; + + emit(SHADER_OPCODE_MOV_INDIRECT, dest, src, + indirect, brw_imm_ud(instr->const_index[1])); + } break; } @@ -689,24 +741,34 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) (unsigned) instr->const_index[0]; src_reg offset = get_nir_src(instr->src[0], nir_type_int, instr->num_components); + const src_reg surface = brw_imm_ud(surf_index); + const vec4_builder bld = + vec4_builder(this).at_end().annotate(current_annotation, base_ir); + src_reg tmp; + dest = get_nir_dest(instr->dest); switch (instr->intrinsic) { - case nir_intrinsic_atomic_counter_inc: - emit_untyped_atomic(BRW_AOP_INC, surf_index, dest, offset, - src_reg(), src_reg()); - break; - case nir_intrinsic_atomic_counter_dec: - emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dest, offset, - src_reg(), src_reg()); - break; - case nir_intrinsic_atomic_counter_read: - emit_untyped_surface_read(surf_index, dest, offset); - break; - default: - unreachable("Unreachable"); + case nir_intrinsic_atomic_counter_inc: + tmp = emit_untyped_atomic(bld, surface, offset, + src_reg(), src_reg(), + 1, 1, + BRW_AOP_INC); + break; + case nir_intrinsic_atomic_counter_dec: + tmp = emit_untyped_atomic(bld, surface, offset, + src_reg(), src_reg(), + 1, 1, + BRW_AOP_PREDEC); + break; + case nir_intrinsic_atomic_counter_read: + tmp = emit_untyped_read(bld, surface, offset, 1, 1); + break; + default: + unreachable("Unreachable"); } + bld.MOV(retype(dest, tmp.type), tmp); brw_mark_surface_used(stage_prog_data, surf_index); break; } @@ -722,7 +784,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) * as an immediate. 
*/ const unsigned index = prog_data->base.binding_table.ubo_start + - const_block_index->u[0]; + const_block_index->u32[0]; surf_index = brw_imm_ud(index); brw_mark_surface_used(&prog_data->base, index); } else { @@ -747,7 +809,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) src_reg offset; nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]); if (const_offset) { - offset = brw_imm_ud(const_offset->u[0] & ~15); + offset = brw_imm_ud(const_offset->u32[0] & ~15); } else { offset = get_nir_src(instr->src[1], nir_type_int, 1); } @@ -762,10 +824,10 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) packed_consts.swizzle = brw_swizzle_for_size(instr->num_components); if (const_offset) { - packed_consts.swizzle += BRW_SWIZZLE4(const_offset->u[0] % 16 / 4, - const_offset->u[0] % 16 / 4, - const_offset->u[0] % 16 / 4, - const_offset->u[0] % 16 / 4); + packed_consts.swizzle += BRW_SWIZZLE4(const_offset->u32[0] % 16 / 4, + const_offset->u32[0] % 16 / 4, + const_offset->u32[0] % 16 / 4, + const_offset->u32[0] % 16 / 4); } emit(MOV(dest, packed_consts)); @@ -777,7 +839,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) vec4_builder(this).at_end().annotate(current_annotation, base_ir); const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 2); bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp) - ->regs_written = 2; + ->size_written = 2 * REG_SIZE; break; } @@ -807,7 +869,7 @@ vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr) nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]); if (const_surface) { unsigned surf_index = prog_data->base.binding_table.ssbo_start + - const_surface->u[0]; + const_surface->u32[0]; surface = brw_imm_ud(surf_index); brw_mark_surface_used(&prog_data->base, surf_index); } else { @@ -833,12 +895,11 @@ vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr) const vec4_builder bld = vec4_builder(this).at_end().annotate(current_annotation, base_ir); - src_reg atomic_result = - surface_access::emit_untyped_atomic(bld, surface, offset, - data1, data2, - 1 /* dims */, 1 /* rsize */, - op, - BRW_PREDICATE_NONE); + src_reg atomic_result = emit_untyped_atomic(bld, surface, offset, + data1, data2, + 1 /* dims */, 1 /* rsize */, + op, + BRW_PREDICATE_NONE); dest.type = atomic_result.type; bld.MOV(dest, atomic_result); } @@ -888,6 +949,107 @@ brw_conditional_for_nir_comparison(nir_op op) } } +bool +vec4_visitor::optimize_predicate(nir_alu_instr *instr, + enum brw_predicate *predicate) +{ + if (!instr->src[0].src.is_ssa || + instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu) + return false; + + nir_alu_instr *cmp_instr = + nir_instr_as_alu(instr->src[0].src.ssa->parent_instr); + + switch (cmp_instr->op) { + case nir_op_bany_fnequal2: + case nir_op_bany_inequal2: + case nir_op_bany_fnequal3: + case nir_op_bany_inequal3: + case nir_op_bany_fnequal4: + case nir_op_bany_inequal4: + *predicate = BRW_PREDICATE_ALIGN16_ANY4H; + break; + case nir_op_ball_fequal2: + case nir_op_ball_iequal2: + case nir_op_ball_fequal3: + case nir_op_ball_iequal3: + case nir_op_ball_fequal4: + case nir_op_ball_iequal4: + *predicate = BRW_PREDICATE_ALIGN16_ALL4H; + break; + default: + return false; + } + + unsigned size_swizzle = + brw_swizzle_for_size(nir_op_infos[cmp_instr->op].input_sizes[0]); + + src_reg op[2]; + assert(nir_op_infos[cmp_instr->op].num_inputs == 2); + for (unsigned i = 0; i < 2; i++) { + op[i] = get_nir_src(cmp_instr->src[i].src, + nir_op_infos[cmp_instr->op].input_types[i], 4); + 
unsigned base_swizzle = + brw_swizzle_for_nir_swizzle(cmp_instr->src[i].swizzle); + op[i].swizzle = brw_compose_swizzle(size_swizzle, base_swizzle); + op[i].abs = cmp_instr->src[i].abs; + op[i].negate = cmp_instr->src[i].negate; + } + + emit(CMP(dst_null_d(), op[0], op[1], + brw_conditional_for_nir_comparison(cmp_instr->op))); + + return true; +} + +static void +emit_find_msb_using_lzd(const vec4_builder &bld, + const dst_reg &dst, + const src_reg &src, + bool is_signed) +{ + vec4_instruction *inst; + src_reg temp = src; + + if (is_signed) { + /* LZD of an absolute value source almost always does the right + * thing. There are two problem values: + * + * * 0x80000000. Since abs(0x80000000) == 0x80000000, LZD returns + * 0. However, findMSB(int(0x80000000)) == 30. + * + * * 0xffffffff. Since abs(0xffffffff) == 1, LZD returns + * 31. Section 8.8 (Integer Functions) of the GLSL 4.50 spec says: + * + * For a value of zero or negative one, -1 will be returned. + * + * * Negative powers of two. LZD(abs(-(1<src[0].negate = true; +} + void vec4_visitor::nir_emit_alu(nir_alu_instr *instr) { @@ -952,12 +1114,12 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr) * operand. If we can determine that one of the args is in the low * 16 bits, though, we can just emit a single MUL. */ - if (value0 && value0->u[0] < (1 << 16)) { + if (value0 && value0->u32[0] < (1 << 16)) { if (devinfo->gen < 7) emit(MUL(dst, op[0], op[1])); else emit(MUL(dst, op[1], op[0])); - } else if (value1 && value1->u[0] < (1 << 16)) { + } else if (value1 && value1->u32[0] < (1 << 16)) { if (devinfo->gen < 7) emit(MUL(dst, op[1], op[0])); else @@ -979,7 +1141,11 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr) case nir_op_umul_high: { struct brw_reg acc = retype(brw_acc_reg(8), dst.type); - emit(MUL(acc, op[0], op[1])); + if (devinfo->gen >= 8) + emit(MUL(acc, op[0], retype(op[1], BRW_REGISTER_TYPE_UW))); + else + emit(MUL(acc, op[0], op[1])); + emit(MACH(dst, op[0], op[1])); break; } @@ -1015,9 +1181,45 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr) break; case nir_op_umod: + case nir_op_irem: + /* According to the sign table for INT DIV in the Ivy Bridge PRM, it + * appears that our hardware just does the right thing for signed + * remainder. + */ emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]); break; + case nir_op_imod: { + /* Get a regular C-style remainder. If a % b == 0, set the predicate. */ + inst = emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]); + + /* Math instructions don't support conditional mod */ + inst = emit(MOV(dst_null_d(), src_reg(dst))); + inst->conditional_mod = BRW_CONDITIONAL_NZ; + + /* Now, we need to determine if signs of the sources are different. + * When we XOR the sources, the top bit is 0 if they are the same and 1 + * if they are different. We can then use a conditional modifier to + * turn that into a predicate. This leads us to an XOR.l instruction. + * + * Technically, according to the PRM, you're not allowed to use .l on a + * XOR instruction. However, emperical experiments and Curro's reading + * of the simulator source both indicate that it's safe. + */ + src_reg tmp = src_reg(this, glsl_type::ivec4_type); + inst = emit(XOR(dst_reg(tmp), op[0], op[1])); + inst->predicate = BRW_PREDICATE_NORMAL; + inst->conditional_mod = BRW_CONDITIONAL_L; + + /* If the result of the initial remainder operation is non-zero and the + * two sources have different signs, add in a copy of op[1] to get the + * final integer modulus value. 
+ */ + inst = emit(ADD(dst, src_reg(dst), op[1])); + inst->predicate = BRW_PREDICATE_NORMAL; + break; + } + case nir_op_ldexp: unreachable("not reached: should be handled by ldexp_to_arith()"); @@ -1087,6 +1289,31 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr) inst->saturate = instr->dest.saturate; break; + case nir_op_fquantize2f16: { + /* See also vec4_visitor::emit_pack_half_2x16() */ + src_reg tmp16 = src_reg(this, glsl_type::uvec4_type); + src_reg tmp32 = src_reg(this, glsl_type::vec4_type); + src_reg zero = src_reg(this, glsl_type::vec4_type); + + /* Check for denormal */ + src_reg abs_src0 = op[0]; + abs_src0.abs = true; + emit(CMP(dst_null_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)), + BRW_CONDITIONAL_L)); + /* Get the appropriately signed zero */ + emit(AND(retype(dst_reg(zero), BRW_REGISTER_TYPE_UD), + retype(op[0], BRW_REGISTER_TYPE_UD), + brw_imm_ud(0x80000000))); + /* Do the actual F32 -> F16 -> F32 conversion */ + emit(F32TO16(dst_reg(tmp16), op[0])); + emit(F16TO32(dst_reg(tmp32), tmp16)); + /* Select that or zero based on normal status */ + inst = emit(BRW_OPCODE_SEL, dst, zero, tmp32); + inst->predicate = BRW_PREDICATE_NORMAL; + inst->saturate = instr->dest.saturate; + break; + } + case nir_op_fmin: case nir_op_imin: case nir_op_umin: @@ -1231,6 +1458,24 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr) case nir_op_pack_unorm_2x16: unreachable("not reached: should be handled by lower_packing_builtins"); + case nir_op_pack_uvec4_to_uint: + unreachable("not reached"); + + case nir_op_pack_uvec2_to_uint: { + dst_reg tmp1 = dst_reg(this, glsl_type::uint_type); + tmp1.writemask = WRITEMASK_X; + op[0].swizzle = BRW_SWIZZLE_YYYY; + emit(SHL(tmp1, op[0], src_reg(brw_imm_ud(16u)))); + + dst_reg tmp2 = dst_reg(this, glsl_type::uint_type); + tmp2.writemask = WRITEMASK_X; + op[0].swizzle = BRW_SWIZZLE_XXXX; + emit(AND(tmp2, op[0], src_reg(brw_imm_ud(0xffffu)))); + + emit(OR(dst, src_reg(tmp1), src_reg(tmp2))); + break; + } + case nir_op_unpack_half_2x16: /* As NIR does not guarantee that we have a correct swizzle outside the * boundaries of a vector, and the implementation of emit_unpack_half_2x16 @@ -1273,28 +1518,63 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr) break; case nir_op_ufind_msb: - case nir_op_ifind_msb: { - emit(FBH(retype(dst, BRW_REGISTER_TYPE_UD), op[0])); + emit_find_msb_using_lzd(vec4_builder(this).at_end(), dst, op[0], false); + break; - /* FBH counts from the MSB side, while GLSL's findMSB() wants the count - * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then - * subtract the result from 31 to convert the MSB count into an LSB count. - */ + case nir_op_ifind_msb: { + vec4_builder bld = vec4_builder(this).at_end(); src_reg src(dst); - emit(CMP(dst_null_d(), src, brw_imm_d(-1), BRW_CONDITIONAL_NZ)); - inst = emit(ADD(dst, src, brw_imm_d(31))); - inst->predicate = BRW_PREDICATE_NORMAL; - inst->src[0].negate = true; + if (devinfo->gen < 7) { + emit_find_msb_using_lzd(bld, dst, op[0], true); + } else { + emit(FBH(retype(dst, BRW_REGISTER_TYPE_UD), op[0])); + + /* FBH counts from the MSB side, while GLSL's findMSB() wants the + * count from the LSB side. If FBH didn't return an error + * (0xFFFFFFFF), then subtract the result from 31 to convert the MSB + * count into an LSB count. 
+ */ + bld.CMP(dst_null_d(), src, brw_imm_d(-1), BRW_CONDITIONAL_NZ); + + inst = bld.ADD(dst, src, brw_imm_d(31)); + inst->predicate = BRW_PREDICATE_NORMAL; + inst->src[0].negate = true; + } break; } - case nir_op_find_lsb: - emit(FBL(dst, op[0])); + case nir_op_find_lsb: { + vec4_builder bld = vec4_builder(this).at_end(); + + if (devinfo->gen < 7) { + dst_reg temp = bld.vgrf(BRW_REGISTER_TYPE_D); + + /* (x & -x) generates a value that consists of only the LSB of x. + * For all powers of 2, findMSB(y) == findLSB(y). + */ + src_reg src = src_reg(retype(op[0], BRW_REGISTER_TYPE_D)); + src_reg negated_src = src; + + /* One must be negated, and the other must be non-negated. It + * doesn't matter which is which. + */ + negated_src.negate = true; + src.negate = false; + + bld.AND(temp, src, negated_src); + emit_find_msb_using_lzd(bld, dst, src_reg(temp), false); + } else { + bld.FBL(dst, op[0]); + } break; + } case nir_op_ubitfield_extract: case nir_op_ibitfield_extract: + unreachable("should have been lowered"); + case nir_op_ubfe: + case nir_op_ibfe: op[0] = fix_3src_operand(op[0]); op[1] = fix_3src_operand(op[1]); op[2] = fix_3src_operand(op[2]); @@ -1315,8 +1595,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr) break; case nir_op_bitfield_insert: - unreachable("not reached: should be handled by " - "lower_instructions::bitfield_insert_to_bfm_bfi"); + unreachable("not reached: should have been lowered"); case nir_op_fsign: /* AND(val, 0x80000000) gives the sign bit. @@ -1378,25 +1657,29 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr) break; case nir_op_bcsel: - emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ)); - inst = emit(BRW_OPCODE_SEL, dst, op[1], op[2]); - switch (dst.writemask) { - case WRITEMASK_X: - inst->predicate = BRW_PREDICATE_ALIGN16_REPLICATE_X; - break; - case WRITEMASK_Y: - inst->predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Y; - break; - case WRITEMASK_Z: - inst->predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Z; - break; - case WRITEMASK_W: - inst->predicate = BRW_PREDICATE_ALIGN16_REPLICATE_W; - break; - default: - inst->predicate = BRW_PREDICATE_NORMAL; - break; + enum brw_predicate predicate; + if (!optimize_predicate(instr, &predicate)) { + emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ)); + switch (dst.writemask) { + case WRITEMASK_X: + predicate = BRW_PREDICATE_ALIGN16_REPLICATE_X; + break; + case WRITEMASK_Y: + predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Y; + break; + case WRITEMASK_Z: + predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Z; + break; + case WRITEMASK_W: + predicate = BRW_PREDICATE_ALIGN16_REPLICATE_W; + break; + default: + predicate = BRW_PREDICATE_NORMAL; + break; + } } + inst = emit(BRW_OPCODE_SEL, dst, op[1], op[2]); + inst->predicate = predicate; break; case nir_op_fdot_replicated2: @@ -1419,20 +1702,6 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr) inst->saturate = instr->dest.saturate; break; - case nir_op_bany2: - case nir_op_bany3: - case nir_op_bany4: { - unsigned swiz = - brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]); - - emit(CMP(dst_null_d(), swizzle(op[0], swiz), brw_imm_d(0), - BRW_CONDITIONAL_NZ)); - emit(MOV(dst, brw_imm_d(0))); - inst = emit(MOV(dst, brw_imm_d(~0))); - inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H; - break; - } - case nir_op_fabs: case nir_op_iabs: case nir_op_fneg: @@ -1482,13 +1751,7 @@ vec4_visitor::nir_emit_jump(nir_jump_instr *instr) break; case nir_jump_return: - /* This has to be the last block in the shader. We don't handle - * early returns. 
- */ - assert(nir_cf_node_next(&instr->instr.block->cf_node) == NULL && - instr->instr.block->cf_node.parent->type == nir_cf_node_function); - break; - + /* fall through */ default: unreachable("unknown jump"); } @@ -1558,11 +1821,12 @@ vec4_visitor::nir_emit_texture(nir_tex_instr *instr) nir_tex_instr_dest_size(instr)); dst_reg dest = get_nir_dest(instr->dest, instr->dest_type); - /* Our hardware requires a LOD for buffer textures */ + /* The hardware requires a LOD for buffer textures */ if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) lod = brw_imm_d(0); /* Load the texture operation sources */ + uint32_t constant_offset = 0; for (unsigned i = 0; i < instr->num_srcs; i++) { switch (instr->src[i].src_type) { case nir_tex_src_comparitor: @@ -1619,16 +1883,25 @@ vec4_visitor::nir_emit_texture(nir_tex_instr *instr) break; } - case nir_tex_src_offset: - offset_value = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 2); + case nir_tex_src_offset: { + nir_const_value *const_offset = + nir_src_as_const_value(instr->src[i].src); + if (const_offset) { + constant_offset = brw_texture_offset(const_offset->i32, 3); + } else { + offset_value = + get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 2); + } break; + } case nir_tex_src_texture_offset: { /* The highest texture which may be used by this operation is * the last element of the array. Mark it here, because the generator * doesn't have enough information to determine the bound. */ - uint32_t max_used = texture + instr->texture_array_size - 1; + uint32_t array_size = instr->texture_array_size; + uint32_t max_used = texture + array_size - 1; if (instr->op == nir_texop_tg4) { max_used += prog_data->base.binding_table.gather_texture_start; } else { @@ -1669,21 +1942,13 @@ vec4_visitor::nir_emit_texture(nir_tex_instr *instr) instr->op == nir_texop_samples_identical) { assert(coord_type != NULL); if (devinfo->gen >= 7 && - key_tex->compressed_multisample_layout_mask & (1 << sampler)) { - mcs = emit_mcs_fetch(coord_type, coordinate, sampler_reg); + key_tex->compressed_multisample_layout_mask & (1 << texture)) { + mcs = emit_mcs_fetch(coord_type, coordinate, texture_reg); } else { mcs = brw_imm_ud(0u); } } - uint32_t constant_offset = 0; - for (unsigned i = 0; i < 3; i++) { - if (instr->const_offset[i] != 0) { - constant_offset = brw_texture_offset(instr->const_offset, 3); - break; - } - } - /* Stuff the channel select bits in the top of the texture offset */ if (instr->op == nir_texop_tg4) { if (instr->component == 1 && @@ -1699,16 +1964,10 @@ vec4_visitor::nir_emit_texture(nir_tex_instr *instr) ir_texture_opcode op = ir_texture_opcode_for_nir_texop(instr->op); - bool is_cube_array = - instr->op == nir_texop_txs && - instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE && - instr->is_array; - emit_texture(op, dest, dest_type, coordinate, instr->coord_components, shadow_comparitor, lod, lod2, sample_index, - constant_offset, offset_value, - mcs, is_cube_array, + constant_offset, offset_value, mcs, texture, texture_reg, sampler, sampler_reg); }
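
The load_uniform hunk above drops the old vec4-alignment requirement: the base and the constant offset only have to be 4-byte aligned, and the sub-vec4 remainder is folded into the register swizzle. Below is a minimal standalone sketch (not part of the patch) of that offset arithmetic, mirroring the ROUND_DOWN_TO(offset, 16) and (offset % 16) / 4 computation; the struct and function names are hypothetical and exist only for this illustration.

#include <cassert>

struct UniformAddress {
   unsigned reg_offset;     /* byte offset rounded down to a vec4 boundary */
   unsigned swizzle_shift;  /* how many 32-bit channels to rotate the swizzle by */
};

static UniformAddress
split_uniform_offset(unsigned base, unsigned const_offset)
{
   /* Matches the asserts in the patch: 4-byte alignment only. */
   assert(base % 4 == 0 && const_offset % 4 == 0);

   unsigned shift = (base % 16) / 4;            /* channel shift implied by the base */
   unsigned offset = const_offset + shift * 4;  /* fold it into the byte offset */

   UniformAddress addr;
   addr.reg_offset = offset & ~15u;             /* ROUND_DOWN_TO(offset, 16) */
   addr.swizzle_shift = (offset % 16) / 4;      /* remaining channel shift */
   return addr;
}

int main()
{
   /* base 8 + constant offset 4 = byte 12: stays in the first vec4, channel 3 */
   UniformAddress a = split_uniform_offset(8, 4);
   assert(a.reg_offset == 0 && a.swizzle_shift == 3);

   /* base 4 + constant offset 28 = byte 32: two vec4s down, channel 0 */
   UniformAddress b = split_uniform_offset(4, 28);
   assert(b.reg_offset == 32 && b.swizzle_shift == 0);
   return 0;
}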
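
The new emit_find_msb_using_lzd() helper implements findMSB() without FBH by using LZD (a leading-zero count) plus a fix-up for signed inputs; as its comment notes, 0x80000000 and 0xffffffff are the awkward values. The sketch below (not part of the patch) models the underlying identity, inverting negative inputs, counting leading zeros, and subtracting from 31, under the assumption that a leading-zero count of zero input is 32; it does not claim to match the helper's exact instruction sequence, and find_msb_ref and clz32 are hypothetical names used only here.

#include <cassert>
#include <cstdint>

static int clz32(uint32_t x)             /* leading-zero count; 32 for x == 0 */
{
   int n = 0;
   while (n < 32 && !(x & 0x80000000u)) {
      x <<= 1;
      n++;
   }
   return n;
}

static int32_t find_msb_ref(int32_t x, bool is_signed)
{
   uint32_t v = (uint32_t) x;
   if (is_signed && x < 0)
      v = ~v;                             /* logical NOT, not negation */
   return 31 - clz32(v);                  /* clz32(0) == 32  ->  -1 */
}

int main()
{
   assert(find_msb_ref(0, false) == -1);
   assert(find_msb_ref(1 << 12, false) == 12);
   assert(find_msb_ref(-1, true) == -1);          /* 0xffffffff */
   assert(find_msb_ref(INT32_MIN, true) == 30);   /* 0x80000000 */
   assert(find_msb_ref(-(1 << 4), true) == 3);    /* negative power of two */
   return 0;
}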
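
The nir_op_imod case builds GLSL's floored modulus from the hardware's truncated remainder: take the C-style remainder, and if it is non-zero while the operands' signs differ (detected with the XOR trick described in the patch comment), add the divisor back in. A small reference model of that arithmetic follows (not part of the patch); imod_ref is a hypothetical name used only for this sketch.

#include <cassert>
#include <cstdint>

static int32_t imod_ref(int32_t a, int32_t b)
{
   int32_t r = a % b;                  /* truncated remainder, as SHADER_OPCODE_INT_REMAINDER gives */
   bool signs_differ = ((a ^ b) < 0);  /* sign bit of a XOR b, as the patch's XOR.l checks */
   if (r != 0 && signs_differ)
      r += b;                          /* the predicated ADD in the patch */
   return r;
}

int main()
{
   assert(imod_ref( 7,  3) == 1);
   assert(imod_ref(-7,  3) == 2);      /* truncated remainder would be -1 */
   assert(imod_ref( 7, -3) == -2);     /* truncated remainder would be  1 */
   assert(imod_ref(-7, -3) == -1);
   assert(imod_ref( 6,  3) == 0);      /* zero remainder: no fix-up */
   return 0;
}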