X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fintel%2Fcompiler%2Fbrw_fs_nir.cpp;h=e04667c509944c61b882f93562c9c86caa75b548;hb=b6a454791b45b60b9518b4b8fb41fd443b3ceab1;hp=2b36171136e6695f9fa3d06e409075070aec0d14;hpb=d28bc35ece9c6e0d9f40c1540bd147a6face6207;p=mesa.git diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp index 2b36171136e..e04667c5099 100644 --- a/src/intel/compiler/brw_fs_nir.cpp +++ b/src/intel/compiler/brw_fs_nir.cpp @@ -23,12 +23,11 @@ #include "compiler/glsl/ir.h" #include "brw_fs.h" -#include "brw_fs_surface_builder.h" #include "brw_nir.h" #include "util/u_math.h" +#include "util/bitscan.h" using namespace brw; -using namespace brw::surface_access; void fs_visitor::emit_nir_code() @@ -59,7 +58,7 @@ fs_visitor::nir_setup_outputs() const int loc = var->data.driver_location; const unsigned var_vec4s = var->data.compact ? DIV_ROUND_UP(glsl_get_length(var->type), 4) - : type_size_vec4(var->type); + : type_size_vec4(var->type, true); vec4s[loc] = MAX2(vec4s[loc], var_vec4s); } @@ -382,13 +381,30 @@ fs_visitor::nir_emit_cf_list(exec_list *list) void fs_visitor::nir_emit_if(nir_if *if_stmt) { + bool invert; + fs_reg cond_reg; + + /* If the condition has the form !other_condition, use other_condition as + * the source, but invert the predicate on the if instruction. + */ + nir_alu_instr *const cond = nir_src_as_alu_instr(&if_stmt->condition); + if (cond != NULL && cond->op == nir_op_inot) { + assert(!cond->src[0].negate); + assert(!cond->src[0].abs); + + invert = true; + cond_reg = get_nir_src(cond->src[0].src); + } else { + invert = false; + cond_reg = get_nir_src(if_stmt->condition); + } + /* first, put the condition into f0 */ fs_inst *inst = bld.MOV(bld.null_reg_d(), - retype(get_nir_src(if_stmt->condition), - BRW_REGISTER_TYPE_D)); + retype(cond_reg, BRW_REGISTER_TYPE_D)); inst->conditional_mod = BRW_CONDITIONAL_NZ; - bld.IF(BRW_PREDICATE_NORMAL); + bld.IF(BRW_PREDICATE_NORMAL)->predicate_inverse = invert; nir_emit_cf_list(&if_stmt->then_list); @@ -511,6 +527,15 @@ fs_visitor::optimize_extract_to_float(nir_alu_instr *instr, src0->op != nir_op_extract_i8 && src0->op != nir_op_extract_i16) return false; + /* If either opcode has source modifiers, bail. + * + * TODO: We can potentially handle source modifiers if both of the opcodes + * we're combining are signed integers. + */ + if (instr->src[0].abs || instr->src[0].negate || + src0->src[0].abs || src0->src[0].negate) + return false; + unsigned element = nir_src_as_uint(src0->src[1].src); /* Element type to extract.*/ @@ -663,18 +688,19 @@ brw_rnd_mode_from_nir_op (const nir_op op) { } } -void -fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) +fs_reg +fs_visitor::prepare_alu_destination_and_sources(const fs_builder &bld, + nir_alu_instr *instr, + fs_reg *op, + bool need_dest) { - struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key; - fs_inst *inst; + fs_reg result = + need_dest ? 
get_nir_dest(instr->dest.dest) : bld.null_reg_ud(); - fs_reg result = get_nir_dest(instr->dest.dest); result.type = brw_type_for_nir_type(devinfo, (nir_alu_type)(nir_op_infos[instr->op].output_type | nir_dest_bit_size(instr->dest.dest))); - fs_reg op[4]; for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) { op[i] = get_nir_src(instr->src[i].src); op[i].type = brw_type_for_nir_type(devinfo, @@ -684,10 +710,113 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) op[i].negate = instr->src[i].negate; } - /* We get a bunch of mov's out of the from_ssa pass and they may still - * be vectorized. We'll handle them as a special-case. We'll also - * handle vecN here because it's basically the same thing. + /* Move and vecN instrutions may still be vectored. Return the raw, + * vectored source and destination so that fs_visitor::nir_emit_alu can + * handle it. Other callers should not have to handle these kinds of + * instructions. + */ + switch (instr->op) { + case nir_op_imov: + case nir_op_fmov: + case nir_op_vec2: + case nir_op_vec3: + case nir_op_vec4: + return result; + default: + break; + } + + /* At this point, we have dealt with any instruction that operates on + * more than a single channel. Therefore, we can just adjust the source + * and destination registers for that channel and emit the instruction. + */ + unsigned channel = 0; + if (nir_op_infos[instr->op].output_size == 0) { + /* Since NIR is doing the scalarizing for us, we should only ever see + * vectorized operations with a single channel. + */ + assert(util_bitcount(instr->dest.write_mask) == 1); + channel = ffs(instr->dest.write_mask) - 1; + + result = offset(result, bld, channel); + } + + for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) { + assert(nir_op_infos[instr->op].input_sizes[i] < 2); + op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]); + } + + return result; +} + +void +fs_visitor::resolve_inot_sources(const fs_builder &bld, nir_alu_instr *instr, + fs_reg *op) +{ + for (unsigned i = 0; i < 2; i++) { + nir_alu_instr *const inot_instr = + nir_src_as_alu_instr(&instr->src[i].src); + + if (inot_instr != NULL && inot_instr->op == nir_op_inot && + !inot_instr->src[0].abs && !inot_instr->src[0].negate) { + /* The source of the inot is now the source of instr. */ + prepare_alu_destination_and_sources(bld, inot_instr, &op[i], false); + + assert(!op[i].negate); + op[i].negate = true; + } else { + op[i] = resolve_source_modifiers(op[i]); + } + } +} + +bool +fs_visitor::try_emit_b2fi_of_inot(const fs_builder &bld, + fs_reg result, + nir_alu_instr *instr) +{ + if (devinfo->gen < 6 || devinfo->gen >= 12) + return false; + + nir_alu_instr *const inot_instr = nir_src_as_alu_instr(&instr->src[0].src); + + if (inot_instr == NULL || inot_instr->op != nir_op_inot) + return false; + + /* HF is also possible as a destination on BDW+. For nir_op_b2i, the set + * of valid size-changing combinations is a bit more complex. + * + * The source restriction is just because I was lazy about generating the + * constant below. + */ + if (nir_dest_bit_size(instr->dest.dest) != 32 || + nir_src_bit_size(inot_instr->src[0].src) != 32) + return false; + + /* b2[fi](inot(a)) maps a=0 => 1, a=-1 => 0. Since a can only be 0 or -1, + * this is float(1 + a). + */ + fs_reg op; + + prepare_alu_destination_and_sources(bld, inot_instr, &op, false); + + /* Ignore the saturate modifier, if there is one. The result of the + * arithmetic can only be 0 or 1, so the clamping will do nothing anyway. 
*/ + bld.ADD(result, op, brw_imm_d(1)); + + return true; +} + +void +fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) +{ + struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key; + fs_inst *inst; + + fs_reg op[4]; + fs_reg result = prepare_alu_destination_and_sources(bld, instr, op, true); + switch (instr->op) { case nir_op_imov: case nir_op_fmov: @@ -733,31 +862,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) } return; } - default: - break; - } - - /* At this point, we have dealt with any instruction that operates on - * more than a single channel. Therefore, we can just adjust the source - * and destination registers for that channel and emit the instruction. - */ - unsigned channel = 0; - if (nir_op_infos[instr->op].output_size == 0) { - /* Since NIR is doing the scalarizing for us, we should only ever see - * vectorized operations with a single channel. - */ - assert(util_bitcount(instr->dest.write_mask) == 1); - channel = ffs(instr->dest.write_mask) - 1; - - result = offset(result, bld, channel); - } - for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) { - assert(nir_op_infos[instr->op].input_sizes[i] < 2); - op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]); - } - - switch (instr->op) { case nir_op_i2f32: case nir_op_u2f32: if (optimize_extract_to_float(instr, result)) @@ -783,45 +888,38 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) */ case nir_op_f2f16: + case nir_op_i2f16: + case nir_op_u2f16: + assert(type_sz(op[0].type) < 8); /* brw_nir_lower_conversions */ inst = bld.MOV(result, op[0]); inst->saturate = instr->dest.saturate; break; - case nir_op_b2i: - case nir_op_b2f: - op[0].type = BRW_REGISTER_TYPE_D; - op[0].negate = !op[0].negate; - /* fallthrough */ case nir_op_f2f64: case nir_op_f2i64: case nir_op_f2u64: + assert(type_sz(op[0].type) > 2); /* brw_nir_lower_conversions */ + inst = bld.MOV(result, op[0]); + inst->saturate = instr->dest.saturate; + break; + + case nir_op_b2i8: + case nir_op_b2i16: + case nir_op_b2i32: + case nir_op_b2i64: + case nir_op_b2f16: + case nir_op_b2f32: + case nir_op_b2f64: + if (try_emit_b2fi_of_inot(bld, result, instr)) + break; + op[0].type = BRW_REGISTER_TYPE_D; + op[0].negate = !op[0].negate; + /* fallthrough */ case nir_op_i2f64: case nir_op_i2i64: case nir_op_u2f64: case nir_op_u2u64: - /* CHV PRM, vol07, 3D Media GPGPU Engine, Register Region Restrictions: - * - * "When source or destination is 64b (...), regioning in Align1 - * must follow these rules: - * - * 1. Source and destination horizontal stride must be aligned to - * the same qword. - * (...)" - * - * This means that conversions from bit-sizes smaller than 64-bit to - * 64-bit need to have the source data elements aligned to 64-bit. - * This restriction does not apply to BDW and later. 
- */ - if (nir_dest_bit_size(instr->dest.dest) == 64 && - nir_src_bit_size(instr->src[0].src) < 64 && - (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) { - fs_reg tmp = bld.vgrf(result.type, 1); - tmp = subscript(tmp, op[0].type, 0); - inst = bld.MOV(tmp, op[0]); - inst = bld.MOV(result, tmp); - inst->saturate = instr->dest.saturate; - break; - } + assert(type_sz(op[0].type) > 1); /* brw_nir_lower_conversions */ /* fallthrough */ case nir_op_f2f32: case nir_op_f2i32: @@ -832,8 +930,6 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) case nir_op_u2u32: case nir_op_i2i16: case nir_op_u2u16: - case nir_op_i2f16: - case nir_op_u2f16: case nir_op_i2i8: case nir_op_u2u8: inst = bld.MOV(result, op[0]); @@ -893,25 +989,6 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) break; } - case nir_op_isign: { - /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1). - * -> non-negative val generates 0x00000000. - * Predicated OR sets 1 if val is positive. - */ - uint32_t bit_size = nir_dest_bit_size(instr->dest.dest); - assert(bit_size == 32 || bit_size == 16); - - fs_reg zero = bit_size == 32 ? brw_imm_d(0) : brw_imm_w(0); - fs_reg one = bit_size == 32 ? brw_imm_d(1) : brw_imm_w(1); - fs_reg shift = bit_size == 32 ? brw_imm_d(31) : brw_imm_w(15); - - bld.CMP(bld.null_reg_d(), op[0], zero, BRW_CONDITIONAL_G); - bld.ASR(result, op[0], shift); - inst = bld.OR(result, result, one); - inst->predicate = BRW_PREDICATE_NORMAL; - break; - } - case nir_op_frcp: inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]); inst->saturate = instr->dest.saturate; @@ -976,11 +1053,21 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) inst->saturate = instr->dest.saturate; break; + case nir_op_uadd_sat: + inst = bld.ADD(result, op[0], op[1]); + inst->saturate = true; + break; + case nir_op_fmul: inst = bld.MUL(result, op[0], op[1]); inst->saturate = instr->dest.saturate; break; + case nir_op_imul_2x32_64: + case nir_op_umul_2x32_64: + bld.MUL(result, op[0], op[1]); + break; + case nir_op_imul: assert(nir_dest_bit_size(instr->dest.dest) < 64); bld.MUL(result, op[0], op[1]); @@ -1045,10 +1132,10 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) break; } - case nir_op_flt: - case nir_op_fge: - case nir_op_feq: - case nir_op_fne: { + case nir_op_flt32: + case nir_op_fge32: + case nir_op_feq32: + case nir_op_fne32: { fs_reg dest = result; const uint32_t bit_size = nir_src_bit_size(instr->src[0].src); @@ -1057,16 +1144,16 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) brw_conditional_mod cond; switch (instr->op) { - case nir_op_flt: + case nir_op_flt32: cond = BRW_CONDITIONAL_L; break; - case nir_op_fge: + case nir_op_fge32: cond = BRW_CONDITIONAL_GE; break; - case nir_op_feq: + case nir_op_feq32: cond = BRW_CONDITIONAL_Z; break; - case nir_op_fne: + case nir_op_fne32: cond = BRW_CONDITIONAL_NZ; break; default: @@ -1089,12 +1176,12 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) break; } - case nir_op_ilt: - case nir_op_ult: - case nir_op_ige: - case nir_op_uge: - case nir_op_ieq: - case nir_op_ine: { + case nir_op_ilt32: + case nir_op_ult32: + case nir_op_ige32: + case nir_op_uge32: + case nir_op_ieq32: + case nir_op_ine32: { fs_reg dest = result; const uint32_t bit_size = nir_src_bit_size(instr->src[0].src); @@ -1103,18 +1190,18 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) brw_conditional_mod cond; switch (instr->op) { - case nir_op_ilt: - case 
nir_op_ult: + case nir_op_ilt32: + case nir_op_ult32: cond = BRW_CONDITIONAL_L; break; - case nir_op_ige: - case nir_op_uge: + case nir_op_ige32: + case nir_op_uge32: cond = BRW_CONDITIONAL_GE; break; - case nir_op_ieq: + case nir_op_ieq32: cond = BRW_CONDITIONAL_Z; break; - case nir_op_ine: + case nir_op_ine32: cond = BRW_CONDITIONAL_NZ; break; default: @@ -1138,28 +1225,84 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) case nir_op_inot: if (devinfo->gen >= 8) { + nir_alu_instr *const inot_src_instr = + nir_src_as_alu_instr(&instr->src[0].src); + + if (inot_src_instr != NULL && + (inot_src_instr->op == nir_op_ior || + inot_src_instr->op == nir_op_ixor || + inot_src_instr->op == nir_op_iand) && + !inot_src_instr->src[0].abs && + !inot_src_instr->src[0].negate && + !inot_src_instr->src[1].abs && + !inot_src_instr->src[1].negate) { + /* The sources of the source logical instruction are now the + * sources of the instruction that will be generated. + */ + prepare_alu_destination_and_sources(bld, inot_src_instr, op, false); + resolve_inot_sources(bld, inot_src_instr, op); + + /* Smash all of the sources and destination to be signed. This + * doesn't matter for the operation of the instruction, but cmod + * propagation fails on unsigned sources with negation (due to + * fs_inst::can_do_cmod returning false). + */ + result.type = + brw_type_for_nir_type(devinfo, + (nir_alu_type)(nir_type_int | + nir_dest_bit_size(instr->dest.dest))); + op[0].type = + brw_type_for_nir_type(devinfo, + (nir_alu_type)(nir_type_int | + nir_src_bit_size(inot_src_instr->src[0].src))); + op[1].type = + brw_type_for_nir_type(devinfo, + (nir_alu_type)(nir_type_int | + nir_src_bit_size(inot_src_instr->src[1].src))); + + /* For XOR, only invert one of the sources. Arbitrarily choose + * the first source. 
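+          *
+          * This follows from De Morgan's laws: ~(a | b) == ~a & ~b and
+          * ~(a & b) == ~a | ~b need both sources inverted, while
+          * ~(a ^ b) == ~a ^ b needs only one.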
+ */ + op[0].negate = !op[0].negate; + if (inot_src_instr->op != nir_op_ixor) + op[1].negate = !op[1].negate; + + switch (inot_src_instr->op) { + case nir_op_ior: + bld.AND(result, op[0], op[1]); + return; + + case nir_op_iand: + bld.OR(result, op[0], op[1]); + return; + + case nir_op_ixor: + bld.XOR(result, op[0], op[1]); + return; + + default: + unreachable("impossible opcode"); + } + } op[0] = resolve_source_modifiers(op[0]); } bld.NOT(result, op[0]); break; case nir_op_ixor: if (devinfo->gen >= 8) { - op[0] = resolve_source_modifiers(op[0]); - op[1] = resolve_source_modifiers(op[1]); + resolve_inot_sources(bld, instr, op); } bld.XOR(result, op[0], op[1]); break; case nir_op_ior: if (devinfo->gen >= 8) { - op[0] = resolve_source_modifiers(op[0]); - op[1] = resolve_source_modifiers(op[1]); + resolve_inot_sources(bld, instr, op); } bld.OR(result, op[0], op[1]); break; case nir_op_iand: if (devinfo->gen >= 8) { - op[0] = resolve_source_modifiers(op[0]); - op[1] = resolve_source_modifiers(op[1]); + resolve_inot_sources(bld, instr, op); } bld.AND(result, op[0], op[1]); break; @@ -1167,18 +1310,18 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) case nir_op_fdot2: case nir_op_fdot3: case nir_op_fdot4: - case nir_op_ball_fequal2: - case nir_op_ball_iequal2: - case nir_op_ball_fequal3: - case nir_op_ball_iequal3: - case nir_op_ball_fequal4: - case nir_op_ball_iequal4: - case nir_op_bany_fnequal2: - case nir_op_bany_inequal2: - case nir_op_bany_fnequal3: - case nir_op_bany_inequal3: - case nir_op_bany_fnequal4: - case nir_op_bany_inequal4: + case nir_op_b32all_fequal2: + case nir_op_b32all_iequal2: + case nir_op_b32all_fequal3: + case nir_op_b32all_iequal3: + case nir_op_b32all_fequal4: + case nir_op_b32all_iequal4: + case nir_op_b32any_fnequal2: + case nir_op_b32any_inequal2: + case nir_op_b32any_fnequal3: + case nir_op_b32any_inequal3: + case nir_op_b32any_fnequal4: + case nir_op_b32any_inequal4: unreachable("Lowered by nir_lower_alu_reductions"); case nir_op_fnoise1_1: @@ -1212,15 +1355,15 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) inst->saturate = instr->dest.saturate; break; - case nir_op_i2b: - case nir_op_f2b: { + case nir_op_i2b32: + case nir_op_f2b32: { uint32_t bit_size = nir_src_bit_size(instr->src[0].src); if (bit_size == 64) { /* two-argument instructions can't take 64-bit immediates */ fs_reg zero; fs_reg tmp; - if (instr->op == nir_op_f2b) { + if (instr->op == nir_op_f2b32) { zero = vgrf(glsl_type::double_type); tmp = vgrf(glsl_type::double_type); bld.MOV(zero, setup_imm_df(bld, 0.0)); @@ -1239,10 +1382,10 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) } else { fs_reg zero; if (bit_size == 32) { - zero = instr->op == nir_op_f2b ? brw_imm_f(0.0f) : brw_imm_d(0); + zero = instr->op == nir_op_f2b32 ? brw_imm_f(0.0f) : brw_imm_d(0); } else { assert(bit_size == 16); - zero = instr->op == nir_op_f2b ? + zero = instr->op == nir_op_f2b32 ? 
retype(brw_imm_w(0), BRW_REGISTER_TYPE_HF) : brw_imm_w(0); } bld.CMP(result, op[0], zero, BRW_CONDITIONAL_NZ); @@ -1332,11 +1475,13 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) unreachable("not reached: should be handled by lower_packing_builtins"); case nir_op_unpack_half_2x16_split_x: - inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]); + inst = bld.emit(BRW_OPCODE_F16TO32, result, + subscript(op[0], BRW_REGISTER_TYPE_UW, 0)); inst->saturate = instr->dest.saturate; break; case nir_op_unpack_half_2x16_split_y: - inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]); + inst = bld.emit(BRW_OPCODE_F16TO32, result, + subscript(op[0], BRW_REGISTER_TYPE_UW, 1)); inst->saturate = instr->dest.saturate; break; @@ -1452,36 +1597,14 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) unreachable("not reached: should have been lowered"); case nir_op_ishl: + bld.SHL(result, op[0], op[1]); + break; case nir_op_ishr: - case nir_op_ushr: { - fs_reg shift_count = op[1]; - - if (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo)) { - if (op[1].file == VGRF && - (result.type == BRW_REGISTER_TYPE_Q || - result.type == BRW_REGISTER_TYPE_UQ)) { - shift_count = fs_reg(VGRF, alloc.allocate(dispatch_width / 4), - BRW_REGISTER_TYPE_UD); - shift_count.stride = 2; - bld.MOV(shift_count, op[1]); - } - } - - switch (instr->op) { - case nir_op_ishl: - bld.SHL(result, op[0], shift_count); - break; - case nir_op_ishr: - bld.ASR(result, op[0], shift_count); - break; - case nir_op_ushr: - bld.SHR(result, op[0], shift_count); - break; - default: - unreachable("not reached"); - } + bld.ASR(result, op[0], op[1]); + break; + case nir_op_ushr: + bld.SHR(result, op[0], op[1]); break; - } case nir_op_pack_half_2x16_split: bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]); @@ -1497,7 +1620,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) inst->saturate = instr->dest.saturate; break; - case nir_op_bcsel: + case nir_op_b32csel: if (optimize_frontfacing_ternary(instr, result)) return; @@ -1517,16 +1640,25 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr) * Use two instructions and a word or DWord intermediate integer type. */ if (nir_dest_bit_size(instr->dest.dest) == 64) { - const brw_reg_type type = brw_int_type(2, instr->op == nir_op_extract_i8); + const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8); if (instr->op == nir_op_extract_i8) { /* If we need to sign extend, extract to a word first */ fs_reg w_temp = bld.vgrf(BRW_REGISTER_TYPE_W); bld.MOV(w_temp, subscript(op[0], type, byte)); bld.MOV(result, w_temp); + } else if (byte & 1) { + /* Extract the high byte from the word containing the desired byte + * offset. 
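+             * Shifting that word right by 8 leaves the high byte in the low
+             * bits.  This path is only reached for nir_op_extract_u8 (the
+             * signed case is handled above), so zero extension is what we
+             * want.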
+ */ + bld.SHR(result, + subscript(op[0], BRW_REGISTER_TYPE_UW, byte / 2), + brw_imm_uw(8)); } else { /* Otherwise use an AND with 0xff and a word type */ - bld.AND(result, subscript(op[0], type, byte / 2), brw_imm_uw(0xff)); + bld.AND(result, + subscript(op[0], BRW_REGISTER_TYPE_UW, byte / 2), + brw_imm_uw(0xff)); } } else { const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8); @@ -1570,17 +1702,17 @@ fs_visitor::nir_emit_load_const(const fs_builder &bld, switch (instr->def.bit_size) { case 8: for (unsigned i = 0; i < instr->def.num_components; i++) - bld.MOV(offset(reg, bld, i), setup_imm_b(bld, instr->value.i8[i])); + bld.MOV(offset(reg, bld, i), setup_imm_b(bld, instr->value[i].i8)); break; case 16: for (unsigned i = 0; i < instr->def.num_components; i++) - bld.MOV(offset(reg, bld, i), brw_imm_w(instr->value.i16[i])); + bld.MOV(offset(reg, bld, i), brw_imm_w(instr->value[i].i16)); break; case 32: for (unsigned i = 0; i < instr->def.num_components; i++) - bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value.i32[i])); + bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value[i].i32)); break; case 64: @@ -1589,11 +1721,11 @@ fs_visitor::nir_emit_load_const(const fs_builder &bld, /* We don't get 64-bit integer types until gen8 */ for (unsigned i = 0; i < instr->def.num_components; i++) { bld.MOV(retype(offset(reg, bld, i), BRW_REGISTER_TYPE_DF), - setup_imm_df(bld, instr->value.f64[i])); + setup_imm_df(bld, instr->value[i].f64)); } } else { for (unsigned i = 0; i < instr->def.num_components; i++) - bld.MOV(offset(reg, bld, i), brw_imm_q(instr->value.i64[i])); + bld.MOV(offset(reg, bld, i), brw_imm_q(instr->value[i].i64)); } break; @@ -1873,7 +2005,7 @@ fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count) } /* Store the control data bits in the message payload and send it. */ - int mlen = 2; + unsigned mlen = 2; if (channel_mask.file != BAD_FILE) mlen += 4; /* channel masks, plus 3 extra copies of the data */ if (per_slot_offset.file != BAD_FILE) @@ -1881,7 +2013,7 @@ fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count) fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen); fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen); - int i = 0; + unsigned i = 0; sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD)); if (per_slot_offset.file != BAD_FILE) sources[i++] = per_slot_offset; @@ -2250,107 +2382,6 @@ fs_visitor::get_indirect_offset(nir_intrinsic_instr *instr) return get_nir_src(*offset_src); } -static void -do_untyped_vector_read(const fs_builder &bld, - const fs_reg dest, - const fs_reg surf_index, - const fs_reg offset_reg, - unsigned num_components) -{ - if (type_sz(dest.type) <= 2) { - assert(dest.stride == 1); - boolean is_const_offset = offset_reg.file == BRW_IMMEDIATE_VALUE; - - if (is_const_offset) { - uint32_t start = offset_reg.ud & ~3; - uint32_t end = offset_reg.ud + num_components * type_sz(dest.type); - end = ALIGN(end, 4); - assert (end - start <= 16); - - /* At this point we have 16-bit component/s that have constant - * offset aligned to 4-bytes that can be read with untyped_reads. - * untyped_read message requires 32-bit aligned offsets. 
- */ - unsigned first_component = (offset_reg.ud & 3) / type_sz(dest.type); - unsigned num_components_32bit = (end - start) / 4; - - fs_reg read_result = - emit_untyped_read(bld, surf_index, brw_imm_ud(start), - 1 /* dims */, - num_components_32bit, - BRW_PREDICATE_NONE); - shuffle_from_32bit_read(bld, dest, read_result, first_component, - num_components); - } else { - fs_reg read_offset = bld.vgrf(BRW_REGISTER_TYPE_UD); - for (unsigned i = 0; i < num_components; i++) { - if (i == 0) { - bld.MOV(read_offset, offset_reg); - } else { - bld.ADD(read_offset, offset_reg, - brw_imm_ud(i * type_sz(dest.type))); - } - /* Non constant offsets are not guaranteed to be aligned 32-bits - * so they are read using one byte_scattered_read message - * for each component. - */ - fs_reg read_result = - emit_byte_scattered_read(bld, surf_index, read_offset, - 1 /* dims */, 1, - type_sz(dest.type) * 8 /* bit_size */, - BRW_PREDICATE_NONE); - bld.MOV(offset(dest, bld, i), - subscript (read_result, dest.type, 0)); - } - } - } else if (type_sz(dest.type) == 4) { - fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg, - 1 /* dims */, - num_components, - BRW_PREDICATE_NONE); - read_result.type = dest.type; - for (unsigned i = 0; i < num_components; i++) - bld.MOV(offset(dest, bld, i), offset(read_result, bld, i)); - } else if (type_sz(dest.type) == 8) { - /* Reading a dvec, so we need to: - * - * 1. Multiply num_components by 2, to account for the fact that we - * need to read 64-bit components. - * 2. Shuffle the result of the load to form valid 64-bit elements - * 3. Emit a second load (for components z/w) if needed. - */ - fs_reg read_offset = bld.vgrf(BRW_REGISTER_TYPE_UD); - bld.MOV(read_offset, offset_reg); - - int iters = num_components <= 2 ? 1 : 2; - - /* Load the dvec, the first iteration loads components x/y, the second - * iteration, if needed, loads components z/w - */ - for (int it = 0; it < iters; it++) { - /* Compute number of components to read in this iteration */ - int iter_components = MIN2(2, num_components); - num_components -= iter_components; - - /* Read. Since this message reads 32-bit components, we need to - * read twice as many components. - */ - fs_reg read_result = emit_untyped_read(bld, surf_index, read_offset, - 1 /* dims */, - iter_components * 2, - BRW_PREDICATE_NONE); - - /* Shuffle the 32-bit load result into valid 64-bit data */ - shuffle_from_32bit_read(bld, offset(dest, bld, it * 2), - read_result, 0, iter_components); - - bld.ADD(read_offset, read_offset, brw_imm_ud(16)); - } - } else { - unreachable("Unsupported type"); - } -} - void fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr) @@ -2434,16 +2465,26 @@ fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld, /* Zero the message header */ bld.exec_all().MOV(m0, brw_imm_ud(0u)); - /* Copy "Barrier ID" from r0.2, bits 16:13 */ - chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD), - brw_imm_ud(INTEL_MASK(16, 13))); + if (devinfo->gen < 11) { + /* Copy "Barrier ID" from r0.2, bits 16:13 */ + chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD), + brw_imm_ud(INTEL_MASK(16, 13))); - /* Shift it up to bits 27:24. */ - chanbld.SHL(m0_2, m0_2, brw_imm_ud(11)); + /* Shift it up to bits 27:24. 
*/ + chanbld.SHL(m0_2, m0_2, brw_imm_ud(11)); + } else { + chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD), + brw_imm_ud(INTEL_MASK(30, 24))); + } /* Set the Barrier Count and the enable bit */ - chanbld.OR(m0_2, m0_2, - brw_imm_ud(tcs_prog_data->instances << 9 | (1 << 15))); + if (devinfo->gen < 11) { + chanbld.OR(m0_2, m0_2, + brw_imm_ud(tcs_prog_data->instances << 9 | (1 << 15))); + } else { + chanbld.OR(m0_2, m0_2, + brw_imm_ud(tcs_prog_data->instances << 8 | (1 << 15))); + } bld.emit(SHADER_OPCODE_BARRIER, bld.null_reg_ud(), m0); break; @@ -3029,10 +3070,6 @@ fs_visitor::emit_non_coherent_fb_read(const fs_builder &bld, const fs_reg &dst, wm_prog_data->binding_table.render_target_read_start - wm_prog_data->base.binding_table.texture_start; - brw_mark_surface_used( - bld.shader->stage_prog_data, - wm_prog_data->binding_table.render_target_read_start + target); - /* Calculate the fragment coordinates. */ const fs_reg coords = bld.vgrf(BRW_REGISTER_TYPE_UD, 3); bld.MOV(offset(coords, bld, 0), pixel_x); @@ -3062,11 +3099,15 @@ fs_visitor::emit_non_coherent_fb_read(const fs_builder &bld, const fs_reg &dst, SHADER_OPCODE_TXF_CMS_LOGICAL; /* Emit the instruction. */ - const fs_reg srcs[] = { coords, fs_reg(), brw_imm_ud(0), fs_reg(), - sample, mcs, - brw_imm_ud(surface), brw_imm_ud(0), - fs_reg(), brw_imm_ud(3), brw_imm_ud(0) }; - STATIC_ASSERT(ARRAY_SIZE(srcs) == TEX_LOGICAL_NUM_SRCS); + fs_reg srcs[TEX_LOGICAL_NUM_SRCS]; + srcs[TEX_LOGICAL_SRC_COORDINATE] = coords; + srcs[TEX_LOGICAL_SRC_LOD] = brw_imm_ud(0); + srcs[TEX_LOGICAL_SRC_SAMPLE_INDEX] = sample; + srcs[TEX_LOGICAL_SRC_MCS] = mcs; + srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(surface); + srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_ud(0); + srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_ud(3); + srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_ud(0); fs_inst *inst = bld.emit(op, dst, srcs, ARRAY_SIZE(srcs)); inst->size_written = 4 * inst->dst.component_size(inst->exec_size); @@ -3360,8 +3401,8 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld, if (const_offset) { assert(nir_src_bit_size(instr->src[0]) == 32); - unsigned off_x = MIN2((int)(const_offset->f32[0] * 16), 7) & 0xf; - unsigned off_y = MIN2((int)(const_offset->f32[1] * 16), 7) & 0xf; + unsigned off_x = MIN2((int)(const_offset[0].f32 * 16), 7) & 0xf; + unsigned off_y = MIN2((int)(const_offset[1].f32 * 16), 7) & 0xf; emit_pixel_interpolater_send(bld, FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, @@ -3513,19 +3554,16 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld, cs_prog_data->uses_num_work_groups = true; - fs_reg surf_index = brw_imm_ud(surface); - brw_mark_surface_used(prog_data, surface); + fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS]; + srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(surface); + srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1); + srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(1); /* num components */ /* Read the 3 GLuint components of gl_NumWorkGroups */ for (unsigned i = 0; i < 3; i++) { - fs_reg read_result = - emit_untyped_read(bld, surf_index, - brw_imm_ud(i << 2), - 1 /* dims */, 1 /* size */, - BRW_PREDICATE_NONE); - read_result.type = dest.type; - bld.MOV(dest, read_result); - dest = offset(dest, bld, 1); + srcs[SURFACE_LOGICAL_SRC_ADDRESS] = brw_imm_ud(i << 2); + bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL, + offset(dest, bld, i), srcs, SURFACE_LOGICAL_NUM_SRCS); } break; } @@ -3572,93 +3610,71 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld, case nir_intrinsic_load_shared: { assert(devinfo->gen >= 7); + 
assert(stage == MESA_SHADER_COMPUTE); - fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM); + const unsigned bit_size = nir_dest_bit_size(instr->dest); + fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS]; + srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GEN7_BTI_SLM); + srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[0]); + srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1); - /* Get the offset to read from */ - fs_reg offset_reg; - if (nir_src_is_const(instr->src[0])) { - offset_reg = brw_imm_ud(instr->const_index[0] + - nir_src_as_uint(instr->src[0])); - } else { - offset_reg = vgrf(glsl_type::uint_type); - bld.ADD(offset_reg, - retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD), - brw_imm_ud(instr->const_index[0])); - } + /* Make dest unsigned because that's what the temporary will be */ + dest.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD); /* Read the vector */ - do_untyped_vector_read(bld, dest, surf_index, offset_reg, - instr->num_components); + if (nir_intrinsic_align(instr) >= 4) { + assert(nir_dest_bit_size(instr->dest) == 32); + srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components); + fs_inst *inst = + bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL, + dest, srcs, SURFACE_LOGICAL_NUM_SRCS); + inst->size_written = instr->num_components * dispatch_width * 4; + } else { + assert(nir_dest_bit_size(instr->dest) <= 32); + assert(nir_dest_num_components(instr->dest) == 1); + srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size); + + fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD); + bld.emit(SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL, + read_result, srcs, SURFACE_LOGICAL_NUM_SRCS); + bld.MOV(dest, read_result); + } break; } case nir_intrinsic_store_shared: { assert(devinfo->gen >= 7); + assert(stage == MESA_SHADER_COMPUTE); - /* Block index */ - fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM); - - /* Value */ - fs_reg val_reg = get_nir_src(instr->src[0]); + const unsigned bit_size = nir_src_bit_size(instr->src[0]); + fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS]; + srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GEN7_BTI_SLM); + srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]); + srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1); - /* Writemask */ - unsigned writemask = instr->const_index[1]; + fs_reg data = get_nir_src(instr->src[0]); + data.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD); - /* get_nir_src() retypes to integer. Be wary of 64-bit types though - * since the untyped writes below operate in units of 32-bits, which - * means that we need to write twice as many components each time. - * Also, we have to suffle 64-bit data to be in the appropriate layout - * expected by our 32-bit write messages. - */ - unsigned type_size = 4; - if (nir_src_bit_size(instr->src[0]) == 64) { - type_size = 8; - val_reg = shuffle_for_32bit_write(bld, val_reg, 0, - instr->num_components); - } - - unsigned type_slots = type_size / 4; - - /* Combine groups of consecutive enabled channels in one write - * message. We use ffs to find the first enabled channel and then ffs on - * the bit-inverse, down-shifted writemask to determine the length of - * the block of enabled bits. - */ - while (writemask) { - unsigned first_component = ffs(writemask) - 1; - unsigned length = ffs(~(writemask >> first_component)) - 1; - - /* We can't write more than 2 64-bit components at once. 
Limit the - * length of the write to what we can do and let the next iteration - * handle the rest - */ - if (type_size > 4) - length = MIN2(2, length); - - fs_reg offset_reg; - if (nir_src_is_const(instr->src[1])) { - offset_reg = brw_imm_ud(instr->const_index[0] + - nir_src_as_uint(instr->src[1]) + - type_size * first_component); - } else { - offset_reg = vgrf(glsl_type::uint_type); - bld.ADD(offset_reg, - retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_UD), - brw_imm_ud(instr->const_index[0] + type_size * first_component)); - } + assert(nir_intrinsic_write_mask(instr) == + (1u << instr->num_components) - 1); + if (nir_intrinsic_align(instr) >= 4) { + assert(nir_src_bit_size(instr->src[0]) == 32); + assert(nir_src_num_components(instr->src[0]) <= 4); + srcs[SURFACE_LOGICAL_SRC_DATA] = data; + srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components); + bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL, + fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS); + } else { + assert(nir_src_bit_size(instr->src[0]) <= 32); + assert(nir_src_num_components(instr->src[0]) == 1); + srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size); - emit_untyped_write(bld, surf_index, offset_reg, - offset(val_reg, bld, first_component * type_slots), - 1 /* dims */, length * type_slots, - BRW_PREDICATE_NONE); + srcs[SURFACE_LOGICAL_SRC_DATA] = bld.vgrf(BRW_REGISTER_TYPE_UD); + bld.MOV(srcs[SURFACE_LOGICAL_SRC_DATA], data); - /* Clear the bits in the writemask that we just wrote, then try - * again to see if more channels are left. - */ - writemask &= (15 << (first_component + length)); + bld.emit(SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL, + fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS); } - break; } @@ -3676,14 +3692,14 @@ brw_nir_reduction_op_identity(const fs_builder &bld, switch (type_sz(type)) { case 2: assert(type != BRW_REGISTER_TYPE_HF); - return retype(brw_imm_uw(value.u16[0]), type); + return retype(brw_imm_uw(value.u16), type); case 4: - return retype(brw_imm_ud(value.u32[0]), type); + return retype(brw_imm_ud(value.u32), type); case 8: if (type == BRW_REGISTER_TYPE_DF) - return setup_imm_df(bld, value.f64[0]); + return setup_imm_df(bld, value.f64); else - return retype(brw_imm_u64(value.u64[0]), type); + return retype(brw_imm_u64(value.u64), type); default: unreachable("Invalid type size"); } @@ -3763,21 +3779,13 @@ fs_visitor::get_nir_ssbo_intrinsic_index(const brw::fs_builder &bld, unsigned index = stage_prog_data->binding_table.ssbo_start + nir_src_as_uint(instr->src[src]); surf_index = brw_imm_ud(index); - brw_mark_surface_used(prog_data, index); } else { surf_index = vgrf(glsl_type::uint_type); bld.ADD(surf_index, get_nir_src(instr->src[src]), brw_imm_ud(stage_prog_data->binding_table.ssbo_start)); - - /* Assume this may touch any UBO. It would be nice to provide - * a tighter bound, but the array information is already lowered away. - */ - brw_mark_surface_used(prog_data, - stage_prog_data->binding_table.ssbo_start + - nir->info.num_ssbos - 1); } - return surf_index; + return bld.emit_uniformize(surf_index); } static unsigned @@ -3825,24 +3833,27 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr /* Get some metadata from the image intrinsic. */ const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; - const unsigned dims = image_intrinsic_coord_components(instr); const GLenum format = nir_intrinsic_format(instr); - const unsigned dest_components = nir_intrinsic_dest_components(instr); - /* Get the arguments of the image intrinsic. 
*/ - const fs_reg image = get_nir_image_intrinsic_image(bld, instr); - const fs_reg coords = retype(get_nir_src(instr->src[1]), - BRW_REGISTER_TYPE_UD); - fs_reg tmp; + fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS]; + srcs[SURFACE_LOGICAL_SRC_SURFACE] = + get_nir_image_intrinsic_image(bld, instr); + srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]); + srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = + brw_imm_ud(image_intrinsic_coord_components(instr)); /* Emit an image load, store or atomic op. */ if (instr->intrinsic == nir_intrinsic_image_load) { - tmp = emit_typed_read(bld, image, coords, dims, - instr->num_components); + srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components); + fs_inst *inst = + bld.emit(SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL, + dest, srcs, SURFACE_LOGICAL_NUM_SRCS); + inst->size_written = instr->num_components * dispatch_width * 4; } else if (instr->intrinsic == nir_intrinsic_image_store) { - const fs_reg src0 = get_nir_src(instr->src[3]); - emit_typed_write(bld, image, coords, src0, dims, - instr->num_components); + srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components); + srcs[SURFACE_LOGICAL_SRC_DATA] = get_nir_src(instr->src[3]); + bld.emit(SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL, + fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS); } else { int op; unsigned num_srcs = info->num_srcs; @@ -3883,18 +3894,21 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr unreachable("Not reachable."); } - const fs_reg src0 = (num_srcs >= 4 ? - get_nir_src(instr->src[3]) : fs_reg()); - const fs_reg src1 = (num_srcs >= 5 ? - get_nir_src(instr->src[4]) : fs_reg()); + srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op); - tmp = emit_typed_atomic(bld, image, coords, src0, src1, dims, 1, op); - } + fs_reg data; + if (num_srcs >= 4) + data = get_nir_src(instr->src[3]); + if (num_srcs >= 5) { + fs_reg tmp = bld.vgrf(data.type, 2); + fs_reg sources[2] = { data, get_nir_src(instr->src[4]) }; + bld.LOAD_PAYLOAD(tmp, sources, 2, 0); + data = tmp; + } + srcs[SURFACE_LOGICAL_SRC_DATA] = data; - /* Assign the result. */ - for (unsigned c = 0; c < dest_components; ++c) { - bld.MOV(offset(retype(dest, tmp.type), bld, c), - offset(tmp, bld, c)); + bld.emit(SHADER_OPCODE_TYPED_ATOMIC_LOGICAL, + dest, srcs, SURFACE_LOGICAL_NUM_SRCS); } break; } @@ -3907,18 +3921,20 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr BRW_REGISTER_TYPE_UD); image = bld.emit_uniformize(image); + fs_reg srcs[TEX_LOGICAL_NUM_SRCS]; + srcs[TEX_LOGICAL_SRC_SURFACE] = image; + srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_d(0); + srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(0); + srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(0); + /* Since the image size is always uniform, we can just emit a SIMD8 * query instruction and splat the result out. 
*/ const fs_builder ubld = bld.exec_all().group(8, 0); - /* The LOD also serves as the message payload */ - fs_reg lod = ubld.vgrf(BRW_REGISTER_TYPE_UD); - ubld.MOV(lod, brw_imm_ud(0)); - fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 4); - fs_inst *inst = ubld.emit(SHADER_OPCODE_IMAGE_SIZE, tmp, lod, image); - inst->mlen = 1; + fs_inst *inst = ubld.emit(SHADER_OPCODE_IMAGE_SIZE_LOGICAL, + tmp, srcs, ARRAY_SIZE(srcs)); inst->size_written = 4 * REG_SIZE; for (unsigned c = 0; c < instr->dest.ssa.num_components; ++c) { @@ -3935,31 +3951,34 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr } case nir_intrinsic_image_load_raw_intel: { - const fs_reg image = get_nir_image_intrinsic_image(bld, instr); - const fs_reg addr = retype(get_nir_src(instr->src[1]), - BRW_REGISTER_TYPE_UD); - - fs_reg tmp = emit_untyped_read(bld, image, addr, 1, - instr->num_components); + fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS]; + srcs[SURFACE_LOGICAL_SRC_SURFACE] = + get_nir_image_intrinsic_image(bld, instr); + srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]); + srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1); + srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components); - for (unsigned c = 0; c < instr->num_components; ++c) { - bld.MOV(offset(retype(dest, tmp.type), bld, c), - offset(tmp, bld, c)); - } + fs_inst *inst = + bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL, + dest, srcs, SURFACE_LOGICAL_NUM_SRCS); + inst->size_written = instr->num_components * dispatch_width * 4; break; } case nir_intrinsic_image_store_raw_intel: { - const fs_reg image = get_nir_image_intrinsic_image(bld, instr); - const fs_reg addr = retype(get_nir_src(instr->src[1]), - BRW_REGISTER_TYPE_UD); - const fs_reg data = retype(get_nir_src(instr->src[2]), - BRW_REGISTER_TYPE_UD); + if (stage == MESA_SHADER_FRAGMENT) + brw_wm_prog_data(prog_data)->has_side_effects = true; - brw_wm_prog_data(prog_data)->has_side_effects = true; + fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS]; + srcs[SURFACE_LOGICAL_SRC_SURFACE] = + get_nir_image_intrinsic_image(bld, instr); + srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]); + srcs[SURFACE_LOGICAL_SRC_DATA] = get_nir_src(instr->src[2]); + srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1); + srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components); - emit_untyped_write(bld, image, addr, data, 1, - instr->num_components); + bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL, + fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS); break; } @@ -4060,7 +4079,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr const unsigned index = stage_prog_data->binding_table.ubo_start + nir_src_as_uint(instr->src[0]); surf_index = brw_imm_ud(index); - brw_mark_surface_used(prog_data, index); } else { /* The block index is not a constant. Evaluate the index expression * per-channel and add the base UBO index; we have to select a value @@ -4070,13 +4088,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr bld.ADD(surf_index, get_nir_src(instr->src[0]), brw_imm_ud(stage_prog_data->binding_table.ubo_start)); surf_index = bld.emit_uniformize(surf_index); - - /* Assume this may touch any UBO. It would be nice to provide - * a tighter bound, but the array information is already lowered away. 
- */ - brw_mark_surface_used(prog_data, - stage_prog_data->binding_table.ubo_start + - nir->info.num_ubos - 1); } if (!nir_src_is_const(instr->src[1])) { @@ -4152,16 +4163,135 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr break; } + case nir_intrinsic_load_global: { + assert(devinfo->gen >= 8); + + if (nir_intrinsic_align(instr) >= 4) { + assert(nir_dest_bit_size(instr->dest) == 32); + fs_inst *inst = bld.emit(SHADER_OPCODE_A64_UNTYPED_READ_LOGICAL, + dest, + get_nir_src(instr->src[0]), /* Address */ + fs_reg(), /* No source data */ + brw_imm_ud(instr->num_components)); + inst->size_written = instr->num_components * + inst->dst.component_size(inst->exec_size); + } else { + const unsigned bit_size = nir_dest_bit_size(instr->dest); + assert(bit_size <= 32); + assert(nir_dest_num_components(instr->dest) == 1); + brw_reg_type data_type = + brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD); + fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD); + bld.emit(SHADER_OPCODE_A64_BYTE_SCATTERED_READ_LOGICAL, + tmp, + get_nir_src(instr->src[0]), /* Address */ + fs_reg(), /* No source data */ + brw_imm_ud(bit_size)); + bld.MOV(retype(dest, data_type), tmp); + } + break; + } + + case nir_intrinsic_store_global: + assert(devinfo->gen >= 8); + + if (stage == MESA_SHADER_FRAGMENT) + brw_wm_prog_data(prog_data)->has_side_effects = true; + + if (nir_intrinsic_align(instr) >= 4) { + assert(nir_src_bit_size(instr->src[0]) == 32); + bld.emit(SHADER_OPCODE_A64_UNTYPED_WRITE_LOGICAL, + fs_reg(), + get_nir_src(instr->src[1]), /* Address */ + get_nir_src(instr->src[0]), /* Data */ + brw_imm_ud(instr->num_components)); + } else { + const unsigned bit_size = nir_src_bit_size(instr->src[0]); + assert(bit_size <= 32); + assert(nir_src_num_components(instr->src[0]) == 1); + brw_reg_type data_type = + brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD); + fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD); + bld.MOV(tmp, retype(get_nir_src(instr->src[0]), data_type)); + bld.emit(SHADER_OPCODE_A64_BYTE_SCATTERED_WRITE_LOGICAL, + fs_reg(), + get_nir_src(instr->src[1]), /* Address */ + tmp, /* Data */ + brw_imm_ud(nir_src_bit_size(instr->src[0]))); + } + break; + + case nir_intrinsic_global_atomic_add: + nir_emit_global_atomic(bld, get_op_for_atomic_add(instr, 1), instr); + break; + case nir_intrinsic_global_atomic_imin: + nir_emit_global_atomic(bld, BRW_AOP_IMIN, instr); + break; + case nir_intrinsic_global_atomic_umin: + nir_emit_global_atomic(bld, BRW_AOP_UMIN, instr); + break; + case nir_intrinsic_global_atomic_imax: + nir_emit_global_atomic(bld, BRW_AOP_IMAX, instr); + break; + case nir_intrinsic_global_atomic_umax: + nir_emit_global_atomic(bld, BRW_AOP_UMAX, instr); + break; + case nir_intrinsic_global_atomic_and: + nir_emit_global_atomic(bld, BRW_AOP_AND, instr); + break; + case nir_intrinsic_global_atomic_or: + nir_emit_global_atomic(bld, BRW_AOP_OR, instr); + break; + case nir_intrinsic_global_atomic_xor: + nir_emit_global_atomic(bld, BRW_AOP_XOR, instr); + break; + case nir_intrinsic_global_atomic_exchange: + nir_emit_global_atomic(bld, BRW_AOP_MOV, instr); + break; + case nir_intrinsic_global_atomic_comp_swap: + nir_emit_global_atomic(bld, BRW_AOP_CMPWR, instr); + break; + case nir_intrinsic_global_atomic_fmin: + nir_emit_global_atomic_float(bld, BRW_AOP_FMIN, instr); + break; + case nir_intrinsic_global_atomic_fmax: + nir_emit_global_atomic_float(bld, BRW_AOP_FMAX, instr); + break; + case nir_intrinsic_global_atomic_fcomp_swap: + nir_emit_global_atomic_float(bld, 
BRW_AOP_FCMPWR, instr); + break; + case nir_intrinsic_load_ssbo: { assert(devinfo->gen >= 7); - fs_reg surf_index = get_nir_ssbo_intrinsic_index(bld, instr); - fs_reg offset_reg = get_nir_src_imm(instr->src[1]); + const unsigned bit_size = nir_dest_bit_size(instr->dest); + fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS]; + srcs[SURFACE_LOGICAL_SRC_SURFACE] = + get_nir_ssbo_intrinsic_index(bld, instr); + srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]); + srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1); + + /* Make dest unsigned because that's what the temporary will be */ + dest.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD); /* Read the vector */ - do_untyped_vector_read(bld, dest, surf_index, offset_reg, - instr->num_components); + if (nir_intrinsic_align(instr) >= 4) { + assert(nir_dest_bit_size(instr->dest) == 32); + srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components); + fs_inst *inst = + bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL, + dest, srcs, SURFACE_LOGICAL_NUM_SRCS); + inst->size_written = instr->num_components * dispatch_width * 4; + } else { + assert(nir_dest_bit_size(instr->dest) <= 32); + assert(nir_dest_num_components(instr->dest) == 1); + srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size); + fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD); + bld.emit(SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL, + read_result, srcs, SURFACE_LOGICAL_NUM_SRCS); + bld.MOV(dest, read_result); + } break; } @@ -4171,125 +4301,35 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr if (stage == MESA_SHADER_FRAGMENT) brw_wm_prog_data(prog_data)->has_side_effects = true; - fs_reg surf_index = get_nir_ssbo_intrinsic_index(bld, instr); - - /* Value */ - fs_reg val_reg = get_nir_src(instr->src[0]); - - /* Writemask */ - unsigned writemask = instr->const_index[0]; - - /* get_nir_src() retypes to integer. Be wary of 64-bit types though - * since the untyped writes below operate in units of 32-bits, which - * means that we need to write twice as many components each time. - * Also, we have to suffle 64-bit data to be in the appropriate layout - * expected by our 32-bit write messages. - */ - unsigned bit_size = nir_src_bit_size(instr->src[0]); - unsigned type_size = bit_size / 8; - - /* Combine groups of consecutive enabled channels in one write - * message. We use ffs to find the first enabled channel and then ffs on - * the bit-inverse, down-shifted writemask to determine the num_components - * of the block of enabled bits. - */ - while (writemask) { - unsigned first_component = ffs(writemask) - 1; - unsigned num_components = ffs(~(writemask >> first_component)) - 1; - fs_reg write_src = offset(val_reg, bld, first_component); - - if (type_size > 4) { - /* We can't write more than 2 64-bit components at once. Limit - * the num_components of the write to what we can do and let the next - * iteration handle the rest. - */ - num_components = MIN2(2, num_components); - write_src = shuffle_for_32bit_write(bld, write_src, 0, - num_components); - } else if (type_size < 4) { - /* For 16-bit types we pack two consecutive values into a 32-bit - * word and use an untyped write message. For single values or not - * 32-bit-aligned we need to use byte-scattered writes because - * untyped writes works with 32-bit components with 32-bit - * alignment. byte_scattered_write messages only support one - * 16-bit component at a time. 
As VK_KHR_relaxed_block_layout - * could be enabled we can not guarantee that not constant offsets - * to be 32-bit aligned for 16-bit types. For example an array, of - * 16-bit vec3 with array element stride of 6. - * - * In the case of 32-bit aligned constant offsets if there is - * a 3-components vector we submit one untyped-write message - * of 32-bit (first two components), and one byte-scattered - * write message (the last component). - */ - - if (!nir_src_is_const(instr->src[2]) || - ((nir_src_as_uint(instr->src[2]) + - type_size * first_component) % 4)) { - /* If we use a .yz writemask we also need to emit 2 - * byte-scattered write messages because of y-component not - * being aligned to 32-bit. - */ - num_components = 1; - } else if (num_components * type_size > 4 && - (num_components * type_size % 4)) { - /* If the pending components size is not a multiple of 4 bytes - * we left the not aligned components for following emits of - * length == 1 with byte_scattered_write. - */ - num_components -= (num_components * type_size % 4) / type_size; - } else if (num_components * type_size < 4) { - num_components = 1; - } - /* For num_components == 1 we are also shuffling the component - * because byte scattered writes of 16-bit need values to be dword - * aligned. Shuffling only one component would be the same as - * striding it. - */ - write_src = shuffle_for_32bit_write(bld, write_src, 0, - num_components); - } + const unsigned bit_size = nir_src_bit_size(instr->src[0]); + fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS]; + srcs[SURFACE_LOGICAL_SRC_SURFACE] = + get_nir_ssbo_intrinsic_index(bld, instr); + srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[2]); + srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1); - fs_reg offset_reg; + fs_reg data = get_nir_src(instr->src[0]); + data.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD); - if (nir_src_is_const(instr->src[2])) { - offset_reg = brw_imm_ud(nir_src_as_uint(instr->src[2]) + - type_size * first_component); - } else { - offset_reg = vgrf(glsl_type::uint_type); - bld.ADD(offset_reg, - retype(get_nir_src(instr->src[2]), BRW_REGISTER_TYPE_UD), - brw_imm_ud(type_size * first_component)); - } + assert(nir_intrinsic_write_mask(instr) == + (1u << instr->num_components) - 1); + if (nir_intrinsic_align(instr) >= 4) { + assert(nir_src_bit_size(instr->src[0]) == 32); + assert(nir_src_num_components(instr->src[0]) <= 4); + srcs[SURFACE_LOGICAL_SRC_DATA] = data; + srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components); + bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL, + fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS); + } else { + assert(nir_src_bit_size(instr->src[0]) <= 32); + assert(nir_src_num_components(instr->src[0]) == 1); + srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size); - if (type_size < 4 && num_components == 1) { - /* Untyped Surface messages have a fixed 32-bit size, so we need - * to rely on byte scattered in order to write 16-bit elements. - * The byte_scattered_write message needs that every written 16-bit - * type to be aligned 32-bits (stride=2). 
- */ - emit_byte_scattered_write(bld, surf_index, offset_reg, - write_src, - 1 /* dims */, - bit_size, - BRW_PREDICATE_NONE); - } else { - assert(num_components * type_size <= 16); - assert((num_components * type_size) % 4 == 0); - assert(offset_reg.file != BRW_IMMEDIATE_VALUE || - offset_reg.ud % 4 == 0); - unsigned num_slots = (num_components * type_size) / 4; - - emit_untyped_write(bld, surf_index, offset_reg, - write_src, - 1 /* dims */, num_slots, - BRW_PREDICATE_NONE); - } + srcs[SURFACE_LOGICAL_SRC_DATA] = bld.vgrf(BRW_REGISTER_TYPE_UD); + bld.MOV(srcs[SURFACE_LOGICAL_SRC_DATA], data); - /* Clear the bits in the writemask that we just wrote, then try - * again to see if more channels are left. - */ - writemask &= (15 << (first_component + num_components)); + bld.emit(SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL, + fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS); } break; } @@ -4355,6 +4395,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr break; case nir_intrinsic_get_buffer_size: { + assert(nir_src_num_components(instr->src[0]) == 1); unsigned ssbo_index = nir_src_is_const(instr->src[0]) ? nir_src_as_uint(instr->src[0]) : 0; @@ -4409,8 +4450,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr ubld.ADD(buffer_size, size_aligned4, negate(size_padding)); bld.MOV(retype(dest, ret_payload.type), component(buffer_size, 0)); - - brw_mark_surface_used(prog_data, index); break; } @@ -4609,34 +4648,9 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr const fs_reg tmp_left = horiz_stride(tmp, 2); const fs_reg tmp_right = horiz_stride(horiz_offset(tmp, 1), 2); - /* From the Cherryview PRM Vol. 7, "Register Region Restrictiosn": - * - * "When source or destination datatype is 64b or operation is - * integer DWord multiply, regioning in Align1 must follow - * these rules: - * - * [...] - * - * 3. Source and Destination offset must be the same, except - * the case of scalar source." - * - * In order to work around this, we have to emit two 32-bit MOVs instead - * of a single 64-bit MOV to do the shuffle. 
- */ - if (type_sz(value.type) > 4 && - (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) { - ubld.MOV(subscript(tmp_left, BRW_REGISTER_TYPE_D, 0), - subscript(src_right, BRW_REGISTER_TYPE_D, 0)); - ubld.MOV(subscript(tmp_left, BRW_REGISTER_TYPE_D, 1), - subscript(src_right, BRW_REGISTER_TYPE_D, 1)); - ubld.MOV(subscript(tmp_right, BRW_REGISTER_TYPE_D, 0), - subscript(src_left, BRW_REGISTER_TYPE_D, 0)); - ubld.MOV(subscript(tmp_right, BRW_REGISTER_TYPE_D, 1), - subscript(src_left, BRW_REGISTER_TYPE_D, 1)); - } else { - ubld.MOV(tmp_left, src_right); - ubld.MOV(tmp_right, src_left); - } + ubld.MOV(tmp_left, src_right); + ubld.MOV(tmp_right, src_left); + bld.MOV(retype(dest, value.type), tmp); break; } @@ -4772,7 +4786,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr break; } - case nir_intrinsic_begin_fragment_shader_ordering: case nir_intrinsic_begin_invocation_interlock: { const fs_builder ubld = bld.group(8, 0); const fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2); @@ -4804,24 +4817,28 @@ fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld, if (nir_intrinsic_infos[instr->intrinsic].has_dest) dest = get_nir_dest(instr->dest); - fs_reg surface = get_nir_ssbo_intrinsic_index(bld, instr); - fs_reg offset = get_nir_src(instr->src[1]); - fs_reg data1; + fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS]; + srcs[SURFACE_LOGICAL_SRC_SURFACE] = get_nir_ssbo_intrinsic_index(bld, instr); + srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]); + srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1); + srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op); + + fs_reg data; if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC) - data1 = get_nir_src(instr->src[2]); - fs_reg data2; - if (op == BRW_AOP_CMPWR) - data2 = get_nir_src(instr->src[3]); + data = get_nir_src(instr->src[2]); + + if (op == BRW_AOP_CMPWR) { + fs_reg tmp = bld.vgrf(data.type, 2); + fs_reg sources[2] = { data, get_nir_src(instr->src[3]) }; + bld.LOAD_PAYLOAD(tmp, sources, 2, 0); + data = tmp; + } + srcs[SURFACE_LOGICAL_SRC_DATA] = data; /* Emit the actual atomic operation */ - fs_reg atomic_result = emit_untyped_atomic(bld, surface, offset, - data1, data2, - 1 /* dims */, 1 /* rsize */, - op, - BRW_PREDICATE_NONE); - dest.type = atomic_result.type; - bld.MOV(dest, atomic_result); + bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL, + dest, srcs, SURFACE_LOGICAL_NUM_SRCS); } void @@ -4835,22 +4852,25 @@ fs_visitor::nir_emit_ssbo_atomic_float(const fs_builder &bld, if (nir_intrinsic_infos[instr->intrinsic].has_dest) dest = get_nir_dest(instr->dest); - fs_reg surface = get_nir_ssbo_intrinsic_index(bld, instr); - fs_reg offset = get_nir_src(instr->src[1]); - fs_reg data1 = get_nir_src(instr->src[2]); - fs_reg data2; - if (op == BRW_AOP_FCMPWR) - data2 = get_nir_src(instr->src[3]); + fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS]; + srcs[SURFACE_LOGICAL_SRC_SURFACE] = get_nir_ssbo_intrinsic_index(bld, instr); + srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]); + srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1); + srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op); + + fs_reg data = get_nir_src(instr->src[2]); + if (op == BRW_AOP_FCMPWR) { + fs_reg tmp = bld.vgrf(data.type, 2); + fs_reg sources[2] = { data, get_nir_src(instr->src[3]) }; + bld.LOAD_PAYLOAD(tmp, sources, 2, 0); + data = tmp; + } + srcs[SURFACE_LOGICAL_SRC_DATA] = data; /* Emit the actual atomic operation */ - fs_reg atomic_result = emit_untyped_atomic_float(bld, surface, offset, - data1, data2, - 1 /* dims */, 1 /* 
@@ -4835,22 +4852,25 @@ fs_visitor::nir_emit_ssbo_atomic_float(const fs_builder &bld,
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
       dest = get_nir_dest(instr->dest);
 
-   fs_reg surface = get_nir_ssbo_intrinsic_index(bld, instr);
-   fs_reg offset = get_nir_src(instr->src[1]);
-   fs_reg data1 = get_nir_src(instr->src[2]);
-   fs_reg data2;
-   if (op == BRW_AOP_FCMPWR)
-      data2 = get_nir_src(instr->src[3]);
+   fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
+   srcs[SURFACE_LOGICAL_SRC_SURFACE] = get_nir_ssbo_intrinsic_index(bld, instr);
+   srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
+   srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
+   srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
+
+   fs_reg data = get_nir_src(instr->src[2]);
+   if (op == BRW_AOP_FCMPWR) {
+      fs_reg tmp = bld.vgrf(data.type, 2);
+      fs_reg sources[2] = { data, get_nir_src(instr->src[3]) };
+      bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
+      data = tmp;
+   }
+   srcs[SURFACE_LOGICAL_SRC_DATA] = data;
 
    /* Emit the actual atomic operation */
-   fs_reg atomic_result = emit_untyped_atomic_float(bld, surface, offset,
-                                                    data1, data2,
-                                                    1 /* dims */, 1 /* rsize */,
-                                                    op,
-                                                    BRW_PREDICATE_NONE);
-   dest.type = atomic_result.type;
-   bld.MOV(dest, atomic_result);
+   bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL,
+            dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
 }
 
 void
@@ -4861,35 +4881,37 @@ fs_visitor::nir_emit_shared_atomic(const fs_builder &bld,
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
       dest = get_nir_dest(instr->dest);
 
-   fs_reg surface = brw_imm_ud(GEN7_BTI_SLM);
-   fs_reg offset;
-   fs_reg data1;
+   fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
+   srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GEN7_BTI_SLM);
+   srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
+   srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
+
+   fs_reg data;
    if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
-      data1 = get_nir_src(instr->src[1]);
-   fs_reg data2;
-   if (op == BRW_AOP_CMPWR)
-      data2 = get_nir_src(instr->src[2]);
+      data = get_nir_src(instr->src[1]);
+   if (op == BRW_AOP_CMPWR) {
+      fs_reg tmp = bld.vgrf(data.type, 2);
+      fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
+      bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
+      data = tmp;
+   }
+   srcs[SURFACE_LOGICAL_SRC_DATA] = data;
 
    /* Get the offset */
    if (nir_src_is_const(instr->src[0])) {
-      offset = brw_imm_ud(instr->const_index[0] +
-                          nir_src_as_uint(instr->src[0]));
+      srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
+         brw_imm_ud(instr->const_index[0] + nir_src_as_uint(instr->src[0]));
    } else {
-      offset = vgrf(glsl_type::uint_type);
-      bld.ADD(offset,
+      srcs[SURFACE_LOGICAL_SRC_ADDRESS] = vgrf(glsl_type::uint_type);
+      bld.ADD(srcs[SURFACE_LOGICAL_SRC_ADDRESS],
              retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
              brw_imm_ud(instr->const_index[0]));
    }
 
    /* Emit the actual atomic operation */
-   fs_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
-                                              data1, data2,
-                                              1 /* dims */, 1 /* rsize */,
-                                              op,
-                                              BRW_PREDICATE_NONE);
-   dest.type = atomic_result.type;
-   bld.MOV(dest, atomic_result);
+   bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL,
+            dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
 }
 
 void
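Editor's note, illustrative only (not part of the patch): for the
compare-and-swap opcodes (BRW_AOP_CMPWR / BRW_AOP_FCMPWR) the logical atomic
expects the compare and swap values packed back to back in the single DATA
source, which the converted code builds with LOAD_PAYLOAD. A sketch, with data
and swap as placeholder names for the two NIR operands:

      if (op == BRW_AOP_CMPWR) {
         fs_reg tmp = bld.vgrf(data.type, 2);   /* room for both values */
         fs_reg sources[2] = { data, swap };    /* { compare value, swap value } */
         bld.LOAD_PAYLOAD(tmp, sources, 2, 0);  /* 2 sources, no header */
         data = tmp;
      }
      srcs[SURFACE_LOGICAL_SRC_DATA] = data;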
@@ -4900,33 +4922,89 @@ fs_visitor::nir_emit_shared_atomic_float(const fs_builder &bld,
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
       dest = get_nir_dest(instr->dest);
 
-   fs_reg surface = brw_imm_ud(GEN7_BTI_SLM);
-   fs_reg offset;
-   fs_reg data1 = get_nir_src(instr->src[1]);
-   fs_reg data2;
-   if (op == BRW_AOP_FCMPWR)
-      data2 = get_nir_src(instr->src[2]);
+   fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
+   srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GEN7_BTI_SLM);
+   srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
+   srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
+
+   fs_reg data = get_nir_src(instr->src[1]);
+   if (op == BRW_AOP_FCMPWR) {
+      fs_reg tmp = bld.vgrf(data.type, 2);
+      fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
+      bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
+      data = tmp;
+   }
+   srcs[SURFACE_LOGICAL_SRC_DATA] = data;
 
    /* Get the offset */
    if (nir_src_is_const(instr->src[0])) {
-      offset = brw_imm_ud(instr->const_index[0] +
-                          nir_src_as_uint(instr->src[0]));
+      srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
+         brw_imm_ud(instr->const_index[0] + nir_src_as_uint(instr->src[0]));
    } else {
-      offset = vgrf(glsl_type::uint_type);
-      bld.ADD(offset,
-              retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
-              brw_imm_ud(instr->const_index[0]));
+      srcs[SURFACE_LOGICAL_SRC_ADDRESS] = vgrf(glsl_type::uint_type);
+      bld.ADD(srcs[SURFACE_LOGICAL_SRC_ADDRESS],
+              retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
+              brw_imm_ud(instr->const_index[0]));
    }
 
    /* Emit the actual atomic operation */
-   fs_reg atomic_result = emit_untyped_atomic_float(bld, surface, offset,
-                                                    data1, data2,
-                                                    1 /* dims */, 1 /* rsize */,
-                                                    op,
-                                                    BRW_PREDICATE_NONE);
-   dest.type = atomic_result.type;
-   bld.MOV(dest, atomic_result);
+   bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL,
+            dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
+}
+
+void
+fs_visitor::nir_emit_global_atomic(const fs_builder &bld,
+                                   int op, nir_intrinsic_instr *instr)
+{
+   if (stage == MESA_SHADER_FRAGMENT)
+      brw_wm_prog_data(prog_data)->has_side_effects = true;
+
+   fs_reg dest;
+   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
+      dest = get_nir_dest(instr->dest);
+
+   fs_reg addr = get_nir_src(instr->src[0]);
+
+   fs_reg data;
+   if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
+      data = get_nir_src(instr->src[1]);
+
+   if (op == BRW_AOP_CMPWR) {
+      fs_reg tmp = bld.vgrf(data.type, 2);
+      fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
+      bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
+      data = tmp;
+   }
+
+   bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL,
+            dest, addr, data, brw_imm_ud(op));
+}
+
+void
+fs_visitor::nir_emit_global_atomic_float(const fs_builder &bld,
+                                         int op, nir_intrinsic_instr *instr)
+{
+   if (stage == MESA_SHADER_FRAGMENT)
+      brw_wm_prog_data(prog_data)->has_side_effects = true;
+
+   assert(nir_intrinsic_infos[instr->intrinsic].has_dest);
+   fs_reg dest = get_nir_dest(instr->dest);
+
+   fs_reg addr = get_nir_src(instr->src[0]);
+
+   assert(op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC);
+   fs_reg data = get_nir_src(instr->src[1]);
+
+   if (op == BRW_AOP_FCMPWR) {
+      fs_reg tmp = bld.vgrf(data.type, 2);
+      fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
+      bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
+      data = tmp;
+   }
+
+   bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL,
+            dest, addr, data, brw_imm_ud(op));
 }
 
 void
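Editor's note, illustrative only (not part of the patch): the new
nir_emit_global_atomic and nir_emit_global_atomic_float helpers above address
memory through a 64-bit pointer rather than a binding-table surface, so there
is no srcs[] payload; the A64 logical opcode takes the address, the (possibly
LOAD_PAYLOAD-packed) data, and the atomic opcode directly, roughly:

      fs_reg addr = get_nir_src(instr->src[0]);   /* 64-bit global address */
      fs_reg data = get_nir_src(instr->src[1]);   /* operand; packed as above for CMPWR */
      bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL,
               dest, addr, data, brw_imm_ud(op));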
@@ -4993,19 +5071,17 @@ fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
             break;
          }
          break;
+      case nir_tex_src_min_lod:
+         srcs[TEX_LOGICAL_SRC_MIN_LOD] =
+            retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
+         break;
       case nir_tex_src_ms_index:
          srcs[TEX_LOGICAL_SRC_SAMPLE_INDEX] = retype(src, BRW_REGISTER_TYPE_UD);
          break;
 
       case nir_tex_src_offset: {
-         nir_const_value *const_offset =
-            nir_src_as_const_value(instr->src[i].src);
-         assert(nir_src_bit_size(instr->src[i].src) == 32);
-         unsigned offset_bits = 0;
-         if (const_offset &&
-             brw_texture_offset(const_offset->i32,
-                                nir_tex_instr_src_size(instr, i),
-                                &offset_bits)) {
+         uint32_t offset_bits = 0;
+         if (brw_texture_offset(instr, i, &offset_bits)) {
             header_bits |= offset_bits;
          } else {
            srcs[TEX_LOGICAL_SRC_TG4_OFFSET] =
@@ -5018,15 +5094,6 @@ fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
          unreachable("should be lowered");
 
       case nir_tex_src_texture_offset: {
-         /* Figure out the highest possible texture index and mark it as used */
-         uint32_t max_used = texture + instr->texture_array_size - 1;
-         if (instr->op == nir_texop_tg4 && devinfo->gen < 8) {
-            max_used += stage_prog_data->binding_table.gather_texture_start;
-         } else {
-            max_used += stage_prog_data->binding_table.texture_start;
-         }
-         brw_mark_surface_used(prog_data, max_used);
-
          /* Emit code to evaluate the actual indexing expression */
          fs_reg tmp = vgrf(glsl_type::uint_type);
          bld.ADD(tmp, src, brw_imm_ud(texture));
@@ -5080,11 +5147,15 @@ fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
    srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(instr->coord_components);
    srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(lod_components);
 
+   bool shader_supports_implicit_lod = stage == MESA_SHADER_FRAGMENT ||
+      (stage == MESA_SHADER_COMPUTE &&
+       nir->info.cs.derivative_group != DERIVATIVE_GROUP_NONE);
+
    enum opcode opcode;
    switch (instr->op) {
    case nir_texop_tex:
-      opcode = (stage == MESA_SHADER_FRAGMENT ? SHADER_OPCODE_TEX_LOGICAL :
-                SHADER_OPCODE_TXL_LOGICAL);
+      opcode = shader_supports_implicit_lod ?
+               SHADER_OPCODE_TEX_LOGICAL : SHADER_OPCODE_TXL_LOGICAL;
       break;
    case nir_texop_txb:
       opcode = FS_OPCODE_TXB_LOGICAL;