#include "compiler/glsl/ir.h"
#include "brw_fs.h"
-#include "brw_fs_surface_builder.h"
#include "brw_nir.h"
#include "util/u_math.h"
#include "util/bitscan.h"
using namespace brw;
-using namespace brw::surface_access;
void
fs_visitor::emit_nir_code()
const int loc = var->data.driver_location;
const unsigned var_vec4s =
var->data.compact ? DIV_ROUND_UP(glsl_get_length(var->type), 4)
- : type_size_vec4(var->type);
+ : type_size_vec4(var->type, true);
vec4s[loc] = MAX2(vec4s[loc], var_vec4s);
}
void
fs_visitor::nir_emit_if(nir_if *if_stmt)
{
+ bool invert;
+ fs_reg cond_reg;
+
+ /* If the condition has the form !other_condition, use other_condition as
+ * the source, but invert the predicate on the if instruction.
+ */
+ nir_alu_instr *const cond = nir_src_as_alu_instr(&if_stmt->condition);
+ if (cond != NULL && cond->op == nir_op_inot) {
+ assert(!cond->src[0].negate);
+ assert(!cond->src[0].abs);
+
+ invert = true;
+ cond_reg = get_nir_src(cond->src[0].src);
+ } else {
+ invert = false;
+ cond_reg = get_nir_src(if_stmt->condition);
+ }
+
/* first, put the condition into f0 */
fs_inst *inst = bld.MOV(bld.null_reg_d(),
- retype(get_nir_src(if_stmt->condition),
- BRW_REGISTER_TYPE_D));
+ retype(cond_reg, BRW_REGISTER_TYPE_D));
inst->conditional_mod = BRW_CONDITIONAL_NZ;
- bld.IF(BRW_PREDICATE_NORMAL);
+ bld.IF(BRW_PREDICATE_NORMAL)->predicate_inverse = invert;
nir_emit_cf_list(&if_stmt->then_list);
src0->op != nir_op_extract_i8 && src0->op != nir_op_extract_i16)
return false;
+ /* If either opcode has source modifiers, bail.
+ *
+ * TODO: We can potentially handle source modifiers if both of the opcodes
+ * we're combining are signed integers.
+ */
+ if (instr->src[0].abs || instr->src[0].negate ||
+ src0->src[0].abs || src0->src[0].negate)
+ return false;
+
unsigned element = nir_src_as_uint(src0->src[1].src);
/* Element type to extract. */
}
}
-void
-fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
+fs_reg
+fs_visitor::prepare_alu_destination_and_sources(const fs_builder &bld,
+ nir_alu_instr *instr,
+ fs_reg *op,
+ bool need_dest)
{
- struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
- fs_inst *inst;
+ fs_reg result =
+ need_dest ? get_nir_dest(instr->dest.dest) : bld.null_reg_ud();
- fs_reg result = get_nir_dest(instr->dest.dest);
result.type = brw_type_for_nir_type(devinfo,
(nir_alu_type)(nir_op_infos[instr->op].output_type |
nir_dest_bit_size(instr->dest.dest)));
- fs_reg op[4];
for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
op[i] = get_nir_src(instr->src[i].src);
op[i].type = brw_type_for_nir_type(devinfo,
op[i].negate = instr->src[i].negate;
}
- /* We get a bunch of mov's out of the from_ssa pass and they may still
- * be vectorized. We'll handle them as a special-case. We'll also
- * handle vecN here because it's basically the same thing.
+ /* Move and vecN instructions may still be vectorized. Return the raw,
+ * vectorized source and destination so that fs_visitor::nir_emit_alu can
+ * handle them. Other callers should not have to handle these kinds of
+ * instructions.
*/
+ switch (instr->op) {
+ case nir_op_imov:
+ case nir_op_fmov:
+ case nir_op_vec2:
+ case nir_op_vec3:
+ case nir_op_vec4:
+ return result;
+ default:
+ break;
+ }
+
+ /* At this point, we have dealt with any instruction that operates on
+ * more than a single channel. Therefore, we can just adjust the source
+ * and destination registers for that channel and emit the instruction.
+ */
+ unsigned channel = 0;
+ if (nir_op_infos[instr->op].output_size == 0) {
+ /* Since NIR is doing the scalarizing for us, we should only ever see
+ * vectorized operations with a single channel.
+ */
+ assert(util_bitcount(instr->dest.write_mask) == 1);
+ channel = ffs(instr->dest.write_mask) - 1;
+
+ result = offset(result, bld, channel);
+ }
+
+ for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
+ assert(nir_op_infos[instr->op].input_sizes[i] < 2);
+ op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
+ }
+
+ return result;
+}
+
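+/* On Gen8+ the source negate modifier on logic instructions performs a
+ * bitwise NOT, so a nir_op_inot source can be folded into the modifier.
+ * Other sources must have their arithmetic modifiers resolved, since logic
+ * instructions do not handle them.
+ */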
+void
+fs_visitor::resolve_inot_sources(const fs_builder &bld, nir_alu_instr *instr,
+ fs_reg *op)
+{
+ for (unsigned i = 0; i < 2; i++) {
+ nir_alu_instr *const inot_instr =
+ nir_src_as_alu_instr(&instr->src[i].src);
+
+ if (inot_instr != NULL && inot_instr->op == nir_op_inot &&
+ !inot_instr->src[0].abs && !inot_instr->src[0].negate) {
+ /* The source of the inot is now the source of instr. */
+ prepare_alu_destination_and_sources(bld, inot_instr, &op[i], false);
+
+ assert(!op[i].negate);
+ op[i].negate = true;
+ } else {
+ op[i] = resolve_source_modifiers(op[i]);
+ }
+ }
+}
+
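+/* Try to emit b2f(inot(a)) or b2i(inot(a)) as a single ADD. NIR booleans
+ * are 0 or -1, so converting the inverted boolean is just 1 + a. Returns
+ * true if the optimized sequence was emitted.
+ */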
+bool
+fs_visitor::try_emit_b2fi_of_inot(const fs_builder &bld,
+ fs_reg result,
+ nir_alu_instr *instr)
+{
+ if (devinfo->gen < 6 || devinfo->gen >= 12)
+ return false;
+
+ nir_alu_instr *const inot_instr = nir_src_as_alu_instr(&instr->src[0].src);
+
+ if (inot_instr == NULL || inot_instr->op != nir_op_inot)
+ return false;
+
+ /* HF is also possible as a destination on BDW+. For nir_op_b2i, the set
+ * of valid size-changing combinations is a bit more complex.
+ *
+ * The source restriction is just because I was lazy about generating the
+ * constant below.
+ */
+ if (nir_dest_bit_size(instr->dest.dest) != 32 ||
+ nir_src_bit_size(inot_instr->src[0].src) != 32)
+ return false;
+
+ /* b2[fi](inot(a)) maps a=0 => 1, a=-1 => 0. Since a can only be 0 or -1,
+ * this is float(1 + a).
+ */
+ fs_reg op;
+
+ prepare_alu_destination_and_sources(bld, inot_instr, &op, false);
+
+ /* Ignore the saturate modifier, if there is one. The result of the
+ * arithmetic can only be 0 or 1, so the clamping will do nothing anyway.
+ */
+ bld.ADD(result, op, brw_imm_d(1));
+
+ return true;
+}
+
+void
+fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
+{
+ struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
+ fs_inst *inst;
+
+ fs_reg op[4];
+ fs_reg result = prepare_alu_destination_and_sources(bld, instr, op, true);
+
switch (instr->op) {
case nir_op_imov:
case nir_op_fmov:
}
return;
}
- default:
- break;
- }
-
- /* At this point, we have dealt with any instruction that operates on
- * more than a single channel. Therefore, we can just adjust the source
- * and destination registers for that channel and emit the instruction.
- */
- unsigned channel = 0;
- if (nir_op_infos[instr->op].output_size == 0) {
- /* Since NIR is doing the scalarizing for us, we should only ever see
- * vectorized operations with a single channel.
- */
- assert(util_bitcount(instr->dest.write_mask) == 1);
- channel = ffs(instr->dest.write_mask) - 1;
-
- result = offset(result, bld, channel);
- }
- for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
- assert(nir_op_infos[instr->op].input_sizes[i] < 2);
- op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
- }
-
- switch (instr->op) {
case nir_op_i2f32:
case nir_op_u2f32:
if (optimize_extract_to_float(instr, result))
*/
case nir_op_f2f16:
+ case nir_op_i2f16:
+ case nir_op_u2f16:
+ assert(type_sz(op[0].type) < 8); /* brw_nir_lower_conversions */
+ inst = bld.MOV(result, op[0]);
+ inst->saturate = instr->dest.saturate;
+ break;
+
+ case nir_op_f2f64:
+ case nir_op_f2i64:
+ case nir_op_f2u64:
+ assert(type_sz(op[0].type) > 2); /* brw_nir_lower_conversions */
inst = bld.MOV(result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_b2f16:
case nir_op_b2f32:
case nir_op_b2f64:
+ if (try_emit_b2fi_of_inot(bld, result, instr))
+ break;
op[0].type = BRW_REGISTER_TYPE_D;
op[0].negate = !op[0].negate;
/* fallthrough */
- case nir_op_f2f64:
- case nir_op_f2i64:
- case nir_op_f2u64:
case nir_op_i2f64:
case nir_op_i2i64:
case nir_op_u2f64:
case nir_op_u2u64:
+ assert(type_sz(op[0].type) > 1); /* brw_nir_lower_conversions */
+ /* fallthrough */
case nir_op_f2f32:
case nir_op_f2i32:
case nir_op_f2u32:
case nir_op_u2u32:
case nir_op_i2i16:
case nir_op_u2u16:
- case nir_op_i2f16:
- case nir_op_u2f16:
case nir_op_i2i8:
case nir_op_u2u8:
inst = bld.MOV(result, op[0]);
break;
}
- case nir_op_isign: {
- /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
- * -> non-negative val generates 0x00000000.
- * Predicated OR sets 1 if val is positive.
- */
- uint32_t bit_size = nir_dest_bit_size(instr->dest.dest);
- assert(bit_size == 32 || bit_size == 16);
-
- fs_reg zero = bit_size == 32 ? brw_imm_d(0) : brw_imm_w(0);
- fs_reg one = bit_size == 32 ? brw_imm_d(1) : brw_imm_w(1);
- fs_reg shift = bit_size == 32 ? brw_imm_d(31) : brw_imm_w(15);
-
- bld.CMP(bld.null_reg_d(), op[0], zero, BRW_CONDITIONAL_G);
- bld.ASR(result, op[0], shift);
- inst = bld.OR(result, result, one);
- inst->predicate = BRW_PREDICATE_NORMAL;
- break;
- }
-
case nir_op_frcp:
inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
inst->saturate = instr->dest.saturate;
inst->saturate = instr->dest.saturate;
break;
+ case nir_op_imul_2x32_64:
+ case nir_op_umul_2x32_64:
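+ /* result carries the instruction's 64-bit destination type, so a single
+ * MUL yields the full 64-bit product of the 32-bit sources.
+ */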
+ bld.MUL(result, op[0], op[1]);
+ break;
+
case nir_op_imul:
assert(nir_dest_bit_size(instr->dest.dest) < 64);
bld.MUL(result, op[0], op[1]);
case nir_op_inot:
if (devinfo->gen >= 8) {
+ nir_alu_instr *const inot_src_instr =
+ nir_src_as_alu_instr(&instr->src[0].src);
+
+ if (inot_src_instr != NULL &&
+ (inot_src_instr->op == nir_op_ior ||
+ inot_src_instr->op == nir_op_ixor ||
+ inot_src_instr->op == nir_op_iand) &&
+ !inot_src_instr->src[0].abs &&
+ !inot_src_instr->src[0].negate &&
+ !inot_src_instr->src[1].abs &&
+ !inot_src_instr->src[1].negate) {
+ /* The sources of the source logical instruction are now the
+ * sources of the instruction that will be generated.
+ */
+ prepare_alu_destination_and_sources(bld, inot_src_instr, op, false);
+ resolve_inot_sources(bld, inot_src_instr, op);
+
+ /* Smash all of the sources and destination to be signed. This
+ * doesn't matter for the operation of the instruction, but cmod
+ * propagation fails on unsigned sources with negation (due to
+ * fs_inst::can_do_cmod returning false).
+ */
+ result.type =
+ brw_type_for_nir_type(devinfo,
+ (nir_alu_type)(nir_type_int |
+ nir_dest_bit_size(instr->dest.dest)));
+ op[0].type =
+ brw_type_for_nir_type(devinfo,
+ (nir_alu_type)(nir_type_int |
+ nir_src_bit_size(inot_src_instr->src[0].src)));
+ op[1].type =
+ brw_type_for_nir_type(devinfo,
+ (nir_alu_type)(nir_type_int |
+ nir_src_bit_size(inot_src_instr->src[1].src)));
+
+ /* For XOR, only invert one of the sources. Arbitrarily choose
+ * the first source.
+ */
+ op[0].negate = !op[0].negate;
+ if (inot_src_instr->op != nir_op_ixor)
+ op[1].negate = !op[1].negate;
+
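+ /* By De Morgan's laws, inot(ior(a, b)) == iand(inot(a), inot(b)) and
+ * inot(iand(a, b)) == ior(inot(a), inot(b)). For XOR,
+ * inot(ixor(a, b)) == ixor(inot(a), b). The negations applied above
+ * combine with the swapped opcode below to implement this.
+ */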
+ switch (inot_src_instr->op) {
+ case nir_op_ior:
+ bld.AND(result, op[0], op[1]);
+ return;
+
+ case nir_op_iand:
+ bld.OR(result, op[0], op[1]);
+ return;
+
+ case nir_op_ixor:
+ bld.XOR(result, op[0], op[1]);
+ return;
+
+ default:
+ unreachable("impossible opcode");
+ }
+ }
op[0] = resolve_source_modifiers(op[0]);
}
bld.NOT(result, op[0]);
break;
case nir_op_ixor:
if (devinfo->gen >= 8) {
- op[0] = resolve_source_modifiers(op[0]);
- op[1] = resolve_source_modifiers(op[1]);
+ resolve_inot_sources(bld, instr, op);
}
bld.XOR(result, op[0], op[1]);
break;
case nir_op_ior:
if (devinfo->gen >= 8) {
- op[0] = resolve_source_modifiers(op[0]);
- op[1] = resolve_source_modifiers(op[1]);
+ resolve_inot_sources(bld, instr, op);
}
bld.OR(result, op[0], op[1]);
break;
case nir_op_iand:
if (devinfo->gen >= 8) {
- op[0] = resolve_source_modifiers(op[0]);
- op[1] = resolve_source_modifiers(op[1]);
+ resolve_inot_sources(bld, instr, op);
}
bld.AND(result, op[0], op[1]);
break;
* Use two instructions and a word or DWord intermediate integer type.
*/
if (nir_dest_bit_size(instr->dest.dest) == 64) {
- const brw_reg_type type = brw_int_type(2, instr->op == nir_op_extract_i8);
+ const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8);
if (instr->op == nir_op_extract_i8) {
/* If we need to sign extend, extract to a word first */
fs_reg w_temp = bld.vgrf(BRW_REGISTER_TYPE_W);
bld.MOV(w_temp, subscript(op[0], type, byte));
bld.MOV(result, w_temp);
+ } else if (byte & 1) {
+ /* Extract the high byte from the word containing the desired byte
+ * offset.
+ */
+ bld.SHR(result,
+ subscript(op[0], BRW_REGISTER_TYPE_UW, byte / 2),
+ brw_imm_uw(8));
} else {
/* Otherwise use an AND with 0xff and a word type */
- bld.AND(result, subscript(op[0], type, byte / 2), brw_imm_uw(0xff));
+ bld.AND(result,
+ subscript(op[0], BRW_REGISTER_TYPE_UW, byte / 2),
+ brw_imm_uw(0xff));
}
} else {
const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8);
switch (instr->def.bit_size) {
case 8:
for (unsigned i = 0; i < instr->def.num_components; i++)
- bld.MOV(offset(reg, bld, i), setup_imm_b(bld, instr->value.i8[i]));
+ bld.MOV(offset(reg, bld, i), setup_imm_b(bld, instr->value[i].i8));
break;
case 16:
for (unsigned i = 0; i < instr->def.num_components; i++)
- bld.MOV(offset(reg, bld, i), brw_imm_w(instr->value.i16[i]));
+ bld.MOV(offset(reg, bld, i), brw_imm_w(instr->value[i].i16));
break;
case 32:
for (unsigned i = 0; i < instr->def.num_components; i++)
- bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value.i32[i]));
+ bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value[i].i32));
break;
case 64:
/* We don't get 64-bit integer types until gen8 */
for (unsigned i = 0; i < instr->def.num_components; i++) {
bld.MOV(retype(offset(reg, bld, i), BRW_REGISTER_TYPE_DF),
- setup_imm_df(bld, instr->value.f64[i]));
+ setup_imm_df(bld, instr->value[i].f64));
}
} else {
for (unsigned i = 0; i < instr->def.num_components; i++)
- bld.MOV(offset(reg, bld, i), brw_imm_q(instr->value.i64[i]));
+ bld.MOV(offset(reg, bld, i), brw_imm_q(instr->value[i].i64));
}
break;
/* Zero the message header */
bld.exec_all().MOV(m0, brw_imm_ud(0u));
- /* Copy "Barrier ID" from r0.2, bits 16:13 */
- chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
- brw_imm_ud(INTEL_MASK(16, 13)));
+ if (devinfo->gen < 11) {
+ /* Copy "Barrier ID" from r0.2, bits 16:13 */
+ chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
+ brw_imm_ud(INTEL_MASK(16, 13)));
- /* Shift it up to bits 27:24. */
- chanbld.SHL(m0_2, m0_2, brw_imm_ud(11));
+ /* Shift it up to bits 27:24. */
+ chanbld.SHL(m0_2, m0_2, brw_imm_ud(11));
+ } else {
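+ /* On gen11+ the "Barrier ID" lives in r0.2 bits 30:24, which is already
+ * where the message expects it, so no shift is needed.
+ */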
+ chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
+ brw_imm_ud(INTEL_MASK(30, 24)));
+ }
/* Set the Barrier Count and the enable bit */
- chanbld.OR(m0_2, m0_2,
- brw_imm_ud(tcs_prog_data->instances << 9 | (1 << 15)));
+ if (devinfo->gen < 11) {
+ chanbld.OR(m0_2, m0_2,
+ brw_imm_ud(tcs_prog_data->instances << 9 | (1 << 15)));
+ } else {
+ chanbld.OR(m0_2, m0_2,
+ brw_imm_ud(tcs_prog_data->instances << 8 | (1 << 15)));
+ }
bld.emit(SHADER_OPCODE_BARRIER, bld.null_reg_ud(), m0);
break;
SHADER_OPCODE_TXF_CMS_LOGICAL;
/* Emit the instruction. */
- const fs_reg srcs[] = { coords, fs_reg(), brw_imm_ud(0), fs_reg(),
- fs_reg(), sample, mcs,
- brw_imm_ud(surface), brw_imm_ud(0),
- fs_reg(), brw_imm_ud(3), brw_imm_ud(0) };
- STATIC_ASSERT(ARRAY_SIZE(srcs) == TEX_LOGICAL_NUM_SRCS);
+ fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
+ srcs[TEX_LOGICAL_SRC_COORDINATE] = coords;
+ srcs[TEX_LOGICAL_SRC_LOD] = brw_imm_ud(0);
+ srcs[TEX_LOGICAL_SRC_SAMPLE_INDEX] = sample;
+ srcs[TEX_LOGICAL_SRC_MCS] = mcs;
+ srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(surface);
+ srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_ud(0);
+ srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_ud(3);
+ srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_ud(0);
fs_inst *inst = bld.emit(op, dst, srcs, ARRAY_SIZE(srcs));
inst->size_written = 4 * inst->dst.component_size(inst->exec_size);
if (const_offset) {
assert(nir_src_bit_size(instr->src[0]) == 32);
- unsigned off_x = MIN2((int)(const_offset->f32[0] * 16), 7) & 0xf;
- unsigned off_y = MIN2((int)(const_offset->f32[1] * 16), 7) & 0xf;
+ unsigned off_x = MIN2((int)(const_offset[0].f32 * 16), 7) & 0xf;
+ unsigned off_y = MIN2((int)(const_offset[1].f32 * 16), 7) & 0xf;
emit_pixel_interpolater_send(bld,
FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
cs_prog_data->uses_num_work_groups = true;
- fs_reg surf_index = brw_imm_ud(surface);
+ fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
+ srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(surface);
+ srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(1); /* num components */
/* Read the 3 GLuint components of gl_NumWorkGroups */
for (unsigned i = 0; i < 3; i++) {
- fs_reg read_result =
- emit_untyped_read(bld, surf_index,
- brw_imm_ud(i << 2),
- 1 /* dims */, 1 /* size */,
- BRW_PREDICATE_NONE);
- read_result.type = dest.type;
- bld.MOV(dest, read_result);
- dest = offset(dest, bld, 1);
+ srcs[SURFACE_LOGICAL_SRC_ADDRESS] = brw_imm_ud(i << 2);
+ bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
+ offset(dest, bld, i), srcs, SURFACE_LOGICAL_NUM_SRCS);
}
break;
}
assert(stage == MESA_SHADER_COMPUTE);
const unsigned bit_size = nir_dest_bit_size(instr->dest);
- fs_reg offset_reg = retype(get_nir_src(instr->src[0]),
- BRW_REGISTER_TYPE_UD);
+ fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
+ srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GEN7_BTI_SLM);
+ srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[0]);
+ srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
/* Make dest unsigned because that's what the temporary will be */
dest.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
/* Read the vector */
if (nir_intrinsic_align(instr) >= 4) {
assert(nir_dest_bit_size(instr->dest) == 32);
- fs_reg read_result = emit_untyped_read(bld, brw_imm_ud(GEN7_BTI_SLM),
- offset_reg, 1 /* dims */,
- instr->num_components,
- BRW_PREDICATE_NONE);
- for (unsigned i = 0; i < instr->num_components; i++)
- bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
+ fs_inst *inst =
+ bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
+ dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
+ inst->size_written = instr->num_components * dispatch_width * 4;
} else {
assert(nir_dest_bit_size(instr->dest) <= 32);
assert(nir_dest_num_components(instr->dest) == 1);
- fs_reg read_result =
- emit_byte_scattered_read(bld, brw_imm_ud(GEN7_BTI_SLM), offset_reg,
- 1 /* dims */, 1, bit_size,
- BRW_PREDICATE_NONE);
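+ /* For byte-scattered messages IMM_ARG carries the bit size rather than
+ * a component count.
+ */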
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
+
+ fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD);
+ bld.emit(SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL,
+ read_result, srcs, SURFACE_LOGICAL_NUM_SRCS);
bld.MOV(dest, read_result);
}
break;
assert(stage == MESA_SHADER_COMPUTE);
const unsigned bit_size = nir_src_bit_size(instr->src[0]);
- fs_reg val_reg = get_nir_src(instr->src[0]);
- fs_reg offset_reg = retype(get_nir_src(instr->src[1]),
- BRW_REGISTER_TYPE_UD);
+ fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
+ srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GEN7_BTI_SLM);
+ srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
+ srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
- val_reg.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
+ fs_reg data = get_nir_src(instr->src[0]);
+ data.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
assert(nir_intrinsic_write_mask(instr) ==
(1u << instr->num_components) - 1);
if (nir_intrinsic_align(instr) >= 4) {
assert(nir_src_bit_size(instr->src[0]) == 32);
assert(nir_src_num_components(instr->src[0]) <= 4);
- emit_untyped_write(bld, brw_imm_ud(GEN7_BTI_SLM), offset_reg, val_reg,
- 1 /* dims */, instr->num_components,
- BRW_PREDICATE_NONE);
+ srcs[SURFACE_LOGICAL_SRC_DATA] = data;
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
+ bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,
+ fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
} else {
assert(nir_src_bit_size(instr->src[0]) <= 32);
assert(nir_src_num_components(instr->src[0]) == 1);
- fs_reg write_src = bld.vgrf(BRW_REGISTER_TYPE_UD);
- bld.MOV(write_src, val_reg);
- emit_byte_scattered_write(bld, brw_imm_ud(GEN7_BTI_SLM), offset_reg,
- write_src, 1 /* dims */, bit_size,
- BRW_PREDICATE_NONE);
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
+
+ srcs[SURFACE_LOGICAL_SRC_DATA] = bld.vgrf(BRW_REGISTER_TYPE_UD);
+ bld.MOV(srcs[SURFACE_LOGICAL_SRC_DATA], data);
+
+ bld.emit(SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL,
+ fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
}
break;
}
switch (type_sz(type)) {
case 2:
assert(type != BRW_REGISTER_TYPE_HF);
- return retype(brw_imm_uw(value.u16[0]), type);
+ return retype(brw_imm_uw(value.u16), type);
case 4:
- return retype(brw_imm_ud(value.u32[0]), type);
+ return retype(brw_imm_ud(value.u32), type);
case 8:
if (type == BRW_REGISTER_TYPE_DF)
- return setup_imm_df(bld, value.f64[0]);
+ return setup_imm_df(bld, value.f64);
else
- return retype(brw_imm_u64(value.u64[0]), type);
+ return retype(brw_imm_u64(value.u64), type);
default:
unreachable("Invalid type size");
}
brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
}
- return surf_index;
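+ /* The surface index ends up in the message descriptor, which must be
+ * uniform, so uniformize a possibly-divergent index.
+ */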
+ return bld.emit_uniformize(surf_index);
}
static unsigned
/* Get some metadata from the image intrinsic. */
const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
- const unsigned dims = image_intrinsic_coord_components(instr);
const GLenum format = nir_intrinsic_format(instr);
- const unsigned dest_components = nir_intrinsic_dest_components(instr);
- /* Get the arguments of the image intrinsic. */
- const fs_reg image = get_nir_image_intrinsic_image(bld, instr);
- const fs_reg coords = retype(get_nir_src(instr->src[1]),
- BRW_REGISTER_TYPE_UD);
- fs_reg tmp;
+ fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
+ srcs[SURFACE_LOGICAL_SRC_SURFACE] =
+ get_nir_image_intrinsic_image(bld, instr);
+ srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
+ srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] =
+ brw_imm_ud(image_intrinsic_coord_components(instr));
/* Emit an image load, store or atomic op. */
if (instr->intrinsic == nir_intrinsic_image_load) {
- tmp = emit_typed_read(bld, image, coords, dims,
- instr->num_components);
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
+ fs_inst *inst =
+ bld.emit(SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL,
+ dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
+ inst->size_written = instr->num_components * dispatch_width * 4;
} else if (instr->intrinsic == nir_intrinsic_image_store) {
- const fs_reg src0 = get_nir_src(instr->src[3]);
- emit_typed_write(bld, image, coords, src0, dims,
- instr->num_components);
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
+ srcs[SURFACE_LOGICAL_SRC_DATA] = get_nir_src(instr->src[3]);
+ bld.emit(SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL,
+ fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
} else {
int op;
unsigned num_srcs = info->num_srcs;
unreachable("Not reachable.");
}
- const fs_reg src0 = (num_srcs >= 4 ?
- get_nir_src(instr->src[3]) : fs_reg());
- const fs_reg src1 = (num_srcs >= 5 ?
- get_nir_src(instr->src[4]) : fs_reg());
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
- tmp = emit_typed_atomic(bld, image, coords, src0, src1, dims, 1, op);
- }
+ fs_reg data;
+ if (num_srcs >= 4)
+ data = get_nir_src(instr->src[3]);
+ if (num_srcs >= 5) {
+ fs_reg tmp = bld.vgrf(data.type, 2);
+ fs_reg sources[2] = { data, get_nir_src(instr->src[4]) };
+ bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
+ data = tmp;
+ }
+ srcs[SURFACE_LOGICAL_SRC_DATA] = data;
- /* Assign the result. */
- for (unsigned c = 0; c < dest_components; ++c) {
- bld.MOV(offset(retype(dest, tmp.type), bld, c),
- offset(tmp, bld, c));
+ bld.emit(SHADER_OPCODE_TYPED_ATOMIC_LOGICAL,
+ dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
}
break;
}
BRW_REGISTER_TYPE_UD);
image = bld.emit_uniformize(image);
+ fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
+ srcs[TEX_LOGICAL_SRC_SURFACE] = image;
+ srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_d(0);
+ srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(0);
+ srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(0);
+
/* Since the image size is always uniform, we can just emit a SIMD8
* query instruction and splat the result out.
*/
const fs_builder ubld = bld.exec_all().group(8, 0);
- /* The LOD also serves as the message payload */
- fs_reg lod = ubld.vgrf(BRW_REGISTER_TYPE_UD);
- ubld.MOV(lod, brw_imm_ud(0));
-
fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 4);
- fs_inst *inst = ubld.emit(SHADER_OPCODE_IMAGE_SIZE, tmp, lod, image);
- inst->mlen = 1;
+ fs_inst *inst = ubld.emit(SHADER_OPCODE_IMAGE_SIZE_LOGICAL,
+ tmp, srcs, ARRAY_SIZE(srcs));
inst->size_written = 4 * REG_SIZE;
for (unsigned c = 0; c < instr->dest.ssa.num_components; ++c) {
}
case nir_intrinsic_image_load_raw_intel: {
- const fs_reg image = get_nir_image_intrinsic_image(bld, instr);
- const fs_reg addr = retype(get_nir_src(instr->src[1]),
- BRW_REGISTER_TYPE_UD);
+ fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
+ srcs[SURFACE_LOGICAL_SRC_SURFACE] =
+ get_nir_image_intrinsic_image(bld, instr);
+ srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
+ srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
- fs_reg tmp = emit_untyped_read(bld, image, addr, 1,
- instr->num_components);
-
- for (unsigned c = 0; c < instr->num_components; ++c) {
- bld.MOV(offset(retype(dest, tmp.type), bld, c),
- offset(tmp, bld, c));
- }
+ fs_inst *inst =
+ bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
+ dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
+ inst->size_written = instr->num_components * dispatch_width * 4;
break;
}
case nir_intrinsic_image_store_raw_intel: {
- const fs_reg image = get_nir_image_intrinsic_image(bld, instr);
- const fs_reg addr = retype(get_nir_src(instr->src[1]),
- BRW_REGISTER_TYPE_UD);
- const fs_reg data = retype(get_nir_src(instr->src[2]),
- BRW_REGISTER_TYPE_UD);
+ if (stage == MESA_SHADER_FRAGMENT)
+ brw_wm_prog_data(prog_data)->has_side_effects = true;
- brw_wm_prog_data(prog_data)->has_side_effects = true;
+ fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
+ srcs[SURFACE_LOGICAL_SRC_SURFACE] =
+ get_nir_image_intrinsic_image(bld, instr);
+ srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
+ srcs[SURFACE_LOGICAL_SRC_DATA] = get_nir_src(instr->src[2]);
+ srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
- emit_untyped_write(bld, image, addr, data, 1,
- instr->num_components);
+ bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,
+ fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
break;
}
break;
}
+ case nir_intrinsic_load_global: {
+ assert(devinfo->gen >= 8);
+
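+ /* A64 ("stateless") messages take a full 64-bit address and are only
+ * available on gen8+.
+ */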
+ if (nir_intrinsic_align(instr) >= 4) {
+ assert(nir_dest_bit_size(instr->dest) == 32);
+ fs_inst *inst = bld.emit(SHADER_OPCODE_A64_UNTYPED_READ_LOGICAL,
+ dest,
+ get_nir_src(instr->src[0]), /* Address */
+ fs_reg(), /* No source data */
+ brw_imm_ud(instr->num_components));
+ inst->size_written = instr->num_components *
+ inst->dst.component_size(inst->exec_size);
+ } else {
+ const unsigned bit_size = nir_dest_bit_size(instr->dest);
+ assert(bit_size <= 32);
+ assert(nir_dest_num_components(instr->dest) == 1);
+ brw_reg_type data_type =
+ brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
+ fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
+ bld.emit(SHADER_OPCODE_A64_BYTE_SCATTERED_READ_LOGICAL,
+ tmp,
+ get_nir_src(instr->src[0]), /* Address */
+ fs_reg(), /* No source data */
+ brw_imm_ud(bit_size));
+ bld.MOV(retype(dest, data_type), tmp);
+ }
+ break;
+ }
+
+ case nir_intrinsic_store_global:
+ assert(devinfo->gen >= 8);
+
+ if (stage == MESA_SHADER_FRAGMENT)
+ brw_wm_prog_data(prog_data)->has_side_effects = true;
+
+ if (nir_intrinsic_align(instr) >= 4) {
+ assert(nir_src_bit_size(instr->src[0]) == 32);
+ bld.emit(SHADER_OPCODE_A64_UNTYPED_WRITE_LOGICAL,
+ fs_reg(),
+ get_nir_src(instr->src[1]), /* Address */
+ get_nir_src(instr->src[0]), /* Data */
+ brw_imm_ud(instr->num_components));
+ } else {
+ const unsigned bit_size = nir_src_bit_size(instr->src[0]);
+ assert(bit_size <= 32);
+ assert(nir_src_num_components(instr->src[0]) == 1);
+ brw_reg_type data_type =
+ brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
+ fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
+ bld.MOV(tmp, retype(get_nir_src(instr->src[0]), data_type));
+ bld.emit(SHADER_OPCODE_A64_BYTE_SCATTERED_WRITE_LOGICAL,
+ fs_reg(),
+ get_nir_src(instr->src[1]), /* Address */
+ tmp, /* Data */
+ brw_imm_ud(bit_size));
+ }
+ break;
+
+ case nir_intrinsic_global_atomic_add:
+ nir_emit_global_atomic(bld, get_op_for_atomic_add(instr, 1), instr);
+ break;
+ case nir_intrinsic_global_atomic_imin:
+ nir_emit_global_atomic(bld, BRW_AOP_IMIN, instr);
+ break;
+ case nir_intrinsic_global_atomic_umin:
+ nir_emit_global_atomic(bld, BRW_AOP_UMIN, instr);
+ break;
+ case nir_intrinsic_global_atomic_imax:
+ nir_emit_global_atomic(bld, BRW_AOP_IMAX, instr);
+ break;
+ case nir_intrinsic_global_atomic_umax:
+ nir_emit_global_atomic(bld, BRW_AOP_UMAX, instr);
+ break;
+ case nir_intrinsic_global_atomic_and:
+ nir_emit_global_atomic(bld, BRW_AOP_AND, instr);
+ break;
+ case nir_intrinsic_global_atomic_or:
+ nir_emit_global_atomic(bld, BRW_AOP_OR, instr);
+ break;
+ case nir_intrinsic_global_atomic_xor:
+ nir_emit_global_atomic(bld, BRW_AOP_XOR, instr);
+ break;
+ case nir_intrinsic_global_atomic_exchange:
+ nir_emit_global_atomic(bld, BRW_AOP_MOV, instr);
+ break;
+ case nir_intrinsic_global_atomic_comp_swap:
+ nir_emit_global_atomic(bld, BRW_AOP_CMPWR, instr);
+ break;
+ case nir_intrinsic_global_atomic_fmin:
+ nir_emit_global_atomic_float(bld, BRW_AOP_FMIN, instr);
+ break;
+ case nir_intrinsic_global_atomic_fmax:
+ nir_emit_global_atomic_float(bld, BRW_AOP_FMAX, instr);
+ break;
+ case nir_intrinsic_global_atomic_fcomp_swap:
+ nir_emit_global_atomic_float(bld, BRW_AOP_FCMPWR, instr);
+ break;
+
case nir_intrinsic_load_ssbo: {
assert(devinfo->gen >= 7);
const unsigned bit_size = nir_dest_bit_size(instr->dest);
- fs_reg surf_index = get_nir_ssbo_intrinsic_index(bld, instr);
- fs_reg offset_reg = retype(get_nir_src(instr->src[1]),
- BRW_REGISTER_TYPE_UD);
+ fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
+ srcs[SURFACE_LOGICAL_SRC_SURFACE] =
+ get_nir_ssbo_intrinsic_index(bld, instr);
+ srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
+ srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
/* Make dest unsigned because that's what the temporary will be */
dest.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
/* Read the vector */
if (nir_intrinsic_align(instr) >= 4) {
assert(nir_dest_bit_size(instr->dest) == 32);
- fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
- 1 /* dims */,
- instr->num_components,
- BRW_PREDICATE_NONE);
- for (unsigned i = 0; i < instr->num_components; i++)
- bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
+ fs_inst *inst =
+ bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
+ dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
+ inst->size_written = instr->num_components * dispatch_width * 4;
} else {
assert(nir_dest_bit_size(instr->dest) <= 32);
assert(nir_dest_num_components(instr->dest) == 1);
- fs_reg read_result =
- emit_byte_scattered_read(bld, surf_index, offset_reg,
- 1 /* dims */, 1, bit_size,
- BRW_PREDICATE_NONE);
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
+
+ fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD);
+ bld.emit(SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL,
+ read_result, srcs, SURFACE_LOGICAL_NUM_SRCS);
bld.MOV(dest, read_result);
}
break;
brw_wm_prog_data(prog_data)->has_side_effects = true;
const unsigned bit_size = nir_src_bit_size(instr->src[0]);
- fs_reg val_reg = get_nir_src(instr->src[0]);
- fs_reg surf_index = get_nir_ssbo_intrinsic_index(bld, instr);
- fs_reg offset_reg = retype(get_nir_src(instr->src[2]),
- BRW_REGISTER_TYPE_UD);
+ fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
+ srcs[SURFACE_LOGICAL_SRC_SURFACE] =
+ get_nir_ssbo_intrinsic_index(bld, instr);
+ srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[2]);
+ srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
- val_reg.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
+ fs_reg data = get_nir_src(instr->src[0]);
+ data.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
assert(nir_intrinsic_write_mask(instr) ==
(1u << instr->num_components) - 1);
if (nir_intrinsic_align(instr) >= 4) {
assert(nir_src_bit_size(instr->src[0]) == 32);
assert(nir_src_num_components(instr->src[0]) <= 4);
- emit_untyped_write(bld, surf_index, offset_reg, val_reg,
- 1 /* dims */, instr->num_components,
- BRW_PREDICATE_NONE);
+ srcs[SURFACE_LOGICAL_SRC_DATA] = data;
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
+ bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,
+ fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
} else {
assert(nir_src_bit_size(instr->src[0]) <= 32);
assert(nir_src_num_components(instr->src[0]) == 1);
- fs_reg write_src = bld.vgrf(BRW_REGISTER_TYPE_UD);
- bld.MOV(write_src, val_reg);
- emit_byte_scattered_write(bld, surf_index, offset_reg,
- write_src, 1 /* dims */, bit_size,
- BRW_PREDICATE_NONE);
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
+
+ srcs[SURFACE_LOGICAL_SRC_DATA] = bld.vgrf(BRW_REGISTER_TYPE_UD);
+ bld.MOV(srcs[SURFACE_LOGICAL_SRC_DATA], data);
+
+ bld.emit(SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL,
+ fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
}
break;
}
break;
case nir_intrinsic_get_buffer_size: {
+ assert(nir_src_num_components(instr->src[0]) == 1);
unsigned ssbo_index = nir_src_is_const(instr->src[0]) ?
nir_src_as_uint(instr->src[0]) : 0;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
dest = get_nir_dest(instr->dest);
- fs_reg surface = get_nir_ssbo_intrinsic_index(bld, instr);
- fs_reg offset = get_nir_src(instr->src[1]);
- fs_reg data1;
+ fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
+ srcs[SURFACE_LOGICAL_SRC_SURFACE] = get_nir_ssbo_intrinsic_index(bld, instr);
+ srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
+ srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
+
+ fs_reg data;
if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
- data1 = get_nir_src(instr->src[2]);
- fs_reg data2;
- if (op == BRW_AOP_CMPWR)
- data2 = get_nir_src(instr->src[3]);
+ data = get_nir_src(instr->src[2]);
+
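+ /* CMPWR takes two operands (the compare value and the swap value);
+ * pack them into adjacent components of a single payload.
+ */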
+ if (op == BRW_AOP_CMPWR) {
+ fs_reg tmp = bld.vgrf(data.type, 2);
+ fs_reg sources[2] = { data, get_nir_src(instr->src[3]) };
+ bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
+ data = tmp;
+ }
+ srcs[SURFACE_LOGICAL_SRC_DATA] = data;
/* Emit the actual atomic operation */
- fs_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
- data1, data2,
- 1 /* dims */, 1 /* rsize */,
- op,
- BRW_PREDICATE_NONE);
- dest.type = atomic_result.type;
- bld.MOV(dest, atomic_result);
+ bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL,
+ dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
}
void
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
dest = get_nir_dest(instr->dest);
- fs_reg surface = get_nir_ssbo_intrinsic_index(bld, instr);
- fs_reg offset = get_nir_src(instr->src[1]);
- fs_reg data1 = get_nir_src(instr->src[2]);
- fs_reg data2;
- if (op == BRW_AOP_FCMPWR)
- data2 = get_nir_src(instr->src[3]);
+ fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
+ srcs[SURFACE_LOGICAL_SRC_SURFACE] = get_nir_ssbo_intrinsic_index(bld, instr);
+ srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
+ srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
+
+ fs_reg data = get_nir_src(instr->src[2]);
+ if (op == BRW_AOP_FCMPWR) {
+ fs_reg tmp = bld.vgrf(data.type, 2);
+ fs_reg sources[2] = { data, get_nir_src(instr->src[3]) };
+ bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
+ data = tmp;
+ }
+ srcs[SURFACE_LOGICAL_SRC_DATA] = data;
/* Emit the actual atomic operation */
- fs_reg atomic_result = emit_untyped_atomic_float(bld, surface, offset,
- data1, data2,
- 1 /* dims */, 1 /* rsize */,
- op,
- BRW_PREDICATE_NONE);
- dest.type = atomic_result.type;
- bld.MOV(dest, atomic_result);
+ bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL,
+ dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
}
void
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
dest = get_nir_dest(instr->dest);
- fs_reg surface = brw_imm_ud(GEN7_BTI_SLM);
- fs_reg offset;
- fs_reg data1;
+ fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
+ srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GEN7_BTI_SLM);
+ srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
+
+ fs_reg data;
if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
- data1 = get_nir_src(instr->src[1]);
- fs_reg data2;
- if (op == BRW_AOP_CMPWR)
- data2 = get_nir_src(instr->src[2]);
+ data = get_nir_src(instr->src[1]);
+ if (op == BRW_AOP_CMPWR) {
+ fs_reg tmp = bld.vgrf(data.type, 2);
+ fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
+ bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
+ data = tmp;
+ }
+ srcs[SURFACE_LOGICAL_SRC_DATA] = data;
/* Get the offset */
if (nir_src_is_const(instr->src[0])) {
- offset = brw_imm_ud(instr->const_index[0] +
- nir_src_as_uint(instr->src[0]));
+ srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
+ brw_imm_ud(instr->const_index[0] + nir_src_as_uint(instr->src[0]));
} else {
- offset = vgrf(glsl_type::uint_type);
- bld.ADD(offset,
+ srcs[SURFACE_LOGICAL_SRC_ADDRESS] = vgrf(glsl_type::uint_type);
+ bld.ADD(srcs[SURFACE_LOGICAL_SRC_ADDRESS],
retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
brw_imm_ud(instr->const_index[0]));
}
/* Emit the actual atomic operation */
- fs_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
- data1, data2,
- 1 /* dims */, 1 /* rsize */,
- op,
- BRW_PREDICATE_NONE);
- dest.type = atomic_result.type;
- bld.MOV(dest, atomic_result);
+ bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL,
+ dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
}
void
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
dest = get_nir_dest(instr->dest);
- fs_reg surface = brw_imm_ud(GEN7_BTI_SLM);
- fs_reg offset;
- fs_reg data1 = get_nir_src(instr->src[1]);
- fs_reg data2;
- if (op == BRW_AOP_FCMPWR)
- data2 = get_nir_src(instr->src[2]);
+ fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
+ srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GEN7_BTI_SLM);
+ srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
+ srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
+
+ fs_reg data = get_nir_src(instr->src[1]);
+ if (op == BRW_AOP_FCMPWR) {
+ fs_reg tmp = bld.vgrf(data.type, 2);
+ fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
+ bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
+ data = tmp;
+ }
+ srcs[SURFACE_LOGICAL_SRC_DATA] = data;
/* Get the offset */
if (nir_src_is_const(instr->src[0])) {
- offset = brw_imm_ud(instr->const_index[0] +
- nir_src_as_uint(instr->src[0]));
+ srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
+ brw_imm_ud(instr->const_index[0] + nir_src_as_uint(instr->src[0]));
} else {
- offset = vgrf(glsl_type::uint_type);
- bld.ADD(offset,
- retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
- brw_imm_ud(instr->const_index[0]));
+ srcs[SURFACE_LOGICAL_SRC_ADDRESS] = vgrf(glsl_type::uint_type);
+ bld.ADD(srcs[SURFACE_LOGICAL_SRC_ADDRESS],
+ retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
+ brw_imm_ud(instr->const_index[0]));
}
/* Emit the actual atomic operation */
- fs_reg atomic_result = emit_untyped_atomic_float(bld, surface, offset,
- data1, data2,
- 1 /* dims */, 1 /* rsize */,
- op,
- BRW_PREDICATE_NONE);
- dest.type = atomic_result.type;
- bld.MOV(dest, atomic_result);
+ bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL,
+ dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
+}
+
+void
+fs_visitor::nir_emit_global_atomic(const fs_builder &bld,
+ int op, nir_intrinsic_instr *instr)
+{
+ if (stage == MESA_SHADER_FRAGMENT)
+ brw_wm_prog_data(prog_data)->has_side_effects = true;
+
+ fs_reg dest;
+ if (nir_intrinsic_infos[instr->intrinsic].has_dest)
+ dest = get_nir_dest(instr->dest);
+
+ fs_reg addr = get_nir_src(instr->src[0]);
+
+ fs_reg data;
+ if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
+ data = get_nir_src(instr->src[1]);
+
+ if (op == BRW_AOP_CMPWR) {
+ fs_reg tmp = bld.vgrf(data.type, 2);
+ fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
+ bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
+ data = tmp;
+ }
+
+ bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL,
+ dest, addr, data, brw_imm_ud(op));
+}
+
+void
+fs_visitor::nir_emit_global_atomic_float(const fs_builder &bld,
+ int op, nir_intrinsic_instr *instr)
+{
+ if (stage == MESA_SHADER_FRAGMENT)
+ brw_wm_prog_data(prog_data)->has_side_effects = true;
+
+ assert(nir_intrinsic_infos[instr->intrinsic].has_dest);
+ fs_reg dest = get_nir_dest(instr->dest);
+
+ fs_reg addr = get_nir_src(instr->src[0]);
+
+ assert(op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC);
+ fs_reg data = get_nir_src(instr->src[1]);
+
+ if (op == BRW_AOP_FCMPWR) {
+ fs_reg tmp = bld.vgrf(data.type, 2);
+ fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
+ bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
+ data = tmp;
+ }
+
+ bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT_LOGICAL,
+ dest, addr, data, brw_imm_ud(op));
}
void
break;
case nir_tex_src_offset: {
- nir_const_value *const_offset =
- nir_src_as_const_value(instr->src[i].src);
- assert(nir_src_bit_size(instr->src[i].src) == 32);
- unsigned offset_bits = 0;
- if (const_offset &&
- brw_texture_offset(const_offset->i32,
- nir_tex_instr_src_size(instr, i),
- &offset_bits)) {
+ uint32_t offset_bits = 0;
+ if (brw_texture_offset(instr, i, &offset_bits)) {
header_bits |= offset_bits;
} else {
srcs[TEX_LOGICAL_SRC_TG4_OFFSET] =
srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(instr->coord_components);
srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(lod_components);
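+ /* Implicit-LOD sampling needs screen-space derivatives, which fragment
+ * shaders always have and compute shaders only have when a derivative
+ * group is declared.
+ */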
+ bool shader_supports_implicit_lod = stage == MESA_SHADER_FRAGMENT ||
+ (stage == MESA_SHADER_COMPUTE &&
+ nir->info.cs.derivative_group != DERIVATIVE_GROUP_NONE);
+
enum opcode opcode;
switch (instr->op) {
case nir_texop_tex:
- opcode = (stage == MESA_SHADER_FRAGMENT ? SHADER_OPCODE_TEX_LOGICAL :
- SHADER_OPCODE_TXL_LOGICAL);
+ opcode = shader_supports_implicit_lod ?
+ SHADER_OPCODE_TEX_LOGICAL : SHADER_OPCODE_TXL_LOGICAL;
break;
case nir_texop_txb:
opcode = FS_OPCODE_TXB_LOGICAL;