#include "program/prog_to_nir.h"
#include "brw_fs.h"
#include "brw_fs_surface_builder.h"
+#include "brw_vec4_gs_visitor.h"
#include "brw_nir.h"
#include "brw_fs_surface_builder.h"
+#include "brw_vec4_gs_visitor.h"
using namespace brw;
using namespace brw::surface_access;
void
fs_visitor::nir_setup_inputs()
{
+ if (stage != MESA_SHADER_FRAGMENT)
+ return;
+
nir_inputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_inputs);
nir_foreach_variable(var, &nir->inputs) {
- enum brw_reg_type type = brw_type_for_base_type(var->type);
fs_reg input = offset(nir_inputs, bld, var->data.driver_location);
fs_reg reg;
- switch (stage) {
- case MESA_SHADER_VERTEX: {
- /* Our ATTR file is indexed by VERT_ATTRIB_*, which is the value
- * stored in nir_variable::location.
- *
- * However, NIR's load_input intrinsics use a different index - an
- * offset into a single contiguous array containing all inputs.
- * This index corresponds to the nir_variable::driver_location field.
- *
- * So, we need to copy from fs_reg(ATTR, var->location) to
- * offset(nir_inputs, var->data.driver_location).
- */
- const glsl_type *const t = var->type->without_array();
- const unsigned components = t->components();
- const unsigned cols = t->matrix_columns;
- const unsigned elts = t->vector_elements;
- unsigned array_length = var->type->is_array() ? var->type->length : 1;
- for (unsigned i = 0; i < array_length; i++) {
- for (unsigned j = 0; j < cols; j++) {
- for (unsigned k = 0; k < elts; k++) {
- bld.MOV(offset(retype(input, type), bld,
- components * i + elts * j + k),
- offset(fs_reg(ATTR, var->data.location + i, type),
- bld, 4 * j + k));
- }
- }
- }
- break;
- }
- case MESA_SHADER_GEOMETRY:
- case MESA_SHADER_COMPUTE:
- case MESA_SHADER_TESS_CTRL:
- case MESA_SHADER_TESS_EVAL:
- unreachable("fs_visitor not used for these stages yet.");
- break;
- case MESA_SHADER_FRAGMENT:
- if (var->data.location == VARYING_SLOT_POS) {
- reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
- var->data.origin_upper_left);
- emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
- input, reg), 0xF);
- } else {
- emit_general_interpolation(input, var->name, var->type,
- (glsl_interp_qualifier) var->data.interpolation,
- var->data.location, var->data.centroid,
- var->data.sample);
- }
- break;
+ if (var->data.location == VARYING_SLOT_POS) {
+ reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
+ var->data.origin_upper_left);
+ emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
+ input, reg), 0xF);
+ } else if (var->data.location == VARYING_SLOT_LAYER) {
+ struct brw_reg reg = suboffset(interp_reg(VARYING_SLOT_LAYER, 1), 3);
+ reg.type = BRW_REGISTER_TYPE_D;
+ bld.emit(FS_OPCODE_CINTERP, retype(input, BRW_REGISTER_TYPE_D), reg);
+ } else if (var->data.location == VARYING_SLOT_VIEWPORT) {
+ struct brw_reg reg = suboffset(interp_reg(VARYING_SLOT_VIEWPORT, 2), 3);
+ reg.type = BRW_REGISTER_TYPE_D;
+ bld.emit(FS_OPCODE_CINTERP, retype(input, BRW_REGISTER_TYPE_D), reg);
+ } else {
+ emit_general_interpolation(input, var->name, var->type,
+ (glsl_interp_qualifier) var->data.interpolation,
+ var->data.location, var->data.centroid,
+ var->data.sample);
}
}
}
nir_foreach_variable(var, &nir->outputs) {
fs_reg reg = offset(nir_outputs, bld, var->data.driver_location);
- int vector_elements =
- var->type->is_array() ? var->type->fields.array->vector_elements
- : var->type->vector_elements;
+ int vector_elements = var->type->without_array()->vector_elements;
switch (stage) {
case MESA_SHADER_VERTEX:
+ case MESA_SHADER_GEOMETRY:
for (unsigned int i = 0; i < ALIGN(type_size_scalar(var->type), 4) / 4; i++) {
int output = var->data.location + i;
this->outputs[output] = offset(reg, bld, 4 * i);
}
} else if (var->data.location == FRAG_RESULT_DEPTH) {
this->frag_depth = reg;
+ } else if (var->data.location == FRAG_RESULT_STENCIL) {
+ this->frag_stencil = reg;
} else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
this->sample_mask = reg;
} else {
*reg = *v->emit_vs_system_value(SYSTEM_VALUE_INSTANCE_ID);
break;
+ case nir_intrinsic_load_invocation_id:
+ assert(v->stage == MESA_SHADER_GEOMETRY);
+ reg = &v->nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
+ if (reg->file == BAD_FILE) {
+ const fs_builder abld = v->bld.annotate("gl_InvocationID", NULL);
+ fs_reg g1(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
+ fs_reg iid = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
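+         /* The invocation ID is delivered in the high five bits (31:27)
+          * of g1, so shifting right by 27 leaves just that value.
+          */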
+ abld.SHR(iid, g1, fs_reg(27u));
+ *reg = iid;
+ }
+ break;
+
case nir_intrinsic_load_sample_pos:
assert(v->stage == MESA_SHADER_FRAGMENT);
reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
* from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
* subtract the result from 31 to convert the MSB count into an LSB count.
*/
-
bld.CMP(bld.null_reg_d(), result, fs_reg(-1), BRW_CONDITIONAL_NZ);
- fs_reg neg_result(result);
- neg_result.negate = true;
- inst = bld.ADD(result, neg_result, fs_reg(31));
+
+ inst = bld.ADD(result, result, fs_reg(31));
inst->predicate = BRW_PREDICATE_NORMAL;
+ inst->src[0].negate = true;
break;
}
fs_reg image(UNIFORM, deref->var->data.driver_location,
BRW_REGISTER_TYPE_UD);
- if (deref->deref.child) {
- const nir_deref_array *deref_array =
- nir_deref_as_array(deref->deref.child);
- assert(deref->deref.child->deref_type == nir_deref_type_array &&
- deref_array->deref.child == NULL);
- const unsigned size = glsl_get_length(deref->var->type);
+ for (const nir_deref *tail = &deref->deref; tail->child;
+ tail = tail->child) {
+ const nir_deref_array *deref_array = nir_deref_as_array(tail->child);
+ assert(tail->child->deref_type == nir_deref_type_array);
+ const unsigned size = glsl_get_length(tail->type);
+ const unsigned element_size = type_size_scalar(deref_array->deref.type);
const unsigned base = MIN2(deref_array->base_offset, size - 1);
-
- image = offset(image, bld, base * BRW_IMAGE_PARAM_SIZE);
+ image = offset(image, bld, base * element_size);
if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
- fs_reg *tmp = new(mem_ctx) fs_reg(vgrf(glsl_type::int_type));
+ fs_reg tmp = vgrf(glsl_type::int_type);
if (devinfo->gen == 7 && !devinfo->is_haswell) {
/* IVB hangs when trying to access an invalid surface index with
* of the possible outcomes of the hang. Clamp the index to
* prevent access outside of the array bounds.
*/
- bld.emit_minmax(*tmp, retype(get_nir_src(deref_array->indirect),
- BRW_REGISTER_TYPE_UD),
+ bld.emit_minmax(tmp, retype(get_nir_src(deref_array->indirect),
+ BRW_REGISTER_TYPE_UD),
fs_reg(size - base - 1), BRW_CONDITIONAL_L);
} else {
- bld.MOV(*tmp, get_nir_src(deref_array->indirect));
+ bld.MOV(tmp, get_nir_src(deref_array->indirect));
}
- bld.MUL(*tmp, *tmp, fs_reg(BRW_IMAGE_PARAM_SIZE));
- image.reladdr = tmp;
+ bld.MUL(tmp, tmp, fs_reg(element_size));
+ if (image.reladdr)
+ bld.ADD(*image.reladdr, *image.reladdr, tmp);
+ else
+ image.reladdr = new(mem_ctx) fs_reg(tmp);
}
}
return inst;
}
+/**
+ * Computes 1 << x, given a D/UD register containing some value x.
+ */
+static fs_reg
+intexp2(const fs_builder &bld, const fs_reg &x)
+{
+ assert(x.type == BRW_REGISTER_TYPE_UD || x.type == BRW_REGISTER_TYPE_D);
+
+ fs_reg result = bld.vgrf(x.type, 1);
+ fs_reg one = bld.vgrf(x.type, 1);
+
+ bld.MOV(one, retype(fs_reg(1), one.type));
+ bld.SHL(result, one, x);
+ return result;
+}
+
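+/**
+ * Set the cut bit for the most recently emitted vertex, recording that
+ * EndPrimitive() was called after it. The accumulated bits are written
+ * out to the URB later by emit_gs_control_data_bits().
+ */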
+void
+fs_visitor::emit_gs_end_primitive(const nir_src &vertex_count_nir_src)
+{
+ assert(stage == MESA_SHADER_GEOMETRY);
+
+ struct brw_gs_prog_data *gs_prog_data =
+ (struct brw_gs_prog_data *) prog_data;
+
+ /* We can only do EndPrimitive() functionality when the control data
+ * consists of cut bits. Fortunately, the only time it isn't is when the
+ * output type is points, in which case EndPrimitive() is a no-op.
+ */
+ if (gs_prog_data->control_data_format !=
+ GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
+ return;
+ }
+
+ /* Cut bits use one bit per vertex. */
+ assert(gs_compile->control_data_bits_per_vertex == 1);
+
+ fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
+ vertex_count.type = BRW_REGISTER_TYPE_UD;
+
+ /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
+ * vertex n, 0 otherwise. So all we need to do here is mark bit
+ * (vertex_count - 1) % 32 in the cut_bits register to indicate that
+ * EndPrimitive() was called after emitting vertex (vertex_count - 1);
+ * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
+ *
+ * Note that if EndPrimitive() is called before emitting any vertices, this
+ * will cause us to set bit 31 of the control_data_bits register to 1.
+ * That's fine because:
+ *
+ * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
+ * output, so the hardware will ignore cut bit 31.
+ *
+ * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
+ * last vertex, so setting cut bit 31 has no effect (since the primitive
+ * is automatically ended when the GS terminates).
+ *
+ * - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
+ * control_data_bits register to 0 when the first vertex is emitted.
+ */
+
+ const fs_builder abld = bld.annotate("end primitive");
+
+ /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
+ fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
+ abld.ADD(prev_count, vertex_count, fs_reg(0xffffffffu));
+ fs_reg mask = intexp2(abld, prev_count);
+ /* Note: we're relying on the fact that the GEN SHL instruction only pays
+ * attention to the lower 5 bits of its second source argument, so on this
+ * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
+ * ((vertex_count - 1) % 32).
+ */
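+   /* For example, vertex_count == 5 sets mask == 1 << 4, cutting after the
+    * fifth vertex, while vertex_count == 33 wraps around to bit 0 again.
+    */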
+ abld.OR(this->control_data_bits, this->control_data_bits, mask);
+}
+
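+/**
+ * Flush this->control_data_bits into the DWord of the URB entry's control
+ * data header that corresponds to the vertices emitted so far.
+ */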
+void
+fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count)
+{
+ assert(stage == MESA_SHADER_GEOMETRY);
+ assert(gs_compile->control_data_bits_per_vertex != 0);
+
+ struct brw_gs_prog_data *gs_prog_data =
+ (struct brw_gs_prog_data *) prog_data;
+
+ const fs_builder abld = bld.annotate("emit control data bits");
+ const fs_builder fwa_bld = bld.exec_all();
+
+ /* We use a single UD register to accumulate control data bits (32 bits
+ * for each of the SIMD8 channels). So we need to write a DWord (32 bits)
+ * at a time.
+ *
+ * Unfortunately, the URB_WRITE_SIMD8 message uses 128-bit (OWord) offsets.
+    * We have to select a 128-bit group via the Global and Per-Slot Offsets, then
+ * use the Channel Mask phase to enable/disable which DWord within that
+ * group to write. (Remember, different SIMD8 channels may have emitted
+ * different numbers of vertices, so we may need per-slot offsets.)
+ *
+ * Channel masking presents an annoying problem: we may have to replicate
+ * the data up to 4 times:
+ *
+ * Msg = Handles, Per-Slot Offsets, Channel Masks, Data, Data, Data, Data.
+ *
+ * To avoid penalizing shaders that emit a small number of vertices, we
+ * can avoid these sometimes: if the size of the control data header is
+ * <= 128 bits, then there is only 1 OWord. All SIMD8 channels will land
+    * in the same 128-bit group, so we can skip per-slot offsets.
+ *
+ * Similarly, if the control data header is <= 32 bits, there is only one
+ * DWord, so we can skip channel masks.
+ */
+ enum opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;
+
+ fs_reg channel_mask, per_slot_offset;
+
+ if (gs_compile->control_data_header_size_bits > 32) {
+ opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
+ channel_mask = vgrf(glsl_type::uint_type);
+ }
+
+ if (gs_compile->control_data_header_size_bits > 128) {
+ opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT;
+ per_slot_offset = vgrf(glsl_type::uint_type);
+ }
+
+ /* Figure out which DWord we're trying to write to using the formula:
+ *
+ * dword_index = (vertex_count - 1) * bits_per_vertex / 32
+ *
+    * Since bits_per_vertex is a power of two, and is known at compile
+    * time, this can be optimized to:
+    *
+    *    dword_index = (vertex_count - 1) >> (5 - log2(bits_per_vertex))
+    *
+    * (_mesa_fls() below returns log2(bits_per_vertex) + 1, which is why
+    * the shift count is written as 6u minus that value.)
+ */
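+   /* For example, with bits_per_vertex == 2, the 17th vertex (vertex_count
+    * == 17) lands in dword_index == (17 - 1) * 2 / 32 == 1.
+    */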
+ if (opcode != SHADER_OPCODE_URB_WRITE_SIMD8) {
+ fs_reg dword_index = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
+ fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
+ abld.ADD(prev_count, vertex_count, fs_reg(0xffffffffu));
+ unsigned log2_bits_per_vertex =
+ _mesa_fls(gs_compile->control_data_bits_per_vertex);
+ abld.SHR(dword_index, prev_count, fs_reg(6u - log2_bits_per_vertex));
+
+ if (per_slot_offset.file != BAD_FILE) {
+ /* Set the per-slot offset to dword_index / 4, so that we'll write to
+ * the appropriate OWord within the control data header.
+ */
+ abld.SHR(per_slot_offset, dword_index, fs_reg(2u));
+ }
+
+ /* Set the channel masks to 1 << (dword_index % 4), so that we'll
+ * write to the appropriate DWORD within the OWORD.
+ */
+ fs_reg channel = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
+ fwa_bld.AND(channel, dword_index, fs_reg(3u));
+ channel_mask = intexp2(fwa_bld, channel);
+ /* Then the channel masks need to be in bits 23:16. */
+ fwa_bld.SHL(channel_mask, channel_mask, fs_reg(16u));
+ }
+
+ /* Store the control data bits in the message payload and send it. */
+ int mlen = 2;
+ if (channel_mask.file != BAD_FILE)
+ mlen += 4; /* channel masks, plus 3 extra copies of the data */
+ if (per_slot_offset.file != BAD_FILE)
+ mlen++;
+
+ fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
+ fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen);
+ int i = 0;
+ sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
+ if (per_slot_offset.file != BAD_FILE)
+ sources[i++] = per_slot_offset;
+ if (channel_mask.file != BAD_FILE)
+ sources[i++] = channel_mask;
+ while (i < mlen) {
+ sources[i++] = this->control_data_bits;
+ }
+
+ abld.LOAD_PAYLOAD(payload, sources, mlen, mlen);
+ fs_inst *inst = abld.emit(opcode, reg_undef, payload);
+ inst->mlen = mlen;
+   /* We need to increment Global Offset by 256 bits to make room for
+ * Broadwell's extra "Vertex Count" payload at the beginning of the
+ * URB entry. Since this is an OWord message, Global Offset is counted
+ * in 128-bit units, so we must set it to 2.
+ */
+ if (gs_prog_data->static_vertex_count == -1)
+ inst->offset = 2;
+}
+
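+/**
+ * Record which stream the most recently emitted vertex belongs to, by
+ * OR'ing its 2-bit stream ID into the right slot of
+ * this->control_data_bits.
+ */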
+void
+fs_visitor::set_gs_stream_control_data_bits(const fs_reg &vertex_count,
+ unsigned stream_id)
+{
+ /* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */
+
+   /* Note: we are called *before* vertex_count is incremented, so the
+    * vertex_count parameter already equals vertex_count - 1 in the
+    * formula above.
+    */
+
+ /* Stream mode uses 2 bits per vertex */
+ assert(gs_compile->control_data_bits_per_vertex == 2);
+
+ /* Must be a valid stream */
+   assert(stream_id < MAX_VERTEX_STREAMS);
+
+ /* Control data bits are initialized to 0 so we don't have to set any
+ * bits when sending vertices to stream 0.
+ */
+ if (stream_id == 0)
+ return;
+
+ const fs_builder abld = bld.annotate("set stream control data bits", NULL);
+
+ /* reg::sid = stream_id */
+ fs_reg sid = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
+ abld.MOV(sid, fs_reg(stream_id));
+
+ /* reg:shift_count = 2 * (vertex_count - 1) */
+ fs_reg shift_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
+ abld.SHL(shift_count, vertex_count, fs_reg(1u));
+
+ /* Note: we're relying on the fact that the GEN SHL instruction only pays
+ * attention to the lower 5 bits of its second source argument, so on this
+ * architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
+ * stream_id << ((2 * (vertex_count - 1)) % 32).
+ */
+ fs_reg mask = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
+ abld.SHL(mask, sid, shift_count);
+ abld.OR(this->control_data_bits, this->control_data_bits, mask);
+}
+
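+/**
+ * Emit a vertex to stream_id: flush a full 32-bit batch of control data
+ * bits if one has accumulated, write the vertex data to the URB, and in
+ * stream mode record the vertex's stream ID.
+ */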
+void
+fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src,
+ unsigned stream_id)
+{
+ assert(stage == MESA_SHADER_GEOMETRY);
+
+ struct brw_gs_prog_data *gs_prog_data =
+ (struct brw_gs_prog_data *) prog_data;
+
+ fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
+ vertex_count.type = BRW_REGISTER_TYPE_UD;
+
+ /* Haswell and later hardware ignores the "Render Stream Select" bits
+ * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
+ * and instead sends all primitives down the pipeline for rasterization.
+ * If the SOL stage is enabled, "Render Stream Select" is honored and
+ * primitives bound to non-zero streams are discarded after stream output.
+ *
+    * Since the only purpose of primitives sent to non-zero streams is to
+ * be recorded by transform feedback, we can simply discard all geometry
+ * bound to these streams when transform feedback is disabled.
+ */
+ if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
+ return;
+
+ /* If we're outputting 32 control data bits or less, then we can wait
+ * until the shader is over to output them all. Otherwise we need to
+ * output them as we go. Now is the time to do it, since we're about to
+ * output the vertex_count'th vertex, so it's guaranteed that the
+ * control data bits associated with the (vertex_count - 1)th vertex are
+ * correct.
+ */
+ if (gs_compile->control_data_header_size_bits > 32) {
+ const fs_builder abld =
+ bld.annotate("emit vertex: emit control data bits");
+
+ /* Only emit control data bits if we've finished accumulating a batch
+ * of 32 bits. This is the case when:
+ *
+ * (vertex_count * bits_per_vertex) % 32 == 0
+ *
+ * (in other words, when the last 5 bits of vertex_count *
+ * bits_per_vertex are 0). Assuming bits_per_vertex == 2^n for some
+ * integer n (which is always the case, since bits_per_vertex is
+ * always 1 or 2), this is equivalent to requiring that the last 5-n
+ * bits of vertex_count are 0:
+ *
+ * vertex_count & (2^(5-n) - 1) == 0
+ *
+ * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
+ * equivalent to:
+ *
+ * vertex_count & (32 / bits_per_vertex - 1) == 0
+ *
+ * TODO: If vertex_count is an immediate, we could do some of this math
+ * at compile time...
+ */
+ fs_inst *inst =
+ abld.AND(bld.null_reg_d(), vertex_count,
+ fs_reg(32u / gs_compile->control_data_bits_per_vertex - 1u));
+ inst->conditional_mod = BRW_CONDITIONAL_Z;
+
+ abld.IF(BRW_PREDICATE_NORMAL);
+ /* If vertex_count is 0, then no control data bits have been
+ * accumulated yet, so we can skip emitting them.
+ */
+ abld.CMP(bld.null_reg_d(), vertex_count, fs_reg(0u),
+ BRW_CONDITIONAL_NEQ);
+ abld.IF(BRW_PREDICATE_NORMAL);
+ emit_gs_control_data_bits(vertex_count);
+ abld.emit(BRW_OPCODE_ENDIF);
+
+ /* Reset control_data_bits to 0 so we can start accumulating a new
+ * batch.
+ *
+ * Note: in the case where vertex_count == 0, this neutralizes the
+ * effect of any call to EndPrimitive() that the shader may have
+ * made before outputting its first vertex.
+ */
+ inst = abld.MOV(this->control_data_bits, fs_reg(0u));
+ inst->force_writemask_all = true;
+ abld.emit(BRW_OPCODE_ENDIF);
+ }
+
+ emit_urb_writes(vertex_count);
+
+ /* In stream mode we have to set control data bits for all vertices
+ * unless we have disabled control data bits completely (which we do
+    * for GL_POINTS outputs that don't use streams).
+ */
+ if (gs_compile->control_data_header_size_bits > 0 &&
+ gs_prog_data->control_data_format ==
+ GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
+ set_gs_stream_control_data_bits(vertex_count, stream_id);
+ }
+}
+
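+/**
+ * Load num_components components of the GS input at input_offset for the
+ * given vertex, reading from the pushed ATTR file when the input was
+ * pushed and falling back to a URB read message otherwise.
+ */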
+void
+fs_visitor::emit_gs_input_load(const fs_reg &dst,
+ const nir_src &vertex_src,
+ unsigned input_offset,
+ unsigned num_components)
+{
+ const brw_vue_prog_data *vue_prog_data = (const brw_vue_prog_data *) prog_data;
+ const unsigned vertex = nir_src_as_const_value(vertex_src)->u[0];
+
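+   /* urb_read_length counts the pushed data in 256-bit (8-component)
+    * units, so this is the per-vertex input stride in scalar components.
+    */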
+ const unsigned array_stride = vue_prog_data->urb_read_length * 8;
+
+ const bool pushed = 4 * input_offset < array_stride;
+
+ if (input_offset == 0) {
+ /* This is the VUE header, containing VARYING_SLOT_LAYER [.y],
+ * VARYING_SLOT_VIEWPORT [.z], and VARYING_SLOT_PSIZ [.w].
+ * Only gl_PointSize is available as a GS input, so they must
+ * be asking for that input.
+ */
+ if (pushed) {
+ bld.MOV(dst, fs_reg(ATTR, array_stride * vertex + 3, dst.type));
+ } else {
+ fs_reg tmp = bld.vgrf(dst.type, 4);
+ fs_inst *inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp,
+ fs_reg(vertex), fs_reg(0));
+ inst->regs_written = 4;
+ bld.MOV(dst, offset(tmp, bld, 3));
+ }
+ } else {
+ if (pushed) {
+ int index = vertex * array_stride + 4 * input_offset;
+ for (unsigned i = 0; i < num_components; i++) {
+ bld.MOV(offset(dst, bld, i), fs_reg(ATTR, index + i, dst.type));
+ }
+ } else {
+ fs_inst *inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst,
+ fs_reg(vertex), fs_reg(input_offset));
+ inst->regs_written = num_components;
+ }
+ }
+}
+
void
fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
{
break;
}
+ case nir_intrinsic_shader_clock: {
+ /* We cannot do anything if there is an event, so ignore it for now */
+ fs_reg shader_clock = get_timestamp(bld);
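+      /* The timestamp is a 64-bit value; the two smears select its low
+       * and high 32-bit halves as the two components of the result.
+       */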
+ const fs_reg srcs[] = { shader_clock.set_smear(0), shader_clock.set_smear(1) };
+
+ bld.LOAD_PAYLOAD(dest, srcs, ARRAY_SIZE(srcs), 0);
+ break;
+ }
+
case nir_intrinsic_image_size: {
/* Get the referenced image variable and type. */
const nir_variable *var = instr->variables[0]->var;
case nir_intrinsic_load_vertex_id:
unreachable("should be lowered by lower_vertex_id()");
+ case nir_intrinsic_load_primitive_id:
+ assert(stage == MESA_SHADER_GEOMETRY);
+ assert(((struct brw_gs_prog_data *)prog_data)->include_primitive_id);
+ bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
+ retype(fs_reg(brw_vec8_grf(2, 0)), BRW_REGISTER_TYPE_UD));
+ break;
+
case nir_intrinsic_load_vertex_id_zero_base:
case nir_intrinsic_load_base_vertex:
case nir_intrinsic_load_instance_id:
+ case nir_intrinsic_load_invocation_id:
case nir_intrinsic_load_sample_mask_in:
case nir_intrinsic_load_sample_id: {
gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
fs_reg surf_index;
if (const_index) {
- surf_index = fs_reg(stage_prog_data->binding_table.ubo_start +
- const_index->u[0]);
+ const unsigned index = stage_prog_data->binding_table.ubo_start +
+ const_index->u[0];
+ surf_index = fs_reg(index);
+ brw_mark_surface_used(prog_data, index);
} else {
/* The block index is not a constant. Evaluate the index expression
* per-channel and add the base UBO index; we have to select a value
*/
brw_mark_surface_used(prog_data,
stage_prog_data->binding_table.ubo_start +
- nir->info.num_ssbos - 1);
+ nir->info.num_ubos - 1);
}
if (has_indirect) {
fs_reg surf_index;
if (const_uniform_block) {
- unsigned index = stage_prog_data->binding_table.ubo_start +
+ unsigned index = stage_prog_data->binding_table.ssbo_start +
const_uniform_block->u[0];
surf_index = fs_reg(index);
brw_mark_surface_used(prog_data, index);
} else {
surf_index = vgrf(glsl_type::uint_type);
bld.ADD(surf_index, get_nir_src(instr->src[0]),
- fs_reg(stage_prog_data->binding_table.ubo_start));
- surf_index = bld.emit_uniformize(surf_index);
+ fs_reg(stage_prog_data->binding_table.ssbo_start));
         /* Assume this may touch any SSBO. It would be nice to provide
* a tighter bound, but the array information is already lowered away.
*/
brw_mark_surface_used(prog_data,
- stage_prog_data->binding_table.ubo_start +
+ stage_prog_data->binding_table.ssbo_start +
nir->info.num_ssbos - 1);
}
/* Get the offset to read from */
- fs_reg offset_reg = vgrf(glsl_type::uint_type);
- unsigned const_offset_bytes = 0;
+ fs_reg offset_reg;
if (has_indirect) {
- bld.MOV(offset_reg, get_nir_src(instr->src[1]));
+ offset_reg = get_nir_src(instr->src[1]);
} else {
- const_offset_bytes = instr->const_index[0];
- bld.MOV(offset_reg, fs_reg(const_offset_bytes));
+ offset_reg = fs_reg(instr->const_index[0]);
}
/* Read the vector */
- for (int i = 0; i < instr->num_components; i++) {
- fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
- 1 /* dims */, 1 /* size */,
- BRW_PREDICATE_NONE);
- read_result.type = dest.type;
- bld.MOV(dest, read_result);
- dest = offset(dest, bld, 1);
-
- /* Vector components are stored contiguous in memory */
- if (i < instr->num_components) {
- if (!has_indirect) {
- const_offset_bytes += 4;
- bld.MOV(offset_reg, fs_reg(const_offset_bytes));
- } else {
- bld.ADD(offset_reg, offset_reg, brw_imm_ud(4));
- }
- }
- }
+ fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
+ 1 /* dims */,
+ instr->num_components,
+ BRW_PREDICATE_NONE);
+ read_result.type = dest.type;
+ for (int i = 0; i < instr->num_components; i++)
+ bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
break;
}
case nir_intrinsic_load_input: {
unsigned index = 0;
for (unsigned j = 0; j < instr->num_components; j++) {
- fs_reg src = offset(retype(nir_inputs, dest.type), bld,
- instr->const_index[0] + index);
+ fs_reg src;
+ if (stage == MESA_SHADER_VERTEX) {
+ src = offset(fs_reg(ATTR, instr->const_index[0], dest.type), bld, index);
+ } else {
+ src = offset(retype(nir_inputs, dest.type), bld,
+ instr->const_index[0] + index);
+ }
if (has_indirect)
src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
index++;
break;
}
+ case nir_intrinsic_load_per_vertex_input_indirect:
+ assert(!"Not allowed");
+ /* fallthrough */
+ case nir_intrinsic_load_per_vertex_input:
+ emit_gs_input_load(dest, instr->src[0], instr->const_index[0],
+ instr->num_components);
+ break;
+
/* Handle ARB_gpu_shader5 interpolation intrinsics
*
* It's worth a quick word of explanation as to why we handle the full
nir_const_value *const_uniform_block =
nir_src_as_const_value(instr->src[1]);
if (const_uniform_block) {
- unsigned index = stage_prog_data->binding_table.ubo_start +
+ unsigned index = stage_prog_data->binding_table.ssbo_start +
const_uniform_block->u[0];
surf_index = fs_reg(index);
brw_mark_surface_used(prog_data, index);
} else {
surf_index = vgrf(glsl_type::uint_type);
bld.ADD(surf_index, get_nir_src(instr->src[1]),
- fs_reg(stage_prog_data->binding_table.ubo_start));
- surf_index = bld.emit_uniformize(surf_index);
+ fs_reg(stage_prog_data->binding_table.ssbo_start));
brw_mark_surface_used(prog_data,
- stage_prog_data->binding_table.ubo_start +
+ stage_prog_data->binding_table.ssbo_start +
nir->info.num_ssbos - 1);
}
- /* Offset */
- fs_reg offset_reg = vgrf(glsl_type::uint_type);
- unsigned const_offset_bytes = 0;
- if (has_indirect) {
- bld.MOV(offset_reg, get_nir_src(instr->src[2]));
- } else {
- const_offset_bytes = instr->const_index[0];
- bld.MOV(offset_reg, fs_reg(const_offset_bytes));
- }
-
/* Value */
fs_reg val_reg = get_nir_src(instr->src[0]);
/* Writemask */
unsigned writemask = instr->const_index[1];
- /* Write each component present in the writemask */
- unsigned skipped_channels = 0;
- for (int i = 0; i < instr->num_components; i++) {
- int component_mask = 1 << i;
- if (writemask & component_mask) {
- if (skipped_channels) {
- if (!has_indirect) {
- const_offset_bytes += 4 * skipped_channels;
- bld.MOV(offset_reg, fs_reg(const_offset_bytes));
- } else {
- bld.ADD(offset_reg, offset_reg,
- brw_imm_ud(4 * skipped_channels));
- }
- skipped_channels = 0;
- }
+ /* Combine groups of consecutive enabled channels in one write
+ * message. We use ffs to find the first enabled channel and then ffs on
+ * the bit-inverse, down-shifted writemask to determine the length of
+ * the block of enabled bits.
+ */
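+      /* For example, writemask 0b1011 issues one two-component write for
+       * channels 0-1 followed by a one-component write for channel 3.
+       */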
+ while (writemask) {
+ unsigned first_component = ffs(writemask) - 1;
+ unsigned length = ffs(~(writemask >> first_component)) - 1;
-            emit_untyped_write(bld, surf_index, offset_reg,
-                               offset(val_reg, bld, i),
-                               1 /* dims */, 1 /* size */,
-                               BRW_PREDICATE_NONE);
+         fs_reg offset_reg;
+ if (!has_indirect) {
+ offset_reg = fs_reg(instr->const_index[0] + 4 * first_component);
+ } else {
+ offset_reg = vgrf(glsl_type::uint_type);
+ bld.ADD(offset_reg,
+ retype(get_nir_src(instr->src[2]), BRW_REGISTER_TYPE_UD),
+ fs_reg(4 * first_component));
}
- skipped_channels++;
+ emit_untyped_write(bld, surf_index, offset_reg,
+ offset(val_reg, bld, first_component),
+ 1 /* dims */, length,
+ BRW_PREDICATE_NONE);
+
+ /* Clear the bits in the writemask that we just wrote, then try
+ * again to see if more channels are left.
+ */
+ writemask &= (15 << (first_component + length));
}
break;
}
case nir_intrinsic_ssbo_atomic_add:
nir_emit_ssbo_atomic(bld, BRW_AOP_ADD, instr);
break;
- case nir_intrinsic_ssbo_atomic_min:
- if (dest.type == BRW_REGISTER_TYPE_D)
- nir_emit_ssbo_atomic(bld, BRW_AOP_IMIN, instr);
- else
- nir_emit_ssbo_atomic(bld, BRW_AOP_UMIN, instr);
+ case nir_intrinsic_ssbo_atomic_imin:
+ nir_emit_ssbo_atomic(bld, BRW_AOP_IMIN, instr);
break;
- case nir_intrinsic_ssbo_atomic_max:
- if (dest.type == BRW_REGISTER_TYPE_D)
- nir_emit_ssbo_atomic(bld, BRW_AOP_IMAX, instr);
- else
- nir_emit_ssbo_atomic(bld, BRW_AOP_UMAX, instr);
+ case nir_intrinsic_ssbo_atomic_umin:
+ nir_emit_ssbo_atomic(bld, BRW_AOP_UMIN, instr);
+ break;
+ case nir_intrinsic_ssbo_atomic_imax:
+ nir_emit_ssbo_atomic(bld, BRW_AOP_IMAX, instr);
+ break;
+ case nir_intrinsic_ssbo_atomic_umax:
+ nir_emit_ssbo_atomic(bld, BRW_AOP_UMAX, instr);
break;
case nir_intrinsic_ssbo_atomic_and:
nir_emit_ssbo_atomic(bld, BRW_AOP_AND, instr);
case nir_intrinsic_get_buffer_size: {
nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
- unsigned ubo_index = const_uniform_block ? const_uniform_block->u[0] : 0;
+ unsigned ssbo_index = const_uniform_block ? const_uniform_block->u[0] : 0;
int reg_width = dispatch_width / 8;
/* Set LOD = 0 */
BRW_REGISTER_TYPE_UD);
bld.LOAD_PAYLOAD(src_payload, &source, 1, 0);
- fs_reg surf_index = fs_reg(prog_data->binding_table.ubo_start + ubo_index);
+ const unsigned index = prog_data->binding_table.ssbo_start + ssbo_index;
fs_inst *inst = bld.emit(FS_OPCODE_GET_BUFFER_SIZE, dest,
- src_payload, surf_index);
+ src_payload, fs_reg(index));
inst->header_size = 0;
inst->mlen = mlen;
bld.emit(inst);
+
+ brw_mark_surface_used(prog_data, index);
break;
}
break;
}
+ case nir_intrinsic_emit_vertex_with_counter:
+ emit_gs_vertex(instr->src[0], instr->const_index[0]);
+ break;
+
+ case nir_intrinsic_end_primitive_with_counter:
+ emit_gs_end_primitive(instr->src[0]);
+ break;
+
+ case nir_intrinsic_set_vertex_count:
+ bld.MOV(this->final_gs_vertex_count, get_nir_src(instr->src[0]));
+ break;
+
default:
unreachable("unknown intrinsic");
}
fs_reg surface;
nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
if (const_surface) {
- unsigned surf_index = stage_prog_data->binding_table.ubo_start +
+ unsigned surf_index = stage_prog_data->binding_table.ssbo_start +
const_surface->u[0];
surface = fs_reg(surf_index);
brw_mark_surface_used(prog_data, surf_index);
} else {
surface = vgrf(glsl_type::uint_type);
bld.ADD(surface, get_nir_src(instr->src[0]),
-                fs_reg(stage_prog_data->binding_table.ubo_start));
-      /* Assume this may touch any UBO. This is the same we do for other
+                fs_reg(stage_prog_data->binding_table.ssbo_start));
+      /* Assume this may touch any SSBO. This is the same as we do for other
* UBO/SSBO accesses with non-constant surface.
*/
brw_mark_surface_used(prog_data,
- stage_prog_data->binding_table.ubo_start +
+ stage_prog_data->binding_table.ssbo_start +
nir->info.num_ssbos - 1);
}