#include "brw_vec4_gs_visitor.h"
#include "brw_cfg.h"
#include "brw_dead_control_flow.h"
-#include "common/gen_debug.h"
+#include "dev/gen_debug.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
#include "program/prog_parameter.h"
}
extern "C" int
-type_size_scalar(const struct glsl_type *type)
+type_size_scalar(const struct glsl_type *type, bool bindless)
{
unsigned int size, i;
case GLSL_TYPE_INT64:
return type->components() * 2;
case GLSL_TYPE_ARRAY:
- return type_size_scalar(type->fields.array) * type->length;
+ return type_size_scalar(type->fields.array, bindless) * type->length;
case GLSL_TYPE_STRUCT:
case GLSL_TYPE_INTERFACE:
size = 0;
for (i = 0; i < type->length; i++) {
- size += type_size_scalar(type->fields.structure[i].type);
+ size += type_size_scalar(type->fields.structure[i].type, bindless);
}
return size;
case GLSL_TYPE_SAMPLER:
- case GLSL_TYPE_ATOMIC_UINT:
case GLSL_TYPE_IMAGE:
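+ /* Bindless sampler/image handles are 64 bits, i.e. two 32-bit scalar
+ * slots per component.
+ */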
+ if (bindless)
+ return type->components() * 2;
+ case GLSL_TYPE_ATOMIC_UINT:
/* Samplers, atomics, and images take up no register space, since
* they're baked in at link time.
*/
return i == 1 ? src[2].ud : 1;
case SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL:
+ case SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT64_LOGICAL:
assert(src[2].file == IMM);
if (i == 1) {
/* Data source */
fs_visitor::vgrf(const glsl_type *const type)
{
int reg_width = dispatch_width / 8;
- return fs_reg(VGRF, alloc.allocate(type_size_scalar(type) * reg_width),
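+ /* Temporaries allocated here never hold bindless handles, so bindless
+ * is assumed to be false.
+ */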
+ return fs_reg(VGRF,
+ alloc.allocate(type_size_scalar(type, false) * reg_width),
brw_type_for_base_type(type));
}
} else {
bld.emit(FS_OPCODE_LINTERP, wpos,
this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL],
- component(interp_reg(VARYING_SLOT_POS, 2), 0));
+ interp_reg(VARYING_SLOT_POS, 2));
}
wpos = offset(wpos, bld, 1);
}
void
-fs_visitor::assign_tcs_single_patch_urb_setup()
+fs_visitor::assign_tcs_urb_setup()
{
assert(stage == MESA_SHADER_TESS_CTRL);
break;
}
- /* a * 0.0 = 0.0 */
- if (inst->src[1].is_zero()) {
- inst->opcode = BRW_OPCODE_MOV;
- inst->src[0] = inst->src[1];
- inst->src[1] = reg_undef;
- progress = true;
- break;
- }
-
if (inst->src[0].file == IMM) {
assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
inst->opcode = BRW_OPCODE_MOV;
if (inst->src[1].file != IMM)
continue;
- /* a + 0.0 = a */
- if (inst->src[1].is_zero()) {
- inst->opcode = BRW_OPCODE_MOV;
- inst->src[1] = reg_undef;
- progress = true;
- break;
- }
-
if (inst->src[0].file == IMM) {
assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
inst->opcode = BRW_OPCODE_MOV;
break;
}
break;
- case BRW_OPCODE_LRP:
- if (inst->src[1].equals(inst->src[2])) {
- inst->opcode = BRW_OPCODE_MOV;
- inst->src[0] = inst->src[1];
- inst->src[1] = reg_undef;
- inst->src[2] = reg_undef;
- progress = true;
- break;
- }
- break;
case BRW_OPCODE_CMP:
if ((inst->conditional_mod == BRW_CONDITIONAL_Z ||
inst->conditional_mod == BRW_CONDITIONAL_NZ) &&
}
break;
case BRW_OPCODE_MAD:
- if (inst->src[1].is_zero() || inst->src[2].is_zero()) {
- inst->opcode = BRW_OPCODE_MOV;
- inst->src[1] = reg_undef;
- inst->src[2] = reg_undef;
- progress = true;
- } else if (inst->src[0].is_zero()) {
- inst->opcode = BRW_OPCODE_MUL;
- inst->src[0] = inst->src[2];
- inst->src[2] = reg_undef;
- progress = true;
- } else if (inst->src[1].is_one()) {
+ if (inst->src[0].type != BRW_REGISTER_TYPE_F ||
+ inst->src[1].type != BRW_REGISTER_TYPE_F ||
+ inst->src[2].type != BRW_REGISTER_TYPE_F)
+ break;
+ if (inst->src[1].is_one()) {
inst->opcode = BRW_OPCODE_ADD;
inst->src[1] = inst->src[2];
inst->src[2] = reg_undef;
inst->opcode = BRW_OPCODE_ADD;
inst->src[2] = reg_undef;
progress = true;
- } else if (inst->src[1].file == IMM && inst->src[2].file == IMM) {
- inst->opcode = BRW_OPCODE_ADD;
- inst->src[1].f *= inst->src[2].f;
- inst->src[2] = reg_undef;
- progress = true;
}
break;
case SHADER_OPCODE_BROADCAST:
return progress;
}
+bool
+fs_visitor::lower_linterp()
+{
+ bool progress = false;
+
+ if (devinfo->gen < 11)
+ return false;
+
+ foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
+ const fs_builder ibld(this, block, inst);
+
+ if (inst->opcode != FS_OPCODE_LINTERP)
+ continue;
+
+ fs_reg dwP = component(inst->src[1], 0);
+ fs_reg dwQ = component(inst->src[1], 1);
+ fs_reg dwR = component(inst->src[1], 3);
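+ /* The setup data holds plane-equation coefficients: the two chained MADs
+ * below compute dwP * delta_x + dwQ * delta_y + dwR per SIMD8 half (MAD
+ * adds src0 to src1 * src2), open-coding the PLN-style interpolation that
+ * Gen11 no longer provides as a single instruction.
+ */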
+ for (unsigned i = 0; i < DIV_ROUND_UP(dispatch_width, 8); i++) {
+ const fs_builder hbld(ibld.half(i));
+ fs_reg dst = half(inst->dst, i);
+ fs_reg delta_xy = offset(inst->src[0], ibld, i);
+ hbld.MAD(dst, dwR, half(delta_xy, 0), dwP);
+ fs_inst *mad = hbld.MAD(dst, dst, half(delta_xy, 1), dwQ);
+
+ /* Propagate conditional mod and saturate from the original
+ * instruction to the second MAD instruction.
+ */
+ set_saturate(inst->saturate, mad);
+ set_condmod(inst->conditional_mod, mad);
+ }
+
+ inst->remove(block);
+ progress = true;
+ }
+
+ if (progress)
+ invalidate_live_intervals();
+
+ return progress;
+}
+
bool
fs_visitor::lower_integer_multiplication()
{
mul->src[1].type = BRW_REGISTER_TYPE_UW;
mul->src[1].stride *= 2;
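+ /* The retype-and-stride trick above only works for register sources;
+ * immediates have to be re-emitted as genuine 16-bit UW immediates.
+ */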
+ if (mul->src[1].file == IMM) {
+ mul->src[1] = brw_imm_uw(mul->src[1].ud);
+ }
} else if (devinfo->gen == 7 && !devinfo->is_haswell &&
inst->group > 0) {
/* Among other things the quarter control bits influence which
const fs_reg &mcs,
const fs_reg &surface,
const fs_reg &sampler,
+ const fs_reg &surface_handle,
+ const fs_reg &sampler_handle,
const fs_reg &tg4_offset,
unsigned coord_components,
unsigned grad_components)
for (unsigned i = 0; i < ARRAY_SIZE(sources); i++)
sources[i] = bld.vgrf(BRW_REGISTER_TYPE_F);
+ /* We must have exactly one of surface and surface_handle, and exactly one
+ * of sampler and sampler_handle.
+ */
+ assert((surface.file == BAD_FILE) != (surface_handle.file == BAD_FILE));
+ assert((sampler.file == BAD_FILE) != (sampler_handle.file == BAD_FILE));
+
if (op == SHADER_OPCODE_TG4 || op == SHADER_OPCODE_TG4_OFFSET ||
inst->offset != 0 || inst->eot ||
op == SHADER_OPCODE_SAMPLEINFO ||
+ sampler_handle.file != BAD_FILE ||
is_high_sampler(devinfo, sampler)) {
/* For general texture offsets (no txf workaround), we need a header to
* put them in.
ubld1.MOV(component(header, 2), brw_imm_ud(0));
}
- if (is_high_sampler(devinfo, sampler)) {
+ if (sampler_handle.file != BAD_FILE) {
+ /* Bindless sampler handles aren't relative to the sampler state
+ * pointer passed into the shader through SAMPLER_STATE_POINTERS_*.
+ * Instead, each handle is a pointer relative to dynamic state base
+ * address.
+ *
+ * Sampler states are 16 bytes each and the pointer we give here has
+ * to be 32-byte aligned. In order to avoid more indirect messages
+ * than required, we assume that all bindless sampler states are
+ * 32-byte aligned. This sacrifices a bit of general state base
+ * address space but means we can do something more efficient in the
+ * shader.
+ */
+ ubld1.MOV(component(header, 3), sampler_handle);
+ } else if (is_high_sampler(devinfo, sampler)) {
if (sampler.file == BRW_IMMEDIATE_VALUE) {
assert(sampler.ud >= 16);
const int sampler_state_size = 16; /* 16 bytes */
}
inst->sfid = BRW_SFID_SAMPLER;
- if (surface.file == IMM && sampler.file == IMM) {
+ if (surface.file == IMM &&
+ (sampler.file == IMM || sampler_handle.file != BAD_FILE)) {
inst->desc = brw_sampler_desc(devinfo,
surface.ud + base_binding_table_index,
- sampler.ud % 16,
+ sampler.file == IMM ? sampler.ud % 16 : 0,
msg_type,
simd_mode,
0 /* return_format unused on gen7+ */);
inst->src[0] = brw_imm_ud(0);
+ inst->src[1] = brw_imm_ud(0); /* ex_desc */
+ } else if (surface_handle.file != BAD_FILE) {
+ /* Bindless surface */
+ assert(devinfo->gen >= 9);
+ inst->desc = brw_sampler_desc(devinfo,
+ GEN9_BTI_BINDLESS,
+ sampler.file == IMM ? sampler.ud % 16 : 0,
+ msg_type,
+ simd_mode,
+ 0 /* return_format unused on gen7+ */);
+
+ /* For bindless samplers, the entire address is included in the message
+ * header, so the sampler portion of the message descriptor can be left 0.
+ */
+ if (sampler_handle.file != BAD_FILE || sampler.file == IMM) {
+ inst->src[0] = brw_imm_ud(0);
+ } else {
+ const fs_builder ubld = bld.group(1, 0).exec_all();
+ fs_reg desc = ubld.vgrf(BRW_REGISTER_TYPE_UD);
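+ /* Shift the dynamic sampler index into bits 11:8 of the sampler message
+ * descriptor.
+ */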
+ ubld.SHL(desc, sampler, brw_imm_ud(8));
+ inst->src[0] = desc;
+ }
+
+ /* We assume that the driver provided the handle in the top 20 bits so
+ * we can use the surface handle directly as the extended descriptor.
+ */
+ inst->src[1] = retype(surface_handle, BRW_REGISTER_TYPE_UD);
} else {
/* Immediate portion of the descriptor */
inst->desc = brw_sampler_desc(devinfo,
/* This case is common in GL */
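+ /* surface * 0x101 = surface | (surface << 8), filling both the binding
+ * table index (bits 7:0) and sampler index (bits 11:8) fields in one
+ * instruction.
+ */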
ubld.MUL(desc, surface, brw_imm_ud(0x101));
} else {
- if (sampler.file == IMM) {
+ if (sampler_handle.file != BAD_FILE) {
+ ubld.MOV(desc, surface);
+ } else if (sampler.file == IMM) {
ubld.OR(desc, surface, brw_imm_ud(sampler.ud << 8));
} else {
ubld.SHL(desc, sampler, brw_imm_ud(8));
ubld.AND(desc, desc, brw_imm_ud(0xfff));
inst->src[0] = component(desc, 0);
+ inst->src[1] = brw_imm_ud(0); /* ex_desc */
}
- inst->src[1] = brw_imm_ud(0); /* ex_desc */
inst->src[2] = src_payload;
inst->resize_sources(3);
const fs_reg &mcs = inst->src[TEX_LOGICAL_SRC_MCS];
const fs_reg &surface = inst->src[TEX_LOGICAL_SRC_SURFACE];
const fs_reg &sampler = inst->src[TEX_LOGICAL_SRC_SAMPLER];
+ const fs_reg &surface_handle = inst->src[TEX_LOGICAL_SRC_SURFACE_HANDLE];
+ const fs_reg &sampler_handle = inst->src[TEX_LOGICAL_SRC_SAMPLER_HANDLE];
const fs_reg &tg4_offset = inst->src[TEX_LOGICAL_SRC_TG4_OFFSET];
assert(inst->src[TEX_LOGICAL_SRC_COORD_COMPONENTS].file == IMM);
const unsigned coord_components = inst->src[TEX_LOGICAL_SRC_COORD_COMPONENTS].ud;
lower_sampler_logical_send_gen7(bld, inst, op, coordinate,
shadow_c, lod, lod2, min_lod,
sample_index,
- mcs, surface, sampler, tg4_offset,
+ mcs, surface, sampler,
+ surface_handle, sampler_handle,
+ tg4_offset,
coord_components, grad_components);
} else if (devinfo->gen >= 5) {
lower_sampler_logical_send_gen5(bld, inst, op, coordinate,
const fs_reg &addr = inst->src[SURFACE_LOGICAL_SRC_ADDRESS];
const fs_reg &src = inst->src[SURFACE_LOGICAL_SRC_DATA];
const fs_reg &surface = inst->src[SURFACE_LOGICAL_SRC_SURFACE];
+ const fs_reg &surface_handle = inst->src[SURFACE_LOGICAL_SRC_SURFACE_HANDLE];
const UNUSED fs_reg &dims = inst->src[SURFACE_LOGICAL_SRC_IMM_DIMS];
const fs_reg &arg = inst->src[SURFACE_LOGICAL_SRC_IMM_ARG];
assert(arg.file == IMM);
+ /* We must have exactly one of surface and surface_handle */
+ assert((surface.file == BAD_FILE) != (surface_handle.file == BAD_FILE));
+
/* Calculate the total number of components of the payload. */
const unsigned addr_sz = inst->components_read(SURFACE_LOGICAL_SRC_ADDRESS);
const unsigned src_sz = inst->components_read(SURFACE_LOGICAL_SRC_DATA);
if (surface.file == IMM) {
inst->desc |= surface.ud & 0xff;
inst->src[0] = brw_imm_ud(0);
+ inst->src[1] = brw_imm_ud(0); /* ex_desc */
+ } else if (surface_handle.file != BAD_FILE) {
+ /* Bindless surface */
+ assert(devinfo->gen >= 9);
+ inst->desc |= GEN9_BTI_BINDLESS;
+ inst->src[0] = brw_imm_ud(0);
+
+ /* We assume that the driver provided the handle in the top 20 bits so
+ * we can use the surface handle directly as the extended descriptor.
+ */
+ inst->src[1] = retype(surface_handle, BRW_REGISTER_TYPE_UD);
} else {
const fs_builder ubld = bld.exec_all().group(1, 0);
fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD);
ubld.AND(tmp, surface, brw_imm_ud(0xff));
inst->src[0] = component(tmp, 0);
+ inst->src[1] = brw_imm_ud(0); /* ex_desc */
}
- inst->src[1] = brw_imm_ud(0); /* ex_desc */
/* Finally, the payload */
inst->src[2] = payload;
if (devinfo->gen >= 9) {
/* On Skylake and above, we have SENDS */
mlen = 2 * (inst->exec_size / 8);
- ex_mlen = src_comps * (inst->exec_size / 8);
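+ /* ex_mlen is measured in GRFs (REG_SIZE bytes), so account for the data
+ * type size: e.g. one 64-bit component at SIMD8 needs two GRFs, not one.
+ */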
+ ex_mlen = src_comps * type_sz(src.type) * inst->exec_size / REG_SIZE;
payload = retype(bld.move_to_vgrf(addr, 1), BRW_REGISTER_TYPE_UD);
payload2 = retype(bld.move_to_vgrf(src, src_comps),
BRW_REGISTER_TYPE_UD);
!inst->dst.is_null());
break;
+ case SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT64_LOGICAL:
+ desc = brw_dp_a64_untyped_atomic_desc(devinfo, inst->exec_size, 64,
+ arg, /* atomic_op */
+ !inst->dst.is_null());
+ break;
+
case SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT_LOGICAL:
desc = brw_dp_a64_untyped_atomic_float_desc(devinfo, inst->exec_size,
arg, /* atomic_op */
case SHADER_OPCODE_A64_BYTE_SCATTERED_WRITE_LOGICAL:
case SHADER_OPCODE_A64_BYTE_SCATTERED_READ_LOGICAL:
case SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL:
+ case SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT64_LOGICAL:
case SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT_LOGICAL:
lower_a64_logical_send(ibld, inst);
break;
return progress;
}
+static bool
+is_mixed_float_with_fp32_dst(const fs_inst *inst)
+{
+ /* This opcode sometimes uses :W type on the source even if the operand is
+ * a :HF, because in gen7 there is no support for :HF, and thus it uses :W.
+ */
+ if (inst->opcode == BRW_OPCODE_F16TO32)
+ return true;
+
+ if (inst->dst.type != BRW_REGISTER_TYPE_F)
+ return false;
+
+ for (int i = 0; i < inst->sources; i++) {
+ if (inst->src[i].type == BRW_REGISTER_TYPE_HF)
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+is_mixed_float_with_packed_fp16_dst(const fs_inst *inst)
+{
+ /* This opcode sometimes uses :W type on the destination even if the
+ * destination is a :HF, because in gen7 there is no support for :HF, and
+ * thus it uses :W.
+ */
+ if (inst->opcode == BRW_OPCODE_F32TO16 &&
+ inst->dst.stride == 1)
+ return true;
+
+ if (inst->dst.type != BRW_REGISTER_TYPE_HF ||
+ inst->dst.stride != 1)
+ return false;
+
+ for (int i = 0; i < inst->sources; i++) {
+ if (inst->src[i].type == BRW_REGISTER_TYPE_F)
+ return true;
+ }
+
+ return false;
+}
+
/**
* Get the closest allowed SIMD width for instruction \p inst accounting for
* some common regioning and execution control restrictions that apply to FPU
max_width = MIN2(max_width, 4);
}
+ /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
+ * Float Operations:
+ *
+ * "No SIMD16 in mixed mode when destination is f32. Instruction
+ * execution size must be no more than 8."
+ *
+ * FIXME: the simulator doesn't seem to complain if we don't do this, and
+ * empirical testing with existing CTS tests shows that they pass just fine
+ * without implementing it. However, since our interpretation of the PRM is
+ * that conversion MOVs between HF and F are still mixed-float instructions
+ * (and therefore subject to this restriction), we split them to be safe.
+ * It might be worth additional investigation to lift the restriction once
+ * we can ensure that it is safe, since these conversions are common when
+ * half-float types are involved: many instructions do not support HF types,
+ * so conversions from/to F are required.
+ */
+ if (is_mixed_float_with_fp32_dst(inst))
+ max_width = MIN2(max_width, 8);
+
+ /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
+ * Float Operations:
+ *
+ * "No SIMD16 in mixed mode when destination is packed f16 for both
+ * Align1 and Align16."
+ */
+ if (is_mixed_float_with_packed_fp16_dst(inst))
+ max_width = MIN2(max_width, 8);
+
/* Only power-of-two execution sizes are representable in the instruction
* control fields.
*/
case SHADER_OPCODE_EXP2:
case SHADER_OPCODE_LOG2:
case SHADER_OPCODE_SIN:
- case SHADER_OPCODE_COS:
+ case SHADER_OPCODE_COS: {
/* Unary extended math instructions are limited to SIMD8 on Gen4 and
- * Gen6.
+ * Gen6. Extended Math Function is limited to SIMD8 with half-float.
*/
- return (devinfo->gen >= 7 ? MIN2(16, inst->exec_size) :
- devinfo->gen == 5 || devinfo->is_g4x ? MIN2(16, inst->exec_size) :
- MIN2(8, inst->exec_size));
+ if (devinfo->gen == 6 || (devinfo->gen == 4 && !devinfo->is_g4x))
+ return MIN2(8, inst->exec_size);
+ if (inst->dst.type == BRW_REGISTER_TYPE_HF)
+ return MIN2(8, inst->exec_size);
+ return MIN2(16, inst->exec_size);
+ }
- case SHADER_OPCODE_POW:
- /* SIMD16 is only allowed on Gen7+. */
- return (devinfo->gen >= 7 ? MIN2(16, inst->exec_size) :
- MIN2(8, inst->exec_size));
+ case SHADER_OPCODE_POW: {
+ /* SIMD16 is only allowed on Gen7+. Extended Math Function is limited
+ * to SIMD8 with half-float.
+ */
+ if (devinfo->gen < 7)
+ return MIN2(8, inst->exec_size);
+ if (inst->dst.type == BRW_REGISTER_TYPE_HF)
+ return MIN2(8, inst->exec_size);
+ return MIN2(16, inst->exec_size);
+ }
case SHADER_OPCODE_INT_QUOTIENT:
case SHADER_OPCODE_INT_REMAINDER:
return devinfo->gen <= 8 ? 8 : MIN2(16, inst->exec_size);
case SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL:
+ case SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT64_LOGICAL:
case SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT_LOGICAL:
return 8;
int iteration = 0;
int pass_num = 0;
+ /* Before anything else, eliminate dead code. The results of some NIR
+ * instructions may effectively be calculated twice: once when the
+ * instruction is encountered, and again when the user of that result is
+ * encountered. Wipe those away before algebraic optimizations and
+ * especially copy propagation can mix things up.
+ */
+ OPT(dead_code_eliminate);
+
OPT(remove_extra_rounding_modes);
do {
OPT(compact_virtual_grfs);
} while (progress);
+ if (OPT(lower_linterp)) {
+ OPT(opt_copy_propagation);
+ OPT(dead_code_eliminate);
+ }
+
/* Do this after cmod propagation has had every possible opportunity to
* propagate results into SEL instructions.
*/
void
fs_visitor::allocate_registers(unsigned min_dispatch_width, bool allow_spilling)
{
- bool allocated_without_spills;
+ bool allocated;
static const enum instruction_scheduler_mode pre_modes[] = {
SCHEDULE_PRE,
if (0) {
assign_regs_trivial();
- allocated_without_spills = true;
- } else {
- allocated_without_spills = assign_regs(false, spill_all);
+ allocated = true;
+ break;
}
- if (allocated_without_spills)
+
+ /* We only allow spilling for the last schedule mode, and only if
+ * allow_spilling is set and we are at the minimum dispatch width.
+ */
+ bool can_spill = allow_spilling &&
+ (i == ARRAY_SIZE(pre_modes) - 1) &&
+ dispatch_width == min_dispatch_width;
+
+ /* We should only spill registers on the last scheduling. */
+ assert(!spilled_any_registers);
+
+ allocated = assign_regs(can_spill, spill_all);
+ if (allocated)
break;
}
- if (!allocated_without_spills) {
+ if (!allocated) {
if (!allow_spilling)
fail("Failure to register allocate and spilling is not allowed.");
if (dispatch_width > min_dispatch_width) {
fail("Failure to register allocate. Reduce number of "
"live scalar values to avoid this.");
- } else {
- compiler->shader_perf_log(log_data,
- "%s shader triggered register spilling. "
- "Try reducing the number of live scalar "
- "values to improve performance.\n",
- stage_name);
}
- /* Since we're out of heuristics, just go spill registers until we
- * get an allocation.
- */
- while (!assign_regs(true, spill_all)) {
- if (failed)
- break;
- }
+ /* If we failed to allocate, we must have a reason */
+ assert(failed);
+ } else if (spilled_any_registers) {
+ compiler->shader_perf_log(log_data,
+ "%s shader triggered register spilling. "
+ "Try reducing the number of live scalar "
+ "values to improve performance.\n",
+ stage_name);
}
/* This must come after all optimization and register allocation, since
return !failed;
}
-bool
-fs_visitor::run_tcs_single_patch()
+void
+fs_visitor::set_tcs_invocation_id()
{
- assert(stage == MESA_SHADER_TESS_CTRL);
-
struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
+ struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
- /* r1-r4 contain the ICP handles. */
- payload.num_regs = 5;
+ const unsigned instance_id_mask =
+ devinfo->gen >= 11 ? INTEL_MASK(22, 16) : INTEL_MASK(23, 17);
+ const unsigned instance_id_shift =
+ devinfo->gen >= 11 ? 16 : 17;
- if (shader_time_index >= 0)
- emit_shader_time_begin();
+ /* Get instance number from g0.2 bits 22:16 (gen11+) or 23:17 (older gens) */
+ fs_reg t = bld.vgrf(BRW_REGISTER_TYPE_UD);
+ bld.AND(t, fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD)),
+ brw_imm_ud(instance_id_mask));
+
+ invocation_id = bld.vgrf(BRW_REGISTER_TYPE_UD);
+
+ if (vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_8_PATCH) {
+ /* gl_InvocationID is just the thread number */
+ bld.SHR(invocation_id, t, brw_imm_ud(instance_id_shift));
+ return;
+ }
+
+ assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_SINGLE_PATCH);
- /* Initialize gl_InvocationID */
fs_reg channels_uw = bld.vgrf(BRW_REGISTER_TYPE_UW);
fs_reg channels_ud = bld.vgrf(BRW_REGISTER_TYPE_UD);
bld.MOV(channels_uw, fs_reg(brw_imm_uv(0x76543210)));
if (tcs_prog_data->instances == 1) {
invocation_id = channels_ud;
} else {
- const unsigned invocation_id_mask = devinfo->gen >= 11 ?
- INTEL_MASK(22, 16) : INTEL_MASK(23, 17);
- const unsigned invocation_id_shift = devinfo->gen >= 11 ? 16 : 17;
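+ /* Shifting right by (instance_id_shift - 3) extracts the instance number
+ * and multiplies it by 8 in a single step.
+ */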
+ fs_reg instance_times_8 = bld.vgrf(BRW_REGISTER_TYPE_UD);
+ bld.SHR(instance_times_8, t, brw_imm_ud(instance_id_shift - 3));
+ bld.ADD(invocation_id, instance_times_8, channels_ud);
+ }
+}
- invocation_id = bld.vgrf(BRW_REGISTER_TYPE_UD);
+bool
+fs_visitor::run_tcs()
+{
+ assert(stage == MESA_SHADER_TESS_CTRL);
- /* Get instance number from g0.2 bits 23:17, and multiply it by 8. */
- fs_reg t = bld.vgrf(BRW_REGISTER_TYPE_UD);
- fs_reg instance_times_8 = bld.vgrf(BRW_REGISTER_TYPE_UD);
- bld.AND(t, fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD)),
- brw_imm_ud(invocation_id_mask));
- bld.SHR(instance_times_8, t, brw_imm_ud(invocation_id_shift - 3));
+ struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
+ struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
+ struct brw_tcs_prog_key *tcs_key = (struct brw_tcs_prog_key *) key;
- bld.ADD(invocation_id, instance_times_8, channels_ud);
+ assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_SINGLE_PATCH ||
+ vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_8_PATCH);
+
+ if (vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_SINGLE_PATCH) {
+ /* r1-r4 contain the ICP handles. */
+ payload.num_regs = 5;
+ } else {
+ assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_8_PATCH);
+ assert(tcs_key->input_vertices > 0);
+ /* r1 contains output handles, r2 may contain primitive ID, then the
+ * ICP handles occupy the next 1-32 registers.
+ */
+ payload.num_regs = 2 + tcs_prog_data->include_primitive_id +
+ tcs_key->input_vertices;
}
+ if (shader_time_index >= 0)
+ emit_shader_time_begin();
+
+ /* Initialize gl_InvocationID */
+ set_tcs_invocation_id();
+
+ const bool fix_dispatch_mask =
+ vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_SINGLE_PATCH &&
+ (nir->info.tess.tcs_vertices_out % 8) != 0;
+
/* Fix the dispatch mask */
- if (nir->info.tess.tcs_vertices_out % 8) {
+ if (fix_dispatch_mask) {
bld.CMP(bld.null_reg_ud(), invocation_id,
brw_imm_ud(nir->info.tess.tcs_vertices_out), BRW_CONDITIONAL_L);
bld.IF(BRW_PREDICATE_NORMAL);
emit_nir_code();
- if (nir->info.tess.tcs_vertices_out % 8) {
+ if (fix_dispatch_mask) {
bld.emit(BRW_OPCODE_ENDIF);
}
/* Emit EOT write; set TR DS Cache bit */
fs_reg srcs[3] = {
- fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
+ fs_reg(get_tcs_output_urb_handle()),
fs_reg(brw_imm_ud(WRITEMASK_X << 16)),
fs_reg(brw_imm_ud(0)),
};
optimize();
assign_curb_setup();
- assign_tcs_single_patch_urb_setup();
+ assign_tcs_urb_setup();
fixup_3src_null_dest();
allocate_registers(8, true);
{
nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, true);
- brw_nir_lower_cs_intrinsics(shader, dispatch_width);
+
+ NIR_PASS_V(shader, brw_nir_lower_cs_intrinsics, dispatch_width);
+
+ /* Clean up after the local index and ID calculations. */
+ NIR_PASS_V(shader, nir_opt_constant_folding);
+ NIR_PASS_V(shader, nir_opt_dce);
+
return brw_postprocess_nir(shader, compiler, true);
}