case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
case SHADER_OPCODE_URB_READ_SIMD8:
case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
+ case SHADER_OPCODE_INTERLOCK:
+ case SHADER_OPCODE_MEMORY_FENCE:
+ case SHADER_OPCODE_BARRIER:
return true;
case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
return src[1].file == VGRF;
}
}
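+/**
+ * Returns true if source \p arg of this instruction is the message payload
+ * of a send-like operation.
+ */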
+bool
+fs_inst::is_payload(unsigned arg) const
+{
+ switch (opcode) {
+ case FS_OPCODE_FB_WRITE:
+ case FS_OPCODE_FB_READ:
+ case SHADER_OPCODE_URB_WRITE_SIMD8:
+ case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
+ case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
+ case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
+ case SHADER_OPCODE_URB_READ_SIMD8:
+ case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
+ case VEC4_OPCODE_UNTYPED_ATOMIC:
+ case VEC4_OPCODE_UNTYPED_SURFACE_READ:
+ case VEC4_OPCODE_UNTYPED_SURFACE_WRITE:
+ case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
+ case SHADER_OPCODE_SHADER_TIME_ADD:
+ case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
+ case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
+ case SHADER_OPCODE_INTERLOCK:
+ case SHADER_OPCODE_MEMORY_FENCE:
+ case SHADER_OPCODE_BARRIER:
+ return arg == 0;
+
+ case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
+ return arg == 1;
+
+ case SHADER_OPCODE_SEND:
+ return arg == 2 || arg == 3;
+
+ default:
+ if (is_tex())
+ return arg == 0;
+ else
+ return false;
+ }
+}
+
/**
* Returns true if this instruction's sources and destinations cannot
* safely be the same register.
if (is_send_from_grf())
return false;
+ /* From GEN:BUG:1604601757:
+ *
+ * "When multiplying a DW and any lower precision integer, source modifier
+ * is not supported."
+ */
+ if (devinfo->gen >= 12 && (opcode == BRW_OPCODE_MUL ||
+ opcode == BRW_OPCODE_MAD)) {
+ const brw_reg_type exec_type = get_exec_type(this);
+ const unsigned min_type_sz = opcode == BRW_OPCODE_MAD ?
+ MIN2(type_sz(src[1].type), type_sz(src[2].type)) :
+ MIN2(type_sz(src[0].type), type_sz(src[1].type));
+
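+      /* An execution type that is at least dword-sized and wider than the
+       * narrowest source means a DW (or wider) operand is being multiplied
+       * by a lower precision integer.
+       */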
+ if (brw_reg_type_is_integer(exec_type) &&
+ type_sz(exec_type) >= 4 &&
+ type_sz(exec_type) != min_type_sz)
+ return false;
+ }
+
if (!backend_instruction::can_do_source_mods())
return false;
/**
* Create a MOV to read the timestamp register.
- *
- * The caller is responsible for emitting the MOV. The return value is
- * the destination of the MOV, with extra parameters set.
*/
fs_reg
fs_visitor::get_timestamp(const fs_builder &bld)
}
case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
+ case SHADER_OPCODE_DWORD_SCATTERED_READ_LOGICAL:
/* Scattered logical opcodes use the following params:
* src[0] Surface coordinates
* src[1] Surface operation source (ignored for reads)
return i == SURFACE_LOGICAL_SRC_DATA ? 0 : 1;
case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
+ case SHADER_OPCODE_DWORD_SCATTERED_WRITE_LOGICAL:
assert(src[SURFACE_LOGICAL_SRC_IMM_DIMS].file == IMM &&
src[SURFACE_LOGICAL_SRC_IMM_ARG].file == IMM);
return 1;
}
namespace {
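+  /* Number of channels grouped together by an ANY/ALL predicate -- e.g.
+   * ALIGN1_ANY4H and ALIGN1_ALL4H evaluate the flag register in groups of
+   * four channels, so a predicated instruction may read flag bits outside
+   * of its own execution group.
+   */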
+ unsigned
+ predicate_width(brw_predicate predicate)
+ {
+ switch (predicate) {
+ case BRW_PREDICATE_NONE: return 1;
+ case BRW_PREDICATE_NORMAL: return 1;
+ case BRW_PREDICATE_ALIGN1_ANY2H: return 2;
+ case BRW_PREDICATE_ALIGN1_ALL2H: return 2;
+ case BRW_PREDICATE_ALIGN1_ANY4H: return 4;
+ case BRW_PREDICATE_ALIGN1_ALL4H: return 4;
+ case BRW_PREDICATE_ALIGN1_ANY8H: return 8;
+ case BRW_PREDICATE_ALIGN1_ALL8H: return 8;
+ case BRW_PREDICATE_ALIGN1_ANY16H: return 16;
+ case BRW_PREDICATE_ALIGN1_ALL16H: return 16;
+ case BRW_PREDICATE_ALIGN1_ANY32H: return 32;
+ case BRW_PREDICATE_ALIGN1_ALL32H: return 32;
+ default: unreachable("Unsupported predicate");
+ }
+ }
+
/* Return the subset of flag registers that an instruction could
* potentially read or write based on the execution controls and flag
* subregister number of the instruction.
*/
unsigned
- flag_mask(const fs_inst *inst)
+ flag_mask(const fs_inst *inst, unsigned width)
{
- const unsigned start = inst->flag_subreg * 16 + inst->group;
- const unsigned end = start + inst->exec_size;
+ assert(util_is_power_of_two_nonzero(width));
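+      /* Round the group down and the execution size up to a multiple of
+       * the predicate width, since group predicates read whole groups of
+       * flag bits.
+       */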
+ const unsigned start = (inst->flag_subreg * 16 + inst->group) &
+ ~(width - 1);
+ const unsigned end = start + ALIGN(inst->exec_size, width);
return ((1 << DIV_ROUND_UP(end, 8)) - 1) & ~((1 << (start / 8)) - 1);
}
* f0.0 and f1.0 on Gen7+, and f0.0 and f0.1 on older hardware.
*/
const unsigned shift = devinfo->gen >= 7 ? 4 : 2;
- return flag_mask(this) << shift | flag_mask(this);
+ return flag_mask(this, 1) << shift | flag_mask(this, 1);
} else if (predicate) {
- return flag_mask(this);
+ return flag_mask(this, predicate_width(predicate));
} else {
unsigned mask = 0;
for (int i = 0; i < sources; i++) {
opcode != BRW_OPCODE_WHILE)) ||
opcode == SHADER_OPCODE_FIND_LIVE_CHANNEL ||
opcode == FS_OPCODE_FB_WRITE) {
- return flag_mask(this);
+ return flag_mask(this, 1);
} else {
return flag_mask(dst, size_written);
}
* instruction -- the FS opcodes often generate MOVs in addition.
*/
int
-fs_visitor::implied_mrf_writes(fs_inst *inst) const
+fs_visitor::implied_mrf_writes(const fs_inst *inst) const
{
if (inst->mlen == 0)
return 0;
} else {
bld.emit(FS_OPCODE_LINTERP, wpos,
this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL],
- interp_reg(VARYING_SLOT_POS, 2));
+ component(interp_reg(VARYING_SLOT_POS, 2), 0));
}
wpos = offset(wpos, bld, 1);
{
fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::bool_type));
- if (devinfo->gen >= 6) {
+ if (devinfo->gen >= 12) {
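+      /* Bit 15 of g1.1 is 0 if the polygon is front facing. Replicate it
+       * across the register with an arithmetic shift and invert the result
+       * to get the ~0/0 boolean.
+       */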
+ fs_reg g1 = fs_reg(retype(brw_vec1_grf(1, 1), BRW_REGISTER_TYPE_W));
+
+ fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_W);
+ bld.ASR(tmp, g1, brw_imm_d(15));
+ bld.NOT(*reg, tmp);
+ } else if (devinfo->gen >= 6) {
/* Bit 15 of g0.0 is 0 if the polygon is front facing. We want to create
* a boolean result from this (~0/true or 0/false).
*
this->first_non_payload_grf = payload.num_regs + prog_data->curb_read_length;
}
-void
-fs_visitor::calculate_urb_setup()
+static void
+calculate_urb_setup(const struct gen_device_info *devinfo,
+ const struct brw_wm_prog_key *key,
+ struct brw_wm_prog_data *prog_data,
+ const nir_shader *nir)
{
- assert(stage == MESA_SHADER_FRAGMENT);
- struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
- brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
-
memset(prog_data->urb_setup, -1,
sizeof(prog_data->urb_setup[0]) * VARYING_SLOT_MAX);
}
void
-fs_visitor::assign_tcs_single_patch_urb_setup()
+fs_visitor::assign_tcs_urb_setup()
{
assert(stage == MESA_SHADER_TESS_CTRL);
* destination), we mark the used slots as inseparable. Then we go
* through and split the registers into the smallest pieces we can.
*/
- bool split_points[reg_count];
- memset(split_points, 0, sizeof(split_points));
+ bool *split_points = new bool[reg_count];
+ memset(split_points, 0, reg_count * sizeof(*split_points));
/* Mark all used registers as fully splittable */
foreach_block_and_inst(block, fs_inst, inst, cfg) {
}
foreach_block_and_inst(block, fs_inst, inst, cfg) {
+ /* We fix up undef instructions later */
+ if (inst->opcode == SHADER_OPCODE_UNDEF) {
+ /* UNDEF instructions are currently only used to undef entire
+ * registers. We need this invariant later when we split them.
+ */
+ assert(inst->dst.file == VGRF);
+ assert(inst->dst.offset == 0);
+ assert(inst->size_written == alloc.sizes[inst->dst.nr] * REG_SIZE);
+ continue;
+ }
+
if (inst->dst.file == VGRF) {
int reg = vgrf_to_reg[inst->dst.nr] + inst->dst.offset / REG_SIZE;
for (unsigned j = 1; j < regs_written(inst); j++)
}
}
- int new_virtual_grf[reg_count];
- int new_reg_offset[reg_count];
+ int *new_virtual_grf = new int[reg_count];
+ int *new_reg_offset = new int[reg_count];
int reg = 0;
for (int i = 0; i < num_vars; i++) {
}
assert(reg == reg_count);
- foreach_block_and_inst(block, fs_inst, inst, cfg) {
+ foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
+ if (inst->opcode == SHADER_OPCODE_UNDEF) {
+ const fs_builder ibld(this, block, inst);
+ assert(inst->size_written % REG_SIZE == 0);
+ unsigned reg_offset = 0;
+ while (reg_offset < inst->size_written / REG_SIZE) {
+ reg = vgrf_to_reg[inst->dst.nr] + reg_offset;
+ ibld.UNDEF(fs_reg(VGRF, new_virtual_grf[reg], inst->dst.type));
+ reg_offset += alloc.sizes[new_virtual_grf[reg]];
+ }
+ inst->remove(block);
+ continue;
+ }
+
if (inst->dst.file == VGRF) {
reg = vgrf_to_reg[inst->dst.nr] + inst->dst.offset / REG_SIZE;
inst->dst.nr = new_virtual_grf[reg];
}
}
invalidate_live_intervals();
+
+ delete[] split_points;
+ delete[] new_virtual_grf;
+ delete[] new_reg_offset;
}
/**
fs_visitor::compact_virtual_grfs()
{
bool progress = false;
- int remap_table[this->alloc.count];
- memset(remap_table, -1, sizeof(remap_table));
+ int *remap_table = new int[this->alloc.count];
+ memset(remap_table, -1, this->alloc.count * sizeof(int));
/* Mark which virtual GRFs are used. */
foreach_block_and_inst(block, const fs_inst, inst, cfg) {
}
}
+ delete[] remap_table;
+
return progress;
}
return;
}
- struct uniform_slot_info slots[uniforms];
- memset(slots, 0, sizeof(slots));
+ if (compiler->compact_params) {
+ struct uniform_slot_info slots[uniforms];
+ memset(slots, 0, sizeof(slots));
- foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
- for (int i = 0 ; i < inst->sources; i++) {
- if (inst->src[i].file != UNIFORM)
- continue;
+ foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
+ for (int i = 0 ; i < inst->sources; i++) {
+ if (inst->src[i].file != UNIFORM)
+ continue;
- /* NIR tightly packs things so the uniform number might not be
- * aligned (if we have a double right after a float, for instance).
- * This is fine because the process of re-arranging them will ensure
- * that things are properly aligned. The offset into that uniform,
- * however, must be aligned.
- *
- * In Vulkan, we have explicit offsets but everything is crammed
- * into a single "variable" so inst->src[i].nr will always be 0.
- * Everything will be properly aligned relative to that one base.
- */
- assert(inst->src[i].offset % type_sz(inst->src[i].type) == 0);
+ /* NIR tightly packs things so the uniform number might not be
+ * aligned (if we have a double right after a float, for
+ * instance). This is fine because the process of re-arranging
+ * them will ensure that things are properly aligned. The offset
+ * into that uniform, however, must be aligned.
+ *
+ * In Vulkan, we have explicit offsets but everything is crammed
+ * into a single "variable" so inst->src[i].nr will always be 0.
+ * Everything will be properly aligned relative to that one base.
+ */
+ assert(inst->src[i].offset % type_sz(inst->src[i].type) == 0);
- unsigned u = inst->src[i].nr +
- inst->src[i].offset / UNIFORM_SLOT_SIZE;
+ unsigned u = inst->src[i].nr +
+ inst->src[i].offset / UNIFORM_SLOT_SIZE;
- if (u >= uniforms)
- continue;
+ if (u >= uniforms)
+ continue;
- unsigned slots_read;
- if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT && i == 0) {
- slots_read = DIV_ROUND_UP(inst->src[2].ud, UNIFORM_SLOT_SIZE);
- } else {
- unsigned bytes_read = inst->components_read(i) *
- type_sz(inst->src[i].type);
- slots_read = DIV_ROUND_UP(bytes_read, UNIFORM_SLOT_SIZE);
- }
+ unsigned slots_read;
+ if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT && i == 0) {
+ slots_read = DIV_ROUND_UP(inst->src[2].ud, UNIFORM_SLOT_SIZE);
+ } else {
+ unsigned bytes_read = inst->components_read(i) *
+ type_sz(inst->src[i].type);
+ slots_read = DIV_ROUND_UP(bytes_read, UNIFORM_SLOT_SIZE);
+ }
- assert(u + slots_read <= uniforms);
- mark_uniform_slots_read(&slots[u], slots_read,
- type_sz(inst->src[i].type));
+ assert(u + slots_read <= uniforms);
+ mark_uniform_slots_read(&slots[u], slots_read,
+ type_sz(inst->src[i].type));
+ }
}
- }
- int subgroup_id_index = get_subgroup_id_param_index(stage_prog_data);
+ int subgroup_id_index = get_subgroup_id_param_index(stage_prog_data);
- /* Only allow 16 registers (128 uniform components) as push constants.
- *
- * Just demote the end of the list. We could probably do better
- * here, demoting things that are rarely used in the program first.
- *
- * If changing this value, note the limitation about total_regs in
- * brw_curbe.c.
- */
- unsigned int max_push_components = 16 * 8;
- if (subgroup_id_index >= 0)
- max_push_components--; /* Save a slot for the thread ID */
+ /* Only allow 16 registers (128 uniform components) as push constants.
+ *
+ * Just demote the end of the list. We could probably do better
+ * here, demoting things that are rarely used in the program first.
+ *
+ * If changing this value, note the limitation about total_regs in
+ * brw_curbe.c.
+ */
+ unsigned int max_push_components = 16 * 8;
+ if (subgroup_id_index >= 0)
+ max_push_components--; /* Save a slot for the thread ID */
- /* We push small arrays, but no bigger than 16 floats. This is big enough
- * for a vec4 but hopefully not large enough to push out other stuff. We
- * should probably use a better heuristic at some point.
- */
- const unsigned int max_chunk_size = 16;
+ /* We push small arrays, but no bigger than 16 floats. This is big
+ * enough for a vec4 but hopefully not large enough to push out other
+ * stuff. We should probably use a better heuristic at some point.
+ */
+ const unsigned int max_chunk_size = 16;
- unsigned int num_push_constants = 0;
- unsigned int num_pull_constants = 0;
+ unsigned int num_push_constants = 0;
+ unsigned int num_pull_constants = 0;
- push_constant_loc = ralloc_array(mem_ctx, int, uniforms);
- pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
+ push_constant_loc = ralloc_array(mem_ctx, int, uniforms);
+ pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
- /* Default to -1 meaning no location */
- memset(push_constant_loc, -1, uniforms * sizeof(*push_constant_loc));
- memset(pull_constant_loc, -1, uniforms * sizeof(*pull_constant_loc));
+ /* Default to -1 meaning no location */
+ memset(push_constant_loc, -1, uniforms * sizeof(*push_constant_loc));
+ memset(pull_constant_loc, -1, uniforms * sizeof(*pull_constant_loc));
- int chunk_start = -1;
- struct cplx_align align;
- for (unsigned u = 0; u < uniforms; u++) {
- if (!slots[u].is_live) {
- assert(chunk_start == -1);
- continue;
- }
+ int chunk_start = -1;
+ struct cplx_align align;
+ for (unsigned u = 0; u < uniforms; u++) {
+ if (!slots[u].is_live) {
+ assert(chunk_start == -1);
+ continue;
+ }
- /* Skip subgroup_id_index to put it in the last push register. */
- if (subgroup_id_index == (int)u)
- continue;
+ /* Skip subgroup_id_index to put it in the last push register. */
+ if (subgroup_id_index == (int)u)
+ continue;
- if (chunk_start == -1) {
- chunk_start = u;
- align = slots[u].align;
- } else {
- /* Offset into the chunk */
- unsigned chunk_offset = (u - chunk_start) * UNIFORM_SLOT_SIZE;
+ if (chunk_start == -1) {
+ chunk_start = u;
+ align = slots[u].align;
+ } else {
+ /* Offset into the chunk */
+ unsigned chunk_offset = (u - chunk_start) * UNIFORM_SLOT_SIZE;
- /* Shift the slot alignment down by the chunk offset so it is
- * comparable with the base chunk alignment.
- */
- struct cplx_align slot_align = slots[u].align;
- slot_align.offset =
- (slot_align.offset - chunk_offset) & (align.mul - 1);
+ /* Shift the slot alignment down by the chunk offset so it is
+ * comparable with the base chunk alignment.
+ */
+ struct cplx_align slot_align = slots[u].align;
+ slot_align.offset =
+ (slot_align.offset - chunk_offset) & (align.mul - 1);
- align = cplx_align_combine(align, slot_align);
- }
+ align = cplx_align_combine(align, slot_align);
+ }
- /* Sanity check the alignment */
- cplx_align_assert_sane(align);
+ /* Sanity check the alignment */
+ cplx_align_assert_sane(align);
- if (slots[u].contiguous)
- continue;
+ if (slots[u].contiguous)
+ continue;
- /* Adjust the alignment to be in terms of slots, not bytes */
- assert((align.mul & (UNIFORM_SLOT_SIZE - 1)) == 0);
- assert((align.offset & (UNIFORM_SLOT_SIZE - 1)) == 0);
- align.mul /= UNIFORM_SLOT_SIZE;
- align.offset /= UNIFORM_SLOT_SIZE;
-
- unsigned push_start_align = cplx_align_apply(align, num_push_constants);
- unsigned chunk_size = u - chunk_start + 1;
- if ((!compiler->supports_pull_constants && u < UBO_START) ||
- (chunk_size < max_chunk_size &&
- push_start_align + chunk_size <= max_push_components)) {
- /* Align up the number of push constants */
- num_push_constants = push_start_align;
- for (unsigned i = 0; i < chunk_size; i++)
- push_constant_loc[chunk_start + i] = num_push_constants++;
- } else {
- /* We need to pull this one */
- num_pull_constants = cplx_align_apply(align, num_pull_constants);
- for (unsigned i = 0; i < chunk_size; i++)
- pull_constant_loc[chunk_start + i] = num_pull_constants++;
+ /* Adjust the alignment to be in terms of slots, not bytes */
+ assert((align.mul & (UNIFORM_SLOT_SIZE - 1)) == 0);
+ assert((align.offset & (UNIFORM_SLOT_SIZE - 1)) == 0);
+ align.mul /= UNIFORM_SLOT_SIZE;
+ align.offset /= UNIFORM_SLOT_SIZE;
+
+ unsigned push_start_align = cplx_align_apply(align, num_push_constants);
+ unsigned chunk_size = u - chunk_start + 1;
+ if ((!compiler->supports_pull_constants && u < UBO_START) ||
+ (chunk_size < max_chunk_size &&
+ push_start_align + chunk_size <= max_push_components)) {
+ /* Align up the number of push constants */
+ num_push_constants = push_start_align;
+ for (unsigned i = 0; i < chunk_size; i++)
+ push_constant_loc[chunk_start + i] = num_push_constants++;
+ } else {
+ /* We need to pull this one */
+ num_pull_constants = cplx_align_apply(align, num_pull_constants);
+ for (unsigned i = 0; i < chunk_size; i++)
+ pull_constant_loc[chunk_start + i] = num_pull_constants++;
+ }
+
+ /* Reset the chunk and start again */
+ chunk_start = -1;
}
- /* Reset the chunk and start again */
- chunk_start = -1;
- }
+ /* Add the CS local thread ID uniform at the end of the push constants */
+ if (subgroup_id_index >= 0)
+ push_constant_loc[subgroup_id_index] = num_push_constants++;
- /* Add the CS local thread ID uniform at the end of the push constants */
- if (subgroup_id_index >= 0)
- push_constant_loc[subgroup_id_index] = num_push_constants++;
+ /* As the uniforms are going to be reordered, stash the old array and
+ * create two new arrays for push/pull params.
+ */
+ uint32_t *param = stage_prog_data->param;
+ stage_prog_data->nr_params = num_push_constants;
+ if (num_push_constants) {
+ stage_prog_data->param = rzalloc_array(mem_ctx, uint32_t,
+ num_push_constants);
+ } else {
+ stage_prog_data->param = NULL;
+ }
+ assert(stage_prog_data->nr_pull_params == 0);
+ assert(stage_prog_data->pull_param == NULL);
+ if (num_pull_constants > 0) {
+ stage_prog_data->nr_pull_params = num_pull_constants;
+ stage_prog_data->pull_param = rzalloc_array(mem_ctx, uint32_t,
+ num_pull_constants);
+ }
- /* As the uniforms are going to be reordered, stash the old array and
- * create two new arrays for push/pull params.
- */
- uint32_t *param = stage_prog_data->param;
- stage_prog_data->nr_params = num_push_constants;
- if (num_push_constants) {
- stage_prog_data->param = rzalloc_array(mem_ctx, uint32_t,
- num_push_constants);
+ /* Up until now, the param[] array has been indexed by reg + offset
+ * of UNIFORM registers. Move pull constants into pull_param[] and
+ * condense param[] to only contain the uniforms we chose to push.
+ *
+ * NOTE: Because we are condensing the params[] array, we know that
+ * push_constant_loc[i] <= i and we can do it in one smooth loop without
+ * having to make a copy.
+ */
+ for (unsigned int i = 0; i < uniforms; i++) {
+ uint32_t value = param[i];
+ if (pull_constant_loc[i] != -1) {
+ stage_prog_data->pull_param[pull_constant_loc[i]] = value;
+ } else if (push_constant_loc[i] != -1) {
+ stage_prog_data->param[push_constant_loc[i]] = value;
+ }
+ }
+ ralloc_free(param);
} else {
- stage_prog_data->param = NULL;
- }
- assert(stage_prog_data->nr_pull_params == 0);
- assert(stage_prog_data->pull_param == NULL);
- if (num_pull_constants > 0) {
- stage_prog_data->nr_pull_params = num_pull_constants;
- stage_prog_data->pull_param = rzalloc_array(mem_ctx, uint32_t,
- num_pull_constants);
+ /* If we don't want to compact anything, just set up dummy push/pull
+ * arrays. All the rest of the compiler cares about are these arrays.
+ */
+ push_constant_loc = ralloc_array(mem_ctx, int, uniforms);
+ pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
+
+ for (unsigned u = 0; u < uniforms; u++)
+ push_constant_loc[u] = u;
+
+ memset(pull_constant_loc, -1, uniforms * sizeof(*pull_constant_loc));
}
/* Now that we know how many regular uniforms we'll push, reduce the
push_length += range->length;
}
assert(push_length <= 64);
-
- /* Up until now, the param[] array has been indexed by reg + offset
- * of UNIFORM registers. Move pull constants into pull_param[] and
- * condense param[] to only contain the uniforms we chose to push.
- *
- * NOTE: Because we are condensing the params[] array, we know that
- * push_constant_loc[i] <= i and we can do it in one smooth loop without
- * having to make a copy.
- */
- for (unsigned int i = 0; i < uniforms; i++) {
- uint32_t value = param[i];
- if (pull_constant_loc[i] != -1) {
- stage_prog_data->pull_param[pull_constant_loc[i]] = value;
- } else if (push_constant_loc[i] != -1) {
- stage_prog_data->param[push_constant_loc[i]] = value;
- }
- }
- ralloc_free(param);
}
bool
*out_surf_index = prog_data->binding_table.ubo_start + range->block;
*out_pull_index = (32 * range->start + src.offset) / 4;
+
+ prog_data->has_ubo_pull = true;
return true;
}
/* A regular uniform push constant */
*out_surf_index = stage_prog_data->binding_table.pull_constants_start;
*out_pull_index = pull_constant_loc[location];
+
+ prog_data->has_ubo_pull = true;
return true;
}
return ((1 << n) - 1) << shift;
}
-bool
-fs_visitor::opt_peephole_csel()
-{
- if (devinfo->gen < 8)
- return false;
-
- bool progress = false;
-
- foreach_block_reverse(block, cfg) {
- int ip = block->end_ip + 1;
-
- foreach_inst_in_block_reverse_safe(fs_inst, inst, block) {
- ip--;
-
- if (inst->opcode != BRW_OPCODE_SEL ||
- inst->predicate != BRW_PREDICATE_NORMAL ||
- (inst->dst.type != BRW_REGISTER_TYPE_F &&
- inst->dst.type != BRW_REGISTER_TYPE_D &&
- inst->dst.type != BRW_REGISTER_TYPE_UD))
- continue;
-
- /* Because it is a 3-src instruction, CSEL cannot have an immediate
- * value as a source, but we can sometimes handle zero.
- */
- if ((inst->src[0].file != VGRF && inst->src[0].file != ATTR &&
- inst->src[0].file != UNIFORM) ||
- (inst->src[1].file != VGRF && inst->src[1].file != ATTR &&
- inst->src[1].file != UNIFORM && !inst->src[1].is_zero()))
- continue;
-
- foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
- if (!scan_inst->flags_written())
- continue;
-
- if ((scan_inst->opcode != BRW_OPCODE_CMP &&
- scan_inst->opcode != BRW_OPCODE_MOV) ||
- scan_inst->predicate != BRW_PREDICATE_NONE ||
- (scan_inst->src[0].file != VGRF &&
- scan_inst->src[0].file != ATTR &&
- scan_inst->src[0].file != UNIFORM) ||
- scan_inst->src[0].type != BRW_REGISTER_TYPE_F)
- break;
-
- if (scan_inst->opcode == BRW_OPCODE_CMP && !scan_inst->src[1].is_zero())
- break;
-
- const brw::fs_builder ibld(this, block, inst);
-
- const enum brw_conditional_mod cond =
- inst->predicate_inverse
- ? brw_negate_cmod(scan_inst->conditional_mod)
- : scan_inst->conditional_mod;
-
- fs_inst *csel_inst = NULL;
-
- if (inst->src[1].file != IMM) {
- csel_inst = ibld.CSEL(inst->dst,
- inst->src[0],
- inst->src[1],
- scan_inst->src[0],
- cond);
- } else if (cond == BRW_CONDITIONAL_NZ) {
- /* Consider the sequence
- *
- * cmp.nz.f0 null<1>F g3<8,8,1>F 0F
- * (+f0) sel g124<1>UD g2<8,8,1>UD 0x00000000UD
- *
- * The sel will pick the immediate value 0 if r0 is ±0.0.
- * Therefore, this sequence is equivalent:
- *
- * cmp.nz.f0 null<1>F g3<8,8,1>F 0F
- * (+f0) sel g124<1>F g2<8,8,1>F (abs)g3<8,8,1>F
- *
- * The abs is ensures that the result is 0UD when g3 is -0.0F.
- * By normal cmp-sel merging, this is also equivalent:
- *
- * csel.nz g124<1>F g2<4,4,1>F (abs)g3<4,4,1>F g3<4,4,1>F
- */
- csel_inst = ibld.CSEL(inst->dst,
- inst->src[0],
- scan_inst->src[0],
- scan_inst->src[0],
- cond);
-
- csel_inst->src[1].abs = true;
- }
-
- if (csel_inst != NULL) {
- progress = true;
- csel_inst->saturate = inst->saturate;
- inst->remove(block);
- }
-
- break;
- }
- }
- }
-
- return progress;
-}
-
bool
fs_visitor::compute_to_mrf()
{
assert(mov->src[0].file == FIXED_GRF);
mov->src[0] = brw_vec4_grf(mov->src[0].nr, 0);
}
+
+ lower_scoreboard();
}
/**
fs_visitor::remove_extra_rounding_modes()
{
bool progress = false;
+ unsigned execution_mode = this->nir->info.float_controls_execution_mode;
+
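+   /* Record the rounding mode implied by the shader's float controls;
+    * RND_MODE instructions that set this mode while it is already in
+    * effect are redundant and can be removed below.
+    */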
+ brw_rnd_mode base_mode = BRW_RND_MODE_UNSPECIFIED;
+ if ((FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16 |
+ FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32 |
+ FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64) &
+ execution_mode)
+ base_mode = BRW_RND_MODE_RTNE;
+ if ((FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 |
+ FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32 |
+ FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64) &
+ execution_mode)
+ base_mode = BRW_RND_MODE_RTZ;
foreach_block (block, cfg) {
- brw_rnd_mode prev_mode = BRW_RND_MODE_UNSPECIFIED;
+ brw_rnd_mode prev_mode = base_mode;
foreach_inst_in_block_safe (fs_inst, inst, block) {
if (inst->opcode == SHADER_OPCODE_RND_MODE) {
return progress;
}
-bool
-fs_visitor::lower_linterp()
+void
+fs_visitor::lower_mul_dword_inst(fs_inst *inst, bblock_t *block)
{
- bool progress = false;
+ const fs_builder ibld(this, block, inst);
- if (devinfo->gen < 11)
- return false;
+ if (inst->src[1].file == IMM && inst->src[1].ud < (1 << 16)) {
+ /* The MUL instruction isn't commutative. On Gen <= 6, only the low
+ * 16-bits of src0 are read, and on Gen >= 7 only the low 16-bits of
+ * src1 are used.
+ *
+ * If multiplying by an immediate value that fits in 16-bits, do a
+ * single MUL instruction with that value in the proper location.
+ */
+ if (devinfo->gen < 7) {
+ fs_reg imm(VGRF, alloc.allocate(dispatch_width / 8), inst->dst.type);
+ ibld.MOV(imm, inst->src[1]);
+ ibld.MUL(inst->dst, imm, inst->src[0]);
+ } else {
+ const bool ud = (inst->src[1].type == BRW_REGISTER_TYPE_UD);
+ ibld.MUL(inst->dst, inst->src[0],
+ ud ? brw_imm_uw(inst->src[1].ud)
+ : brw_imm_w(inst->src[1].d));
+ }
+ } else {
+ /* Gen < 8 (and some Gen8+ low-power parts like Cherryview) cannot
+ * do 32-bit integer multiplication in one instruction, but instead
+ * must do a sequence (which actually calculates a 64-bit result):
+ *
+ * mul(8) acc0<1>D g3<8,8,1>D g4<8,8,1>D
+ * mach(8) null g3<8,8,1>D g4<8,8,1>D
+ * mov(8) g2<1>D acc0<8,8,1>D
+ *
+ * But on Gen > 6, the ability to use second accumulator register
+ * (acc1) for non-float data types was removed, preventing a simple
+ * implementation in SIMD16. A 16-channel result can be calculated by
+ * executing the three instructions twice in SIMD8, once with quarter
+ * control of 1Q for the first eight channels and again with 2Q for
+ * the second eight channels.
+ *
+ * Which accumulator register is implicitly accessed (by AccWrEnable
+ * for instance) is determined by the quarter control. Unfortunately
+ * Ivybridge (and presumably Baytrail) has a hardware bug in which an
+ * implicit accumulator access by an instruction with 2Q will access
+ * acc1 regardless of whether the data type is usable in acc1.
+ *
+ * Specifically, the 2Q mach(8) writes acc1 which does not exist for
+ * integer data types.
+ *
+ * Since we only want the low 32-bits of the result, we can do two
+ * 32-bit x 16-bit multiplies (like the mul and mach are doing), and
+ * adjust the high result and add them (like the mach is doing):
+ *
+ * mul(8) g7<1>D g3<8,8,1>D g4.0<8,8,1>UW
+ * mul(8) g8<1>D g3<8,8,1>D g4.1<8,8,1>UW
+ * shl(8) g9<1>D g8<8,8,1>D 16D
+ * add(8) g2<1>D g7<8,8,1>D g8<8,8,1>D
+ *
+ * We avoid the shl instruction by realizing that we only want to add
+ * the low 16-bits of the "high" result to the high 16-bits of the
+ * "low" result and using proper regioning on the add:
+ *
+ * mul(8) g7<1>D g3<8,8,1>D g4.0<16,8,2>UW
+ * mul(8) g8<1>D g3<8,8,1>D g4.1<16,8,2>UW
+ * add(8) g7.1<2>UW g7.1<16,8,2>UW g8<16,8,2>UW
+ *
+ * Since it does not use the (single) accumulator register, we can
+ * schedule multi-component multiplications much better.
+ */
- foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
- const fs_builder ibld(this, block, inst);
+ bool needs_mov = false;
+ fs_reg orig_dst = inst->dst;
- if (inst->opcode != FS_OPCODE_LINTERP)
- continue;
+ /* Get a new VGRF for the "low" 32x16-bit multiplication result if
+ * reusing the original destination is impossible due to hardware
+ * restrictions, source/destination overlap, or it being the null
+ * register.
+ */
+ fs_reg low = inst->dst;
+ if (orig_dst.is_null() || orig_dst.file == MRF ||
+ regions_overlap(inst->dst, inst->size_written,
+ inst->src[0], inst->size_read(0)) ||
+ regions_overlap(inst->dst, inst->size_written,
+ inst->src[1], inst->size_read(1)) ||
+ inst->dst.stride >= 4) {
+ needs_mov = true;
+ low = fs_reg(VGRF, alloc.allocate(regs_written(inst)),
+ inst->dst.type);
+ }
+
+ /* Get a new VGRF but keep the same stride as inst->dst */
+ fs_reg high(VGRF, alloc.allocate(regs_written(inst)), inst->dst.type);
+ high.stride = inst->dst.stride;
+ high.offset = inst->dst.offset % REG_SIZE;
- fs_reg dwP = component(inst->src[1], 0);
- fs_reg dwQ = component(inst->src[1], 1);
- fs_reg dwR = component(inst->src[1], 3);
- for (unsigned i = 0; i < DIV_ROUND_UP(dispatch_width, 8); i++) {
- const fs_builder hbld(ibld.half(i));
- fs_reg dst = half(inst->dst, i);
- fs_reg delta_xy = offset(inst->src[0], ibld, i);
- hbld.MAD(dst, dwR, half(delta_xy, 0), dwP);
- fs_inst *mad = hbld.MAD(dst, dst, half(delta_xy, 1), dwQ);
-
- /* Propagate conditional mod and saturate from the original
- * instruction to the second MAD instruction.
- */
- set_saturate(inst->saturate, mad);
- set_condmod(inst->conditional_mod, mad);
- }
+ if (devinfo->gen >= 7) {
+ if (inst->src[1].abs)
+ lower_src_modifiers(this, block, inst, 1);
- inst->remove(block);
- progress = true;
- }
+ if (inst->src[1].file == IMM) {
+ ibld.MUL(low, inst->src[0],
+ brw_imm_uw(inst->src[1].ud & 0xffff));
+ ibld.MUL(high, inst->src[0],
+ brw_imm_uw(inst->src[1].ud >> 16));
+ } else {
+ ibld.MUL(low, inst->src[0],
+ subscript(inst->src[1], BRW_REGISTER_TYPE_UW, 0));
+ ibld.MUL(high, inst->src[0],
+ subscript(inst->src[1], BRW_REGISTER_TYPE_UW, 1));
+ }
+ } else {
+ if (inst->src[0].abs)
+ lower_src_modifiers(this, block, inst, 0);
- if (progress)
- invalidate_live_intervals();
+ ibld.MUL(low, subscript(inst->src[0], BRW_REGISTER_TYPE_UW, 0),
+ inst->src[1]);
+ ibld.MUL(high, subscript(inst->src[0], BRW_REGISTER_TYPE_UW, 1),
+ inst->src[1]);
+ }
- return progress;
-}
+ ibld.ADD(subscript(low, BRW_REGISTER_TYPE_UW, 1),
+ subscript(low, BRW_REGISTER_TYPE_UW, 1),
+ subscript(high, BRW_REGISTER_TYPE_UW, 0));
-bool
-fs_visitor::lower_integer_multiplication()
-{
- bool progress = false;
+ if (needs_mov || inst->conditional_mod)
+ set_condmod(inst->conditional_mod, ibld.MOV(orig_dst, low));
+ }
+}
- foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
- const fs_builder ibld(this, block, inst);
+void
+fs_visitor::lower_mul_qword_inst(fs_inst *inst, bblock_t *block)
+{
+ const fs_builder ibld(this, block, inst);
+
+   /* Considering two 64-bit integers ab and cd where each letter
+    * corresponds to 32 bits, the full product is a 128-bit result WXYZ,
+    * of which we only need to provide the YZ (low 64-bit) part:
+    *
+    *            ab
+    *          * cd
+    *        ------
+    *            BD
+    *        + AD
+    *        + BC
+    *      + AC
+    *      --------
+    *          WXYZ
+    *
+    * Only BD needs to be 64 bits. For AD and BC we only care about the
+    * lower 32 bits (since they are part of the upper 32 bits of our
+    * result). AC is not needed since it starts on the 65th bit of the
+    * result.
+    */
+ unsigned int q_regs = regs_written(inst);
+ unsigned int d_regs = (q_regs + 1) / 2;
- if (inst->opcode == BRW_OPCODE_MUL) {
- if (inst->dst.is_accumulator() ||
- (inst->dst.type != BRW_REGISTER_TYPE_D &&
- inst->dst.type != BRW_REGISTER_TYPE_UD))
- continue;
+ fs_reg bd(VGRF, alloc.allocate(q_regs), BRW_REGISTER_TYPE_UQ);
+ fs_reg ad(VGRF, alloc.allocate(d_regs), BRW_REGISTER_TYPE_UD);
+ fs_reg bc(VGRF, alloc.allocate(d_regs), BRW_REGISTER_TYPE_UD);
- if (devinfo->has_integer_dword_mul)
- continue;
+ /* Here we need the full 64 bit result for 32b * 32b. */
+ if (devinfo->has_integer_dword_mul) {
+ ibld.MUL(bd, subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 0),
+ subscript(inst->src[1], BRW_REGISTER_TYPE_UD, 0));
+ } else {
+ fs_reg bd_high(VGRF, alloc.allocate(d_regs), BRW_REGISTER_TYPE_UD);
+ fs_reg bd_low(VGRF, alloc.allocate(d_regs), BRW_REGISTER_TYPE_UD);
+ fs_reg acc = retype(brw_acc_reg(inst->exec_size), BRW_REGISTER_TYPE_UD);
- if (inst->src[1].file == IMM &&
- inst->src[1].ud < (1 << 16)) {
- /* The MUL instruction isn't commutative. On Gen <= 6, only the low
- * 16-bits of src0 are read, and on Gen >= 7 only the low 16-bits of
- * src1 are used.
- *
- * If multiplying by an immediate value that fits in 16-bits, do a
- * single MUL instruction with that value in the proper location.
- */
- if (devinfo->gen < 7) {
- fs_reg imm(VGRF, alloc.allocate(dispatch_width / 8),
- inst->dst.type);
- ibld.MOV(imm, inst->src[1]);
- ibld.MUL(inst->dst, imm, inst->src[0]);
- } else {
- const bool ud = (inst->src[1].type == BRW_REGISTER_TYPE_UD);
- ibld.MUL(inst->dst, inst->src[0],
- ud ? brw_imm_uw(inst->src[1].ud)
- : brw_imm_w(inst->src[1].d));
- }
- } else {
- /* Gen < 8 (and some Gen8+ low-power parts like Cherryview) cannot
- * do 32-bit integer multiplication in one instruction, but instead
- * must do a sequence (which actually calculates a 64-bit result):
- *
- * mul(8) acc0<1>D g3<8,8,1>D g4<8,8,1>D
- * mach(8) null g3<8,8,1>D g4<8,8,1>D
- * mov(8) g2<1>D acc0<8,8,1>D
- *
- * But on Gen > 6, the ability to use second accumulator register
- * (acc1) for non-float data types was removed, preventing a simple
- * implementation in SIMD16. A 16-channel result can be calculated by
- * executing the three instructions twice in SIMD8, once with quarter
- * control of 1Q for the first eight channels and again with 2Q for
- * the second eight channels.
- *
- * Which accumulator register is implicitly accessed (by AccWrEnable
- * for instance) is determined by the quarter control. Unfortunately
- * Ivybridge (and presumably Baytrail) has a hardware bug in which an
- * implicit accumulator access by an instruction with 2Q will access
- * acc1 regardless of whether the data type is usable in acc1.
- *
- * Specifically, the 2Q mach(8) writes acc1 which does not exist for
- * integer data types.
- *
- * Since we only want the low 32-bits of the result, we can do two
- * 32-bit x 16-bit multiplies (like the mul and mach are doing), and
- * adjust the high result and add them (like the mach is doing):
- *
- * mul(8) g7<1>D g3<8,8,1>D g4.0<8,8,1>UW
- * mul(8) g8<1>D g3<8,8,1>D g4.1<8,8,1>UW
- * shl(8) g9<1>D g8<8,8,1>D 16D
- * add(8) g2<1>D g7<8,8,1>D g8<8,8,1>D
- *
- * We avoid the shl instruction by realizing that we only want to add
- * the low 16-bits of the "high" result to the high 16-bits of the
- * "low" result and using proper regioning on the add:
- *
- * mul(8) g7<1>D g3<8,8,1>D g4.0<16,8,2>UW
- * mul(8) g8<1>D g3<8,8,1>D g4.1<16,8,2>UW
- * add(8) g7.1<2>UW g7.1<16,8,2>UW g8<16,8,2>UW
- *
- * Since it does not use the (single) accumulator register, we can
- * schedule multi-component multiplications much better.
- */
+ fs_inst *mul = ibld.MUL(acc,
+ subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 0),
+ subscript(inst->src[1], BRW_REGISTER_TYPE_UW, 0));
+ mul->writes_accumulator = true;
- bool needs_mov = false;
- fs_reg orig_dst = inst->dst;
+ ibld.MACH(bd_high, subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 0),
+ subscript(inst->src[1], BRW_REGISTER_TYPE_UD, 0));
+ ibld.MOV(bd_low, acc);
- /* Get a new VGRF for the "low" 32x16-bit multiplication result if
- * reusing the original destination is impossible due to hardware
- * restrictions, source/destination overlap, or it being the null
- * register.
- */
- fs_reg low = inst->dst;
- if (orig_dst.is_null() || orig_dst.file == MRF ||
- regions_overlap(inst->dst, inst->size_written,
- inst->src[0], inst->size_read(0)) ||
- regions_overlap(inst->dst, inst->size_written,
- inst->src[1], inst->size_read(1)) ||
- inst->dst.stride >= 4) {
- needs_mov = true;
- low = fs_reg(VGRF, alloc.allocate(regs_written(inst)),
- inst->dst.type);
- }
+ ibld.MOV(subscript(bd, BRW_REGISTER_TYPE_UD, 0), bd_low);
+ ibld.MOV(subscript(bd, BRW_REGISTER_TYPE_UD, 1), bd_high);
+ }
- /* Get a new VGRF but keep the same stride as inst->dst */
- fs_reg high(VGRF, alloc.allocate(regs_written(inst)),
- inst->dst.type);
- high.stride = inst->dst.stride;
- high.offset = inst->dst.offset % REG_SIZE;
-
- if (devinfo->gen >= 7) {
- if (inst->src[1].abs)
- lower_src_modifiers(this, block, inst, 1);
-
- if (inst->src[1].file == IMM) {
- ibld.MUL(low, inst->src[0],
- brw_imm_uw(inst->src[1].ud & 0xffff));
- ibld.MUL(high, inst->src[0],
- brw_imm_uw(inst->src[1].ud >> 16));
- } else {
- ibld.MUL(low, inst->src[0],
- subscript(inst->src[1], BRW_REGISTER_TYPE_UW, 0));
- ibld.MUL(high, inst->src[0],
- subscript(inst->src[1], BRW_REGISTER_TYPE_UW, 1));
- }
- } else {
- if (inst->src[0].abs)
- lower_src_modifiers(this, block, inst, 0);
+ ibld.MUL(ad, subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 1),
+ subscript(inst->src[1], BRW_REGISTER_TYPE_UD, 0));
+ ibld.MUL(bc, subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 0),
+ subscript(inst->src[1], BRW_REGISTER_TYPE_UD, 1));
- ibld.MUL(low, subscript(inst->src[0], BRW_REGISTER_TYPE_UW, 0),
- inst->src[1]);
- ibld.MUL(high, subscript(inst->src[0], BRW_REGISTER_TYPE_UW, 1),
- inst->src[1]);
- }
+ ibld.ADD(ad, ad, bc);
+ ibld.ADD(subscript(bd, BRW_REGISTER_TYPE_UD, 1),
+ subscript(bd, BRW_REGISTER_TYPE_UD, 1), ad);
- ibld.ADD(subscript(low, BRW_REGISTER_TYPE_UW, 1),
- subscript(low, BRW_REGISTER_TYPE_UW, 1),
- subscript(high, BRW_REGISTER_TYPE_UW, 0));
+ ibld.MOV(inst->dst, bd);
+}
- if (needs_mov || inst->conditional_mod) {
- set_condmod(inst->conditional_mod,
- ibld.MOV(orig_dst, low));
- }
- }
+void
+fs_visitor::lower_mulh_inst(fs_inst *inst, bblock_t *block)
+{
+ const fs_builder ibld(this, block, inst);
- } else if (inst->opcode == SHADER_OPCODE_MULH) {
- /* According to the BDW+ BSpec page for the "Multiply Accumulate
- * High" instruction:
- *
- * "An added preliminary mov is required for source modification on
- * src1:
- * mov (8) r3.0<1>:d -r3<8;8,1>:d
- * mul (8) acc0:d r2.0<8;8,1>:d r3.0<16;8,2>:uw
- * mach (8) r5.0<1>:d r2.0<8;8,1>:d r3.0<8;8,1>:d"
- */
- if (devinfo->gen >= 8 && (inst->src[1].negate || inst->src[1].abs))
- lower_src_modifiers(this, block, inst, 1);
+ /* According to the BDW+ BSpec page for the "Multiply Accumulate
+ * High" instruction:
+ *
+ * "An added preliminary mov is required for source modification on
+ * src1:
+ * mov (8) r3.0<1>:d -r3<8;8,1>:d
+ * mul (8) acc0:d r2.0<8;8,1>:d r3.0<16;8,2>:uw
+ * mach (8) r5.0<1>:d r2.0<8;8,1>:d r3.0<8;8,1>:d"
+ */
+ if (devinfo->gen >= 8 && (inst->src[1].negate || inst->src[1].abs))
+ lower_src_modifiers(this, block, inst, 1);
+
+ /* Should have been lowered to 8-wide. */
+ assert(inst->exec_size <= get_lowered_simd_width(devinfo, inst));
+ const fs_reg acc = retype(brw_acc_reg(inst->exec_size), inst->dst.type);
+ fs_inst *mul = ibld.MUL(acc, inst->src[0], inst->src[1]);
+ fs_inst *mach = ibld.MACH(inst->dst, inst->src[0], inst->src[1]);
+
+ if (devinfo->gen >= 8) {
+      /* Until Gen8, integer multiplies read 32 bits from one source and
+       * 16 bits from the other, relying on the MACH instruction to
+       * generate the high bits of the result.
+ *
+ * On Gen8, the multiply instruction does a full 32x32-bit
+ * multiply, but in order to do a 64-bit multiply we can simulate
+ * the previous behavior and then use a MACH instruction.
+ */
+ assert(mul->src[1].type == BRW_REGISTER_TYPE_D ||
+ mul->src[1].type == BRW_REGISTER_TYPE_UD);
+ mul->src[1].type = BRW_REGISTER_TYPE_UW;
+ mul->src[1].stride *= 2;
+
+ if (mul->src[1].file == IMM) {
+ mul->src[1] = brw_imm_uw(mul->src[1].ud);
+ }
+ } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
+ inst->group > 0) {
+ /* Among other things the quarter control bits influence which
+ * accumulator register is used by the hardware for instructions
+ * that access the accumulator implicitly (e.g. MACH). A
+ * second-half instruction would normally map to acc1, which
+ * doesn't exist on Gen7 and up (the hardware does emulate it for
+ * floating-point instructions *only* by taking advantage of the
+ * extra precision of acc0 not normally used for floating point
+ * arithmetic).
+ *
+ * HSW and up are careful enough not to try to access an
+ * accumulator register that doesn't exist, but on earlier Gen7
+ * hardware we need to make sure that the quarter control bits are
+ * zero to avoid non-deterministic behaviour and emit an extra MOV
+ * to get the result masked correctly according to the current
+ * channel enables.
+ */
+ mach->group = 0;
+ mach->force_writemask_all = true;
+ mach->dst = ibld.vgrf(inst->dst.type);
+ ibld.MOV(inst->dst, mach->dst);
+ }
+}
- /* Should have been lowered to 8-wide. */
- assert(inst->exec_size <= get_lowered_simd_width(devinfo, inst));
- const fs_reg acc = retype(brw_acc_reg(inst->exec_size),
- inst->dst.type);
- fs_inst *mul = ibld.MUL(acc, inst->src[0], inst->src[1]);
- fs_inst *mach = ibld.MACH(inst->dst, inst->src[0], inst->src[1]);
-
- if (devinfo->gen >= 8) {
- /* Until Gen8, integer multiplies read 32-bits from one source,
- * and 16-bits from the other, and relying on the MACH instruction
- * to generate the high bits of the result.
- *
- * On Gen8, the multiply instruction does a full 32x32-bit
- * multiply, but in order to do a 64-bit multiply we can simulate
- * the previous behavior and then use a MACH instruction.
- */
- assert(mul->src[1].type == BRW_REGISTER_TYPE_D ||
- mul->src[1].type == BRW_REGISTER_TYPE_UD);
- mul->src[1].type = BRW_REGISTER_TYPE_UW;
- mul->src[1].stride *= 2;
+bool
+fs_visitor::lower_integer_multiplication()
+{
+ bool progress = false;
- if (mul->src[1].file == IMM) {
- mul->src[1] = brw_imm_uw(mul->src[1].ud);
- }
- } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
- inst->group > 0) {
- /* Among other things the quarter control bits influence which
- * accumulator register is used by the hardware for instructions
- * that access the accumulator implicitly (e.g. MACH). A
- * second-half instruction would normally map to acc1, which
- * doesn't exist on Gen7 and up (the hardware does emulate it for
- * floating-point instructions *only* by taking advantage of the
- * extra precision of acc0 not normally used for floating point
- * arithmetic).
- *
- * HSW and up are careful enough not to try to access an
- * accumulator register that doesn't exist, but on earlier Gen7
- * hardware we need to make sure that the quarter control bits are
- * zero to avoid non-deterministic behaviour and emit an extra MOV
- * to get the result masked correctly according to the current
- * channel enables.
- */
- mach->group = 0;
- mach->force_writemask_all = true;
- mach->dst = ibld.vgrf(inst->dst.type);
- ibld.MOV(inst->dst, mach->dst);
+ foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
+ if (inst->opcode == BRW_OPCODE_MUL) {
+ if ((inst->dst.type == BRW_REGISTER_TYPE_Q ||
+ inst->dst.type == BRW_REGISTER_TYPE_UQ) &&
+ (inst->src[0].type == BRW_REGISTER_TYPE_Q ||
+ inst->src[0].type == BRW_REGISTER_TYPE_UQ) &&
+ (inst->src[1].type == BRW_REGISTER_TYPE_Q ||
+ inst->src[1].type == BRW_REGISTER_TYPE_UQ)) {
+ lower_mul_qword_inst(inst, block);
+ inst->remove(block);
+ progress = true;
+ } else if (!inst->dst.is_accumulator() &&
+ (inst->dst.type == BRW_REGISTER_TYPE_D ||
+ inst->dst.type == BRW_REGISTER_TYPE_UD) &&
+ !devinfo->has_integer_dword_mul) {
+ lower_mul_dword_inst(inst, block);
+ inst->remove(block);
+ progress = true;
}
- } else {
- continue;
+ } else if (inst->opcode == SHADER_OPCODE_MULH) {
+ lower_mulh_inst(inst, block);
+ inst->remove(block);
+ progress = true;
}
- inst->remove(block);
- progress = true;
}
if (progress)
dst[i] = offset(color, bld, i);
}
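+/**
+ * Return the render target write message control bits for the given
+ * framebuffer write instruction: replicated-data, dual-source or
+ * single-source, and the SIMD8/SIMD16 subspan variant.
+ */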
+uint32_t
+brw_fb_write_msg_control(const fs_inst *inst,
+ const struct brw_wm_prog_data *prog_data)
+{
+ uint32_t mctl;
+
+ if (inst->opcode == FS_OPCODE_REP_FB_WRITE) {
+ assert(inst->group == 0 && inst->exec_size == 16);
+ mctl = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED;
+ } else if (prog_data->dual_src_blend) {
+ assert(inst->exec_size == 8);
+
+ if (inst->group % 16 == 0)
+ mctl = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01;
+ else if (inst->group % 16 == 8)
+ mctl = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23;
+ else
+ unreachable("Invalid dual-source FB write instruction group");
+ } else {
+ assert(inst->group == 0 || (inst->group == 16 && inst->exec_size == 16));
+
+ if (inst->exec_size == 16)
+ mctl = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE;
+ else if (inst->exec_size == 8)
+ mctl = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;
+ else
+ unreachable("Invalid FB write execution size");
+ }
+
+ return mctl;
+}
+
static void
lower_fb_write_logical_send(const fs_builder &bld, fs_inst *inst,
const struct brw_wm_prog_data *prog_data,
length = 2;
} else if ((devinfo->gen <= 7 && !devinfo->is_haswell &&
prog_data->uses_kill) ||
- color1.file != BAD_FILE ||
- key->nr_color_regions > 1) {
+ (devinfo->gen < 11 &&
+ (color1.file != BAD_FILE || key->nr_color_regions > 1))) {
/* From the Sandy Bridge PRM, volume 4, page 198:
*
* "Dispatched Pixel Enables. One bit per pixel indicating
length++;
}
+ bool src0_alpha_present = false;
+
if (src0_alpha.file != BAD_FILE) {
for (unsigned i = 0; i < bld.dispatch_width() / 8; i++) {
const fs_builder &ubld = bld.exec_all().group(8, i)
setup_color_payload(ubld, key, &sources[length], tmp, 1);
length++;
}
+ src0_alpha_present = true;
} else if (prog_data->replicate_alpha && inst->target != 0) {
/* Handle the case when fragment shader doesn't write to draw buffer
* zero. No need to call setup_color_payload() for src0_alpha because
* alpha value will be undefined.
*/
length += bld.dispatch_width() / 8;
+ src0_alpha_present = true;
}
if (sample_mask.file != BAD_FILE) {
payload.nr = bld.shader->alloc.allocate(regs_written(load));
load->dst = payload;
- inst->src[0] = payload;
- inst->resize_sources(1);
+ uint32_t msg_ctl = brw_fb_write_msg_control(inst, prog_data);
+ uint32_t ex_desc = 0;
+
+ inst->desc =
+ (inst->group / 16) << 11 | /* rt slot group */
+ brw_dp_write_desc(devinfo, inst->target, msg_ctl,
+ GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE,
+ inst->last_rt, false);
+
+ if (devinfo->gen >= 11) {
+ /* Set the "Render Target Index" and "Src0 Alpha Present" fields
+ * in the extended message descriptor, in lieu of using a header.
+ */
+ ex_desc = inst->target << 12 | src0_alpha_present << 15;
+
+ if (key->nr_color_regions == 0)
+ ex_desc |= 1 << 20; /* Null Render Target */
+ }
+
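+      /* Convert the FB write into an explicit SEND: src[0] holds the
+       * descriptor, src[1] the extended descriptor and src[2] the payload.
+       */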
+ inst->opcode = SHADER_OPCODE_SEND;
+ inst->resize_sources(3);
+ inst->sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
+ inst->src[0] = brw_imm_ud(inst->desc);
+ inst->src[1] = brw_imm_ud(ex_desc);
+ inst->src[2] = payload;
+ inst->mlen = regs_written(load);
+ inst->ex_mlen = 0;
+ inst->header_size = header_size;
+ inst->check_tdr = true;
+ inst->send_has_side_effects = true;
} else {
/* Send from the MRF */
load = bld.LOAD_PAYLOAD(fs_reg(MRF, 1, BRW_REGISTER_TYPE_F),
inst->resize_sources(0);
}
inst->base_mrf = 1;
+ inst->opcode = FS_OPCODE_FB_WRITE;
+ inst->mlen = regs_written(load);
+ inst->header_size = header_size;
}
-
- inst->opcode = FS_OPCODE_FB_WRITE;
- inst->mlen = regs_written(load);
- inst->header_size = header_size;
}
static void
}
}
-/**
- * Initialize the header present in some typed and untyped surface
- * messages.
- */
-static fs_reg
-emit_surface_header(const fs_builder &bld, const fs_reg &sample_mask)
-{
- fs_builder ubld = bld.exec_all().group(8, 0);
- const fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_UD);
- ubld.MOV(dst, brw_imm_d(0));
- ubld.group(1, 0).MOV(component(dst, 7), sample_mask);
- return dst;
-}
-
static void
lower_surface_logical_send(const fs_builder &bld, fs_inst *inst)
{
inst->opcode == SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL ||
inst->opcode == SHADER_OPCODE_TYPED_ATOMIC_LOGICAL;
+ const bool is_surface_access = is_typed_access ||
+ inst->opcode == SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL ||
+ inst->opcode == SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL ||
+ inst->opcode == SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL;
+
+ const bool is_stateless =
+ surface.file == IMM && (surface.ud == BRW_BTI_STATELESS ||
+ surface.ud == GEN8_BTI_STATELESS_NON_COHERENT);
+
+ const bool has_side_effects = inst->has_side_effects();
+ fs_reg sample_mask = has_side_effects ? bld.sample_mask_reg() :
+ fs_reg(brw_imm_d(0xffff));
+
/* From the BDW PRM Volume 7, page 147:
*
* "For the Data Cache Data Port*, the header must be present for the
* we don't attempt to implement sample masks via predication for such
* messages prior to Gen9, since we have to provide a header anyway. On
* Gen11+ the header has been removed so we can only use predication.
+ *
+ * For all stateless A32 messages, we also need a header
*/
- const unsigned header_sz = devinfo->gen < 9 && is_typed_access ? 1 : 0;
-
- const bool has_side_effects = inst->has_side_effects();
- fs_reg sample_mask = has_side_effects ? bld.sample_mask_reg() :
- fs_reg(brw_imm_d(0xffff));
+ fs_reg header;
+ if ((devinfo->gen < 9 && is_typed_access) || is_stateless) {
+ fs_builder ubld = bld.exec_all().group(8, 0);
+ header = ubld.vgrf(BRW_REGISTER_TYPE_UD);
+ ubld.MOV(header, brw_imm_d(0));
+ if (is_stateless) {
+ /* Both the typed and scattered byte/dword A32 messages take a buffer
+ * base address in R0.5:[31:0] (See MH1_A32_PSM for typed messages or
+ * MH_A32_GO for byte/dword scattered messages in the SKL PRM Vol. 2d
+ * for more details.) This is conveniently where the HW places the
+ * scratch surface base address.
+ *
+ * From the SKL PRM Vol. 7 "Per-Thread Scratch Space":
+ *
+ * "When a thread becomes 'active' it is allocated a portion of
+ * scratch space, sized according to PerThreadScratchSpace. The
+ * starting location of each thread’s scratch space allocation,
+ * ScratchSpaceOffset, is passed in the thread payload in
+ * R0.5[31:10] and is specified as a 1KB-granular offset from the
+ * GeneralStateBaseAddress. The computation of ScratchSpaceOffset
+ * includes the starting address of the stage’s scratch space
+ * allocation, as programmed by ScratchSpaceBasePointer."
+ *
+ * The base address is passed in bits R0.5[31:10] and the bottom 10
+ * bits of R0.5 are used for other things. Therefore, we have to
+ * mask off the bottom 10 bits so that we don't get a garbage base
+ * address.
+ */
+ ubld.group(1, 0).AND(component(header, 5),
+ retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
+ brw_imm_ud(0xfffffc00));
+ }
+ if (is_surface_access)
+ ubld.group(1, 0).MOV(component(header, 7), sample_mask);
+ }
+ const unsigned header_sz = header.file != BAD_FILE ? 1 : 0;
fs_reg payload, payload2;
unsigned mlen, ex_mlen = 0;
- if (devinfo->gen >= 9) {
+ if (devinfo->gen >= 9 &&
+ (src.file == BAD_FILE || header.file == BAD_FILE)) {
/* We have split sends on gen9 and above */
- assert(header_sz == 0);
- payload = bld.move_to_vgrf(addr, addr_sz);
- payload2 = bld.move_to_vgrf(src, src_sz);
- mlen = addr_sz * (inst->exec_size / 8);
- ex_mlen = src_sz * (inst->exec_size / 8);
+ if (header.file == BAD_FILE) {
+ payload = bld.move_to_vgrf(addr, addr_sz);
+ payload2 = bld.move_to_vgrf(src, src_sz);
+ mlen = addr_sz * (inst->exec_size / 8);
+ ex_mlen = src_sz * (inst->exec_size / 8);
+ } else {
+ assert(src.file == BAD_FILE);
+ payload = header;
+ payload2 = bld.move_to_vgrf(addr, addr_sz);
+ mlen = header_sz;
+ ex_mlen = addr_sz * (inst->exec_size / 8);
+ }
} else {
/* Allocate space for the payload. */
const unsigned sz = header_sz + addr_sz + src_sz;
unsigned n = 0;
/* Construct the payload. */
- if (header_sz)
- components[n++] = emit_surface_header(bld, sample_mask);
+ if (header.file != BAD_FILE)
+ components[n++] = header;
for (unsigned i = 0; i < addr_sz; i++)
components[n++] = offset(addr, bld, i);
/* Predicate the instruction on the sample mask if no header is
* provided.
*/
- if (!header_sz && sample_mask.file != BAD_FILE &&
- sample_mask.file != IMM) {
+ if ((header.file == BAD_FILE || !is_surface_access) &&
+ sample_mask.file != BAD_FILE && sample_mask.file != IMM) {
const fs_builder ubld = bld.group(1, 0).exec_all();
if (inst->predicate) {
assert(inst->predicate == BRW_PREDICATE_NORMAL);
sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
break;
+ case SHADER_OPCODE_DWORD_SCATTERED_READ_LOGICAL:
+ case SHADER_OPCODE_DWORD_SCATTERED_WRITE_LOGICAL:
+ sfid = devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
+ devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
+ BRW_DATAPORT_READ_TARGET_RENDER_CACHE;
+ break;
+
case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
true /* write */);
break;
+ case SHADER_OPCODE_DWORD_SCATTERED_READ_LOGICAL:
+ assert(arg.ud == 32); /* bit_size */
+ desc = brw_dp_dword_scattered_rw_desc(devinfo, inst->exec_size,
+ false /* write */);
+ break;
+
+ case SHADER_OPCODE_DWORD_SCATTERED_WRITE_LOGICAL:
+ assert(arg.ud == 32); /* bit_size */
+ desc = brw_dp_dword_scattered_rw_desc(devinfo, inst->exec_size,
+ true /* write */);
+ break;
+
case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
desc = brw_dp_untyped_atomic_desc(devinfo, inst->exec_size,
arg.ud, /* atomic_op */
case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
+ case SHADER_OPCODE_DWORD_SCATTERED_READ_LOGICAL:
+ case SHADER_OPCODE_DWORD_SCATTERED_WRITE_LOGICAL:
case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL:
case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
case FS_OPCODE_LINTERP:
case SHADER_OPCODE_GET_BUFFER_SIZE:
- case FS_OPCODE_DDX_COARSE:
- case FS_OPCODE_DDX_FINE:
- case FS_OPCODE_DDY_COARSE:
case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
case FS_OPCODE_PACK_HALF_2x16_SPLIT:
case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
*/
return (devinfo->gen == 4 ? 16 : MIN2(16, inst->exec_size));
+ case FS_OPCODE_DDX_COARSE:
+ case FS_OPCODE_DDX_FINE:
+ case FS_OPCODE_DDY_COARSE:
case FS_OPCODE_DDY_FINE:
/* The implementation of this virtual opcode may require emitting
* compressed Align16 instructions, which are severely limited on some
case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
+ case SHADER_OPCODE_DWORD_SCATTERED_WRITE_LOGICAL:
+ case SHADER_OPCODE_DWORD_SCATTERED_READ_LOGICAL:
return MIN2(16, inst->exec_size);
case SHADER_OPCODE_A64_UNTYPED_WRITE_LOGICAL:
assert(devinfo->gen >= 6);
prog_data->uses_src_depth = prog_data->uses_src_w =
- (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
+ (nir->info.system_values_read & (1ull << SYSTEM_VALUE_FRAG_COORD)) != 0;
prog_data->uses_sample_mask =
(nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
OPT(compact_virtual_grfs);
} while (progress);
- if (OPT(lower_linterp)) {
- OPT(opt_copy_propagation);
- OPT(dead_code_eliminate);
- }
-
- /* Do this after cmod propagation has had every possible opportunity to
- * propagate results into SEL instructions.
- */
- if (OPT(opt_peephole_csel))
- OPT(dead_code_eliminate);
-
progress = false;
pass_num = 0;
if (OPT(lower_load_payload)) {
split_virtual_grfs();
+
+ /* Lower 64 bit MOVs generated by payload lowering. */
+ if (!devinfo->has_64bit_types)
+ OPT(opt_algebraic);
+
OPT(register_coalesce);
OPT(lower_simd_width);
OPT(compute_to_mrf);
void
fs_visitor::allocate_registers(unsigned min_dispatch_width, bool allow_spilling)
{
- bool allocated_without_spills;
+ bool allocated;
static const enum instruction_scheduler_mode pre_modes[] = {
SCHEDULE_PRE,
SCHEDULE_PRE_LIFO,
};
+ static const char *scheduler_mode_name[] = {
+ "top-down",
+ "non-lifo",
+ "lifo"
+ };
+
bool spill_all = allow_spilling && (INTEL_DEBUG & DEBUG_SPILL_FS);
/* Try each scheduling heuristic to see if it can successfully register
*/
for (unsigned i = 0; i < ARRAY_SIZE(pre_modes); i++) {
schedule_instructions(pre_modes[i]);
+ this->shader_stats.scheduler_mode = scheduler_mode_name[i];
if (0) {
assign_regs_trivial();
- allocated_without_spills = true;
- } else {
- allocated_without_spills = assign_regs(false, spill_all);
+ allocated = true;
+ break;
}
- if (allocated_without_spills)
+
+ /* We only allow spilling for the last schedule mode and only if the
+ * allow_spilling parameter and dispatch width work out ok.
+ */
+ bool can_spill = allow_spilling &&
+ (i == ARRAY_SIZE(pre_modes) - 1) &&
+ dispatch_width == min_dispatch_width;
+
+ /* We should only spill registers on the last scheduling. */
+ assert(!spilled_any_registers);
+
+ allocated = assign_regs(can_spill, spill_all);
+ if (allocated)
break;
}
- if (!allocated_without_spills) {
+ if (!allocated) {
if (!allow_spilling)
fail("Failure to register allocate and spilling is not allowed.");
if (dispatch_width > min_dispatch_width) {
fail("Failure to register allocate. Reduce number of "
"live scalar values to avoid this.");
- } else {
- compiler->shader_perf_log(log_data,
- "%s shader triggered register spilling. "
- "Try reducing the number of live scalar "
- "values to improve performance.\n",
- stage_name);
}
- /* Since we're out of heuristics, just go spill registers until we
- * get an allocation.
- */
- while (!assign_regs(true, spill_all)) {
- if (failed)
- break;
- }
+ /* If we failed to allocate, we must have a reason */
+ assert(failed);
+ } else if (spilled_any_registers) {
+ compiler->shader_perf_log(log_data,
+ "%s shader triggered register spilling. "
+ "Try reducing the number of live scalar "
+ "values to improve performance.\n",
+ stage_name);
}
/* This must come after all optimization and register allocation, since
schedule_instructions(SCHEDULE_POST);
if (last_scratch > 0) {
- MAYBE_UNUSED unsigned max_scratch_size = 2 * 1024 * 1024;
+ ASSERTED unsigned max_scratch_size = 2 * 1024 * 1024;
prog_data->total_scratch = brw_get_scratch_size(last_scratch);
*/
assert(prog_data->total_scratch < max_scratch_size);
}
+
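+ /* Gen12+ needs software scoreboarding: resolve data dependencies now that
+ * the final instruction order and register assignment are known.
+ */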
+ lower_scoreboard();
}
bool
if (failed)
return false;
- compute_clip_distance();
-
emit_urb_writes();
if (shader_time_index >= 0)
return !failed;
}
-bool
-fs_visitor::run_tcs_single_patch()
+void
+fs_visitor::set_tcs_invocation_id()
{
- assert(stage == MESA_SHADER_TESS_CTRL);
-
struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
+ struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
- /* r1-r4 contain the ICP handles. */
- payload.num_regs = 5;
+ const unsigned instance_id_mask =
+ devinfo->gen >= 11 ? INTEL_MASK(22, 16) : INTEL_MASK(23, 17);
+ const unsigned instance_id_shift =
+ devinfo->gen >= 11 ? 16 : 17;
- if (shader_time_index >= 0)
- emit_shader_time_begin();
+ /* Get instance number from g0.2 bits 22:16 or 23:17 */
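+ /* (For illustration: INTEL_MASK(22, 16) is 0x007f0000 and
+ * INTEL_MASK(23, 17) is 0x00fe0000, i.e. a 7-bit instance field either
+ * way.)
+ */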
+ fs_reg t = bld.vgrf(BRW_REGISTER_TYPE_UD);
+ bld.AND(t, fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD)),
+ brw_imm_ud(instance_id_mask));
+
+ invocation_id = bld.vgrf(BRW_REGISTER_TYPE_UD);
+
+ if (vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_8_PATCH) {
+ /* gl_InvocationID is just the thread number */
+ bld.SHR(invocation_id, t, brw_imm_ud(instance_id_shift));
+ return;
+ }
+
+ assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_SINGLE_PATCH);
- /* Initialize gl_InvocationID */
fs_reg channels_uw = bld.vgrf(BRW_REGISTER_TYPE_UW);
fs_reg channels_ud = bld.vgrf(BRW_REGISTER_TYPE_UD);
bld.MOV(channels_uw, fs_reg(brw_imm_uv(0x76543210)));
if (tcs_prog_data->instances == 1) {
invocation_id = channels_ud;
} else {
- const unsigned invocation_id_mask = devinfo->gen >= 11 ?
- INTEL_MASK(22, 16) : INTEL_MASK(23, 17);
- const unsigned invocation_id_shift = devinfo->gen >= 11 ? 16 : 17;
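+ /* Shifting the masked value right by (instance_id_shift - 3) yields
+ * instance * 8 directly; e.g. instance 2 becomes 16, and adding the
+ * per-channel offsets 0..7 gives invocation IDs 16..23.
+ */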
+ fs_reg instance_times_8 = bld.vgrf(BRW_REGISTER_TYPE_UD);
+ bld.SHR(instance_times_8, t, brw_imm_ud(instance_id_shift - 3));
+ bld.ADD(invocation_id, instance_times_8, channels_ud);
+ }
+}
- invocation_id = bld.vgrf(BRW_REGISTER_TYPE_UD);
+bool
+fs_visitor::run_tcs()
+{
+ assert(stage == MESA_SHADER_TESS_CTRL);
- /* Get instance number from g0.2 bits 23:17, and multiply it by 8. */
- fs_reg t = bld.vgrf(BRW_REGISTER_TYPE_UD);
- fs_reg instance_times_8 = bld.vgrf(BRW_REGISTER_TYPE_UD);
- bld.AND(t, fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD)),
- brw_imm_ud(invocation_id_mask));
- bld.SHR(instance_times_8, t, brw_imm_ud(invocation_id_shift - 3));
+ struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
+ struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
+ struct brw_tcs_prog_key *tcs_key = (struct brw_tcs_prog_key *) key;
- bld.ADD(invocation_id, instance_times_8, channels_ud);
+ assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_SINGLE_PATCH ||
+ vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_8_PATCH);
+
+ if (vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_SINGLE_PATCH) {
+ /* r1-r4 contain the ICP handles. */
+ payload.num_regs = 5;
+ } else {
+ assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_8_PATCH);
+ assert(tcs_key->input_vertices > 0);
+ /* r1 contains output handles, r2 may contain primitive ID, then the
+ * ICP handles occupy the next 1-32 registers.
+ */
+ payload.num_regs = 2 + tcs_prog_data->include_primitive_id +
+ tcs_key->input_vertices;
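+
+ /* For example, with the primitive ID included and 4 input vertices this
+ * works out to 2 + 1 + 4 = 7 payload registers, i.e. r0..r6.
+ */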
}
+ if (shader_time_index >= 0)
+ emit_shader_time_begin();
+
+ /* Initialize gl_InvocationID */
+ set_tcs_invocation_id();
+
+ const bool fix_dispatch_mask =
+ vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_SINGLE_PATCH &&
+ (nir->info.tess.tcs_vertices_out % 8) != 0;
+
/* Fix the dispatch mask */
- if (nir->info.tess.tcs_vertices_out % 8) {
+ if (fix_dispatch_mask) {
bld.CMP(bld.null_reg_ud(), invocation_id,
brw_imm_ud(nir->info.tess.tcs_vertices_out), BRW_CONDITIONAL_L);
bld.IF(BRW_PREDICATE_NORMAL);
emit_nir_code();
- if (nir->info.tess.tcs_vertices_out % 8) {
+ if (fix_dispatch_mask) {
bld.emit(BRW_OPCODE_ENDIF);
}
/* Emit EOT write; set TR DS Cache bit */
fs_reg srcs[3] = {
- fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
+ fs_reg(get_tcs_output_urb_handle()),
fs_reg(brw_imm_ud(WRITEMASK_X << 16)),
fs_reg(brw_imm_ud(0)),
};
optimize();
assign_curb_setup();
- assign_tcs_single_patch_urb_setup();
+ assign_tcs_urb_setup();
fixup_3src_null_dest();
allocate_registers(8, true);
if (shader_time_index >= 0)
emit_shader_time_begin();
- calculate_urb_setup();
if (nir->info.inputs_read > 0 ||
+ (nir->info.system_values_read & (1ull << SYSTEM_VALUE_FRAG_COORD)) ||
(nir->info.outputs_read > 0 && !wm_key->coherent_fb_fetch)) {
if (devinfo->gen < 6)
emit_interpolation_setup_gen4();
return !failed;
}
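+/* Returns true if the given SSA def has any use other than as a source of a
+ * load_frag_coord intrinsic (any use in an if-condition also counts).
+ */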
+static bool
+is_used_in_not_interp_frag_coord(nir_ssa_def *def)
+{
+ nir_foreach_use(src, def) {
+ if (src->parent_instr->type != nir_instr_type_intrinsic)
+ return true;
+
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(src->parent_instr);
+ if (intrin->intrinsic != nir_intrinsic_load_frag_coord)
+ return true;
+ }
+
+ nir_foreach_if_use(src, def)
+ return true;
+
+ return false;
+}
+
/**
* Return a bitfield where bit n is set if barycentric interpolation mode n
* (see enum brw_barycentric_mode) is needed by the fragment shader.
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
- if (intrin->intrinsic != nir_intrinsic_load_interpolated_input)
+ switch (intrin->intrinsic) {
+ case nir_intrinsic_load_barycentric_pixel:
+ case nir_intrinsic_load_barycentric_centroid:
+ case nir_intrinsic_load_barycentric_sample:
+ break;
+ default:
continue;
+ }
/* Ignore WPOS; it doesn't require interpolation. */
- if (nir_intrinsic_base(intrin) == VARYING_SLOT_POS)
+ assert(intrin->dest.is_ssa);
+ if (!is_used_in_not_interp_frag_coord(&intrin->dest.ssa))
continue;
- intrin = nir_instr_as_intrinsic(intrin->src[0].ssa->parent_instr);
enum glsl_interp_mode interp = (enum glsl_interp_mode)
nir_intrinsic_interp_mode(intrin);
nir_intrinsic_op bary_op = intrin->intrinsic;
const struct brw_wm_prog_key *key,
struct brw_wm_prog_data *prog_data,
nir_shader *shader,
- struct gl_program *prog,
int shader_time_index8, int shader_time_index16,
int shader_time_index32, bool allow_spilling,
bool use_rep_send, struct brw_vue_map *vue_map,
+ struct brw_compile_stats *stats,
char **error_str)
{
const struct gen_device_info *devinfo = compiler->devinfo;
- shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, true);
+ unsigned max_subgroup_size = unlikely(INTEL_DEBUG & DEBUG_DO32) ? 32 : 16;
+
+ brw_nir_apply_key(shader, compiler, &key->base, max_subgroup_size, true);
brw_nir_lower_fs_inputs(shader, devinfo, key);
brw_nir_lower_fs_outputs(shader);
if (devinfo->gen < 6)
brw_setup_vue_interpolation(vue_map, shader, prog_data);
+ /* From the SKL PRM, Volume 7, "Alpha Coverage":
+ * "If Pixel Shader outputs oMask, AlphaToCoverage is disabled in
+ * hardware, regardless of the state setting for this feature."
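+ *
+ * So when the key asks for alpha-to-coverage and the shader writes a
+ * sample mask, coverage has to be applied by the shader itself, which is
+ * what brw_nir_lower_alpha_to_coverage below is for.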
+ */
+ if (devinfo->gen > 6 && key->alpha_to_coverage) {
+ /* Run constant folding first so that the emit_alpha_to_coverage pass can
+ * rely on correct source offsets when locating the render target 0 store
+ * instruction.
+ */
+ NIR_PASS_V(shader, nir_opt_constant_folding);
+ NIR_PASS_V(shader, brw_nir_lower_alpha_to_coverage);
+ }
+
if (!key->multisample_fbo)
NIR_PASS_V(shader, demote_sample_qualifiers);
NIR_PASS_V(shader, move_interpolation_to_top);
- shader = brw_postprocess_nir(shader, compiler, true);
+ brw_postprocess_nir(shader, compiler, true);
/* key->alpha_test_func means simulating alpha testing via discards,
* so the shader definitely kills pixels.
prog_data->barycentric_interp_modes =
brw_compute_barycentric_interp_modes(compiler->devinfo, shader);
+ calculate_urb_setup(devinfo, key, prog_data, shader);
+ brw_compute_flat_inputs(prog_data, shader);
+
cfg_t *simd8_cfg = NULL, *simd16_cfg = NULL, *simd32_cfg = NULL;
- fs_visitor v8(compiler, log_data, mem_ctx, key,
- &prog_data->base, prog, shader, 8,
+ fs_visitor v8(compiler, log_data, mem_ctx, &key->base,
+ &prog_data->base, shader, 8,
shader_time_index8);
if (!v8.run_fs(allow_spilling, false /* do_rep_send */)) {
if (error_str)
prog_data->reg_blocks_8 = brw_register_blocks(v8.grf_used);
}
+ /* Limit dispatch width to simd8 with dual source blending on gen8.
+ * See: https://gitlab.freedesktop.org/mesa/mesa/issues/1917
+ */
+ if (devinfo->gen == 8 && prog_data->dual_src_blend &&
+ !(INTEL_DEBUG & DEBUG_NO8)) {
+ assert(!use_rep_send);
+ v8.limit_dispatch_width(8, "gen8 workaround: "
+ "using SIMD8 when dual src blending.\n");
+ }
+
if (v8.max_dispatch_width >= 16 &&
likely(!(INTEL_DEBUG & DEBUG_NO16) || use_rep_send)) {
/* Try a SIMD16 compile */
- fs_visitor v16(compiler, log_data, mem_ctx, key,
- &prog_data->base, prog, shader, 16,
+ fs_visitor v16(compiler, log_data, mem_ctx, &key->base,
+ &prog_data->base, shader, 16,
shader_time_index16);
v16.import_uniforms(&v8);
if (!v16.run_fs(allow_spilling, use_rep_send)) {
compiler->devinfo->gen >= 6 &&
unlikely(INTEL_DEBUG & DEBUG_DO32)) {
/* Try a SIMD32 compile */
- fs_visitor v32(compiler, log_data, mem_ctx, key,
- &prog_data->base, prog, shader, 32,
+ fs_visitor v32(compiler, log_data, mem_ctx, &key->base,
+ &prog_data->base, shader, 32,
shader_time_index32);
v32.import_uniforms(&v8);
if (!v32.run_fs(allow_spilling, false)) {
simd16_cfg = NULL;
}
- /* We have to compute the flat inputs after the visitor is finished running
- * because it relies on prog_data->urb_setup which is computed in
- * fs_visitor::calculate_urb_setup().
- */
- brw_compute_flat_inputs(prog_data, shader);
-
fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
- v8.promoted_constants, v8.runtime_check_aads_emit,
+ v8.shader_stats, v8.runtime_check_aads_emit,
MESA_SHADER_FRAGMENT);
if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
if (simd8_cfg) {
prog_data->dispatch_8 = true;
- g.generate_code(simd8_cfg, 8);
+ g.generate_code(simd8_cfg, 8, stats);
+ stats = stats ? stats + 1 : NULL;
}
if (simd16_cfg) {
prog_data->dispatch_16 = true;
- prog_data->prog_offset_16 = g.generate_code(simd16_cfg, 16);
+ prog_data->prog_offset_16 = g.generate_code(simd16_cfg, 16, stats);
+ stats = stats ? stats + 1 : NULL;
}
if (simd32_cfg) {
prog_data->dispatch_32 = true;
- prog_data->prog_offset_32 = g.generate_code(simd32_cfg, 32);
+ prog_data->prog_offset_32 = g.generate_code(simd32_cfg, 32, stats);
+ stats = stats ? stats + 1 : NULL;
}
return g.get_assembly();
unsigned dispatch_width)
{
nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
- shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, true);
+ brw_nir_apply_key(shader, compiler, &key->base, dispatch_width, true);
NIR_PASS_V(shader, brw_nir_lower_cs_intrinsics, dispatch_width);
NIR_PASS_V(shader, nir_opt_constant_folding);
NIR_PASS_V(shader, nir_opt_dce);
- return brw_postprocess_nir(shader, compiler, true);
+ brw_postprocess_nir(shader, compiler, true);
+
+ return shader;
}
const unsigned *
struct brw_cs_prog_data *prog_data,
const nir_shader *src_shader,
int shader_time_index,
+ struct brw_compile_stats *stats,
char **error_str)
{
+ prog_data->base.total_shared = src_shader->info.cs.shared_size;
prog_data->local_size[0] = src_shader->info.cs.local_size[0];
prog_data->local_size[1] = src_shader->info.cs.local_size[1];
prog_data->local_size[2] = src_shader->info.cs.local_size[2];
+ prog_data->slm_size = src_shader->num_shared;
unsigned local_workgroup_size =
src_shader->info.cs.local_size[0] * src_shader->info.cs.local_size[1] *
src_shader->info.cs.local_size[2];
min_dispatch_width = MAX2(8, min_dispatch_width);
min_dispatch_width = util_next_power_of_two(min_dispatch_width);
assert(min_dispatch_width <= 32);
+ unsigned max_dispatch_width = 32;
fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
- cfg_t *cfg = NULL;
+ fs_visitor *v = NULL;
const char *fail_msg = NULL;
- unsigned promoted_constants = 0;
+
+ if ((int)key->base.subgroup_size_type >= (int)BRW_SUBGROUP_SIZE_REQUIRE_8) {
+ /* These enum values are expressly chosen to be equal to the subgroup
+ * size that they require.
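+ * (For instance, BRW_SUBGROUP_SIZE_REQUIRE_8 has the value 8, and
+ * likewise for 16 and 32.)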
+ */
+ const unsigned required_dispatch_width =
+ (unsigned)key->base.subgroup_size_type;
+ assert(required_dispatch_width == 8 ||
+ required_dispatch_width == 16 ||
+ required_dispatch_width == 32);
+ if (required_dispatch_width < min_dispatch_width ||
+ required_dispatch_width > max_dispatch_width) {
+ fail_msg = "Cannot satisfy explicit subgroup size";
+ } else {
+ min_dispatch_width = max_dispatch_width = required_dispatch_width;
+ }
+ }
/* Now the main event: Visit the shader IR and generate our CS IR for it.
*/
- if (min_dispatch_width <= 8) {
+ if (!fail_msg && min_dispatch_width <= 8 && max_dispatch_width >= 8) {
nir_shader *nir8 = compile_cs_to_nir(compiler, mem_ctx, key,
src_shader, 8);
- v8 = new fs_visitor(compiler, log_data, mem_ctx, key, &prog_data->base,
- NULL, /* Never used in core profile */
+ v8 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
+ &prog_data->base,
nir8, 8, shader_time_index);
if (!v8->run_cs(min_dispatch_width)) {
fail_msg = v8->fail_msg;
/* We should always be able to do SIMD32 for compute shaders */
assert(v8->max_dispatch_width >= 32);
- cfg = v8->cfg;
+ v = v8;
cs_set_simd_size(prog_data, 8);
cs_fill_push_const_info(compiler->devinfo, prog_data);
- promoted_constants = v8->promoted_constants;
}
}
if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
- !fail_msg && min_dispatch_width <= 16) {
+ !fail_msg && min_dispatch_width <= 16 && max_dispatch_width >= 16) {
/* Try a SIMD16 compile */
nir_shader *nir16 = compile_cs_to_nir(compiler, mem_ctx, key,
src_shader, 16);
- v16 = new fs_visitor(compiler, log_data, mem_ctx, key, &prog_data->base,
- NULL, /* Never used in core profile */
+ v16 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
+ &prog_data->base,
nir16, 16, shader_time_index);
if (v8)
v16->import_uniforms(v8);
compiler->shader_perf_log(log_data,
"SIMD16 shader failed to compile: %s",
v16->fail_msg);
- if (!cfg) {
+ if (!v) {
fail_msg =
"Couldn't generate SIMD16 program and not "
"enough threads for SIMD8";
/* We should always be able to do SIMD32 for compute shaders */
assert(v16->max_dispatch_width >= 32);
- cfg = v16->cfg;
+ v = v16;
cs_set_simd_size(prog_data, 16);
cs_fill_push_const_info(compiler->devinfo, prog_data);
- promoted_constants = v16->promoted_constants;
}
}
/* We should always be able to do SIMD32 for compute shaders */
assert(!v16 || v16->max_dispatch_width >= 32);
- if (!fail_msg && (min_dispatch_width > 16 || (INTEL_DEBUG & DEBUG_DO32))) {
+ if (!fail_msg && (min_dispatch_width > 16 || (INTEL_DEBUG & DEBUG_DO32)) &&
+ max_dispatch_width >= 32) {
/* Try a SIMD32 compile */
nir_shader *nir32 = compile_cs_to_nir(compiler, mem_ctx, key,
src_shader, 32);
- v32 = new fs_visitor(compiler, log_data, mem_ctx, key, &prog_data->base,
- NULL, /* Never used in core profile */
+ v32 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
+ &prog_data->base,
nir32, 32, shader_time_index);
if (v8)
v32->import_uniforms(v8);
if (!v32->run_cs(min_dispatch_width)) {
compiler->shader_perf_log(log_data,
"SIMD32 shader failed to compile: %s",
- v16->fail_msg);
- if (!cfg) {
+ v32->fail_msg);
+ if (!v) {
fail_msg =
"Couldn't generate SIMD32 program and not "
"enough threads for SIMD16";
}
} else {
- cfg = v32->cfg;
+ v = v32;
cs_set_simd_size(prog_data, 32);
cs_fill_push_const_info(compiler->devinfo, prog_data);
- promoted_constants = v32->promoted_constants;
}
}
const unsigned *ret = NULL;
- if (unlikely(cfg == NULL)) {
+ if (unlikely(v == NULL)) {
assert(fail_msg);
if (error_str)
*error_str = ralloc_strdup(mem_ctx, fail_msg);
} else {
fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
- promoted_constants, false, MESA_SHADER_COMPUTE);
+ v->shader_stats, v->runtime_check_aads_emit,
+ MESA_SHADER_COMPUTE);
if (INTEL_DEBUG & DEBUG_CS) {
char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
src_shader->info.label ?
g.enable_debug(name);
}
- g.generate_code(cfg, prog_data->simd_size);
+ g.generate_code(v->cfg, prog_data->simd_size, stats);
ret = g.get_assembly();
}