* 12.5 (p356).
*/
-#define ACP_HASH_SIZE 16
+#define ACP_HASH_SIZE 64
#include "util/bitset.h"
+#include "util/u_math.h"
#include "brw_fs.h"
#include "brw_fs_live_variables.h"
#include "brw_cfg.h"
struct acp_entry : public exec_node {
fs_reg dst;
fs_reg src;
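+ /* Index of this entry in the dataflow analysis' flat acp[] array, set
+ * when the per-block ACP lists are flattened; used to index the
+ * per-block KILL sets in setup_initial_values().
+ */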
+ unsigned global_idx;
uint8_t size_written;
uint8_t size_read;
enum opcode opcode;
foreach_in_list(acp_entry, entry, &out_acp[block->num][i]) {
acp[next_acp] = entry;
+ entry->global_idx = next_acp;
+
/* opt_copy_propagation_local populates out_acp with copies created
* in a block which are still live at the end of the block. This
* is exactly what we want in the COPY set.
fs_copy_prop_dataflow::setup_initial_values()
{
/* Initialize the COPY and KILL sets. */
- foreach_block (block, cfg) {
- foreach_inst_in_block(fs_inst, inst, block) {
- if (inst->dst.file != VGRF)
- continue;
+ {
+ /* Create a temporary table of ACP entries which we'll use for efficient
+ * look-up. Unfortunately, we have to do this in two steps because we
+ * have to match both sources and destinations, and an ACP entry can
+ * only be in one list at a time.
+ *
+ * We choose a table size between num_acp/4 and num_acp/2 to trade off
+ * the time it takes to initialize the table via exec_list constructors
+ * or make_empty() against the cost of collisions. In practice, the
+ * exact size doesn't appear to matter much as long as it's roughly the
+ * same order of magnitude as num_acp. We get most of the benefit of
+ * the table approach even with a table of size ACP_HASH_SIZE, though a
+ * full-sized table is 1-2% faster in practice.
+ */
+ unsigned acp_table_size = util_next_power_of_two(num_acp) / 4;
+ acp_table_size = MAX2(acp_table_size, ACP_HASH_SIZE);
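+ /* ACP_HASH_SIZE is a power of two and util_next_power_of_two()/4 is a
+ * power of two or zero, so acp_table_size is always a power of two and
+ * masking with (acp_table_size - 1) below gives a valid bucket index.
+ */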
+ exec_list *acp_table = new exec_list[acp_table_size];
+
+ /* First, get all the KILLs for instructions which overwrite ACP
+ * destinations.
+ */
+ for (int i = 0; i < num_acp; i++) {
+ unsigned idx = acp[i]->dst.nr & (acp_table_size - 1);
+ acp_table[idx].push_tail(acp[i]);
+ }
+
+ foreach_block (block, cfg) {
+ foreach_inst_in_block(fs_inst, inst, block) {
+ if (inst->dst.file != VGRF)
+ continue;
+
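+ /* Scanning only the bucket for inst->dst.nr is safe: a kill requires
+ * the instruction's destination to overlap an entry's destination, and
+ * regions can only overlap when they name the same VGRF and hence hash
+ * to the same bucket.
+ */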
+ unsigned idx = inst->dst.nr & (acp_table_size - 1);
+ foreach_in_list(acp_entry, entry, &acp_table[idx]) {
+ if (regions_overlap(inst->dst, inst->size_written,
+ entry->dst, entry->size_written))
+ BITSET_SET(bd[block->num].kill, entry->global_idx);
+ }
+ }
+ }
+
+ /* Clear the table for the second pass */
+ for (unsigned i = 0; i < acp_table_size; i++)
+ acp_table[i].make_empty();
- /* Mark ACP entries which are killed by this instruction. */
- for (int i = 0; i < num_acp; i++) {
- if (regions_overlap(inst->dst, inst->size_written,
- acp[i]->dst, acp[i]->size_written) ||
- regions_overlap(inst->dst, inst->size_written,
- acp[i]->src, acp[i]->size_read)) {
- BITSET_SET(bd[block->num].kill, i);
+ /* Next, get all the KILLs for instructions which overwrite ACP
+ * sources.
+ */
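+ /* The table is keyed by src.nr here but the scan below still looks up
+ * inst->dst.nr: a source is only killed when the instruction's
+ * destination overwrites it, which requires both to name the same
+ * register and therefore land in the same bucket.
+ */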
+ for (int i = 0; i < num_acp; i++) {
+ unsigned idx = acp[i]->src.nr & (acp_table_size - 1);
+ acp_table[idx].push_tail(acp[i]);
+ }
+
+ foreach_block (block, cfg) {
+ foreach_inst_in_block(fs_inst, inst, block) {
+ if (inst->dst.file != VGRF)
+ continue;
+
+ unsigned idx = inst->dst.nr & (acp_table_size - 1);
+ foreach_in_list(acp_entry, entry, &acp_table[idx]) {
+ if (regions_overlap(inst->dst, inst->size_written,
+ entry->src, entry->size_read))
+ BITSET_SET(bd[block->num].kill, entry->global_idx);
}
}
}
+
+ delete [] acp_table;
}
/* Populate the initial values for the livein and liveout sets. For the
if (stride > 4)
return false;
+ /* Bail if the channels of the source need to be aligned to the byte offset
+ * of the corresponding channel of the destination, and the provided stride
+ * would break this restriction.
+ */
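+ /* In other words, after propagation the source must either be a scalar
+ * (stride 0) or advance by the same number of bytes per channel as the
+ * destination.
+ */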
+ if (has_dst_aligned_region_restriction(devinfo, inst) &&
+ !(type_sz(inst->src[arg].type) * stride ==
+ type_sz(inst->dst.type) * inst->dst.stride ||
+ stride == 0))
+ return false;
+
/* 3-source instructions can only be Align16, which restricts what strides
* they can take. They can only take a stride of 1 (the usual case), or 0
* with a special "repctrl" bit. But the repctrl bit doesn't work for
return true;
}
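+/* Instructions implemented in the generator backend which assume that
+ * their operands are packed and therefore can't take strided source
+ * regions.
+ */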
+static bool
+instruction_requires_packed_data(fs_inst *inst)
+{
+ switch (inst->opcode) {
+ case FS_OPCODE_DDX_FINE:
+ case FS_OPCODE_DDX_COARSE:
+ case FS_OPCODE_DDY_FINE:
+ case FS_OPCODE_DDY_COARSE:
+ return true;
+ default:
+ return false;
+ }
+}
+
bool
fs_visitor::try_copy_propagate(fs_inst *inst, int arg, acp_entry *entry)
{
inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_WRITE)
return false;
+ /* Some instructions implemented in the generator backend, such as
+ * derivatives, assume that their operands are packed, so we can't
+ * generally propagate strided regions to them.
+ */
+ if (instruction_requires_packed_data(inst) && entry->src.stride > 1)
+ return false;
+
/* Bail if the result of composing both strides would exceed the
* hardware limit.
*/
}
break;
- case SHADER_OPCODE_UNTYPED_ATOMIC:
- case SHADER_OPCODE_UNTYPED_SURFACE_READ:
- case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
- case SHADER_OPCODE_TYPED_ATOMIC:
- case SHADER_OPCODE_TYPED_SURFACE_READ:
- case SHADER_OPCODE_TYPED_SURFACE_WRITE:
- case SHADER_OPCODE_BYTE_SCATTERED_WRITE:
- case SHADER_OPCODE_BYTE_SCATTERED_READ:
- /* We only propagate into the surface argument of the
- * instruction. Everything else goes through LOAD_PAYLOAD.
- */
- if (i == 1) {
- inst->src[i] = val;
- progress = true;
- }
- break;
-
case FS_OPCODE_FB_WRITE_LOGICAL:
/* The stencil and omask sources of FS_OPCODE_FB_WRITE_LOGICAL are
* bit-cast using a strided region so they cannot be immediates.
case SHADER_OPCODE_TG4_LOGICAL:
case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
+ case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL:
case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
foreach_block (block, cfg) {
progress = opt_copy_propagation_local(copy_prop_ctx, block,
out_acp[block->num]) || progress;
+
+ /* If the destination of an ACP entry is live only within this block,
+ * there's no need to keep it around for dataflow analysis. We can
+ * delete it from the out_acp table and avoid growing the bitsets any
+ * bigger than we absolutely have to.
+ *
+ * Because opt_copy_propagation_local never touches the block start/end
+ * IPs and cannot extend the live range of an ACP destination beyond
+ * the block, it's safe to use the liveness information in this way.
+ */
+ for (unsigned a = 0; a < ACP_HASH_SIZE; a++) {
+ foreach_in_list_safe(acp_entry, entry, &out_acp[block->num][a]) {
+ assert(entry->dst.file == VGRF);
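+ /* If the destination's entire live range is contained within this
+ * block, no other block can ever see this copy, so it's safe to drop
+ * the entry.
+ */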
+ if (block->start_ip <= virtual_grf_start[entry->dst.nr] &&
+ virtual_grf_end[entry->dst.nr] <= block->end_ip)
+ entry->remove();
+ }
+ }
}
/* Do dataflow analysis for those available copies. */