{
bool progress = false;
int next_ip = 0;
-
- calculate_live_intervals();
+ const vec4_live_variables &live = live_analysis.require();
foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
int ip = next_ip;
/* Can't coalesce this GRF if someone else was going to
* read it later.
*/
- if (live_intervals->var_range_end(
- var_from_reg(alloc, dst_reg(inst->src[0])), 8) > ip)
+ if (live.var_range_end(var_from_reg(alloc, dst_reg(inst->src[0])), 8) > ip)
continue;
/* We need to check interference with the final destination between this
vec4_visitor::invalidate_analysis(brw::analysis_dependency_class c)
{
backend_shader::invalidate_analysis(c);
+ live_analysis.invalidate(c);
}
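For context on the pattern this hunk converts to: require() lazily computes the liveness result and caches it, while invalidate() drops the cache whenever the change reported by a pass intersects the dependency classes the analysis declares. A minimal sketch of that memoization pattern, purely for illustration (the class and member names here are assumptions, not the actual brw::analysis template):

/* Illustrative sketch of a lazily-memoized analysis wrapper; not the real
 * brw::analysis implementation.
 */
template<typename T, typename C>
class lazy_analysis {
public:
   lazy_analysis(const C *ctx) : ctx(ctx), result(NULL) {}
   ~lazy_analysis() { delete result; }

   /* Compute the analysis on first use, then return the cached result. */
   const T &
   require() const
   {
      if (!result)
         result = new T(ctx);
      return *result;
   }

   /* Drop the cached result when the reported IR change overlaps the
    * dependency classes the analysis declares.
    */
   void
   invalidate(brw::analysis_dependency_class c)
   {
      if (result && (c & result->dependency_class())) {
         delete result;
         result = NULL;
      }
   }

private:
   const C *ctx;
   mutable T *result;
};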
bool
int first_non_payload_grf;
unsigned int max_grf;
- brw::vec4_live_variables *live_intervals;
+ BRW_ANALYSIS(live_analysis, brw::vec4_live_variables,
+ backend_shader *) live_analysis;
bool need_all_constants_in_pull_buffer;
void move_push_constants_to_pull_constants();
void split_uniform_registers();
void pack_uniform_registers();
- void calculate_live_intervals();
void invalidate_live_intervals();
virtual void invalidate_analysis(brw::analysis_dependency_class c);
void split_virtual_grfs();
bool dead_code_eliminate();
bool opt_cmod_propagation();
bool opt_copy_propagation(bool do_constant_prop = true);
- bool opt_cse_local(bblock_t *block);
+ bool opt_cse_local(bblock_t *block, const vec4_live_variables &live);
bool opt_cse();
bool opt_algebraic();
bool opt_register_coalesce();
}
bool
-vec4_visitor::opt_cse_local(bblock_t *block)
+vec4_visitor::opt_cse_local(bblock_t *block, const vec4_live_variables &live)
{
bool progress = false;
exec_list aeb;
* more -- a sure sign they'll fail operands_match().
*/
if (src->file == VGRF) {
- if (live_intervals->var_range_end(
- var_from_reg(alloc, *src), 8) < ip) {
+ if (live.var_range_end(var_from_reg(alloc, *src), 8) < ip) {
entry->remove();
ralloc_free(entry);
break;
vec4_visitor::opt_cse()
{
bool progress = false;
-
- calculate_live_intervals();
+ const vec4_live_variables &live = live_analysis.require();
foreach_block (block, cfg) {
- progress = opt_cse_local(block) || progress;
+ progress = opt_cse_local(block, live) || progress;
}
if (progress)
{
bool progress = false;
- calculate_live_intervals();
-
- int num_vars = live_intervals->num_vars;
+ const vec4_live_variables &live_vars = live_analysis.require();
+ int num_vars = live_vars.num_vars;
BITSET_WORD *live = rzalloc_array(NULL, BITSET_WORD, BITSET_WORDS(num_vars));
BITSET_WORD *flag_live = rzalloc_array(NULL, BITSET_WORD, 1);
foreach_block_reverse_safe(block, cfg) {
- memcpy(live, live_intervals->block_data[block->num].liveout,
+ memcpy(live, live_vars.block_data[block->num].liveout,
sizeof(BITSET_WORD) * BITSET_WORDS(num_vars));
- memcpy(flag_live, live_intervals->block_data[block->num].flag_liveout,
+ memcpy(flag_live, live_vars.block_data[block->num].flag_liveout,
sizeof(BITSET_WORD));
foreach_inst_in_block_reverse_safe(vec4_instruction, inst, block) {
ralloc_free(mem_ctx);
}
-/**
- * Computes a conservative start/end of the live intervals for each virtual GRF.
- *
- * We could expose per-channel live intervals to the consumer based on the
- * information we computed in vec4_live_variables, except that our only
- * current user is virtual_grf_interferes(). So we instead union the
- * per-channel ranges into a per-vgrf range for vgrf_start[] and vgrf_end[].
- *
- * We could potentially have virtual_grf_interferes() do the test per-channel,
- * which would let some interesting register allocation occur (particularly on
- * code-generated GLSL sequences from the Cg compiler which does register
- * allocation at the GLSL level and thus reuses components of the variable
- * with distinct lifetimes). But right now the complexity of doing so doesn't
- * seem worth it, since having virtual_grf_interferes() be cheap is important
- * for register allocation performance.
- */
-void
-vec4_visitor::calculate_live_intervals()
-{
- if (this->live_intervals)
- return;
-
- /* Now, extend those intervals using our analysis of control flow.
- *
- * The control flow-aware analysis was done at a channel level, while at
- * this point we're distilling it down to vgrfs.
- */
- this->live_intervals = new(mem_ctx) vec4_live_variables(this);
-}
-
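The comment removed above explains why the per-channel liveness data gets unioned into a conservative per-vgrf range. With the analysis object, that union is what the var_range_end() queries used by opt_register_coalesce() and opt_cse_local() above expose; a rough sketch of the idea, assuming end[] holds the per-channel instruction IPs computed by the pass (the exact implementation may differ):

/* Sketch only: collapse the live ranges of the n channels starting at
 * variable index v into a single conservative last-use IP.  Assumes end[]
 * holds per-channel IPs; details of the real pass may differ.
 */
int
vec4_live_variables::var_range_end(unsigned v, unsigned n) const
{
   int ip = INT_MIN;

   for (unsigned i = 0; i < n; i++)
      ip = MAX2(ip, end[v + i]);

   return ip;
}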
void
vec4_visitor::invalidate_live_intervals()
{
- ralloc_free(live_intervals);
- live_intervals = NULL;
+ /* XXX -- Leave this around for the moment to keep the vec4_visitor object
+ * concrete.
+ */
}
static bool
#define BRW_VEC4_LIVE_VARIABLES_H
#include "brw_ir_vec4.h"
+#include "brw_ir_analysis.h"
#include "util/bitset.h"
struct backend_shader;
BITSET_WORD flag_liveout[1];
};
- DECLARE_RALLOC_CXX_OPERATORS(vec4_live_variables)
-
vec4_live_variables(const backend_shader *s);
~vec4_live_variables();
bool
validate(const backend_shader *s) const;
+ analysis_dependency_class
+ dependency_class() const
+ {
+ return (DEPENDENCY_INSTRUCTION_IDENTITY |
+ DEPENDENCY_INSTRUCTION_DATA_FLOW |
+ DEPENDENCY_VARIABLES);
+ }
+
int num_vars;
int bitset_words;
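As a usage note on the dependency_class() mask added above: optimization passes report the kind of IR change they made through invalidate_analysis(), and any cached analysis whose mask intersects that report is recomputed on the next require(). A hypothetical pass, shown only to illustrate the reporting convention (example_eliminate_nops is not part of this change):

/* Hypothetical pass, for illustration only.  Removing instructions
 * renumbers IPs, so DEPENDENCY_INSTRUCTION_IDENTITY is reported; because
 * the liveness mask above includes that class, the cached
 * vec4_live_variables is rebuilt on the next live_analysis.require().
 */
bool
vec4_visitor::example_eliminate_nops()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      if (inst->opcode == BRW_OPCODE_NOP) {
         inst->remove(block);
         progress = true;
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_IDENTITY);

   return progress;
}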
if (0)
return reg_allocate_trivial();
- calculate_live_intervals();
-
+ const vec4_live_variables &live = live_analysis.require();
int node_count = alloc.count;
int first_payload_node = node_count;
node_count += payload_reg_count;
ra_set_node_class(g, i, compiler->vec4_reg_set.classes[size - 1]);
for (unsigned j = 0; j < i; j++) {
- if (live_intervals->vgrfs_interfere(i, j)) {
+ if (live.vgrfs_interfere(i, j)) {
ra_add_node_interference(g, i, j);
}
}
prog_data(prog_data),
fail_msg(NULL),
first_non_payload_grf(0),
+ live_analysis(this),
need_all_constants_in_pull_buffer(false),
no_spills(no_spills),
shader_time_index(shader_time_index),
memset(this->output_num_components, 0, sizeof(this->output_num_components));
- this->live_intervals = NULL;
-
this->max_grf = devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;
this->uniforms = 0;