{
bool progress = false;
- v->calculate_cfg();
-
foreach_block_safe (block, v->cfg) {
bblock_t *if_block = NULL, *else_block = NULL, *endif_block = block;
bool found = false;
@@ ... @@
void
fs_visitor::demote_pull_constants()
{
- calculate_cfg();
-
foreach_block_and_inst (block, fs_inst, inst, cfg) {
for (int i = 0; i < inst->sources; i++) {
if (inst->src[i].file != UNIFORM)
@@ ... @@
memset(last_mrf_move, 0, sizeof(last_mrf_move));
- calculate_cfg();
-
foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
if (inst->is_control_flow()) {
memset(last_mrf_move, 0, sizeof(last_mrf_move));
@@ ... @@
* have a .reg_offset of 0.
*/
- calculate_cfg();
-
foreach_block_and_inst(block, fs_inst, inst, cfg) {
if (inst->mlen != 0 && inst->dst.file == GRF) {
insert_gen4_pre_send_dependency_workarounds(block, inst);
@@ ... @@
void
fs_visitor::lower_uniform_pull_constant_loads()
{
- calculate_cfg();
-
foreach_block_and_inst (block, fs_inst, inst, cfg) {
if (inst->opcode != FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD)
continue;
@@ ... @@
{
bool progress = false;
- calculate_cfg();
-
foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD) {
fs_reg dst = inst->dst;
@@ ... @@
emit_fb_writes();
+ calculate_cfg();
+
split_virtual_grfs();
move_uniform_array_access_to_pull_constants();
@@ ... @@
*/
assert(sanity_param_count == prog->Parameters->NumParameters);
- calculate_cfg();
-
return !failed;
}
@@ ... @@
bool
fs_visitor::opt_copy_propagate()
{
- calculate_cfg();
-
bool progress = false;
void *copy_prop_ctx = ralloc_context(NULL);
exec_list *out_acp[cfg->num_blocks];
@@ ... @@
virtual_grf_end[i] = -1;
}
- calculate_cfg();
this->live_intervals = new(mem_ctx) fs_live_variables(this, cfg);
/* Merge the per-component live ranges to whole VGRF live ranges. */
@@ ... @@
{
bool progress = false;
- calculate_cfg();
-
foreach_block (block, cfg) {
/* BREAK and CONTINUE instructions, by definition, can only be found at
* the ends of basic blocks.
@@ ... @@
last_scratch += size * reg_size;
- calculate_cfg();
-
/* Generate spill/unspill instructions for the objects being
* spilled. Right now, we spill or unspill the whole thing to a
* virtual grf of the same size. For most instructions, though, we
@@ ... @@
{
bool progress = false;
- calculate_cfg();
-
foreach_block (block, cfg) {
/* IF instructions, by definition, can only be found at the ends of
* basic blocks.
@@ ... @@
this->remaining_grf_uses = NULL;
this->grf_active = NULL;
}
- v->calculate_cfg();
}
~instruction_scheduler()
{
@@ ... @@
bool progress = false;
- calculate_cfg();
-
foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
switch (inst->opcode) {
case BRW_OPCODE_ADD:
@@ ... @@
}
}
- calculate_cfg();
-
/* Now actually rewrite usage of the things we've moved to pull
* constants.
*/
@@ ... @@
vec4_instruction *last_mrf_write[BRW_MAX_GRF];
uint8_t mrf_channels_written[BRW_MAX_GRF];
- calculate_cfg();
-
assert(prog_data->total_grf ||
!"Must be called after register allocation");
@@ ... @@
emit_thread_end();
+ calculate_cfg();
+
/* Before any optimization, push array accesses out to scratch
* space where we need them to be. This pass may allocate new
* virtual GRFs, so we want to do it early. It also makes sure
@@ ... @@
*/
assert(sanity_param_count == prog->Parameters->NumParameters);
- calculate_cfg();
-
return !failed;
}
@@ ... @@
* The control flow-aware analysis was done at a channel level, while at
* this point we're distilling it down to vgrfs.
*/
- calculate_cfg();
vec4_live_variables livevars(this, cfg);
foreach_block (block, cfg) {
@@ ... @@
assert(virtual_grf_sizes[spill_reg_nr] == 1);
unsigned int spill_offset = c->last_scratch++;
- calculate_cfg();
-
/* Generate spill/unspill instructions for the objects being spilled. */
foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
for (unsigned int i = 0; i < 3; i++) {
@@ ... @@
scratch_loc[i] = -1;
}
- calculate_cfg();
-
/* First, calculate the set of virtual GRFs that need to be punted
* to scratch due to having any array access on them, and where in
* scratch.
@@ ... @@
pull_constant_loc[i] = -1;
}
- calculate_cfg();
-
/* Walk through and find array access of uniforms. Put a copy of that
* uniform in the pull constant buffer.
*