* blocks with successor/predecessor edges connecting them.
*/
+using namespace brw;
+
static bblock_t *
pop_stack(exec_list *list)
{
block_list.make_empty();
blocks = NULL;
num_blocks = 0;
- idom_dirty = true;
cycle_count = 0;
bblock_t *cur = NULL;
this->blocks[this->num_blocks - 1]->num = this->num_blocks - 2;
this->num_blocks--;
- idom_dirty = true;
}
bblock_t *
void
cfg_t::dump(backend_shader *s)
{
- if (idom_dirty)
- calculate_idom();
+ const idom_tree *idom = (s ? &s->idom_analysis.require() : NULL);
foreach_block (block, this) {
if (block->idom)
* (less than 1000 nodes) that this algorithm is significantly faster than
* others like Lengauer-Tarjan.
*/
-void
-cfg_t::calculate_idom()
+idom_tree::idom_tree(const backend_shader *s)
{
- foreach_block(block, this) {
+ foreach_block(block, s->cfg) {
block->idom = NULL;
}
- blocks[0]->idom = blocks[0];
+ s->cfg->blocks[0]->idom = s->cfg->blocks[0];
bool changed;
do {
changed = false;
- foreach_block(block, this) {
+ foreach_block(block, s->cfg) {
if (block->num == 0)
continue;
}
}
} while (changed);
-
- idom_dirty = false;
}
bblock_t *
-cfg_t::intersect(bblock_t *b1, bblock_t *b2)
+idom_tree::intersect(bblock_t *b1, bblock_t *b2) const
{
/* Note, the comparisons here are the opposite of what the paper says
* because we index blocks from beginning -> end (i.e. reverse post-order)
}
void
idom_tree::dump(const backend_shader *s) const
{
   /* Emit the immediate-dominator tree of the shader's CFG in GraphViz
    * "dot" format (one edge per block, from its idom to the block), for
    * offline rendering/debugging.
    */
   printf("digraph DominanceTree {\n");
   foreach_block(block, s->cfg) {
      /* Blocks whose idom pointer is still NULL (e.g. before the tree has
       * been computed) are simply omitted from the output.
       */
      if (block->idom) {
         printf("\t%d -> %d\n", block->idom->num, block->num);
      }
   }
   printf("}\n");
}
void
cfg_t::dump_cfg()
{
   /* Emit the control-flow graph in GraphViz "dot" format: one edge per
    * successor ("children") link of each basic block, for debugging.
    */
   printf("digraph CFG {\n");
   for (int b = 0; b < num_blocks; b++) {
      bblock_t *block = this->blocks[b];

      /* _safe variant matches the surrounding file's convention, although
       * nothing is removed from the list while iterating here.
       */
      foreach_list_typed_safe (bblock_link, child, link, &block->children) {
         printf("\t%d -> %d\n", b, child->block->num);
      }
   }
   printf("}\n");
}
#define BRW_CFG_H
#include "brw_ir.h"
+#ifdef __cplusplus
+#include "brw_ir_analysis.h"
+#endif
struct bblock_t;
bblock_t *new_block();
void set_next_block(bblock_t **cur, bblock_t *block, int ip);
void make_block_array();
- void calculate_idom();
- static bblock_t *intersect(bblock_t *b1, bblock_t *b2);
void dump(backend_shader *s);
void dump_cfg();
- void dump_domtree();
#endif
void *mem_ctx;
struct bblock_t **blocks;
int num_blocks;
- bool idom_dirty;
-
unsigned cycle_count;
};
!__scan_inst->is_head_sentinel(); \
__scan_inst = (__type *)__scan_inst->prev)
#ifdef __cplusplus
namespace brw {
   /**
    * Immediate dominator tree analysis of a shader.
    *
    * The tree is computed from the shader's CFG at construction time.  The
    * validate()/dependency_class() members are the hooks expected by the
    * analysis-pass framework (brw_ir_analysis.h), which uses them to decide
    * when a cached instance may be reused -- see callers going through
    * idom_analysis.require().
    */
   struct idom_tree {
      /* Computes the tree by iterating over s->cfg to a fixed point. */
      idom_tree(const backend_shader *s);

      /* Pass-manager hook: re-check this cached analysis against the
       * current IR.  Not implemented yet, so it always claims validity.
       */
      bool
      validate(const backend_shader *) const
      {
         /* FINISHME */
         return true;
      }

      /* Pass-manager hook: declare what this analysis depends on.  Only
       * the basic-block structure matters, so presumably any transform
       * that leaves the CFG's blocks intact keeps the tree valid.
       */
      analysis_dependency_class
      dependency_class() const
      {
         return DEPENDENCY_BLOCKS;
      }

      /* Returns a common dominator of \p b1 and \p b2 by walking up the
       * tree (the "intersect" step of the Cooper/Harvey/Kennedy simple
       * dominance algorithm; comparisons are inverted relative to the
       * paper because blocks are numbered in reverse post-order).
       */
      bblock_t *
      intersect(bblock_t *b1, bblock_t *b2) const;

      /* Prints the tree in GraphViz "dot" format for debugging. */
      void
      dump(const backend_shader *s) const;
   };
}
#endif
+
#endif /* BRW_CFG_H */
table.len = 0;
table.imm = ralloc_array(const_ctx, struct imm, table.size);
- cfg->calculate_idom();
+ const brw::idom_tree &idom = idom_analysis.require();
unsigned ip = -1;
/* Make a pass through all instructions and count the number of times each
struct imm *imm = find_imm(&table, data, size);
if (imm) {
- bblock_t *intersection = cfg_t::intersect(block, imm->block);
+ bblock_t *intersection = idom.intersect(block, imm->block);
if (intersection != imm->block)
imm->inst = NULL;
imm->block = intersection;