*
*/
-#pragma once
#ifndef BRW_CFG_H
#define BRW_CFG_H
-#include "brw_shader.h"
+#include "brw_ir.h"
+#ifdef __cplusplus
+#include "brw_ir_analysis.h"
+#endif
struct bblock_t;
+/**
+ * CFG edge types.
+ *
+ * A logical edge represents a potential control flow path of the original
+ * scalar program, while a physical edge represents a control flow path that
+ * may not have existed in the original program but was introduced during
+ * vectorization in order to implement divergent control flow of different
+ * shader invocations within the same SIMD thread.
+ *
+ * All logical edges in the CFG are considered to be physical edges but not
+ * the other way around -- i.e. the logical CFG is a subset of the physical
+ * one.
+ */
+enum bblock_link_kind {
+ bblock_link_logical = 0,
+ bblock_link_physical
+};
+
struct bblock_link {
#ifdef __cplusplus
DECLARE_RALLOC_CXX_OPERATORS(bblock_link)
- bblock_link(bblock_t *block)
- : block(block)
+ bblock_link(bblock_t *block, enum bblock_link_kind kind)
+ : block(block), kind(kind)
{
}
#endif
struct exec_node link;
struct bblock_t *block;
+
+ /* Type of this CFG edge. Because bblock_link_logical also implies
+ * bblock_link_physical, the proper way to test for membership of edge 'l'
+ * in CFG kind 'k' is 'l.kind <= k'.
+ */
+ enum bblock_link_kind kind;
};
-struct backend_instruction;
+struct backend_shader;
+struct cfg_t;
struct bblock_t {
#ifdef __cplusplus
explicit bblock_t(cfg_t *cfg);
- void add_successor(void *mem_ctx, bblock_t *successor);
- bool is_predecessor_of(const bblock_t *block) const;
- bool is_successor_of(const bblock_t *block) const;
+ void add_successor(void *mem_ctx, bblock_t *successor,
+ enum bblock_link_kind kind);
+ bool is_predecessor_of(const bblock_t *block,
+ enum bblock_link_kind kind) const;
+ bool is_successor_of(const bblock_t *block,
+ enum bblock_link_kind kind) const;
bool can_combine_with(const bblock_t *that) const;
void combine_with(bblock_t *that);
- void dump(backend_shader *s) const;
+ void dump() const;
backend_instruction *start();
const backend_instruction *start() const;
struct exec_node link;
struct cfg_t *cfg;
- struct bblock_t *idom;
int start_ip;
int end_ip;
#ifdef __cplusplus
DECLARE_RALLOC_CXX_OPERATORS(cfg_t)
- cfg_t(exec_list *instructions);
+ cfg_t(const backend_shader *s, exec_list *instructions);
~cfg_t();
void remove_block(bblock_t *block);
bblock_t *new_block();
void set_next_block(bblock_t **cur, bblock_t *block, int ip);
void make_block_array();
- void calculate_idom();
- static bblock_t *intersect(bblock_t *b1, bblock_t *b2);
- void dump(backend_shader *s);
+ void dump();
void dump_cfg();
- void dump_domtree();
#endif
+ const struct backend_shader *s;
void *mem_ctx;
/** Ordered list (by ip) of basic blocks */
struct bblock_t **blocks;
int num_blocks;
- bool idom_dirty;
-
unsigned cycle_count;
};
!__scan_inst->is_head_sentinel(); \
__scan_inst = (__type *)__scan_inst->prev)
+#ifdef __cplusplus
+namespace brw {
+ /**
+ * Immediate dominator tree analysis of a shader.
+ */
+ struct idom_tree {
+ idom_tree(const backend_shader *s);
+ ~idom_tree();
+
+ bool
+ validate(const backend_shader *) const
+ {
+ /* FINISHME */
+ return true;
+ }
+
+ analysis_dependency_class
+ dependency_class() const
+ {
+ return DEPENDENCY_BLOCKS;
+ }
+
+ const bblock_t *
+ parent(const bblock_t *b) const
+ {
+ assert(unsigned(b->num) < num_parents);
+ return parents[b->num];
+ }
+
+ bblock_t *
+ parent(bblock_t *b) const
+ {
+ assert(unsigned(b->num) < num_parents);
+ return parents[b->num];
+ }
+
+ bblock_t *
+ intersect(bblock_t *b1, bblock_t *b2) const;
+
+ void
+ dump() const;
+
+ private:
+ unsigned num_parents;
+ bblock_t **parents;
+ };
+}
+#endif
+
#endif /* BRW_CFG_H */