+2018-01-23 David Malcolm <dmalcolm@redhat.com>
+
+ PR tree-optimization/83510
+ * domwalk.c (set_all_edges_as_executable): New function.
+ (dom_walker::dom_walker): Convert bool param
+ "skip_unreachable_blocks" to enum reachability. Move setup of
+ edge flags to set_all_edges_as_executable and only do it when
+ reachability is REACHABLE_BLOCKS.
+ * domwalk.h (enum dom_walker::reachability): New enum.
+ (dom_walker::dom_walker): Convert bool param
+ "skip_unreachable_blocks" to enum reachability.
+ (set_all_edges_as_executable): New decl.
+ * graphite-scop-detection.c (gather_bbs::gather_bbs): Convert
+ from false for "skip_unreachable_blocks" to ALL_BLOCKS for
+ "reachability".
+ * tree-ssa-dom.c (dom_opt_dom_walker::dom_opt_dom_walker): Likewise,
+ but converting true to REACHABLE_BLOCKS.
+ * tree-ssa-sccvn.c (sccvn_dom_walker::sccvn_dom_walker): Likewise.
+ * tree-vrp.c
+ (check_array_bounds_dom_walker::check_array_bounds_dom_walker):
+ Likewise, but converting it to REACHABLE_BLOCKS_PRESERVING_FLAGS.
+ (vrp_dom_walker::vrp_dom_walker): Likewise, but converting it to
+ REACHABLE_BLOCKS.
+ (vrp_prop::vrp_finalize): Call set_all_edges_as_executable
+ if check_all_array_refs will be called.
+
2018-01-23 David Malcolm <dmalcolm@redhat.com>
* tree.c (selftest::test_location_wrappers): Add more test
qsort (bbs, n, sizeof *bbs, cmp_bb_postorder);
}
-/* Constructor for a dom walker.
+/* Set EDGE_EXECUTABLE on every edge within FN's CFG. */
+
+void
+set_all_edges_as_executable (function *fn)
+{
+ basic_block bb;
+ FOR_ALL_BB_FN (bb, fn)
+ {
+ edge_iterator ei;
+ edge e;
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ e->flags |= EDGE_EXECUTABLE;
+ }
+}
+
+/* Constructor for a dom walker. */
- If SKIP_UNREACHBLE_BLOCKS is true, then we need to set
- EDGE_EXECUTABLE on every edge in the CFG. */
dom_walker::dom_walker (cdi_direction direction,
- bool skip_unreachable_blocks,
+ enum reachability reachability,
int *bb_index_to_rpo)
: m_dom_direction (direction),
- m_skip_unreachable_blocks (skip_unreachable_blocks),
+ m_skip_unreachable_blocks (reachability != ALL_BLOCKS),
m_user_bb_to_rpo (bb_index_to_rpo != NULL),
m_unreachable_dom (NULL),
m_bb_to_rpo (bb_index_to_rpo)
free (postorder);
}
- /* If we are not skipping unreachable blocks, then there is nothing
- further to do. */
- if (!m_skip_unreachable_blocks)
- return;
-
- basic_block bb;
- FOR_ALL_BB_FN (bb, cfun)
+ /* Set up edge flags if need be. */
+ switch (reachability)
{
- edge_iterator ei;
- edge e;
- FOR_EACH_EDGE (e, ei, bb->succs)
- e->flags |= EDGE_EXECUTABLE;
+ default:
+ gcc_unreachable ();
+ case ALL_BLOCKS:
+ /* No need to touch edge flags. */
+ break;
+
+ case REACHABLE_BLOCKS:
+ set_all_edges_as_executable (cfun);
+ break;
+
+ case REACHABLE_BLOCKS_PRESERVING_FLAGS:
+ /* Preserve the edge flags. */
+ break;
}
}
public:
static const edge STOP;
- /* Use SKIP_UNREACHABLE_BLOCKS = true when your client can discover
- that some edges are not executable.
-
- If a client can discover that a COND, SWITCH or GOTO has a static
- target in the before_dom_children callback, the taken edge should
- be returned. The generic walker will clear EDGE_EXECUTABLE on all
- edges it can determine are not executable.
-
- You can provide a mapping of basic-block index to RPO if you
+ /* An enum for determining whether the dom walk should be constrained to
+ blocks reachable by executable edges. */
+
+ enum reachability
+ {
+ /* Walk all blocks within the CFG. */
+ ALL_BLOCKS,
+
+ /* Use REACHABLE_BLOCKS when your subclass can discover that some edges
+ are not executable.
+
+ If a subclass can discover that a COND, SWITCH or GOTO has a static
+ target in the before_dom_children callback, the taken edge should
+ be returned. The generic walker will clear EDGE_EXECUTABLE on all
+ edges it can determine are not executable.
+
+ With REACHABLE_BLOCKS, EDGE_EXECUTABLE will be set on every edge in
+ the dom_walker ctor; the flag will then be cleared on edges that are
+ determined to be not executable. */
+ REACHABLE_BLOCKS,
+
+ /* Identical to REACHABLE_BLOCKS, but the initial state of EDGE_EXECUTABLE
+ will instead be preserved in the ctor, allowing for information about
+ non-executable edges to be merged in from an earlier analysis (and
+ potentially for additional edges to be marked as non-executable). */
+ REACHABLE_BLOCKS_PRESERVING_FLAGS
+ };
+
+ /* You can provide a mapping of basic-block index to RPO if you
have that readily available or you do multiple walks. */
- dom_walker (cdi_direction direction, bool skip_unreachable_blocks = false,
+ dom_walker (cdi_direction direction, enum reachability = ALL_BLOCKS,
int *bb_index_to_rpo = NULL);
~dom_walker ();
};
+extern void set_all_edges_as_executable (function *fn);
+
#endif
};
}
gather_bbs::gather_bbs (cdi_direction direction, scop_p scop, int *bb_to_rpo)
- : dom_walker (direction, false, bb_to_rpo), scop (scop)
+ : dom_walker (direction, ALL_BLOCKS, bb_to_rpo), scop (scop)
{
}
+2018-01-23 David Malcolm <dmalcolm@redhat.com>
+
+ PR tree-optimization/83510
+ * gcc.c-torture/compile/pr83510.c: New test case.
+
2018-01-23 Richard Sandiford <richard.sandiford@linaro.org>
PR testsuite/83888
--- /dev/null
+/* Various examples of safe array access for which -Warray-bounds
+ shouldn't issue a warning at any optimization level
+ (PR tree-optimization/83510). */
+
+/* { dg-options "-Warray-bounds" } */
+
+extern int get_flag (void);
+
+unsigned int arr[10];
+
+struct xyz {
+ unsigned int a0;
+};
+
+extern void wfm(struct xyz *, int, unsigned int);
+
+static unsigned int f(struct xyz * ctx, unsigned int number)
+{
+ switch (number) {
+ case 0x9:
+ return ctx->a0;
+ case 0xA: case 0xB:
+ case 0xC: case 0xD: case 0xE: case 0xF:
+ case 0x10: case 0x11: case 0x12: case 0x13:
+ return arr[number - 0xa];
+ }
+ return 0;
+}
+
+int g(struct xyz * ctx) {
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ wfm(ctx, i, f(ctx, i));
+ }
+
+ return 0;
+}
+
+static unsigned int f_signed(struct xyz * ctx, int number)
+{
+ switch (number) {
+ case 0x9:
+ return ctx->a0;
+ case 0xA: case 0xB:
+ case 0xC: case 0xD: case 0xE: case 0xF:
+ case 0x10: case 0x11: case 0x12: case 0x13:
+ return arr[number];
+ }
+ return 0;
+}
+
+int g_signed(struct xyz * ctx) {
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ wfm(ctx, i, f(ctx, i));
+ }
+
+ return 0;
+}
+
+void test_2 (struct xyz * ctx)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ if (get_flag ())
+ wfm(ctx, i, f(ctx, i));
+ }
+}
+
+void test_2_signed (struct xyz * ctx)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ if (get_flag ())
+ wfm(ctx, i, f_signed(ctx, i));
+ }
+}
+
+void test_3 (struct xyz * ctx)
+{
+ unsigned int i;
+
+ for (i = 0; i < 10; i++) {
+ switch (i) {
+ case 0x9:
+ wfm(ctx, i, ctx->a0);
+ break;
+ case 0xA: case 0xB:
+ case 0xC: case 0xD: case 0xE: case 0xF:
+ case 0x10: case 0x11: case 0x12: case 0x13:
+ if (get_flag ())
+ wfm(ctx, i, arr[i - 0xa]);
+ break;
+ }
+ }
+}
+
+void test_3_signed (struct xyz * ctx)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ switch (i) {
+ case 0x9:
+ wfm(ctx, i, ctx->a0);
+ break;
+ case 0xA: case 0xB:
+ case 0xC: case 0xD: case 0xE: case 0xF:
+ case 0x10: case 0x11: case 0x12: case 0x13:
+ if (get_flag ())
+ wfm(ctx, i, arr[i]);
+ break;
+ }
+ }
+}
+
+void test_4 (struct xyz * ctx)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < 10; i++) {
+ switch (i) {
+ case 0x9:
+ wfm(ctx, i, ctx->a0);
+ break;
+ case 0xA: case 0xB:
+ case 0xC: case 0xD: case 0xE: case 0xF:
+ case 0x10: case 0x11: case 0x12: case 0x13:
+ for (j = 0; j < 5; j++)
+ wfm(ctx, i, arr[i - 0xa]);
+ break;
+ }
+ }
+}
+void test_4_signed (struct xyz * ctx)
+{
+ int i, j;
+
+ for (i = 0; i < 10; i++) {
+ switch (i) {
+ case 0x9:
+ wfm(ctx, i, ctx->a0);
+ break;
+ case 0xA: case 0xB:
+ case 0xC: case 0xD: case 0xE: case 0xF:
+ case 0x10: case 0x11: case 0x12: case 0x13:
+ for (j = 0; j < 5; j++)
+ wfm(ctx, i, arr[i]);
+ break;
+ }
+ }
+}
+
+void test_5 (struct xyz * ctx)
+{
+ unsigned int i;
+ for (i = 10; i < 20; i++) {
+ wfm(ctx, i, arr[i - 10]);
+ }
+}
+
+void test_5_signed (struct xyz * ctx)
+{
+ int i;
+ for (i = 10; i < 20; i++) {
+ wfm(ctx, i, arr[i - 10]);
+ }
+}
class const_and_copies *const_and_copies,
class avail_exprs_stack *avail_exprs_stack,
gcond *dummy_cond)
- : dom_walker (direction, true),
+ : dom_walker (direction, REACHABLE_BLOCKS),
m_const_and_copies (const_and_copies),
m_avail_exprs_stack (avail_exprs_stack),
m_dummy_cond (dummy_cond) { }
{
public:
sccvn_dom_walker ()
- : dom_walker (CDI_DOMINATORS, true), cond_stack (0) {}
+ : dom_walker (CDI_DOMINATORS, REACHABLE_BLOCKS), cond_stack (0) {}
virtual edge before_dom_children (basic_block);
virtual void after_dom_children (basic_block);
{
public:
check_array_bounds_dom_walker (vrp_prop *prop)
- : dom_walker (CDI_DOMINATORS, true), m_prop (prop) {}
+ : dom_walker (CDI_DOMINATORS,
+ /* Discover non-executable edges, preserving EDGE_EXECUTABLE
+ flags, so that we can merge in information on
+		      non-executable edges from vrp_folder.  */
+ REACHABLE_BLOCKS_PRESERVING_FLAGS),
+ m_prop (prop) {}
~check_array_bounds_dom_walker () {}
edge before_dom_children (basic_block) FINAL OVERRIDE;
vrp_dom_walker (cdi_direction direction,
class const_and_copies *const_and_copies,
class avail_exprs_stack *avail_exprs_stack)
- : dom_walker (direction, true),
+ : dom_walker (direction, REACHABLE_BLOCKS),
m_const_and_copies (const_and_copies),
m_avail_exprs_stack (avail_exprs_stack),
m_dummy_cond (NULL) {}
wi::to_wide (vr->max));
}
+ /* If we're checking array refs, we want to merge information on
+ the executability of each edge between vrp_folder and the
+ check_array_bounds_dom_walker: each can clear the
+ EDGE_EXECUTABLE flag on edges, in different ways.
+
+ Hence, if we're going to call check_all_array_refs, set
+ the flag on every edge now, rather than in
+ check_array_bounds_dom_walker's ctor; vrp_folder may clear
+ it from some edges. */
+ if (warn_array_bounds && warn_array_bounds_p)
+ set_all_edges_as_executable (cfun);
+
class vrp_folder vrp_folder;
vrp_folder.vr_values = &vr_values;
vrp_folder.substitute_and_fold ();