From: Richard Biener
Date: Thu, 27 Aug 2020 11:04:19 +0000 (+0200)
Subject: streamline TARGET_MEM_REF dumping
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=9ceb3b8d8f6dd088f3efd590aae9e54a265e5b07;p=gcc.git

streamline TARGET_MEM_REF dumping

The following streamlines TARGET_MEM_REF dumping, building on what we do
for MEM_REF and thus dumping things like the access type, the TBAA type
and the base/clique info.  I've changed it to do semantic dumping, i.e.
base + offset + step * index, rather than the odd "base: A, step: ..." style.

2020-08-27  Richard Biener

	* tree-pretty-print.c (dump_mem_ref): Handle TARGET_MEM_REFs.
	(dump_generic_node): Use dump_mem_ref also for TARGET_MEM_REF.

	* gcc.dg/tree-ssa/loop-19.c: Adjust.
	* gcc.dg/tree-ssa/loop-2.c: Likewise.
	* gcc.dg/tree-ssa/loop-3.c: Likewise.
---

diff --git a/gcc/testsuite/gcc.dg/tree-ssa/loop-19.c b/gcc/testsuite/gcc.dg/tree-ssa/loop-19.c
index af7a3daddec..0c73111c1ee 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/loop-19.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/loop-19.c
@@ -22,6 +22,6 @@ void tuned_STREAM_Copy()
    However, due to a bug in jump threading, we end up peeling one iteration
    from the loop, which creates an additional occurrence.  */
 
-/* { dg-final { scan-tree-dump-times "MEM.(base: &|symbol: )a," 2 "optimized" } } */
-/* { dg-final { scan-tree-dump-times "MEM.(base: &|symbol: )c," 2 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "MEM\[^;\]*&a" 1 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "MEM\[^;\]*&c" 1 "optimized" } } */
 
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/loop-2.c b/gcc/testsuite/gcc.dg/tree-ssa/loop-2.c
index bda25167353..e58561a6650 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/loop-2.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/loop-2.c
@@ -27,7 +27,6 @@ void xxx(void)
 
 /* { dg-final { scan-tree-dump-times " \\* \[^\\n\\r\]*=" 0 "optimized" } } */
 /* { dg-final { scan-tree-dump-times "\[^\\n\\r\]*= \\* " 0 "optimized" } } */
-/* { dg-final { scan-tree-dump-times "MEM\\\[base" 1 "optimized" } } */
 
 /* 17 * iter should be strength reduced.  */
 
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/loop-3.c b/gcc/testsuite/gcc.dg/tree-ssa/loop-3.c
index d3b26b7ad19..74491f80e49 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/loop-3.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/loop-3.c
@@ -20,8 +20,7 @@ void xxx(void)
 /* Access to arr_base[iter].y should not be strength reduced, since we have
    a memory mode including multiplication by 4.  */
 
-/* { dg-final { scan-tree-dump-times "MEM" 1 "optimized" } } */
-/* { dg-final { scan-tree-dump-times "step:" 1 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "MEM\[^;\]* \* 4" 1 "optimized" } } */
 
 /* And original induction variable should be preserved.  */
 
diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c
index 655061c174d..075a3fca766 100644
--- a/gcc/tree-pretty-print.c
+++ b/gcc/tree-pretty-print.c
@@ -1441,7 +1441,7 @@ dump_omp_atomic_memory_order (pretty_printer *pp, enum omp_memory_order mo)
 static void
 dump_mem_ref (pretty_printer *pp, tree node, int spc, dump_flags_t flags)
 {
-  if (flags & TDF_GIMPLE)
+  if (TREE_CODE (node) == MEM_REF && (flags & TDF_GIMPLE))
     {
       pp_string (pp, "__MEM <");
       dump_generic_node (pp, TREE_TYPE (node),
@@ -1472,7 +1472,8 @@ dump_mem_ref (pretty_printer *pp, tree node, int spc, dump_flags_t flags)
 	}
       pp_right_paren (pp);
     }
-  else if (integer_zerop (TREE_OPERAND (node, 1))
+  else if (TREE_CODE (node) == MEM_REF
+	   && integer_zerop (TREE_OPERAND (node, 1))
 	   /* Dump the types of INTEGER_CSTs explicitly, for we can't
 	      infer them and MEM_ATTR caching will share MEM_REFs
 	      with differently-typed op0s.  */
@@ -1541,12 +1542,33 @@ dump_mem_ref (pretty_printer *pp, tree node, int spc, dump_flags_t flags)
       dump_generic_node (pp, op1type, spc, flags | TDF_SLIM, false);
       pp_right_paren (pp);
       dump_generic_node (pp, op0, spc, flags, false);
-      if (!integer_zerop (op1))
-	if (!integer_zerop (TREE_OPERAND (node, 1)))
+      if (TREE_CODE (node) == MEM_REF
+	  && !integer_zerop (op1))
 	{
 	  pp_string (pp, " + ");
 	  dump_generic_node (pp, op1, spc, flags, false);
 	}
+      if (TREE_CODE (node) == TARGET_MEM_REF)
+	{
+	  tree tmp = TMR_INDEX2 (node);
+	  if (tmp)
+	    {
+	      pp_string (pp, " + ");
+	      dump_generic_node (pp, tmp, spc, flags, false);
+	    }
+	  tmp = TMR_INDEX (node);
+	  if (tmp)
+	    {
+	      pp_string (pp, " + ");
+	      dump_generic_node (pp, tmp, spc, flags, false);
+	      tmp = TMR_STEP (node);
+	      pp_string (pp, " * ");
+	      if (tmp)
+		dump_generic_node (pp, tmp, spc, flags, false);
+	      else
+		pp_string (pp, "1");
+	    }
+	}
       if ((flags & TDF_ALIAS)
 	  && MR_DEPENDENCE_CLIQUE (node) != 0)
 	{
@@ -1854,65 +1876,8 @@ dump_generic_node (pretty_printer *pp, tree node, int spc, dump_flags_t flags,
       break;
 
     case MEM_REF:
-      dump_mem_ref (pp, node, spc, flags);
-      break;
-
     case TARGET_MEM_REF:
-      {
-	const char *sep = "";
-	tree tmp;
-
-	pp_string (pp, "MEM[");
-
-	if (TREE_CODE (TMR_BASE (node)) == ADDR_EXPR)
-	  {
-	    pp_string (pp, sep);
-	    sep = ", ";
-	    pp_string (pp, "symbol: ");
-	    dump_generic_node (pp, TREE_OPERAND (TMR_BASE (node), 0),
-			       spc, flags, false);
-	  }
-	else
-	  {
-	    pp_string (pp, sep);
-	    sep = ", ";
-	    pp_string (pp, "base: ");
-	    dump_generic_node (pp, TMR_BASE (node), spc, flags, false);
-	  }
-	tmp = TMR_INDEX2 (node);
-	if (tmp)
-	  {
-	    pp_string (pp, sep);
-	    sep = ", ";
-	    pp_string (pp, "base: ");
-	    dump_generic_node (pp, tmp, spc, flags, false);
-	  }
-	tmp = TMR_INDEX (node);
-	if (tmp)
-	  {
-	    pp_string (pp, sep);
-	    sep = ", ";
-	    pp_string (pp, "index: ");
-	    dump_generic_node (pp, tmp, spc, flags, false);
-	  }
-	tmp = TMR_STEP (node);
-	if (tmp)
-	  {
-	    pp_string (pp, sep);
-	    sep = ", ";
-	    pp_string (pp, "step: ");
-	    dump_generic_node (pp, tmp, spc, flags, false);
-	  }
-	tmp = TMR_OFFSET (node);
-	if (tmp)
-	  {
-	    pp_string (pp, sep);
-	    sep = ", ";
-	    pp_string (pp, "offset: ");
-	    dump_generic_node (pp, tmp, spc, flags, false);
-	  }
-	pp_right_bracket (pp);
-      }
+      dump_mem_ref (pp, node, spc, flags);
       break;
 
     case ARRAY_TYPE:
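
A postscript for readers who want to see the new dump form in practice: the
standalone example below is an illustrative sketch only, not part of the patch;
the function name "copy" and the array names are arbitrary, modeled on the
adjusted gcc.dg/tree-ssa/loop-19.c testcase.  Compiled with
-O3 -fno-tree-loop-distribute-patterns -fno-prefetch-loop-arrays
-fdump-tree-optimized on a target such as x86_64 where ivopts forms
TARGET_MEM_REFs, the "optimized" dump should now print the array accesses in
the semantic base + offset + step * index form that the new
"MEM\[^;\]*&a" / "MEM\[^;\]*&c" patterns match, instead of the old
"symbol: ..., index: ..., step: ..." style.

/* Illustrative example only, not part of the patch.  The loop copies one
   array into another so ivopts rewrites both accesses as TARGET_MEM_REFs
   on suitable targets.  */
#define N 2000000
double a[N], c[N];

void
copy (void)
{
  int j;
  for (j = 0; j < N; j++)
    c[j] = a[j];
}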