/* Loop distribution.
   Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Georges-Andre Silber <Georges-Andre.Silber@ensmp.fr>
   and Sebastian Pop <sebastian.pop@amd.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass performs loop distribution: for example, the loop

   |DO I = 2, N
   |    A(I) = B(I) + C
   |    D(I) = A(I-1)*E
   |ENDDO

   is transformed to

   |DOALL I = 2, N
   |   A(I) = B(I) + C
   |ENDDO
   |
   |DOALL I = 2, N
   |   D(I) = A(I-1)*E
   |ENDDO

   This pass uses an RDG (Reduced Dependence Graph) built on top of
   the data dependence relations.  The RDG is then topologically
   sorted to obtain a map of information producers/consumers based on
   which it generates the new loops.  */
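
/* As an illustrative sketch (not part of the original sources), the
   same transformation in C syntax distributes

   |for (i = 1; i < n; i++)
   |  {
   |    a[i] = b[i] + c;
   |    d[i] = a[i - 1] * e;
   |  }

   into

   |for (i = 1; i < n; i++)
   |  a[i] = b[i] + c;
   |
   |for (i = 1; i < n; i++)
   |  d[i] = a[i - 1] * e;

   where neither resulting loop carries a dependence on itself, so
   each one can be executed as a parallel (DOALL) loop.  */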

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"

#include "rtl.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "expr.h"
#include "optabs.h"
#include "tree-chrec.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-pass.h"
#include "lambda.h"
#include "langhooks.h"
#include "tree-vectorizer.h"

/* If bit I is not set, it means that this node represents an
   operation that has already been performed, and that should not be
   performed again.  This is the subgraph of remaining important
   computations that is passed to the DFS algorithm to avoid
   including the same stores several times in different loops.  */
static bitmap remaining_stmts;

/* A node of the RDG is marked in this bitmap when it has as a
   predecessor a node that writes to memory.  */
static bitmap upstream_mem_writes;

/* Update the PHI nodes of NEW_LOOP.  NEW_LOOP is a duplicate of
   ORIG_LOOP.  */

static void
update_phis_for_loop_copy (struct loop *orig_loop, struct loop *new_loop)
{
  tree new_ssa_name;
  gimple_stmt_iterator si_new, si_orig;
  edge orig_loop_latch = loop_latch_edge (orig_loop);
  edge orig_entry_e = loop_preheader_edge (orig_loop);
  edge new_loop_entry_e = loop_preheader_edge (new_loop);

  /* Scan the phis in the headers of the old and new loops
     (they are organized in exactly the same order).  */
  for (si_new = gsi_start_phis (new_loop->header),
       si_orig = gsi_start_phis (orig_loop->header);
       !gsi_end_p (si_new) && !gsi_end_p (si_orig);
       gsi_next (&si_new), gsi_next (&si_orig))
    {
      tree def;
      source_location locus;
      gimple phi_new = gsi_stmt (si_new);
      gimple phi_orig = gsi_stmt (si_orig);

      /* Add the first phi argument for the phi in NEW_LOOP (the one
         associated with the entry of NEW_LOOP).  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_entry_e);
      locus = gimple_phi_arg_location_from_edge (phi_orig, orig_entry_e);
      add_phi_arg (phi_new, def, new_loop_entry_e, locus);

      /* Add the second phi argument for the phi in NEW_LOOP (the one
         associated with the latch of NEW_LOOP).  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
      locus = gimple_phi_arg_location_from_edge (phi_orig, orig_loop_latch);

      if (TREE_CODE (def) == SSA_NAME)
        {
          new_ssa_name = get_current_def (def);

          if (!new_ssa_name)
            /* This only happens if there are no definitions inside the
               loop.  Use the phi_result in this case.  */
            new_ssa_name = PHI_RESULT (phi_new);
        }
      else
        /* Could be an integer.  */
        new_ssa_name = def;

      add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop), locus);
    }
}

/* Return a copy of LOOP placed before LOOP.  */

static struct loop *
copy_loop_before (struct loop *loop)
{
  struct loop *res;
  edge preheader = loop_preheader_edge (loop);

  if (!single_exit (loop))
    return NULL;

  initialize_original_copy_tables ();
  res = slpeel_tree_duplicate_loop_to_edge_cfg (loop, preheader);
  free_original_copy_tables ();

  if (!res)
    return NULL;

  update_phis_for_loop_copy (loop, res);
  rename_variables_in_loop (res);

  return res;
}

/* Creates an empty basic block after LOOP.  */

static void
create_bb_after_loop (struct loop *loop)
{
  edge exit = single_exit (loop);

  if (!exit)
    return;

  split_edge (exit);
}

/* Generate code for PARTITION from the code in LOOP.  The loop is
   copied when COPY_P is true.  All the statements not flagged in the
   PARTITION bitmap are removed from the loop or from its copy.  The
   statements are indexed in sequence inside a basic block, and the
   basic blocks of a loop are taken in dom order.  Returns true when
   the code generation succeeded.  */

static bool
generate_loops_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  unsigned i, x;
  gimple_stmt_iterator bsi;
  basic_block *bbs;

  if (copy_p)
    {
      loop = copy_loop_before (loop);
      create_preheader (loop, CP_SIMPLE_PREHEADERS);
      create_bb_after_loop (loop);
    }

  if (loop == NULL)
    return false;

  /* Remove stmts not in the PARTITION bitmap.  The order in which we
     visit the phi nodes and the statements is exactly as in
     stmts_from_loop.  */
  bbs = get_loop_body_in_dom_order (loop);

  for (x = 0, i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi);)
        if (!bitmap_bit_p (partition, x++))
          remove_phi_node (&bsi, true);
        else
          gsi_next (&bsi);

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi);)
        if (gimple_code (gsi_stmt (bsi)) != GIMPLE_LABEL
            && !bitmap_bit_p (partition, x++))
          gsi_remove (&bsi, false);
        else
          gsi_next (&bsi);

      mark_virtual_ops_in_bb (bb);
    }

  free (bbs);
  return true;
}

/* Build the size argument for a memset call.  */
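
/* For instance (an illustrative computation, not taken from the
   sources): with NB_ITER = 100 iterations over an array of 4-byte
   elements, the size argument built below is

   |(size_t) 100 * (size_t) 4 = 400 bytes.  */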

static inline tree
build_size_arg_loc (location_t loc, tree nb_iter, tree op,
                    gimple_seq *stmt_list)
{
  tree nb_bytes;
  gimple_seq stmts = NULL;

  nb_bytes = fold_build2_loc (loc, MULT_EXPR, size_type_node,
                              fold_convert_loc (loc, size_type_node, nb_iter),
                              fold_convert_loc (loc, size_type_node,
                                                TYPE_SIZE_UNIT (TREE_TYPE (op))));
  nb_bytes = force_gimple_operand (nb_bytes, &stmts, true, NULL);
  gimple_seq_add_seq (stmt_list, stmts);

  return nb_bytes;
}

/* Generate a call to memset.  Return true when the operation succeeded.  */
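
/* As an illustrative sketch (not from the original sources), a loop
   of the form

   |for (i = 0; i < N; i++)
   |  A[i] = 0;

   stores zero with a positive unit stride and can be replaced by

   |memset (&A[0], 0, N * sizeof (A[0]));

   provided the analysis of the data reference for A[i] succeeds.  */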

static bool
generate_memset_zero (gimple stmt, tree op0, tree nb_iter,
                      gimple_stmt_iterator bsi)
{
  tree addr_base;
  tree nb_bytes = NULL;
  bool res = false;
  gimple_seq stmts = NULL, stmt_list = NULL;
  gimple fn_call;
  tree mem, fndecl, fntype, fn;
  gimple_stmt_iterator i;
  struct data_reference *dr = XCNEW (struct data_reference);
  location_t loc = gimple_location (stmt);

  DR_STMT (dr) = stmt;
  DR_REF (dr) = op0;
  if (!dr_analyze_innermost (dr))
    goto end;

  /* Test for a positive stride, iterating over every element.  */
  if (integer_zerop (fold_build2_loc (loc, MINUS_EXPR, integer_type_node,
                                      DR_STEP (dr),
                                      TYPE_SIZE_UNIT (TREE_TYPE (op0)))))
    {
      tree offset = fold_convert_loc (loc, sizetype,
                                      size_binop_loc (loc, PLUS_EXPR,
                                                      DR_OFFSET (dr),
                                                      DR_INIT (dr)));
      addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
                                   TREE_TYPE (DR_BASE_ADDRESS (dr)),
                                   DR_BASE_ADDRESS (dr), offset);
    }

  /* Test for a negative stride, iterating over every element.  */
  else if (integer_zerop (fold_build2_loc (loc, PLUS_EXPR, integer_type_node,
                                           TYPE_SIZE_UNIT (TREE_TYPE (op0)),
                                           DR_STEP (dr))))
    {
      nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);
      addr_base = size_binop_loc (loc, PLUS_EXPR, DR_OFFSET (dr),
                                  DR_INIT (dr));
      addr_base = fold_build2_loc (loc, MINUS_EXPR, sizetype, addr_base,
                                   fold_convert_loc (loc, sizetype, nb_bytes));
      addr_base = force_gimple_operand (addr_base, &stmts, true, NULL);
      gimple_seq_add_seq (&stmt_list, stmts);

      addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
                                   TREE_TYPE (DR_BASE_ADDRESS (dr)),
                                   DR_BASE_ADDRESS (dr), addr_base);
    }
  else
    goto end;

  mem = force_gimple_operand (addr_base, &stmts, true, NULL);
  gimple_seq_add_seq (&stmt_list, stmts);

  fndecl = implicit_built_in_decls[BUILT_IN_MEMSET];
  fntype = TREE_TYPE (fndecl);
  fn = build1 (ADDR_EXPR, build_pointer_type (fntype), fndecl);

  if (!nb_bytes)
    nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);
  fn_call = gimple_build_call (fn, 3, mem, integer_zero_node, nb_bytes);
  gimple_seq_add_stmt (&stmt_list, fn_call);

  for (i = gsi_start (stmt_list); !gsi_end_p (i); gsi_next (&i))
    {
      gimple s = gsi_stmt (i);
      update_stmt_if_modified (s);
    }

  gsi_insert_seq_after (&bsi, stmt_list, GSI_CONTINUE_LINKING);
  res = true;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "generated memset zero\n");

 end:
  free_data_ref (dr);
  return res;
}

/* Propagate phis in BB b to their uses and remove them.  */

static void
prop_phis (basic_block b)
{
  gimple_stmt_iterator psi;
  gimple_seq phis = phi_nodes (b);

  for (psi = gsi_start (phis); !gsi_end_p (psi); )
    {
      gimple phi = gsi_stmt (psi);
      tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0);

      gcc_assert (gimple_phi_num_args (phi) == 1);

      if (!is_gimple_reg (def))
        {
          imm_use_iterator iter;
          use_operand_p use_p;
          gimple stmt;

          FOR_EACH_IMM_USE_STMT (stmt, iter, def)
            FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
              SET_USE (use_p, use);
        }
      else
        replace_uses_by (def, use);

      remove_phi_node (&psi, true);
    }
}

/* Tries to generate a builtin function for the instructions of LOOP
   pointed to by the bits set in PARTITION.  Returns true when the
   operation succeeded.  */

static bool
generate_builtin (struct loop *loop, bitmap partition, bool copy_p)
{
  bool res = false;
  unsigned i, x = 0;
  basic_block *bbs;
  gimple write = NULL;
  tree op0, op1;
  gimple_stmt_iterator bsi;
  tree nb_iter = number_of_exit_cond_executions (loop);

  if (!nb_iter || nb_iter == chrec_dont_know)
    return false;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        x++;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          gimple stmt = gsi_stmt (bsi);

          if (bitmap_bit_p (partition, x++)
              && is_gimple_assign (stmt)
              && !is_gimple_reg (gimple_assign_lhs (stmt)))
            {
              /* Don't generate the builtins when there is more than
                 one memory write.  */
              if (write != NULL)
                goto end;

              write = stmt;
            }
        }
    }

  if (!write)
    goto end;

  op0 = gimple_assign_lhs (write);
  op1 = gimple_assign_rhs1 (write);

  if (!(TREE_CODE (op0) == ARRAY_REF
        || TREE_CODE (op0) == INDIRECT_REF))
    goto end;

  /* The new statements will be placed before LOOP.  */
  bsi = gsi_last_bb (loop_preheader_edge (loop)->src);

  if (gimple_assign_rhs_code (write) == INTEGER_CST
      && (integer_zerop (op1) || real_zerop (op1)))
    res = generate_memset_zero (write, op0, nb_iter, bsi);

  /* If this is the last partition for which we generate code, we have
     to destroy the loop.  */
  if (res && !copy_p)
    {
      unsigned nbbs = loop->num_nodes;
      basic_block src = loop_preheader_edge (loop)->src;
      basic_block dest = single_exit (loop)->dest;
      prop_phis (dest);
      make_edge (src, dest, EDGE_FALLTHRU);
      cancel_loop_tree (loop);

      for (i = 0; i < nbbs; i++)
        delete_basic_block (bbs[i]);

      set_immediate_dominator (CDI_DOMINATORS, dest,
                               recompute_dominator (CDI_DOMINATORS, dest));
    }

 end:
  free (bbs);
  return res;
}

/* Generates code for PARTITION.  For simple loops, this function can
   generate a built-in.  */

static bool
generate_code_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  if (generate_builtin (loop, partition, copy_p))
    return true;

  return generate_loops_for_partition (loop, partition, copy_p);
}

/* Returns true if the node V of RDG cannot be recomputed.  */

static bool
rdg_cannot_recompute_vertex_p (struct graph *rdg, int v)
{
  if (RDG_MEM_WRITE_STMT (rdg, v))
    return true;

  return false;
}

/* Returns true when the vertex V has already been generated in the
   current partition (V is in PROCESSED), or when V belongs to another
   partition and cannot be recomputed (V is not in REMAINING_STMTS).  */

static inline bool
already_processed_vertex_p (bitmap processed, int v)
{
  return (bitmap_bit_p (processed, v)
          || !bitmap_bit_p (remaining_stmts, v));
}

/* Returns NULL when there is no anti-dependence among the successors
   of vertex V, otherwise returns the edge with the anti-dependence.  */
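
/* For example (illustrative), in

   |x = A[i];
   |A[i] = y;

   the read of A[i] is followed by a write to the same location:
   an anti-dependence.  */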

static struct graph_edge *
has_anti_dependence (struct vertex *v)
{
  struct graph_edge *e;

  if (v->succ)
    for (e = v->succ; e; e = e->succ_next)
      if (RDGE_TYPE (e) == anti_dd)
        return e;

  return NULL;
}

/* Returns true when one of the predecessors of V is marked as having
   an upstream memory write, ignoring predecessors that are themselves
   memory writes.  */

static bool
predecessor_has_mem_write (struct graph *rdg, struct vertex *v)
{
  struct graph_edge *e;

  if (v->pred)
    for (e = v->pred; e; e = e->pred_next)
      if (bitmap_bit_p (upstream_mem_writes, e->src)
          /* Don't consider flow channels: a write to memory followed
             by a read from memory.  These channels allow splitting
             the RDG into different partitions.  */
          && !RDG_MEM_WRITE_STMT (rdg, e->src))
        return true;

  return false;
}

/* Initializes the UPSTREAM_MEM_WRITES bitmap following the
   information from RDG.  */

static void
mark_nodes_having_upstream_mem_writes (struct graph *rdg)
{
  int v, x;
  bitmap seen = BITMAP_ALLOC (NULL);

  for (v = rdg->n_vertices - 1; v >= 0; v--)
    if (!bitmap_bit_p (seen, v))
      {
        unsigned i;
        VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);

        graphds_dfs (rdg, &v, 1, &nodes, false, NULL);

        for (i = 0; VEC_iterate (int, nodes, i, x); i++)
          {
            if (bitmap_bit_p (seen, x))
              continue;

            bitmap_set_bit (seen, x);

            if (RDG_MEM_WRITE_STMT (rdg, x)
                || predecessor_has_mem_write (rdg, &(rdg->vertices[x]))
                /* In anti-dependences the read should occur before
                   the write; this is why both the read and the write
                   should be placed in the same partition.  */
                || has_anti_dependence (&(rdg->vertices[x])))
              {
                bitmap_set_bit (upstream_mem_writes, x);
              }
          }

        VEC_free (int, heap, nodes);
      }

  BITMAP_FREE (seen);
}


/* Returns true when vertex U has a memory write node as a predecessor
   in RDG.  */

static bool
has_upstream_mem_writes (int u)
{
  return bitmap_bit_p (upstream_mem_writes, u);
}

static void rdg_flag_vertex_and_dependent (struct graph *, int, bitmap, bitmap,
                                           bitmap, bool *);

/* Flag all the uses of U.  */

static void
rdg_flag_all_uses (struct graph *rdg, int u, bitmap partition, bitmap loops,
                   bitmap processed, bool *part_has_writes)
{
  struct graph_edge *e;

  for (e = rdg->vertices[u].succ; e; e = e->succ_next)
    if (!bitmap_bit_p (processed, e->dest))
      {
        rdg_flag_vertex_and_dependent (rdg, e->dest, partition, loops,
                                       processed, part_has_writes);
        rdg_flag_all_uses (rdg, e->dest, partition, loops, processed,
                           part_has_writes);
      }
}

/* Flag the uses of U, stopping the propagation according to the
   information in UPSTREAM_MEM_WRITES.  */

static void
rdg_flag_uses (struct graph *rdg, int u, bitmap partition, bitmap loops,
               bitmap processed, bool *part_has_writes)
{
  use_operand_p use_p;
  struct vertex *x = &(rdg->vertices[u]);
  gimple stmt = RDGV_STMT (x);
  struct graph_edge *anti_dep = has_anti_dependence (x);

  /* Keep in the same partition the destination of an anti-dependence,
     because this is a store to the exact same location.  Putting this
     in another partition is bad for cache locality.  */
  if (anti_dep)
    {
      int v = anti_dep->dest;

      if (!already_processed_vertex_p (processed, v))
        rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
                                       processed, part_has_writes);
    }

  if (gimple_code (stmt) != GIMPLE_PHI)
    {
      if ((use_p = gimple_vuse_op (stmt)) != NULL_USE_OPERAND_P)
        {
          tree use = USE_FROM_PTR (use_p);

          if (TREE_CODE (use) == SSA_NAME)
            {
              gimple def_stmt = SSA_NAME_DEF_STMT (use);
              int v = rdg_vertex_for_stmt (rdg, def_stmt);

              if (v >= 0
                  && !already_processed_vertex_p (processed, v))
                rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
                                               processed, part_has_writes);
            }
        }
    }

  if (is_gimple_assign (stmt) && has_upstream_mem_writes (u))
    {
      tree op0 = gimple_assign_lhs (stmt);

      /* Scalar channels don't have enough space for transmitting data
         between tasks, unless we add more storage by privatizing.  */
      if (is_gimple_reg (op0))
        {
          use_operand_p use_p;
          imm_use_iterator iter;

          FOR_EACH_IMM_USE_FAST (use_p, iter, op0)
            {
              int v = rdg_vertex_for_stmt (rdg, USE_STMT (use_p));

              if (!already_processed_vertex_p (processed, v))
                rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
                                               processed, part_has_writes);
            }
        }
    }
}

/* Flag V from RDG as part of PARTITION, and also flag its loop number
   in LOOPS.  */

static void
rdg_flag_vertex (struct graph *rdg, int v, bitmap partition, bitmap loops,
                 bool *part_has_writes)
{
  struct loop *loop;

  if (bitmap_bit_p (partition, v))
    return;

  loop = loop_containing_stmt (RDG_STMT (rdg, v));
  bitmap_set_bit (loops, loop->num);
  bitmap_set_bit (partition, v);

  if (rdg_cannot_recompute_vertex_p (rdg, v))
    {
      *part_has_writes = true;
      bitmap_clear_bit (remaining_stmts, v);
    }
}

/* Flag in the bitmap PARTITION the vertex V and all its predecessors.
   Also flag their loop number in LOOPS.  */

static void
rdg_flag_vertex_and_dependent (struct graph *rdg, int v, bitmap partition,
                               bitmap loops, bitmap processed,
                               bool *part_has_writes)
{
  unsigned i;
  VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);
  int x;

  bitmap_set_bit (processed, v);
  rdg_flag_uses (rdg, v, partition, loops, processed, part_has_writes);
  graphds_dfs (rdg, &v, 1, &nodes, false, remaining_stmts);
  rdg_flag_vertex (rdg, v, partition, loops, part_has_writes);

  for (i = 0; VEC_iterate (int, nodes, i, x); i++)
    if (!already_processed_vertex_p (processed, x))
      rdg_flag_vertex_and_dependent (rdg, x, partition, loops, processed,
                                     part_has_writes);

  VEC_free (int, heap, nodes);
}

/* Initialize CONDS with all the condition statements from the basic
   blocks of LOOP.  */

static void
collect_condition_stmts (struct loop *loop, VEC (gimple, heap) **conds)
{
  unsigned i;
  edge e;
  VEC (edge, heap) *exits = get_loop_exit_edges (loop);

  for (i = 0; VEC_iterate (edge, exits, i, e); i++)
    {
      gimple cond = last_stmt (e->src);

      if (cond)
        VEC_safe_push (gimple, heap, *conds, cond);
    }

  VEC_free (edge, heap, exits);
}

/* Add to PARTITION all the exit condition statements for LOOPS
   together with all their dependent statements determined from
   RDG.  */

static void
rdg_flag_loop_exits (struct graph *rdg, bitmap loops, bitmap partition,
                     bitmap processed, bool *part_has_writes)
{
  unsigned i;
  bitmap_iterator bi;
  VEC (gimple, heap) *conds = VEC_alloc (gimple, heap, 3);

  EXECUTE_IF_SET_IN_BITMAP (loops, 0, i, bi)
    collect_condition_stmts (get_loop (i), &conds);

  while (!VEC_empty (gimple, conds))
    {
      gimple cond = VEC_pop (gimple, conds);
      int v = rdg_vertex_for_stmt (rdg, cond);
      bitmap new_loops = BITMAP_ALLOC (NULL);

      if (!already_processed_vertex_p (processed, v))
        rdg_flag_vertex_and_dependent (rdg, v, partition, new_loops,
                                       processed, part_has_writes);

      EXECUTE_IF_SET_IN_BITMAP (new_loops, 0, i, bi)
        if (!bitmap_bit_p (loops, i))
          {
            bitmap_set_bit (loops, i);
            collect_condition_stmts (get_loop (i), &conds);
          }

      BITMAP_FREE (new_loops);
    }

  /* The vector is empty at this point; release its storage.  */
  VEC_free (gimple, heap, conds);
}

/* Flag all the nodes of RDG containing memory accesses that could
   potentially belong to arrays already accessed in the current
   PARTITION.  */
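
/* For example (illustrative), when the current PARTITION contains a
   store to A[i], another statement storing to A[i+1] accesses the
   same array: it is flagged into the same PARTITION together with
   the statements needed to compute it, for better cache locality.  */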

static void
rdg_flag_similar_memory_accesses (struct graph *rdg, bitmap partition,
                                  bitmap loops, bitmap processed,
                                  VEC (int, heap) **other_stores)
{
  bool foo;
  unsigned i, n;
  int j, k, kk;
  bitmap_iterator ii;
  struct graph_edge *e;

  EXECUTE_IF_SET_IN_BITMAP (partition, 0, i, ii)
    if (RDG_MEM_WRITE_STMT (rdg, i)
        || RDG_MEM_READS_STMT (rdg, i))
      {
        for (j = 0; j < rdg->n_vertices; j++)
          if (!bitmap_bit_p (processed, j)
              && (RDG_MEM_WRITE_STMT (rdg, j)
                  || RDG_MEM_READS_STMT (rdg, j))
              && rdg_has_similar_memory_accesses (rdg, i, j))
            {
              /* Flag first the node J itself, and all the nodes that
                 are needed to compute J.  */
              rdg_flag_vertex_and_dependent (rdg, j, partition, loops,
                                             processed, &foo);

              /* When J is a read, we want to coalesce in the same
                 PARTITION all the nodes that are using J: this is
                 needed for better cache locality.  */
              rdg_flag_all_uses (rdg, j, partition, loops, processed, &foo);

              /* Remove from OTHER_STORES the vertex that we flagged.  */
              if (RDG_MEM_WRITE_STMT (rdg, j))
                for (k = 0; VEC_iterate (int, *other_stores, k, kk); k++)
                  if (kk == j)
                    {
                      VEC_unordered_remove (int, *other_stores, k);
                      break;
                    }
            }

        /* If the node I has more than one use, keep all its uses in
           the same PARTITION.  */
        for (n = 0, e = rdg->vertices[i].succ; e; e = e->succ_next, n++);

        if (n > 1)
          rdg_flag_all_uses (rdg, i, partition, loops, processed, &foo);
      }
}

/* Returns a bitmap in which all the statements needed for computing
   the strongly connected component C of the RDG are flagged, also
   including the loop exit conditions.  */

static bitmap
build_rdg_partition_for_component (struct graph *rdg, rdgc c,
                                   bool *part_has_writes,
                                   VEC (int, heap) **other_stores)
{
  int i, v;
  bitmap partition = BITMAP_ALLOC (NULL);
  bitmap loops = BITMAP_ALLOC (NULL);
  bitmap processed = BITMAP_ALLOC (NULL);

  for (i = 0; VEC_iterate (int, c->vertices, i, v); i++)
    if (!already_processed_vertex_p (processed, v))
      rdg_flag_vertex_and_dependent (rdg, v, partition, loops, processed,
                                     part_has_writes);

  /* Also iterate on the array of stores not in the starting vertices,
     and determine those vertices that have some memory affinity with
     the current nodes in the component: these are stores to the same
     arrays, i.e. we're taking care of cache locality.  */
  rdg_flag_similar_memory_accesses (rdg, partition, loops, processed,
                                    other_stores);

  rdg_flag_loop_exits (rdg, loops, partition, processed, part_has_writes);

  BITMAP_FREE (processed);
  BITMAP_FREE (loops);
  return partition;
}

/* Free memory for COMPONENTS.  */

static void
free_rdg_components (VEC (rdgc, heap) *components)
{
  int i;
  rdgc x;

  for (i = 0; VEC_iterate (rdgc, components, i, x); i++)
    {
      VEC_free (int, heap, x->vertices);
      free (x);
    }
}

/* Build the COMPONENTS vector with the strongly connected components
   of RDG in which the STARTING_VERTICES occur.  */

static void
rdg_build_components (struct graph *rdg, VEC (int, heap) *starting_vertices,
                      VEC (rdgc, heap) **components)
{
  int i, v;
  bitmap saved_components = BITMAP_ALLOC (NULL);
  int n_components = graphds_scc (rdg, NULL);
  VEC (int, heap) **all_components = XNEWVEC (VEC (int, heap) *, n_components);

  for (i = 0; i < n_components; i++)
    all_components[i] = VEC_alloc (int, heap, 3);

  for (i = 0; i < rdg->n_vertices; i++)
    VEC_safe_push (int, heap, all_components[rdg->vertices[i].component], i);

  for (i = 0; VEC_iterate (int, starting_vertices, i, v); i++)
    {
      int c = rdg->vertices[v].component;

      if (!bitmap_bit_p (saved_components, c))
        {
          rdgc x = XCNEW (struct rdg_component);
          x->num = c;
          x->vertices = all_components[c];

          VEC_safe_push (rdgc, heap, *components, x);
          bitmap_set_bit (saved_components, c);
        }
    }

  for (i = 0; i < n_components; i++)
    if (!bitmap_bit_p (saved_components, i))
      VEC_free (int, heap, all_components[i]);

  free (all_components);
  BITMAP_FREE (saved_components);
}

/* Aggregate several components into a useful partition that is
   registered in the PARTITIONS vector.  Partitions will be
   distributed in different loops.  */

static void
rdg_build_partitions (struct graph *rdg, VEC (rdgc, heap) *components,
                      VEC (int, heap) **other_stores,
                      VEC (bitmap, heap) **partitions, bitmap processed)
{
  int i;
  rdgc x;
  bitmap partition = BITMAP_ALLOC (NULL);

  for (i = 0; VEC_iterate (rdgc, components, i, x); i++)
    {
      bitmap np;
      bool part_has_writes = false;
      int v = VEC_index (int, x->vertices, 0);

      if (bitmap_bit_p (processed, v))
        continue;

      np = build_rdg_partition_for_component (rdg, x, &part_has_writes,
                                              other_stores);
      bitmap_ior_into (partition, np);
      bitmap_ior_into (processed, np);
      BITMAP_FREE (np);

      if (part_has_writes)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "ldist useful partition:\n");
              dump_bitmap (dump_file, partition);
            }

          VEC_safe_push (bitmap, heap, *partitions, partition);
          partition = BITMAP_ALLOC (NULL);
        }
    }

  /* Add the nodes from the RDG that were not marked as processed, and
     that are used outside the current loop.  These are scalar
     computations that are not yet part of previous partitions.  */
  for (i = 0; i < rdg->n_vertices; i++)
    if (!bitmap_bit_p (processed, i)
        && rdg_defs_used_in_other_loops_p (rdg, i))
      VEC_safe_push (int, heap, *other_stores, i);

  /* If there are still statements left in the OTHER_STORES array,
     create other components and partitions with these stores and
     their dependences.  */
  if (VEC_length (int, *other_stores) > 0)
    {
      VEC (rdgc, heap) *comps = VEC_alloc (rdgc, heap, 3);
      VEC (int, heap) *foo = VEC_alloc (int, heap, 3);

      rdg_build_components (rdg, *other_stores, &comps);
      rdg_build_partitions (rdg, comps, &foo, partitions, processed);

      VEC_free (int, heap, foo);
      free_rdg_components (comps);
    }

  /* If there is something left in the last partition, save it.  */
  if (bitmap_count_bits (partition) > 0)
    VEC_safe_push (bitmap, heap, *partitions, partition);
  else
    BITMAP_FREE (partition);
}

/* Dump to FILE the PARTITIONS.  */

static void
dump_rdg_partitions (FILE *file, VEC (bitmap, heap) *partitions)
{
  int i;
  bitmap partition;

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    debug_bitmap_file (file, partition);
}

/* Debug PARTITIONS.  */
extern void debug_rdg_partitions (VEC (bitmap, heap) *);

void
debug_rdg_partitions (VEC (bitmap, heap) *partitions)
{
  dump_rdg_partitions (stderr, partitions);
}

/* Returns the number of read and write operations in the RDG.  */

static int
number_of_rw_in_rdg (struct graph *rdg)
{
  int i, res = 0;

  for (i = 0; i < rdg->n_vertices; i++)
    {
      if (RDG_MEM_WRITE_STMT (rdg, i))
        ++res;

      if (RDG_MEM_READS_STMT (rdg, i))
        ++res;
    }

  return res;
}

/* Returns the number of read and write operations in a PARTITION of
   the RDG.  */

static int
number_of_rw_in_partition (struct graph *rdg, bitmap partition)
{
  int res = 0;
  unsigned i;
  bitmap_iterator ii;

  EXECUTE_IF_SET_IN_BITMAP (partition, 0, i, ii)
    {
      if (RDG_MEM_WRITE_STMT (rdg, i))
        ++res;

      if (RDG_MEM_READS_STMT (rdg, i))
        ++res;
    }

  return res;
}

/* Returns true when one of the PARTITIONS contains all the read or
   write operations of RDG.  */

static bool
partition_contains_all_rw (struct graph *rdg, VEC (bitmap, heap) *partitions)
{
  int i;
  bitmap partition;
  int nrw = number_of_rw_in_rdg (rdg);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    if (nrw == number_of_rw_in_partition (rdg, partition))
      return true;

  return false;
}

/* Generate code from STARTING_VERTICES in RDG.  Returns the number of
   distributed loops.  */

static int
ldist_gen (struct loop *loop, struct graph *rdg,
           VEC (int, heap) *starting_vertices)
{
  int i, nbp;
  VEC (rdgc, heap) *components = VEC_alloc (rdgc, heap, 3);
  VEC (bitmap, heap) *partitions = VEC_alloc (bitmap, heap, 3);
  VEC (int, heap) *other_stores = VEC_alloc (int, heap, 3);
  bitmap partition, processed = BITMAP_ALLOC (NULL);

  remaining_stmts = BITMAP_ALLOC (NULL);
  upstream_mem_writes = BITMAP_ALLOC (NULL);

  for (i = 0; i < rdg->n_vertices; i++)
    {
      bitmap_set_bit (remaining_stmts, i);

      /* Save in OTHER_STORES all the memory writes that are not in
         STARTING_VERTICES.  */
      if (RDG_MEM_WRITE_STMT (rdg, i))
        {
          int v;
          unsigned j;
          bool found = false;

          for (j = 0; VEC_iterate (int, starting_vertices, j, v); j++)
            if (i == v)
              {
                found = true;
                break;
              }

          if (!found)
            VEC_safe_push (int, heap, other_stores, i);
        }
    }

  mark_nodes_having_upstream_mem_writes (rdg);
  rdg_build_components (rdg, starting_vertices, &components);
  rdg_build_partitions (rdg, components, &other_stores, &partitions,
                        processed);
  BITMAP_FREE (processed);
  nbp = VEC_length (bitmap, partitions);

  if (nbp <= 1
      || partition_contains_all_rw (rdg, partitions))
    goto ldist_done;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_rdg_partitions (dump_file, partitions);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    if (!generate_code_for_partition (loop, partition, i < nbp - 1))
      goto ldist_done;

  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  update_ssa (TODO_update_ssa_only_virtuals | TODO_update_ssa);

 ldist_done:

  BITMAP_FREE (remaining_stmts);
  BITMAP_FREE (upstream_mem_writes);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    BITMAP_FREE (partition);

  VEC_free (int, heap, other_stores);
  VEC_free (bitmap, heap, partitions);
  free_rdg_components (components);
  return nbp;
}

/* Distributes the code from LOOP in such a way that producer
   statements are placed before consumer statements.  When STMTS is
   NULL, performs the maximal distribution; if STMTS is not NULL,
   tries to separate only these statements from the LOOP's body.
   Returns the number of distributed loops.  */

static int
distribute_loop (struct loop *loop, VEC (gimple, heap) *stmts)
{
  int res = 0;
  struct graph *rdg;
  gimple s;
  unsigned i;
  VEC (int, heap) *vertices;

  if (loop->num_nodes > 2)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FIXME: Loop %d not distributed: it has more than two basic blocks.\n",
                 loop->num);

      return res;
    }

  rdg = build_rdg (loop);

  if (!rdg)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FIXME: Loop %d not distributed: failed to build the RDG.\n",
                 loop->num);

      return res;
    }

  vertices = VEC_alloc (int, heap, 3);

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_rdg (dump_file, rdg);

  for (i = 0; VEC_iterate (gimple, stmts, i, s); i++)
    {
      int v = rdg_vertex_for_stmt (rdg, s);

      if (v >= 0)
        {
          VEC_safe_push (int, heap, vertices, v);

          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file,
                     "ldist asked to generate code for vertex %d\n", v);
        }
    }

  res = ldist_gen (loop, rdg, vertices);
  VEC_free (int, heap, vertices);
  free_rdg (rdg);

  return res;
}

/* Distribute all loops in the current function.  */

static unsigned int
tree_loop_distribution (void)
{
  struct loop *loop;
  loop_iterator li;
  int nb_generated_loops = 0;

  FOR_EACH_LOOP (li, loop, 0)
    {
      VEC (gimple, heap) *work_list = VEC_alloc (gimple, heap, 3);

      /* With the following work list, we're asking distribute_loop
         to separate the stores of the loop: when dependences allow,
         it will end up with one store per loop.  */
      stores_from_loop (loop, &work_list);

      /* A simple heuristic for cache locality is to not split stores
         to the same array.  Without this call, an unrolled loop would
         be split into as many loops as the unroll factor, each loop
         storing into the same array.  */
      remove_similar_memory_refs (&work_list);

      nb_generated_loops = distribute_loop (loop, work_list);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          if (nb_generated_loops > 1)
            fprintf (dump_file, "Loop %d distributed: split to %d loops.\n",
                     loop->num, nb_generated_loops);
          else
            fprintf (dump_file, "Loop %d is the same.\n", loop->num);
        }

      verify_loop_structure ();

      VEC_free (gimple, heap, work_list);
    }

  return 0;
}

static bool
gate_tree_loop_distribution (void)
{
  return flag_tree_loop_distribution != 0;
}

struct gimple_opt_pass pass_loop_distribution =
{
 {
  GIMPLE_PASS,
  "ldist",                              /* name */
  gate_tree_loop_distribution,          /* gate */
  tree_loop_distribution,               /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TREE_LOOP_DISTRIBUTION,            /* tv_id */
  PROP_cfg | PROP_ssa,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func | TODO_verify_loops    /* todo_flags_finish */
 }
};