gcc/tree-loop-distribution.c
/* Loop distribution.
   Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Georges-Andre Silber <Georges-Andre.Silber@ensmp.fr>
   and Sebastian Pop <sebastian.pop@amd.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass performs loop distribution: for example, the loop

   |DO I = 2, N
   |    A(I) = B(I) + C
   |    D(I) = A(I-1)*E
   |ENDDO

   is transformed to

   |DOALL I = 2, N
   |   A(I) = B(I) + C
   |ENDDO
   |
   |DOALL I = 2, N
   |   D(I) = A(I-1)*E
   |ENDDO

   This pass uses an RDG, Reduced Dependence Graph built on top of the
   data dependence relations.  The RDG is then topologically sorted to
   obtain a map of information producers/consumers based on which it
   generates the new loops.  */
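
/* Rough sketch of the flow implemented below (a summary derived from
   the code in this file; see the individual functions for details):

   - tree_loop_distribution walks every loop, collects its stores with
     stores_from_loop, coalesces stores to the same arrays with
     remove_similar_memory_refs, and calls distribute_loop on the
     resulting work list;

   - distribute_loop builds the RDG for the loop, maps the work list
     statements to RDG vertices, and hands them to ldist_gen;

   - ldist_gen groups the strongly connected components of the RDG into
     partitions (rdg_build_components, rdg_build_partitions) and calls
     generate_code_for_partition on each of them;

   - generate_code_for_partition first tries to replace a partition by a
     builtin call (currently only a zeroing memset, see generate_builtin),
     and otherwise copies the loop and removes the statements that do not
     belong to the partition (generate_loops_for_partition).  */
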
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"

#include "rtl.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "expr.h"
#include "optabs.h"
#include "tree-chrec.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-pass.h"
#include "lambda.h"
#include "langhooks.h"
#include "tree-vectorizer.h"

/* If bit I is not set, it means that this node represents an
   operation that has already been performed, and that should not be
   performed again.  This is the subgraph of remaining important
   computations that is passed to the DFS algorithm to avoid including
   the same stores several times in different loops.  */
static bitmap remaining_stmts;

/* A node of the RDG is marked in this bitmap when it has as a
   predecessor a node that writes to memory.  */
static bitmap upstream_mem_writes;

/* Update the PHI nodes of NEW_LOOP.  NEW_LOOP is a duplicate of
   ORIG_LOOP.  */

static void
update_phis_for_loop_copy (struct loop *orig_loop, struct loop *new_loop)
{
  tree new_ssa_name;
  gimple_stmt_iterator si_new, si_orig;
  edge orig_loop_latch = loop_latch_edge (orig_loop);
  edge orig_entry_e = loop_preheader_edge (orig_loop);
  edge new_loop_entry_e = loop_preheader_edge (new_loop);

  /* Scan the phis in the headers of the old and new loops
     (they are organized in exactly the same order).  */
  for (si_new = gsi_start_phis (new_loop->header),
       si_orig = gsi_start_phis (orig_loop->header);
       !gsi_end_p (si_new) && !gsi_end_p (si_orig);
       gsi_next (&si_new), gsi_next (&si_orig))
    {
      tree def;
      source_location locus;
      gimple phi_new = gsi_stmt (si_new);
      gimple phi_orig = gsi_stmt (si_orig);

      /* Add the first phi argument for the phi in NEW_LOOP (the one
         associated with the entry of NEW_LOOP).  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_entry_e);
      locus = gimple_phi_arg_location_from_edge (phi_orig, orig_entry_e);
      add_phi_arg (phi_new, def, new_loop_entry_e, locus);

      /* Add the second phi argument for the phi in NEW_LOOP (the one
         associated with the latch of NEW_LOOP).  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
      locus = gimple_phi_arg_location_from_edge (phi_orig, orig_loop_latch);

      if (TREE_CODE (def) == SSA_NAME)
        {
          new_ssa_name = get_current_def (def);

          if (!new_ssa_name)
            /* This only happens if there are no definitions inside the
               loop.  Use the phi_result in this case.  */
            new_ssa_name = PHI_RESULT (phi_new);
        }
      else
        /* Could be an integer.  */
        new_ssa_name = def;

      add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop), locus);
    }
}

/* Return a copy of LOOP placed before LOOP.  */

static struct loop *
copy_loop_before (struct loop *loop)
{
  struct loop *res;
  edge preheader = loop_preheader_edge (loop);

  if (!single_exit (loop))
    return NULL;

  initialize_original_copy_tables ();
  res = slpeel_tree_duplicate_loop_to_edge_cfg (loop, preheader);
  free_original_copy_tables ();

  if (!res)
    return NULL;

  update_phis_for_loop_copy (loop, res);
  rename_variables_in_loop (res);

  return res;
}

/* Creates an empty basic block after LOOP.  */

static void
create_bb_after_loop (struct loop *loop)
{
  edge exit = single_exit (loop);

  if (!exit)
    return;

  split_edge (exit);
}

/* Generate code for PARTITION from the code in LOOP.  The loop is
   copied when COPY_P is true.  All the statements not flagged in the
   PARTITION bitmap are removed from the loop or from its copy.  The
   statements are indexed in sequence inside a basic block, and the
   basic blocks of a loop are taken in dom order.  Returns true when
   the code generation succeeded.  */

static bool
generate_loops_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  unsigned i, x;
  gimple_stmt_iterator bsi;
  basic_block *bbs;

  if (copy_p)
    {
      loop = copy_loop_before (loop);
      if (loop == NULL)
        return false;

      create_preheader (loop, CP_SIMPLE_PREHEADERS);
      create_bb_after_loop (loop);
    }

  /* Remove stmts not in the PARTITION bitmap.  The order in which we
     visit the phi nodes and the statements is exactly as in
     stmts_from_loop.  */
  bbs = get_loop_body_in_dom_order (loop);

  for (x = 0, i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi);)
        if (!bitmap_bit_p (partition, x++))
          remove_phi_node (&bsi, true);
        else
          gsi_next (&bsi);

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi);)
        if (gimple_code (gsi_stmt (bsi)) != GIMPLE_LABEL
            && !bitmap_bit_p (partition, x++))
          gsi_remove (&bsi, false);
        else
          gsi_next (&bsi);

      mark_virtual_ops_in_bb (bb);
    }

  free (bbs);
  return true;
}

/* Build the size argument for a memset call.  */

static inline tree
build_size_arg_loc (location_t loc, tree nb_iter, tree op, gimple_seq *stmt_list)
{
  tree nb_bytes;
  gimple_seq stmts = NULL;

  nb_bytes = fold_build2_loc (loc, MULT_EXPR, size_type_node,
                              fold_convert_loc (loc, size_type_node, nb_iter),
                              fold_convert_loc (loc, size_type_node,
                                                TYPE_SIZE_UNIT (TREE_TYPE (op))));
  nb_bytes = force_gimple_operand (nb_bytes, &stmts, true, NULL);
  gimple_seq_add_seq (stmt_list, stmts);

  return nb_bytes;
}

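/* As an illustration of the transformation attempted below (a sketch,
   not the exact trees built by the code): a loop of the form

   |for (i = 0; i < N; i++)
   |  A[i] = 0;

   in which the only memory write has a stride equal to the size of the
   accessed elements and stores a zero constant, can be replaced by

   |memset (&A[0], 0, N * sizeof (A[0]));

   inserted before the loop, the number of bytes being computed by
   build_size_arg_loc from the number of iterations.  */
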
/* Generate a call to memset.  Return true when the operation succeeded.  */

static bool
generate_memset_zero (gimple stmt, tree op0, tree nb_iter,
                      gimple_stmt_iterator bsi)
{
  tree addr_base;
  tree nb_bytes = NULL;
  bool res = false;
  gimple_seq stmts = NULL, stmt_list = NULL;
  gimple fn_call;
  tree mem, fndecl, fntype, fn;
  gimple_stmt_iterator i;
  struct data_reference *dr = XCNEW (struct data_reference);
  location_t loc = gimple_location (stmt);

  DR_STMT (dr) = stmt;
  DR_REF (dr) = op0;
  if (!dr_analyze_innermost (dr))
    goto end;

  /* Test for a positive stride, iterating over every element.  */
  if (integer_zerop (fold_build2_loc (loc,
                                      MINUS_EXPR, integer_type_node, DR_STEP (dr),
                                      TYPE_SIZE_UNIT (TREE_TYPE (op0)))))
    {
      tree offset = fold_convert_loc (loc, sizetype,
                                      size_binop_loc (loc, PLUS_EXPR,
                                                      DR_OFFSET (dr),
                                                      DR_INIT (dr)));
      addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
                                   TREE_TYPE (DR_BASE_ADDRESS (dr)),
                                   DR_BASE_ADDRESS (dr), offset);
    }

  /* Test for a negative stride, iterating over every element.  */
  else if (integer_zerop (fold_build2_loc (loc, PLUS_EXPR, integer_type_node,
                                           TYPE_SIZE_UNIT (TREE_TYPE (op0)),
                                           DR_STEP (dr))))
    {
      nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);
      addr_base = size_binop_loc (loc, PLUS_EXPR, DR_OFFSET (dr), DR_INIT (dr));
      addr_base = fold_build2_loc (loc, MINUS_EXPR, sizetype, addr_base,
                                   fold_convert_loc (loc, sizetype, nb_bytes));
      addr_base = force_gimple_operand (addr_base, &stmts, true, NULL);
      gimple_seq_add_seq (&stmt_list, stmts);

      addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
                                   TREE_TYPE (DR_BASE_ADDRESS (dr)),
                                   DR_BASE_ADDRESS (dr), addr_base);
    }
  else
    goto end;

  mem = force_gimple_operand (addr_base, &stmts, true, NULL);
  gimple_seq_add_seq (&stmt_list, stmts);

  fndecl = implicit_built_in_decls[BUILT_IN_MEMSET];
  fntype = TREE_TYPE (fndecl);
  fn = build1 (ADDR_EXPR, build_pointer_type (fntype), fndecl);

  if (!nb_bytes)
    nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);
  fn_call = gimple_build_call (fn, 3, mem, integer_zero_node, nb_bytes);
  gimple_seq_add_stmt (&stmt_list, fn_call);

  for (i = gsi_start (stmt_list); !gsi_end_p (i); gsi_next (&i))
    {
      gimple s = gsi_stmt (i);
      update_stmt_if_modified (s);
    }

  gsi_insert_seq_after (&bsi, stmt_list, GSI_CONTINUE_LINKING);
  res = true;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "generated memset zero\n");

 end:
  free_data_ref (dr);
  return res;
}

/* Propagate the single-argument phi nodes of basic block B to their
   uses and remove them.  */

static void
prop_phis (basic_block b)
{
  gimple_stmt_iterator psi;
  gimple_seq phis = phi_nodes (b);

  for (psi = gsi_start (phis); !gsi_end_p (psi); )
    {
      gimple phi = gsi_stmt (psi);
      tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0);

      gcc_assert (gimple_phi_num_args (phi) == 1);

      if (!is_gimple_reg (def))
        {
          imm_use_iterator iter;
          use_operand_p use_p;
          gimple stmt;

          FOR_EACH_IMM_USE_STMT (stmt, iter, def)
            FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
              SET_USE (use_p, use);
        }
      else
        replace_uses_by (def, use);

      remove_phi_node (&psi, true);
    }
}

/* Tries to generate a builtin function for the instructions of LOOP
   pointed to by the bits set in PARTITION.  Returns true when the
   operation succeeded.  */

static bool
generate_builtin (struct loop *loop, bitmap partition, bool copy_p)
{
  bool res = false;
  unsigned i, x = 0;
  basic_block *bbs;
  gimple write = NULL;
  tree op0, op1;
  gimple_stmt_iterator bsi;
  tree nb_iter = number_of_exit_cond_executions (loop);

  if (!nb_iter || nb_iter == chrec_dont_know)
    return false;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        x++;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          gimple stmt = gsi_stmt (bsi);

          if (bitmap_bit_p (partition, x++)
              && is_gimple_assign (stmt)
              && !is_gimple_reg (gimple_assign_lhs (stmt)))
            {
              /* Don't generate the builtins when there is more than
                 one memory write.  */
              if (write != NULL)
                goto end;

              write = stmt;
            }
        }
    }

  if (!write)
    goto end;

  op0 = gimple_assign_lhs (write);
  op1 = gimple_assign_rhs1 (write);

  if (!(TREE_CODE (op0) == ARRAY_REF
        || TREE_CODE (op0) == INDIRECT_REF))
    goto end;

  /* The new statements will be placed before LOOP.  */
  bsi = gsi_last_bb (loop_preheader_edge (loop)->src);

  if (gimple_assign_rhs_code (write) == INTEGER_CST
      && (integer_zerop (op1) || real_zerop (op1)))
    res = generate_memset_zero (write, op0, nb_iter, bsi);

  /* If this is the last partition for which we generate code, we have
     to destroy the loop.  */
  if (res && !copy_p)
    {
      unsigned nbbs = loop->num_nodes;
      basic_block src = loop_preheader_edge (loop)->src;
      basic_block dest = single_exit (loop)->dest;
      prop_phis (dest);
      make_edge (src, dest, EDGE_FALLTHRU);
      cancel_loop_tree (loop);

      for (i = 0; i < nbbs; i++)
        delete_basic_block (bbs[i]);

      set_immediate_dominator (CDI_DOMINATORS, dest,
                               recompute_dominator (CDI_DOMINATORS, dest));
    }

 end:
  free (bbs);
  return res;
}

/* Generates code for PARTITION.  For simple loops, this function can
   generate a built-in.  */

static bool
generate_code_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  if (generate_builtin (loop, partition, copy_p))
    return true;

  return generate_loops_for_partition (loop, partition, copy_p);
}


/* Returns true if the node V of RDG cannot be recomputed.  */

static bool
rdg_cannot_recompute_vertex_p (struct graph *rdg, int v)
{
  if (RDG_MEM_WRITE_STMT (rdg, v))
    return true;

  return false;
}

/* Returns true when the vertex V has already been generated in the
   current partition (V is in PROCESSED), or when V belongs to another
   partition and cannot be recomputed (V is not in REMAINING_STMTS).  */

static inline bool
already_processed_vertex_p (bitmap processed, int v)
{
  return (bitmap_bit_p (processed, v)
          || !bitmap_bit_p (remaining_stmts, v));
}

/* Returns NULL when there is no anti-dependence among the successors
   of vertex V, otherwise returns the edge with the anti-dep.  */

static struct graph_edge *
has_anti_dependence (struct vertex *v)
{
  struct graph_edge *e;

  if (v->succ)
    for (e = v->succ; e; e = e->succ_next)
      if (RDGE_TYPE (e) == anti_dd)
        return e;

  return NULL;
}

/* Returns true when one of the predecessors of V has been marked as
   having upstream memory writes, ignoring flow channels (a write to
   memory followed by a read from memory).  */

static bool
predecessor_has_mem_write (struct graph *rdg, struct vertex *v)
{
  struct graph_edge *e;

  if (v->pred)
    for (e = v->pred; e; e = e->pred_next)
      if (bitmap_bit_p (upstream_mem_writes, e->src)
          /* Don't consider flow channels: a write to memory followed
             by a read from memory.  These channels allow the split of
             the RDG in different partitions.  */
          && !RDG_MEM_WRITE_STMT (rdg, e->src))
        return true;

  return false;
}

/* Initializes the upstream_mem_writes bitmap following the
   information from RDG.  */

static void
mark_nodes_having_upstream_mem_writes (struct graph *rdg)
{
  int v, x;
  bitmap seen = BITMAP_ALLOC (NULL);

  for (v = rdg->n_vertices - 1; v >= 0; v--)
    if (!bitmap_bit_p (seen, v))
      {
        unsigned i;
        VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);
        bool has_upstream_mem_write_p = false;

        graphds_dfs (rdg, &v, 1, &nodes, false, NULL);

        for (i = 0; VEC_iterate (int, nodes, i, x); i++)
          {
            if (bitmap_bit_p (seen, x))
              continue;

            bitmap_set_bit (seen, x);

            if (RDG_MEM_WRITE_STMT (rdg, x)
                || predecessor_has_mem_write (rdg, &(rdg->vertices[x]))
                /* In anti-dependences the read should occur before
                   the write; this is why both the read and the write
                   should be placed in the same partition.  */
                || has_anti_dependence (&(rdg->vertices[x])))
              {
                has_upstream_mem_write_p = true;
                bitmap_set_bit (upstream_mem_writes, x);
              }
          }

        VEC_free (int, heap, nodes);
      }
}

/* Returns true when vertex u has a memory write node as a predecessor
   in RDG.  */

static bool
has_upstream_mem_writes (int u)
{
  return bitmap_bit_p (upstream_mem_writes, u);
}

static void rdg_flag_vertex_and_dependent (struct graph *, int, bitmap, bitmap,
                                           bitmap, bool *);

/* Flag all the uses of U.  */

static void
rdg_flag_all_uses (struct graph *rdg, int u, bitmap partition, bitmap loops,
                   bitmap processed, bool *part_has_writes)
{
  struct graph_edge *e;

  for (e = rdg->vertices[u].succ; e; e = e->succ_next)
    if (!bitmap_bit_p (processed, e->dest))
      {
        rdg_flag_vertex_and_dependent (rdg, e->dest, partition, loops,
                                       processed, part_has_writes);
        rdg_flag_all_uses (rdg, e->dest, partition, loops, processed,
                           part_has_writes);
      }
}

/* Flag the uses of U stopping following the information from
   upstream_mem_writes.  */

static void
rdg_flag_uses (struct graph *rdg, int u, bitmap partition, bitmap loops,
               bitmap processed, bool *part_has_writes)
{
  use_operand_p use_p;
  struct vertex *x = &(rdg->vertices[u]);
  gimple stmt = RDGV_STMT (x);
  struct graph_edge *anti_dep = has_anti_dependence (x);

  /* Keep in the same partition the destination of an antidependence,
     because this is a store to the exact same location.  Putting this
     in another partition is bad for cache locality.  */
  if (anti_dep)
    {
      int v = anti_dep->dest;

      if (!already_processed_vertex_p (processed, v))
        rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
                                       processed, part_has_writes);
    }

  if (gimple_code (stmt) != GIMPLE_PHI)
    {
      if ((use_p = gimple_vuse_op (stmt)) != NULL_USE_OPERAND_P)
        {
          tree use = USE_FROM_PTR (use_p);

          if (TREE_CODE (use) == SSA_NAME)
            {
              gimple def_stmt = SSA_NAME_DEF_STMT (use);
              int v = rdg_vertex_for_stmt (rdg, def_stmt);

              if (v >= 0
                  && !already_processed_vertex_p (processed, v))
                rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
                                               processed, part_has_writes);
            }
        }
    }

  if (is_gimple_assign (stmt) && has_upstream_mem_writes (u))
    {
      tree op0 = gimple_assign_lhs (stmt);

      /* Scalar channels don't have enough space for transmitting data
         between tasks, unless we add more storage by privatizing.  */
      if (is_gimple_reg (op0))
        {
          use_operand_p use_p;
          imm_use_iterator iter;

          FOR_EACH_IMM_USE_FAST (use_p, iter, op0)
            {
              int v = rdg_vertex_for_stmt (rdg, USE_STMT (use_p));

              if (!already_processed_vertex_p (processed, v))
                rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
                                               processed, part_has_writes);
            }
        }
    }
}

/* Flag V from RDG as part of PARTITION, and also flag its loop number
   in LOOPS.  */

static void
rdg_flag_vertex (struct graph *rdg, int v, bitmap partition, bitmap loops,
                 bool *part_has_writes)
{
  struct loop *loop;

  if (bitmap_bit_p (partition, v))
    return;

  loop = loop_containing_stmt (RDG_STMT (rdg, v));
  bitmap_set_bit (loops, loop->num);
  bitmap_set_bit (partition, v);

  if (rdg_cannot_recompute_vertex_p (rdg, v))
    {
      *part_has_writes = true;
      bitmap_clear_bit (remaining_stmts, v);
    }
}

/* Flag in the bitmap PARTITION the vertex V and all its predecessors.
   Also flag their loop number in LOOPS.  */

static void
rdg_flag_vertex_and_dependent (struct graph *rdg, int v, bitmap partition,
                               bitmap loops, bitmap processed,
                               bool *part_has_writes)
{
  unsigned i;
  VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);
  int x;

  bitmap_set_bit (processed, v);
  rdg_flag_uses (rdg, v, partition, loops, processed, part_has_writes);
  graphds_dfs (rdg, &v, 1, &nodes, false, remaining_stmts);
  rdg_flag_vertex (rdg, v, partition, loops, part_has_writes);

  for (i = 0; VEC_iterate (int, nodes, i, x); i++)
    if (!already_processed_vertex_p (processed, x))
      rdg_flag_vertex_and_dependent (rdg, x, partition, loops, processed,
                                     part_has_writes);

  VEC_free (int, heap, nodes);
}

/* Initialize CONDS with all the condition statements from the basic
   blocks of LOOP.  */

static void
collect_condition_stmts (struct loop *loop, VEC (gimple, heap) **conds)
{
  unsigned i;
  edge e;
  VEC (edge, heap) *exits = get_loop_exit_edges (loop);

  for (i = 0; VEC_iterate (edge, exits, i, e); i++)
    {
      gimple cond = last_stmt (e->src);

      if (cond)
        VEC_safe_push (gimple, heap, *conds, cond);
    }

  VEC_free (edge, heap, exits);
}

/* Add to PARTITION all the exit condition statements for LOOPS
   together with all their dependent statements determined from
   RDG.  */

static void
rdg_flag_loop_exits (struct graph *rdg, bitmap loops, bitmap partition,
                     bitmap processed, bool *part_has_writes)
{
  unsigned i;
  bitmap_iterator bi;
  VEC (gimple, heap) *conds = VEC_alloc (gimple, heap, 3);

  EXECUTE_IF_SET_IN_BITMAP (loops, 0, i, bi)
    collect_condition_stmts (get_loop (i), &conds);

  while (!VEC_empty (gimple, conds))
    {
      gimple cond = VEC_pop (gimple, conds);
      int v = rdg_vertex_for_stmt (rdg, cond);
      bitmap new_loops = BITMAP_ALLOC (NULL);

      if (!already_processed_vertex_p (processed, v))
        rdg_flag_vertex_and_dependent (rdg, v, partition, new_loops, processed,
                                       part_has_writes);

      EXECUTE_IF_SET_IN_BITMAP (new_loops, 0, i, bi)
        if (!bitmap_bit_p (loops, i))
          {
            bitmap_set_bit (loops, i);
            collect_condition_stmts (get_loop (i), &conds);
          }

      BITMAP_FREE (new_loops);
    }
}

/* Flag all the nodes of RDG containing memory accesses that could
   potentially belong to arrays already accessed in the current
   PARTITION.  */

static void
rdg_flag_similar_memory_accesses (struct graph *rdg, bitmap partition,
                                  bitmap loops, bitmap processed,
                                  VEC (int, heap) **other_stores)
{
  bool foo;
  unsigned i, n;
  int j, k, kk;
  bitmap_iterator ii;
  struct graph_edge *e;

  EXECUTE_IF_SET_IN_BITMAP (partition, 0, i, ii)
    if (RDG_MEM_WRITE_STMT (rdg, i)
        || RDG_MEM_READS_STMT (rdg, i))
      {
        for (j = 0; j < rdg->n_vertices; j++)
          if (!bitmap_bit_p (processed, j)
              && (RDG_MEM_WRITE_STMT (rdg, j)
                  || RDG_MEM_READS_STMT (rdg, j))
              && rdg_has_similar_memory_accesses (rdg, i, j))
            {
              /* Flag first the node J itself, and all the nodes that
                 are needed to compute J.  */
              rdg_flag_vertex_and_dependent (rdg, j, partition, loops,
                                             processed, &foo);

              /* When J is a read, we want to coalesce in the same
                 PARTITION all the nodes that are using J: this is
                 needed for better cache locality.  */
              rdg_flag_all_uses (rdg, j, partition, loops, processed, &foo);

              /* Remove from OTHER_STORES the vertex that we flagged.  */
              if (RDG_MEM_WRITE_STMT (rdg, j))
                for (k = 0; VEC_iterate (int, *other_stores, k, kk); k++)
                  if (kk == j)
                    {
                      VEC_unordered_remove (int, *other_stores, k);
                      break;
                    }
            }

        /* If the node I has two uses, then keep these together in the
           same PARTITION.  */
        for (n = 0, e = rdg->vertices[i].succ; e; e = e->succ_next, n++);

        if (n > 1)
          rdg_flag_all_uses (rdg, i, partition, loops, processed, &foo);
      }
}

/* Returns a bitmap in which all the statements needed for computing
   the strongly connected component C of the RDG are flagged, also
   including the loop exit conditions.  */

static bitmap
build_rdg_partition_for_component (struct graph *rdg, rdgc c,
                                   bool *part_has_writes,
                                   VEC (int, heap) **other_stores)
{
  int i, v;
  bitmap partition = BITMAP_ALLOC (NULL);
  bitmap loops = BITMAP_ALLOC (NULL);
  bitmap processed = BITMAP_ALLOC (NULL);

  for (i = 0; VEC_iterate (int, c->vertices, i, v); i++)
    if (!already_processed_vertex_p (processed, v))
      rdg_flag_vertex_and_dependent (rdg, v, partition, loops, processed,
                                     part_has_writes);

  /* Also iterate on the array of stores not in the starting vertices,
     and determine those vertices that have some memory affinity with
     the current nodes in the component: these are stores to the same
     arrays, i.e. we're taking care of cache locality.  */
  rdg_flag_similar_memory_accesses (rdg, partition, loops, processed,
                                    other_stores);

  rdg_flag_loop_exits (rdg, loops, partition, processed, part_has_writes);

  BITMAP_FREE (processed);
  BITMAP_FREE (loops);
  return partition;
}

/* Free memory for COMPONENTS.  */

static void
free_rdg_components (VEC (rdgc, heap) *components)
{
  int i;
  rdgc x;

  for (i = 0; VEC_iterate (rdgc, components, i, x); i++)
    {
      VEC_free (int, heap, x->vertices);
      free (x);
    }
}

/* Build the COMPONENTS vector with the strongly connected components
   of RDG in which the STARTING_VERTICES occur.  */

static void
rdg_build_components (struct graph *rdg, VEC (int, heap) *starting_vertices,
                      VEC (rdgc, heap) **components)
{
  int i, v;
  bitmap saved_components = BITMAP_ALLOC (NULL);
  int n_components = graphds_scc (rdg, NULL);
  VEC (int, heap) **all_components = XNEWVEC (VEC (int, heap) *, n_components);

  for (i = 0; i < n_components; i++)
    all_components[i] = VEC_alloc (int, heap, 3);

  for (i = 0; i < rdg->n_vertices; i++)
    VEC_safe_push (int, heap, all_components[rdg->vertices[i].component], i);

  for (i = 0; VEC_iterate (int, starting_vertices, i, v); i++)
    {
      int c = rdg->vertices[v].component;

      if (!bitmap_bit_p (saved_components, c))
        {
          rdgc x = XCNEW (struct rdg_component);
          x->num = c;
          x->vertices = all_components[c];

          VEC_safe_push (rdgc, heap, *components, x);
          bitmap_set_bit (saved_components, c);
        }
    }

  for (i = 0; i < n_components; i++)
    if (!bitmap_bit_p (saved_components, i))
      VEC_free (int, heap, all_components[i]);

  free (all_components);
  BITMAP_FREE (saved_components);
}

/* Aggregate several components into a useful partition that is
   registered in the PARTITIONS vector.  Partitions will be
   distributed in different loops.  */

static void
rdg_build_partitions (struct graph *rdg, VEC (rdgc, heap) *components,
                      VEC (int, heap) **other_stores,
                      VEC (bitmap, heap) **partitions, bitmap processed)
{
  int i;
  rdgc x;
  bitmap partition = BITMAP_ALLOC (NULL);

  for (i = 0; VEC_iterate (rdgc, components, i, x); i++)
    {
      bitmap np;
      bool part_has_writes = false;
      int v = VEC_index (int, x->vertices, 0);

      if (bitmap_bit_p (processed, v))
        continue;

      np = build_rdg_partition_for_component (rdg, x, &part_has_writes,
                                              other_stores);
      bitmap_ior_into (partition, np);
      bitmap_ior_into (processed, np);
      BITMAP_FREE (np);

      if (part_has_writes)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "ldist useful partition:\n");
              dump_bitmap (dump_file, partition);
            }

          VEC_safe_push (bitmap, heap, *partitions, partition);
          partition = BITMAP_ALLOC (NULL);
        }
    }

  /* Add the nodes from the RDG that were not marked as processed, and
     that are used outside the current loop.  These are scalar
     computations that are not yet part of previous partitions.  */
  for (i = 0; i < rdg->n_vertices; i++)
    if (!bitmap_bit_p (processed, i)
        && rdg_defs_used_in_other_loops_p (rdg, i))
      VEC_safe_push (int, heap, *other_stores, i);

  /* If there are still statements left in the OTHER_STORES array,
     create other components and partitions with these stores and
     their dependences.  */
  if (VEC_length (int, *other_stores) > 0)
    {
      VEC (rdgc, heap) *comps = VEC_alloc (rdgc, heap, 3);
      VEC (int, heap) *foo = VEC_alloc (int, heap, 3);

      rdg_build_components (rdg, *other_stores, &comps);
      rdg_build_partitions (rdg, comps, &foo, partitions, processed);

      VEC_free (int, heap, foo);
      free_rdg_components (comps);
    }

  /* If there is something left in the last partition, save it.  */
  if (bitmap_count_bits (partition) > 0)
    VEC_safe_push (bitmap, heap, *partitions, partition);
  else
    BITMAP_FREE (partition);
}

/* Dump to FILE the PARTITIONS.  */

static void
dump_rdg_partitions (FILE *file, VEC (bitmap, heap) *partitions)
{
  int i;
  bitmap partition;

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    debug_bitmap_file (file, partition);
}

/* Debug PARTITIONS.  */
extern void debug_rdg_partitions (VEC (bitmap, heap) *);

void
debug_rdg_partitions (VEC (bitmap, heap) *partitions)
{
  dump_rdg_partitions (stderr, partitions);
}

/* Returns the number of read and write operations in the RDG.  */

static int
number_of_rw_in_rdg (struct graph *rdg)
{
  int i, res = 0;

  for (i = 0; i < rdg->n_vertices; i++)
    {
      if (RDG_MEM_WRITE_STMT (rdg, i))
        ++res;

      if (RDG_MEM_READS_STMT (rdg, i))
        ++res;
    }

  return res;
}

/* Returns the number of read and write operations in a PARTITION of
   the RDG.  */

static int
number_of_rw_in_partition (struct graph *rdg, bitmap partition)
{
  int res = 0;
  unsigned i;
  bitmap_iterator ii;

  EXECUTE_IF_SET_IN_BITMAP (partition, 0, i, ii)
    {
      if (RDG_MEM_WRITE_STMT (rdg, i))
        ++res;

      if (RDG_MEM_READS_STMT (rdg, i))
        ++res;
    }

  return res;
}

/* Returns true when one of the PARTITIONS contains all the read or
   write operations of RDG.  */

static bool
partition_contains_all_rw (struct graph *rdg, VEC (bitmap, heap) *partitions)
{
  int i;
  bitmap partition;
  int nrw = number_of_rw_in_rdg (rdg);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    if (nrw == number_of_rw_in_partition (rdg, partition))
      return true;

  return false;
}

/* Generate code from STARTING_VERTICES in RDG.  Returns the number of
   distributed loops.  */

static int
ldist_gen (struct loop *loop, struct graph *rdg,
           VEC (int, heap) *starting_vertices)
{
  int i, nbp;
  VEC (rdgc, heap) *components = VEC_alloc (rdgc, heap, 3);
  VEC (bitmap, heap) *partitions = VEC_alloc (bitmap, heap, 3);
  VEC (int, heap) *other_stores = VEC_alloc (int, heap, 3);
  bitmap partition, processed = BITMAP_ALLOC (NULL);

  remaining_stmts = BITMAP_ALLOC (NULL);
  upstream_mem_writes = BITMAP_ALLOC (NULL);

  for (i = 0; i < rdg->n_vertices; i++)
    {
      bitmap_set_bit (remaining_stmts, i);

      /* Save in OTHER_STORES all the memory writes that are not in
         STARTING_VERTICES.  */
      if (RDG_MEM_WRITE_STMT (rdg, i))
        {
          int v;
          unsigned j;
          bool found = false;

          for (j = 0; VEC_iterate (int, starting_vertices, j, v); j++)
            if (i == v)
              {
                found = true;
                break;
              }

          if (!found)
            VEC_safe_push (int, heap, other_stores, i);
        }
    }

  mark_nodes_having_upstream_mem_writes (rdg);
  rdg_build_components (rdg, starting_vertices, &components);
  rdg_build_partitions (rdg, components, &other_stores, &partitions,
                        processed);
  BITMAP_FREE (processed);
  nbp = VEC_length (bitmap, partitions);

  if (nbp <= 1
      || partition_contains_all_rw (rdg, partitions))
    goto ldist_done;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_rdg_partitions (dump_file, partitions);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    if (!generate_code_for_partition (loop, partition, i < nbp - 1))
      goto ldist_done;

  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  update_ssa (TODO_update_ssa_only_virtuals | TODO_update_ssa);

 ldist_done:

  BITMAP_FREE (remaining_stmts);
  BITMAP_FREE (upstream_mem_writes);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    BITMAP_FREE (partition);

  VEC_free (int, heap, other_stores);
  VEC_free (bitmap, heap, partitions);
  free_rdg_components (components);
  return nbp;
}

/* Distributes the code from LOOP in such a way that producer
   statements are placed before consumer statements.  When STMTS is
   NULL, performs the maximal distribution; when STMTS is not NULL,
   tries to separate only these statements from the LOOP's body.
   Returns the number of distributed loops.  */

static int
distribute_loop (struct loop *loop, VEC (gimple, heap) *stmts)
{
  int res = 0;
  struct graph *rdg;
  gimple s;
  unsigned i;
  VEC (int, heap) *vertices;

  if (loop->num_nodes > 2)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FIXME: Loop %d not distributed: it has more than two basic blocks.\n",
                 loop->num);

      return res;
    }

  rdg = build_rdg (loop);

  if (!rdg)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FIXME: Loop %d not distributed: failed to build the RDG.\n",
                 loop->num);

      return res;
    }

  vertices = VEC_alloc (int, heap, 3);

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_rdg (dump_file, rdg);

  for (i = 0; VEC_iterate (gimple, stmts, i, s); i++)
    {
      int v = rdg_vertex_for_stmt (rdg, s);

      if (v >= 0)
        {
          VEC_safe_push (int, heap, vertices, v);

          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file,
                     "ldist asked to generate code for vertex %d\n", v);
        }
    }

  res = ldist_gen (loop, rdg, vertices);
  VEC_free (int, heap, vertices);
  free_rdg (rdg);

  return res;
}

/* Distribute all loops in the current function.  */

static unsigned int
tree_loop_distribution (void)
{
  struct loop *loop;
  loop_iterator li;
  int nb_generated_loops = 0;

  FOR_EACH_LOOP (li, loop, 0)
    {
      VEC (gimple, heap) *work_list = VEC_alloc (gimple, heap, 3);

      /* With the following working list, we're asking distribute_loop
         to separate the stores of the loop: when dependences allow,
         it will end up with one store per loop.  */
      stores_from_loop (loop, &work_list);

      /* A simple heuristic for cache locality is to not split stores
         to the same array.  Without this call, an unrolled loop would
         be split into as many loops as the unroll factor, each loop
         storing in the same array.  */
      remove_similar_memory_refs (&work_list);

      nb_generated_loops = distribute_loop (loop, work_list);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          if (nb_generated_loops > 1)
            fprintf (dump_file, "Loop %d distributed: split to %d loops.\n",
                     loop->num, nb_generated_loops);
          else
            fprintf (dump_file, "Loop %d is the same.\n", loop->num);
        }

      verify_loop_structure ();

      VEC_free (gimple, heap, work_list);
    }

  return 0;
}

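/* Returns true when the loop distribution pass is enabled with
   -ftree-loop-distribution.  */
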
static bool
gate_tree_loop_distribution (void)
{
  return flag_tree_loop_distribution != 0;
}

struct gimple_opt_pass pass_loop_distribution =
{
 {
  GIMPLE_PASS,
  "ldist",                            /* name */
  gate_tree_loop_distribution,        /* gate */
  tree_loop_distribution,             /* execute */
  NULL,                               /* sub */
  NULL,                               /* next */
  0,                                  /* static_pass_number */
  TV_TREE_LOOP_DISTRIBUTION,          /* tv_id */
  PROP_cfg | PROP_ssa,                /* properties_required */
  0,                                  /* properties_provided */
  0,                                  /* properties_destroyed */
  0,                                  /* todo_flags_start */
  TODO_dump_func | TODO_verify_loops  /* todo_flags_finish */
 }
};