/* Loop distribution.
   Copyright (C) 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Georges-Andre Silber <Georges-Andre.Silber@ensmp.fr>
   and Sebastian Pop <sebastian.pop@amd.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass performs loop distribution: for example, the loop

   |DO I = 2, N
   |    A(I) = B(I) + C
   |    D(I) = A(I-1)*E
   |ENDDO

   is transformed to

   |DOALL I = 2, N
   |    A(I) = B(I) + C
   |ENDDO
   |
   |DOALL I = 2, N
   |    D(I) = A(I-1)*E
   |ENDDO

   This pass uses a Reduced Dependence Graph (RDG) built on top of the
   data dependence relations.  The RDG is then topologically sorted to
   obtain a map of information producers/consumers based on which it
   generates the new loops.  */
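
/* As an illustration of the RDG (an editorial example, not part of the
   implementation): for the loop above, the RDG has one vertex per
   statement and one edge per data dependence.  The write A(I) and the
   read A(I-1) are linked by a loop-carried flow dependence, so the
   producer statement A(I) = B(I) + C can be executed for all
   iterations before the consumer statement D(I) = A(I-1)*E runs at
   all, which is what makes the distribution shown above legal.  */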

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "basic-block.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-pass.h"
#include "lambda.h"
#include "langhooks.h"
#include "tree-vectorizer.h"

/* If bit I is not set, it means that this node represents an
   operation that has already been performed, and that should not be
   performed again.  This is the subgraph of remaining important
   computations that is passed to the DFS algorithm to avoid including
   the same stores several times in different loops.  */
static bitmap remaining_stmts;

/* A node of the RDG is marked in this bitmap when it has as a
   predecessor a node that writes to memory.  */
static bitmap upstream_mem_writes;

/* Update the PHI nodes of NEW_LOOP.  NEW_LOOP is a duplicate of
   ORIG_LOOP.  */

static void
update_phis_for_loop_copy (struct loop *orig_loop, struct loop *new_loop)
{
  tree new_ssa_name;
  gimple_stmt_iterator si_new, si_orig;
  edge orig_loop_latch = loop_latch_edge (orig_loop);
  edge orig_entry_e = loop_preheader_edge (orig_loop);
  edge new_loop_entry_e = loop_preheader_edge (new_loop);

  /* Scan the phis in the headers of the old and new loops
     (they are organized in exactly the same order).  */
  for (si_new = gsi_start_phis (new_loop->header),
       si_orig = gsi_start_phis (orig_loop->header);
       !gsi_end_p (si_new) && !gsi_end_p (si_orig);
       gsi_next (&si_new), gsi_next (&si_orig))
    {
      tree def;
      source_location locus;
      gimple phi_new = gsi_stmt (si_new);
      gimple phi_orig = gsi_stmt (si_orig);

      /* Add the first phi argument for the phi in NEW_LOOP (the one
         associated with the entry of NEW_LOOP).  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_entry_e);
      locus = gimple_phi_arg_location_from_edge (phi_orig, orig_entry_e);
      add_phi_arg (phi_new, def, new_loop_entry_e, locus);

      /* Add the second phi argument for the phi in NEW_LOOP (the one
         associated with the latch of NEW_LOOP).  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
      locus = gimple_phi_arg_location_from_edge (phi_orig, orig_loop_latch);

      if (TREE_CODE (def) == SSA_NAME)
        {
          new_ssa_name = get_current_def (def);

          if (!new_ssa_name)
            /* This only happens if there are no definitions inside the
               loop.  Use the invariant in the new loop as is.  */
            new_ssa_name = def;
        }
      else
        /* Could be an integer.  */
        new_ssa_name = def;

      add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop), locus);
    }
}

/* Return a copy of LOOP placed before LOOP.  */

static struct loop *
copy_loop_before (struct loop *loop)
{
  struct loop *res;
  edge preheader = loop_preheader_edge (loop);

  if (!single_exit (loop))
    return NULL;

  initialize_original_copy_tables ();
  res = slpeel_tree_duplicate_loop_to_edge_cfg (loop, preheader);
  free_original_copy_tables ();

  if (!res)
    return NULL;

  update_phis_for_loop_copy (loop, res);
  rename_variables_in_loop (res);

  return res;
}

/* Creates an empty basic block after LOOP.  */

static void
create_bb_after_loop (struct loop *loop)
{
  edge exit = single_exit (loop);

  if (!exit)
    return;

  split_edge (exit);
}

/* Generate code for PARTITION from the code in LOOP.  The loop is
   copied when COPY_P is true.  All the statements not flagged in the
   PARTITION bitmap are removed from the loop or from its copy.  The
   statements are indexed in sequence inside a basic block, and the
   basic blocks of a loop are taken in dom order.  Returns true when
   the code gen succeeded.  */

static bool
generate_loops_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  unsigned i, x;
  gimple_stmt_iterator bsi;
  basic_block *bbs;

  if (copy_p)
    {
      loop = copy_loop_before (loop);
      create_preheader (loop, CP_SIMPLE_PREHEADERS);
      create_bb_after_loop (loop);
    }

  if (loop == NULL)
    return false;

  /* Remove stmts not in the PARTITION bitmap.  The order in which we
     visit the phi nodes and the statements is exactly as in
     stmts_from_loop.  */
  bbs = get_loop_body_in_dom_order (loop);

  for (x = 0, i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi);)
        if (!bitmap_bit_p (partition, x++))
          remove_phi_node (&bsi, true);
        else
          gsi_next (&bsi);

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi);)
        if (gimple_code (gsi_stmt (bsi)) != GIMPLE_LABEL
            && !bitmap_bit_p (partition, x++))
          gsi_remove (&bsi, false);
        else
          gsi_next (&bsi);

      mark_virtual_ops_in_bb (bb);
    }

  free (bbs);
  return true;
}

/* Build the size argument for a memset call.  */

static inline tree
build_size_arg_loc (location_t loc, tree nb_iter, tree op,
                    gimple_seq *stmt_list)
{
  gimple_seq stmts;
  tree x = size_binop_loc (loc, MULT_EXPR,
                           fold_convert_loc (loc, sizetype, nb_iter),
                           TYPE_SIZE_UNIT (TREE_TYPE (op)));
  x = force_gimple_operand (x, &stmts, true, NULL);
  gimple_seq_add_seq (stmt_list, stmts);

  return x;
}
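
/* For instance (an editorial note): when the store zeroes an array of
   4-byte integers and the loop runs NB_ITER times, the size built
   above is NB_ITER * 4, the element size coming from TYPE_SIZE_UNIT.  */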

/* Generate a call to memset.  Return true when the operation succeeded.  */

static bool
generate_memset_zero (gimple stmt, tree op0, tree nb_iter,
                      gimple_stmt_iterator bsi)
{
  tree addr_base, nb_bytes;
  bool res = false;
  gimple_seq stmt_list = NULL, stmts;
  gimple fn_call;
  tree mem, fn;
  gimple_stmt_iterator i;
  struct data_reference *dr = XCNEW (struct data_reference);
  location_t loc = gimple_location (stmt);

  DR_STMT (dr) = stmt;
  DR_REF (dr) = op0;
  if (!dr_analyze_innermost (dr))
    goto end;

  /* Test for a positive stride, iterating over every element.  */
  if (integer_zerop (size_binop (MINUS_EXPR,
                                 fold_convert (sizetype, DR_STEP (dr)),
                                 TYPE_SIZE_UNIT (TREE_TYPE (op0)))))
    {
      addr_base = fold_convert_loc (loc, sizetype,
                                    size_binop_loc (loc, PLUS_EXPR,
                                                    DR_OFFSET (dr),
                                                    DR_INIT (dr)));
      addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
                                   TREE_TYPE (DR_BASE_ADDRESS (dr)),
                                   DR_BASE_ADDRESS (dr), addr_base);

      nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);
    }

  /* Test for a negative stride, iterating over every element.  */
  else if (integer_zerop (size_binop (PLUS_EXPR,
                                      TYPE_SIZE_UNIT (TREE_TYPE (op0)),
                                      fold_convert (sizetype, DR_STEP (dr)))))
    {
      nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);

      addr_base = size_binop_loc (loc, PLUS_EXPR, DR_OFFSET (dr), DR_INIT (dr));
      addr_base = fold_convert_loc (loc, sizetype, addr_base);
      addr_base = size_binop_loc (loc, MINUS_EXPR, addr_base,
                                  fold_convert_loc (loc, sizetype, nb_bytes));
      addr_base = size_binop_loc (loc, PLUS_EXPR, addr_base,
                                  TYPE_SIZE_UNIT (TREE_TYPE (op0)));
      addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
                                   TREE_TYPE (DR_BASE_ADDRESS (dr)),
                                   DR_BASE_ADDRESS (dr), addr_base);
    }
  else
    goto end;

  mem = force_gimple_operand (addr_base, &stmts, true, NULL);
  gimple_seq_add_seq (&stmt_list, stmts);

  fn = build_fold_addr_expr (implicit_built_in_decls [BUILT_IN_MEMSET]);
  fn_call = gimple_build_call (fn, 3, mem, integer_zero_node, nb_bytes);
  gimple_seq_add_stmt (&stmt_list, fn_call);

  for (i = gsi_start (stmt_list); !gsi_end_p (i); gsi_next (&i))
    {
      gimple s = gsi_stmt (i);
      update_stmt_if_modified (s);
    }

  gsi_insert_seq_after (&bsi, stmt_list, GSI_CONTINUE_LINKING);
  res = true;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "generated memset zero\n");

 end:
  free_data_ref (dr);
  return res;
}
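
/* To illustrate the transformation above (an editorial example): a
   single-store loop such as

   |for (i = 0; i < n; i++)
   |  a[i] = 0;

   is replaced by a call equivalent to memset (&a[0], 0, n * sizeof
   (a[0])); with a negative stride, the base address is adjusted so
   that it points to the lowest byte written by the loop.  */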

/* Propagate the phi nodes of B to their uses and remove them.  */

static void
prop_phis (basic_block b)
{
  gimple_stmt_iterator psi;
  gimple_seq phis = phi_nodes (b);

  for (psi = gsi_start (phis); !gsi_end_p (psi); )
    {
      gimple phi = gsi_stmt (psi);
      tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0);

      gcc_assert (gimple_phi_num_args (phi) == 1);

      if (!is_gimple_reg (def))
        {
          imm_use_iterator iter;
          use_operand_p use_p;
          gimple stmt;

          FOR_EACH_IMM_USE_STMT (stmt, iter, def)
            FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
              SET_USE (use_p, use);
        }
      else
        replace_uses_by (def, use);

      remove_phi_node (&psi, true);
    }
}

/* Tries to generate a builtin function for the instructions of LOOP
   pointed to by the bits set in PARTITION.  Returns true when the
   operation succeeded.  */

static bool
generate_builtin (struct loop *loop, bitmap partition, bool copy_p)
{
  bool res = false;
  unsigned i, x = 0;
  basic_block *bbs;
  gimple write = NULL;
  tree op0, op1;
  gimple_stmt_iterator bsi;
  tree nb_iter = number_of_exit_cond_executions (loop);

  if (!nb_iter || nb_iter == chrec_dont_know)
    return false;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        x++;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          gimple stmt = gsi_stmt (bsi);

          if (bitmap_bit_p (partition, x++)
              && is_gimple_assign (stmt)
              && !is_gimple_reg (gimple_assign_lhs (stmt)))
            {
              /* Don't generate the builtins when there is more than
                 one memory write.  */
              if (write != NULL)
                goto end;

              write = stmt;
              if (bb == loop->latch)
                nb_iter = number_of_latch_executions (loop);
            }
        }
    }

  if (!write)
    goto end;

  op0 = gimple_assign_lhs (write);
  op1 = gimple_assign_rhs1 (write);

  if (!(TREE_CODE (op0) == ARRAY_REF
        || TREE_CODE (op0) == INDIRECT_REF))
    goto end;

  /* The new statements will be placed before LOOP.  */
  bsi = gsi_last_bb (loop_preheader_edge (loop)->src);

  if (gimple_assign_rhs_code (write) == INTEGER_CST
      && (integer_zerop (op1) || real_zerop (op1)))
    res = generate_memset_zero (write, op0, nb_iter, bsi);

  /* If this is the last partition for which we generate code, we have
     to destroy the loop.  */
  if (res && !copy_p)
    {
      unsigned nbbs = loop->num_nodes;
      basic_block src = loop_preheader_edge (loop)->src;
      basic_block dest = single_exit (loop)->dest;
      prop_phis (dest);
      make_edge (src, dest, EDGE_FALLTHRU);
      cancel_loop_tree (loop);

      for (i = 0; i < nbbs; i++)
        delete_basic_block (bbs[i]);

      set_immediate_dominator (CDI_DOMINATORS, dest,
                               recompute_dominator (CDI_DOMINATORS, dest));
    }

 end:
  free (bbs);
  return res;
}

/* Generates code for PARTITION.  For simple loops, this function can
   generate a built-in.  */

static bool
generate_code_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  if (generate_builtin (loop, partition, copy_p))
    return true;

  return generate_loops_for_partition (loop, partition, copy_p);
}


/* Returns true if the node V of RDG cannot be recomputed.  */

static bool
rdg_cannot_recompute_vertex_p (struct graph *rdg, int v)
{
  if (RDG_MEM_WRITE_STMT (rdg, v))
    return true;

  return false;
}

/* Returns true when the vertex V has already been generated in the
   current partition (V is in PROCESSED), or when V belongs to another
   partition and cannot be recomputed (V is not in REMAINING_STMTS).  */

static inline bool
already_processed_vertex_p (bitmap processed, int v)
{
  return (bitmap_bit_p (processed, v)
          || !bitmap_bit_p (remaining_stmts, v));
}

/* Returns NULL when there is no anti-dependence among the successors
   of vertex V, otherwise returns the edge with the anti-dep.  */

static struct graph_edge *
has_anti_dependence (struct vertex *v)
{
  struct graph_edge *e;

  if (v->succ)
    for (e = v->succ; e; e = e->succ_next)
      if (RDGE_TYPE (e) == anti_dd)
        return e;

  return NULL;
}

/* Returns true when V has a predecessor that is marked as having
   upstream memory writes, ignoring flow channels.  */

static bool
predecessor_has_mem_write (struct graph *rdg, struct vertex *v)
{
  struct graph_edge *e;

  if (v->pred)
    for (e = v->pred; e; e = e->pred_next)
      if (bitmap_bit_p (upstream_mem_writes, e->src)
          /* Don't consider flow channels: a write to memory followed
             by a read from memory.  These channels allow splitting the
             RDG into different partitions.  */
          && !RDG_MEM_WRITE_STMT (rdg, e->src))
        return true;

  return false;
}

/* Initializes the upstream_mem_writes bitmap following the
   information from RDG.  */

static void
mark_nodes_having_upstream_mem_writes (struct graph *rdg)
{
  int v, x;
  bitmap seen = BITMAP_ALLOC (NULL);

  for (v = rdg->n_vertices - 1; v >= 0; v--)
    if (!bitmap_bit_p (seen, v))
      {
        unsigned i;
        VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);

        graphds_dfs (rdg, &v, 1, &nodes, false, NULL);

        for (i = 0; VEC_iterate (int, nodes, i, x); i++)
          {
            if (bitmap_bit_p (seen, x))
              continue;

            bitmap_set_bit (seen, x);

            if (RDG_MEM_WRITE_STMT (rdg, x)
                || predecessor_has_mem_write (rdg, &(rdg->vertices[x]))
                /* In an anti-dependence the read occurs before the
                   write; this is why both the read and the write
                   should be placed in the same partition.  */
                || has_anti_dependence (&(rdg->vertices[x])))
              {
                bitmap_set_bit (upstream_mem_writes, x);
              }
          }

        VEC_free (int, heap, nodes);
      }
}

/* Returns true when vertex U has a memory write node as a predecessor
   in the RDG.  */

static bool
has_upstream_mem_writes (int u)
{
  return bitmap_bit_p (upstream_mem_writes, u);
}

static void rdg_flag_vertex_and_dependent (struct graph *, int, bitmap, bitmap,
                                           bitmap, bool *);

/* Flag all the uses of U.  */

static void
rdg_flag_all_uses (struct graph *rdg, int u, bitmap partition, bitmap loops,
                   bitmap processed, bool *part_has_writes)
{
  struct graph_edge *e;

  for (e = rdg->vertices[u].succ; e; e = e->succ_next)
    if (!bitmap_bit_p (processed, e->dest))
      {
        rdg_flag_vertex_and_dependent (rdg, e->dest, partition, loops,
                                       processed, part_has_writes);
        rdg_flag_all_uses (rdg, e->dest, partition, loops, processed,
                           part_has_writes);
      }
}

/* Flag the uses of U, stopping the traversal according to the
   information in UPSTREAM_MEM_WRITES.  */

static void
rdg_flag_uses (struct graph *rdg, int u, bitmap partition, bitmap loops,
               bitmap processed, bool *part_has_writes)
{
  use_operand_p use_p;
  struct vertex *x = &(rdg->vertices[u]);
  gimple stmt = RDGV_STMT (x);
  struct graph_edge *anti_dep = has_anti_dependence (x);

  /* Keep in the same partition the destination of an anti-dependence,
     because this is a store to the exact same location.  Putting this
     in another partition is bad for cache locality.  */
  if (anti_dep)
    {
      int v = anti_dep->dest;

      if (!already_processed_vertex_p (processed, v))
        rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
                                       processed, part_has_writes);
    }

  if (gimple_code (stmt) != GIMPLE_PHI)
    {
      if ((use_p = gimple_vuse_op (stmt)) != NULL_USE_OPERAND_P)
        {
          tree use = USE_FROM_PTR (use_p);

          if (TREE_CODE (use) == SSA_NAME)
            {
              gimple def_stmt = SSA_NAME_DEF_STMT (use);
              int v = rdg_vertex_for_stmt (rdg, def_stmt);

              if (v >= 0
                  && !already_processed_vertex_p (processed, v))
                rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
                                               processed, part_has_writes);
            }
        }
    }

  if (is_gimple_assign (stmt) && has_upstream_mem_writes (u))
    {
      tree op0 = gimple_assign_lhs (stmt);

      /* Scalar channels don't have enough space for transmitting data
         between tasks, unless we add more storage by privatizing.  */
      if (is_gimple_reg (op0))
        {
          use_operand_p use_p;
          imm_use_iterator iter;

          FOR_EACH_IMM_USE_FAST (use_p, iter, op0)
            {
              int v = rdg_vertex_for_stmt (rdg, USE_STMT (use_p));

              if (!already_processed_vertex_p (processed, v))
                rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
                                               processed, part_has_writes);
            }
        }
    }
}

/* Flag V from RDG as part of PARTITION, and also flag its loop number
   in LOOPS.  */

static void
rdg_flag_vertex (struct graph *rdg, int v, bitmap partition, bitmap loops,
                 bool *part_has_writes)
{
  struct loop *loop;

  if (bitmap_bit_p (partition, v))
    return;

  loop = loop_containing_stmt (RDG_STMT (rdg, v));
  bitmap_set_bit (loops, loop->num);
  bitmap_set_bit (partition, v);

  if (rdg_cannot_recompute_vertex_p (rdg, v))
    {
      *part_has_writes = true;
      bitmap_clear_bit (remaining_stmts, v);
    }
}

/* Flag in the bitmap PARTITION the vertex V and all its predecessors.
   Also flag their loop number in LOOPS.  */

static void
rdg_flag_vertex_and_dependent (struct graph *rdg, int v, bitmap partition,
                               bitmap loops, bitmap processed,
                               bool *part_has_writes)
{
  unsigned i;
  VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);
  int x;

  bitmap_set_bit (processed, v);
  rdg_flag_uses (rdg, v, partition, loops, processed, part_has_writes);
  graphds_dfs (rdg, &v, 1, &nodes, false, remaining_stmts);
  rdg_flag_vertex (rdg, v, partition, loops, part_has_writes);

  for (i = 0; VEC_iterate (int, nodes, i, x); i++)
    if (!already_processed_vertex_p (processed, x))
      rdg_flag_vertex_and_dependent (rdg, x, partition, loops, processed,
                                     part_has_writes);

  VEC_free (int, heap, nodes);
}

/* Initialize CONDS with all the condition statements from the basic
   blocks of LOOP.  */

static void
collect_condition_stmts (struct loop *loop, VEC (gimple, heap) **conds)
{
  unsigned i;
  edge e;
  VEC (edge, heap) *exits = get_loop_exit_edges (loop);

  for (i = 0; VEC_iterate (edge, exits, i, e); i++)
    {
      gimple cond = last_stmt (e->src);

      if (cond)
        VEC_safe_push (gimple, heap, *conds, cond);
    }

  VEC_free (edge, heap, exits);
}

/* Add to PARTITION all the exit condition statements for LOOPS
   together with all their dependent statements determined from
   RDG.  */

static void
rdg_flag_loop_exits (struct graph *rdg, bitmap loops, bitmap partition,
                     bitmap processed, bool *part_has_writes)
{
  unsigned i;
  bitmap_iterator bi;
  VEC (gimple, heap) *conds = VEC_alloc (gimple, heap, 3);

  EXECUTE_IF_SET_IN_BITMAP (loops, 0, i, bi)
    collect_condition_stmts (get_loop (i), &conds);

  while (!VEC_empty (gimple, conds))
    {
      gimple cond = VEC_pop (gimple, conds);
      int v = rdg_vertex_for_stmt (rdg, cond);
      bitmap new_loops = BITMAP_ALLOC (NULL);

      if (!already_processed_vertex_p (processed, v))
        rdg_flag_vertex_and_dependent (rdg, v, partition, new_loops, processed,
                                       part_has_writes);

      EXECUTE_IF_SET_IN_BITMAP (new_loops, 0, i, bi)
        if (!bitmap_bit_p (loops, i))
          {
            bitmap_set_bit (loops, i);
            collect_condition_stmts (get_loop (i), &conds);
          }

      BITMAP_FREE (new_loops);
    }
}

/* Flag all the nodes of RDG containing memory accesses that could
   potentially belong to arrays already accessed in the current
   PARTITION.  */

static void
rdg_flag_similar_memory_accesses (struct graph *rdg, bitmap partition,
                                  bitmap loops, bitmap processed,
                                  VEC (int, heap) **other_stores)
{
  bool foo;
  unsigned i, n;
  int j, k, kk;
  bitmap_iterator ii;
  struct graph_edge *e;

  EXECUTE_IF_SET_IN_BITMAP (partition, 0, i, ii)
    if (RDG_MEM_WRITE_STMT (rdg, i)
        || RDG_MEM_READS_STMT (rdg, i))
      {
        for (j = 0; j < rdg->n_vertices; j++)
          if (!bitmap_bit_p (processed, j)
              && (RDG_MEM_WRITE_STMT (rdg, j)
                  || RDG_MEM_READS_STMT (rdg, j))
              && rdg_has_similar_memory_accesses (rdg, i, j))
            {
              /* Flag first the node J itself, and all the nodes that
                 are needed to compute J.  */
              rdg_flag_vertex_and_dependent (rdg, j, partition, loops,
                                             processed, &foo);

              /* When J is a read, we want to coalesce in the same
                 PARTITION all the nodes that are using J: this is
                 needed for better cache locality.  */
              rdg_flag_all_uses (rdg, j, partition, loops, processed, &foo);

              /* Remove from OTHER_STORES the vertex that we flagged.  */
              if (RDG_MEM_WRITE_STMT (rdg, j))
                for (k = 0; VEC_iterate (int, *other_stores, k, kk); k++)
                  if (kk == j)
                    {
                      VEC_unordered_remove (int, *other_stores, k);
                      break;
                    }
            }

        /* If the node I has more than one use, then keep its uses
           together in the same PARTITION.  */
        for (n = 0, e = rdg->vertices[i].succ; e; e = e->succ_next, n++);

        if (n > 1)
          rdg_flag_all_uses (rdg, i, partition, loops, processed, &foo);
      }
}
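
/* For instance (an editorial example): when the partition contains a
   store to A[i] and another statement stores to A[i+1], the second
   store and its dependences are pulled into the same partition, so
   that the distributed loops do not traverse the array A twice.  */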

/* Returns a bitmap in which all the statements needed for computing
   the strongly connected component C of the RDG are flagged, also
   including the loop exit conditions.  */

static bitmap
build_rdg_partition_for_component (struct graph *rdg, rdgc c,
                                   bool *part_has_writes,
                                   VEC (int, heap) **other_stores)
{
  int i, v;
  bitmap partition = BITMAP_ALLOC (NULL);
  bitmap loops = BITMAP_ALLOC (NULL);
  bitmap processed = BITMAP_ALLOC (NULL);

  for (i = 0; VEC_iterate (int, c->vertices, i, v); i++)
    if (!already_processed_vertex_p (processed, v))
      rdg_flag_vertex_and_dependent (rdg, v, partition, loops, processed,
                                     part_has_writes);

  /* Also iterate on the array of stores not in the starting vertices,
     and determine those vertices that have some memory affinity with
     the current nodes in the component: these are stores to the same
     arrays, i.e. we're taking care of cache locality.  */
  rdg_flag_similar_memory_accesses (rdg, partition, loops, processed,
                                    other_stores);

  rdg_flag_loop_exits (rdg, loops, partition, processed, part_has_writes);

  BITMAP_FREE (processed);
  BITMAP_FREE (loops);
  return partition;
}

/* Free memory for COMPONENTS.  */

static void
free_rdg_components (VEC (rdgc, heap) *components)
{
  int i;
  rdgc x;

  for (i = 0; VEC_iterate (rdgc, components, i, x); i++)
    {
      VEC_free (int, heap, x->vertices);
      free (x);
    }
}

/* Build the COMPONENTS vector with the strongly connected components
   of RDG in which the STARTING_VERTICES occur.  */

static void
rdg_build_components (struct graph *rdg, VEC (int, heap) *starting_vertices,
                      VEC (rdgc, heap) **components)
{
  int i, v;
  bitmap saved_components = BITMAP_ALLOC (NULL);
  int n_components = graphds_scc (rdg, NULL);
  VEC (int, heap) **all_components = XNEWVEC (VEC (int, heap) *, n_components);

  for (i = 0; i < n_components; i++)
    all_components[i] = VEC_alloc (int, heap, 3);

  for (i = 0; i < rdg->n_vertices; i++)
    VEC_safe_push (int, heap, all_components[rdg->vertices[i].component], i);

  for (i = 0; VEC_iterate (int, starting_vertices, i, v); i++)
    {
      int c = rdg->vertices[v].component;

      if (!bitmap_bit_p (saved_components, c))
        {
          rdgc x = XCNEW (struct rdg_component);
          x->num = c;
          x->vertices = all_components[c];

          VEC_safe_push (rdgc, heap, *components, x);
          bitmap_set_bit (saved_components, c);
        }
    }

  for (i = 0; i < n_components; i++)
    if (!bitmap_bit_p (saved_components, i))
      VEC_free (int, heap, all_components[i]);

  free (all_components);
  BITMAP_FREE (saved_components);
}

/* Aggregate several components into a useful partition that is
   registered in the PARTITIONS vector.  Partitions will be
   distributed in different loops.  */

static void
rdg_build_partitions (struct graph *rdg, VEC (rdgc, heap) *components,
                      VEC (int, heap) **other_stores,
                      VEC (bitmap, heap) **partitions, bitmap processed)
{
  int i;
  rdgc x;
  bitmap partition = BITMAP_ALLOC (NULL);

  for (i = 0; VEC_iterate (rdgc, components, i, x); i++)
    {
      bitmap np;
      bool part_has_writes = false;
      int v = VEC_index (int, x->vertices, 0);

      if (bitmap_bit_p (processed, v))
        continue;

      np = build_rdg_partition_for_component (rdg, x, &part_has_writes,
                                              other_stores);
      bitmap_ior_into (partition, np);
      bitmap_ior_into (processed, np);
      BITMAP_FREE (np);

      if (part_has_writes)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "ldist useful partition:\n");
              dump_bitmap (dump_file, partition);
            }

          VEC_safe_push (bitmap, heap, *partitions, partition);
          partition = BITMAP_ALLOC (NULL);
        }
    }

  /* Add the nodes from the RDG that were not marked as processed, and
     that are used outside the current loop.  These are scalar
     computations that are not yet part of previous partitions.  */
  for (i = 0; i < rdg->n_vertices; i++)
    if (!bitmap_bit_p (processed, i)
        && rdg_defs_used_in_other_loops_p (rdg, i))
      VEC_safe_push (int, heap, *other_stores, i);

  /* If there are still statements left in the OTHER_STORES array,
     create other components and partitions with these stores and
     their dependences.  */
  if (VEC_length (int, *other_stores) > 0)
    {
      VEC (rdgc, heap) *comps = VEC_alloc (rdgc, heap, 3);
      VEC (int, heap) *foo = VEC_alloc (int, heap, 3);

      rdg_build_components (rdg, *other_stores, &comps);
      rdg_build_partitions (rdg, comps, &foo, partitions, processed);

      VEC_free (int, heap, foo);
      free_rdg_components (comps);
    }

  /* If there is something left in the last partition, save it.  */
  if (bitmap_count_bits (partition) > 0)
    VEC_safe_push (bitmap, heap, *partitions, partition);
  else
    BITMAP_FREE (partition);
}

/* Dump to FILE the PARTITIONS.  */

static void
dump_rdg_partitions (FILE *file, VEC (bitmap, heap) *partitions)
{
  int i;
  bitmap partition;

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    debug_bitmap_file (file, partition);
}

/* Debug PARTITIONS.  */
extern void debug_rdg_partitions (VEC (bitmap, heap) *);

DEBUG_FUNCTION void
debug_rdg_partitions (VEC (bitmap, heap) *partitions)
{
  dump_rdg_partitions (stderr, partitions);
}

/* Returns the number of read and write operations in the RDG.  */

static int
number_of_rw_in_rdg (struct graph *rdg)
{
  int i, res = 0;

  for (i = 0; i < rdg->n_vertices; i++)
    {
      if (RDG_MEM_WRITE_STMT (rdg, i))
        ++res;

      if (RDG_MEM_READS_STMT (rdg, i))
        ++res;
    }

  return res;
}

/* Returns the number of read and write operations in a PARTITION of
   the RDG.  */

static int
number_of_rw_in_partition (struct graph *rdg, bitmap partition)
{
  int res = 0;
  unsigned i;
  bitmap_iterator ii;

  EXECUTE_IF_SET_IN_BITMAP (partition, 0, i, ii)
    {
      if (RDG_MEM_WRITE_STMT (rdg, i))
        ++res;

      if (RDG_MEM_READS_STMT (rdg, i))
        ++res;
    }

  return res;
}

/* Returns true when one of the PARTITIONS contains all the read or
   write operations of RDG.  */

static bool
partition_contains_all_rw (struct graph *rdg, VEC (bitmap, heap) *partitions)
{
  int i;
  bitmap partition;
  int nrw = number_of_rw_in_rdg (rdg);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    if (nrw == number_of_rw_in_partition (rdg, partition))
      return true;

  return false;
}

/* Generate code from STARTING_VERTICES in RDG.  Returns the number of
   distributed loops.  */

static int
ldist_gen (struct loop *loop, struct graph *rdg,
           VEC (int, heap) *starting_vertices)
{
  int i, nbp;
  VEC (rdgc, heap) *components = VEC_alloc (rdgc, heap, 3);
  VEC (bitmap, heap) *partitions = VEC_alloc (bitmap, heap, 3);
  VEC (int, heap) *other_stores = VEC_alloc (int, heap, 3);
  bitmap partition, processed = BITMAP_ALLOC (NULL);

  remaining_stmts = BITMAP_ALLOC (NULL);
  upstream_mem_writes = BITMAP_ALLOC (NULL);

  for (i = 0; i < rdg->n_vertices; i++)
    {
      bitmap_set_bit (remaining_stmts, i);

      /* Save in OTHER_STORES all the memory writes that are not in
         STARTING_VERTICES.  */
      if (RDG_MEM_WRITE_STMT (rdg, i))
        {
          int v;
          unsigned j;
          bool found = false;

          for (j = 0; VEC_iterate (int, starting_vertices, j, v); j++)
            if (i == v)
              {
                found = true;
                break;
              }

          if (!found)
            VEC_safe_push (int, heap, other_stores, i);
        }
    }

  mark_nodes_having_upstream_mem_writes (rdg);
  rdg_build_components (rdg, starting_vertices, &components);
  rdg_build_partitions (rdg, components, &other_stores, &partitions,
                        processed);
  BITMAP_FREE (processed);
  nbp = VEC_length (bitmap, partitions);

  if (nbp <= 1
      || partition_contains_all_rw (rdg, partitions))
    goto ldist_done;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_rdg_partitions (dump_file, partitions);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    if (!generate_code_for_partition (loop, partition, i < nbp - 1))
      goto ldist_done;

  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  update_ssa (TODO_update_ssa_only_virtuals | TODO_update_ssa);

 ldist_done:

  BITMAP_FREE (remaining_stmts);
  BITMAP_FREE (upstream_mem_writes);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    BITMAP_FREE (partition);

  VEC_free (int, heap, other_stores);
  VEC_free (bitmap, heap, partitions);
  free_rdg_components (components);
  return nbp;
}

/* Distributes the code from LOOP in such a way that producer
   statements are placed before consumer statements.  When STMTS is
   NULL, performs the maximal distribution; if STMTS is not NULL,
   tries to separate only these statements from the LOOP's body.
   Returns the number of distributed loops.  */

static int
distribute_loop (struct loop *loop, VEC (gimple, heap) *stmts)
{
  int res = 0;
  struct graph *rdg;
  gimple s;
  unsigned i;
  VEC (int, heap) *vertices;

  if (loop->num_nodes > 2)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FIXME: Loop %d not distributed: it has more than two basic blocks.\n",
                 loop->num);

      return res;
    }

  rdg = build_rdg (loop);

  if (!rdg)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FIXME: Loop %d not distributed: failed to build the RDG.\n",
                 loop->num);

      return res;
    }

  vertices = VEC_alloc (int, heap, 3);

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_rdg (dump_file, rdg);

  for (i = 0; VEC_iterate (gimple, stmts, i, s); i++)
    {
      int v = rdg_vertex_for_stmt (rdg, s);

      if (v >= 0)
        {
          VEC_safe_push (int, heap, vertices, v);

          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file,
                     "ldist asked to generate code for vertex %d\n", v);
        }
    }

  res = ldist_gen (loop, rdg, vertices);
  VEC_free (int, heap, vertices);
  free_rdg (rdg);

  return res;
}

/* Distribute all loops in the current function.  */

static unsigned int
tree_loop_distribution (void)
{
  struct loop *loop;
  loop_iterator li;
  int nb_generated_loops = 0;

  FOR_EACH_LOOP (li, loop, 0)
    {
      VEC (gimple, heap) *work_list = VEC_alloc (gimple, heap, 3);

      /* With the following working list, we're asking distribute_loop
         to separate the stores of the loop: when dependences allow,
         it will end up with one store per loop.  */
      stores_from_loop (loop, &work_list);

      /* A simple heuristic for cache locality is to not split stores
         to the same array.  Without this call, an unrolled loop would
         be split into as many loops as unroll factor, each loop
         storing in the same array.  */
      remove_similar_memory_refs (&work_list);

      nb_generated_loops = distribute_loop (loop, work_list);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          if (nb_generated_loops > 1)
            fprintf (dump_file, "Loop %d distributed: split to %d loops.\n",
                     loop->num, nb_generated_loops);
          else
            fprintf (dump_file, "Loop %d is the same.\n", loop->num);
        }

      verify_loop_structure ();

      VEC_free (gimple, heap, work_list);
    }

  return 0;
}

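/* Returns true when the pass is enabled; loop distribution is gated
   by the -ftree-loop-distribution command-line flag.  */
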
static bool
gate_tree_loop_distribution (void)
{
  return flag_tree_loop_distribution != 0;
}

struct gimple_opt_pass pass_loop_distribution =
{
 {
  GIMPLE_PASS,
  "ldist",                      /* name */
  gate_tree_loop_distribution,  /* gate */
  tree_loop_distribution,       /* execute */
  NULL,                         /* sub */
  NULL,                         /* next */
  0,                            /* static_pass_number */
  TV_TREE_LOOP_DISTRIBUTION,    /* tv_id */
  PROP_cfg | PROP_ssa,          /* properties_required */
  0,                            /* properties_provided */
  0,                            /* properties_destroyed */
  0,                            /* todo_flags_start */
  TODO_dump_func                /* todo_flags_finish */
 }
};