/* Loop invariant motion.
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "cfgloop.h"
#include "domwalk.h"
#include "params.h"
#include "tree-pass.h"
#include "flags.h"
#include "hashtab.h"
#include "tree-affine.h"
#include "pointer-set.h"
#include "tree-ssa-propagate.h"

/* TODO:  Support for predicated code motion.  I.e.

   while (1)
     {
       if (cond)
         {
           a = inv;
           something;
         }
     }

   where COND and INV are invariants, but evaluating INV may trap or be
   invalid for some other reason if !COND.  This may be transformed to

   if (cond)
     a = inv;
   while (1)
     {
       if (cond)
         something;
     }  */

/* A type for the list of statements that have to be moved in order to be
   able to hoist an invariant computation.  */

struct depend
{
  gimple stmt;
  struct depend *next;
};

/* The auxiliary data kept for each statement.  */

struct lim_aux_data
{
  struct loop *max_loop;        /* The outermost loop in which the statement
                                   is invariant.  */

  struct loop *tgt_loop;        /* The loop out of which we want to move the
                                   invariant.  */

  struct loop *always_executed_in;
                                /* The outermost loop for which we are sure
                                   the statement is executed if the loop
                                   is entered.  */

  unsigned cost;                /* Cost of the computation performed by the
                                   statement.  */

  struct depend *depends;       /* List of statements that must also be
                                   hoisted out of the loop when this statement
                                   is hoisted; i.e. those that define the
                                   operands of the statement and are inside
                                   of the MAX_LOOP loop.  */
};

/* Maps statements to their lim_aux_data.  */

static struct pointer_map_t *lim_aux_data_map;

/* Description of a memory reference location.  */

typedef struct mem_ref_loc
{
  tree *ref;                    /* The reference itself.  */
  gimple stmt;                  /* The statement in which it occurs.  */
} *mem_ref_loc_p;


/* The list of memory reference locations in a loop.  */

typedef struct mem_ref_locs
{
  vec<mem_ref_loc_p> locs;
} *mem_ref_locs_p;


/* Description of a memory reference.  */

typedef struct mem_ref
{
  tree mem;                     /* The memory itself.  */
  unsigned id;                  /* ID assigned to the memory reference
                                   (its index in memory_accesses.refs_list).  */
  hashval_t hash;               /* Its hash value.  */
  bitmap stored;                /* The set of loops in which this memory
                                   location is stored to.  */
  vec<mem_ref_locs_p> accesses_in_loop;
                                /* The locations of the accesses.  Vector
                                   indexed by the loop number.  */

  /* The following sets are computed on demand.  We keep both the set and
     its complement, so that we know whether the information was
     already computed or not.  */
  bitmap indep_loop;            /* The set of loops in which the memory
                                   reference is independent, meaning:
                                   If it is stored in the loop, this store
                                   is independent of all other loads and
                                   stores.
                                   If it is only loaded, then it is independent
                                   of all stores in the loop.  */
  bitmap dep_loop;              /* The complement of INDEP_LOOP.  */

  bitmap indep_ref;             /* The set of memory references of which
                                   this reference is independent.  */
  bitmap dep_ref;               /* The complement of INDEP_REF.  */
} *mem_ref_p;


/* Description of memory accesses in loops.  */

static struct
{
  /* The hash table of memory references accessed in loops.  */
  htab_t refs;

  /* The list of memory references.  */
  vec<mem_ref_p> refs_list;

  /* The set of memory references accessed in each loop.  */
  vec<bitmap> refs_in_loop;

  /* The set of memory references accessed in each loop, including
     subloops.  */
  vec<bitmap> all_refs_in_loop;

  /* The set of memory references stored in each loop, including
     subloops.  */
  vec<bitmap> all_refs_stored_in_loop;

  /* Cache for expanding memory addresses.  */
  struct pointer_map_t *ttae_cache;
} memory_accesses;

/* Obstack for the bitmaps in the above data structures.  */
static bitmap_obstack lim_bitmap_obstack;

static bool ref_indep_loop_p (struct loop *, mem_ref_p);

/* Minimum cost of an expensive expression.  */
#define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))

/* The outermost loop for which execution of the header guarantees that the
   block will be executed.  */
#define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
#define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))

/* Whether the reference was analyzable.  */
#define MEM_ANALYZABLE(REF) ((REF)->mem != error_mark_node)

static struct lim_aux_data *
init_lim_data (gimple stmt)
{
  void **p = pointer_map_insert (lim_aux_data_map, stmt);

  *p = XCNEW (struct lim_aux_data);
  return (struct lim_aux_data *) *p;
}

static struct lim_aux_data *
get_lim_data (gimple stmt)
{
  void **p = pointer_map_contains (lim_aux_data_map, stmt);
  if (!p)
    return NULL;

  return (struct lim_aux_data *) *p;
}

/* Releases the memory occupied by DATA.  */

static void
free_lim_aux_data (struct lim_aux_data *data)
{
  struct depend *dep, *next;

  for (dep = data->depends; dep; dep = next)
    {
      next = dep->next;
      free (dep);
    }
  free (data);
}

static void
clear_lim_data (gimple stmt)
{
  void **p = pointer_map_contains (lim_aux_data_map, stmt);
  if (!p)
    return;

  free_lim_aux_data ((struct lim_aux_data *) *p);
  *p = NULL;
}
/* Calls CBCK for each index in memory reference ADDR_P.  Two kinds of
   situations are handled; in each of these cases, the memory reference
   and DATA are passed to the callback:

   Access to an array: ARRAY_{RANGE_}REF (base, index).  In this case we also
   pass the pointer to the index to the callback.

   Pointer dereference: MEM_REF (addr).  In this case we also pass the
   pointer to addr to the callback.

   If the callback returns false, the whole search stops and false is
   returned.  Otherwise the function returns true after traversing through
   the whole reference *ADDR_P.  */

bool
for_each_index (tree *addr_p, bool (*cbck) (tree, tree *, void *), void *data)
{
  tree *nxt, *idx;

  for (; ; addr_p = nxt)
    {
      switch (TREE_CODE (*addr_p))
        {
        case SSA_NAME:
          return cbck (*addr_p, addr_p, data);

        case MEM_REF:
          nxt = &TREE_OPERAND (*addr_p, 0);
          return cbck (*addr_p, nxt, data);

        case BIT_FIELD_REF:
        case VIEW_CONVERT_EXPR:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
          nxt = &TREE_OPERAND (*addr_p, 0);
          break;

        case COMPONENT_REF:
          /* If the component has varying offset, it behaves like an index
             as well.  */
          idx = &TREE_OPERAND (*addr_p, 2);
          if (*idx
              && !cbck (*addr_p, idx, data))
            return false;

          nxt = &TREE_OPERAND (*addr_p, 0);
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
          nxt = &TREE_OPERAND (*addr_p, 0);
          if (!cbck (*addr_p, &TREE_OPERAND (*addr_p, 1), data))
            return false;
          break;

        case VAR_DECL:
        case PARM_DECL:
        case STRING_CST:
        case RESULT_DECL:
        case VECTOR_CST:
        case COMPLEX_CST:
        case INTEGER_CST:
        case REAL_CST:
        case FIXED_CST:
        case CONSTRUCTOR:
          return true;

        case ADDR_EXPR:
          gcc_assert (is_gimple_min_invariant (*addr_p));
          return true;

        case TARGET_MEM_REF:
          idx = &TMR_BASE (*addr_p);
          if (*idx
              && !cbck (*addr_p, idx, data))
            return false;
          idx = &TMR_INDEX (*addr_p);
          if (*idx
              && !cbck (*addr_p, idx, data))
            return false;
          idx = &TMR_INDEX2 (*addr_p);
          if (*idx
              && !cbck (*addr_p, idx, data))
            return false;
          return true;

        default:
          gcc_unreachable ();
        }
    }
}
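
/* For illustration only (a hypothetical helper, not part of this pass):
   a callback that checks that every index in a reference can be computed
   outside LOOP might look like

     static bool
     index_movable_p (tree ref ATTRIBUTE_UNUSED, tree *index, void *data)
     {
       struct loop *loop = (struct loop *) data;
       return outermost_invariant_loop (*index, loop) != NULL;
     }

   and be invoked as for_each_index (&ref, index_movable_p, loop).
   may_move_till below is the real instance of this pattern.  */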

/* If it is possible to hoist the statement STMT unconditionally,
   returns MOVE_POSSIBLE.
   If it is possible to hoist the statement STMT, but we must avoid making
   it executed if it would not be executed in the original program (e.g.
   because it may trap), return MOVE_PRESERVE_EXECUTION.
   Otherwise return MOVE_IMPOSSIBLE.  */

enum move_pos
movement_possibility (gimple stmt)
{
  tree lhs;
  enum move_pos ret = MOVE_POSSIBLE;

  if (flag_unswitch_loops
      && gimple_code (stmt) == GIMPLE_COND)
    {
      /* If we perform unswitching, force the operands of the invariant
         condition to be moved out of the loop.  */
      return MOVE_POSSIBLE;
    }

  if (gimple_code (stmt) == GIMPLE_PHI
      && gimple_phi_num_args (stmt) <= 2
      && !virtual_operand_p (gimple_phi_result (stmt))
      && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
    return MOVE_POSSIBLE;

  if (gimple_get_lhs (stmt) == NULL_TREE)
    return MOVE_IMPOSSIBLE;

  if (gimple_vdef (stmt))
    return MOVE_IMPOSSIBLE;

  if (stmt_ends_bb_p (stmt)
      || gimple_has_volatile_ops (stmt)
      || gimple_has_side_effects (stmt)
      || stmt_could_throw_p (stmt))
    return MOVE_IMPOSSIBLE;

  if (is_gimple_call (stmt))
    {
      /* While a pure or const call is guaranteed to have no side effects,
         we cannot move it arbitrarily.  Consider code like

         char *s = something ();

         while (1)
           {
             if (s)
               t = strlen (s);
             else
               t = 0;
           }

         Here the strlen call cannot be moved out of the loop, even though
         s is invariant.  In addition to possibly creating a call with
         invalid arguments, moving out a function call that is not executed
         may cause performance regressions in case the call is costly and
         not executed at all.  */
      ret = MOVE_PRESERVE_EXECUTION;
      lhs = gimple_call_lhs (stmt);
    }
  else if (is_gimple_assign (stmt))
    lhs = gimple_assign_lhs (stmt);
  else
    return MOVE_IMPOSSIBLE;

  if (TREE_CODE (lhs) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
    return MOVE_IMPOSSIBLE;

  if (TREE_CODE (lhs) != SSA_NAME
      || gimple_could_trap_p (stmt))
    return MOVE_PRESERVE_EXECUTION;

  /* Non-local loads in a transaction cannot be hoisted out.  Well,
     unless the load happens on every path out of the loop, but we
     don't take this into account yet.  */
  if (flag_tm
      && gimple_in_transaction (stmt)
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      if (DECL_P (rhs) && is_global_var (rhs))
        {
          if (dump_file)
            {
              fprintf (dump_file, "Cannot hoist conditional load of ");
              print_generic_expr (dump_file, rhs, TDF_SLIM);
              fprintf (dump_file, " because it is in a transaction.\n");
            }
          return MOVE_IMPOSSIBLE;
        }
    }

  return ret;
}

/* Suppose that operand DEF is used inside the LOOP.  Returns the outermost
   loop to which we could move the expression using DEF if it did not have
   other operands, i.e. the outermost loop enclosing LOOP in which the value
   of DEF is invariant.  */

static struct loop *
outermost_invariant_loop (tree def, struct loop *loop)
{
  gimple def_stmt;
  basic_block def_bb;
  struct loop *max_loop;
  struct lim_aux_data *lim_data;

  if (!def)
    return superloop_at_depth (loop, 1);

  if (TREE_CODE (def) != SSA_NAME)
    {
      gcc_assert (is_gimple_min_invariant (def));
      return superloop_at_depth (loop, 1);
    }

  def_stmt = SSA_NAME_DEF_STMT (def);
  def_bb = gimple_bb (def_stmt);
  if (!def_bb)
    return superloop_at_depth (loop, 1);

  max_loop = find_common_loop (loop, def_bb->loop_father);

  lim_data = get_lim_data (def_stmt);
  if (lim_data != NULL && lim_data->max_loop != NULL)
    max_loop = find_common_loop (max_loop,
                                 loop_outer (lim_data->max_loop));
  if (max_loop == loop)
    return NULL;
  max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);

  return max_loop;
}
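
/* For example, a DEF that is a constant or is defined before any loop is
   invariant in the whole nest, so the outermost loop enclosing LOOP is
   returned; a DEF defined inside LOOP itself, with no recorded way to hoist
   it further, yields NULL.  */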

/* DATA is a structure containing information associated with a statement
   inside LOOP.  DEF is one of the operands of this statement.

   Find the outermost loop enclosing LOOP in which the value of DEF is
   invariant and record this in the DATA->max_loop field.  If DEF itself is
   defined inside this loop as well (i.e. we need to hoist it out of the
   loop if we want to hoist the statement represented by DATA), record the
   statement that defines DEF in the DATA->depends list.  Additionally, if
   ADD_COST is true, add the cost of the computation of DEF to DATA->cost.

   If DEF is not invariant in LOOP, return false.  Otherwise return true.  */

static bool
add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
                bool add_cost)
{
  gimple def_stmt = SSA_NAME_DEF_STMT (def);
  basic_block def_bb = gimple_bb (def_stmt);
  struct loop *max_loop;
  struct depend *dep;
  struct lim_aux_data *def_data;

  if (!def_bb)
    return true;

  max_loop = outermost_invariant_loop (def, loop);
  if (!max_loop)
    return false;

  if (flow_loop_nested_p (data->max_loop, max_loop))
    data->max_loop = max_loop;

  def_data = get_lim_data (def_stmt);
  if (!def_data)
    return true;

  if (add_cost
      /* Only add the cost if the statement defining DEF is inside LOOP,
         i.e. if it is likely that by moving the invariants dependent
         on it, we will be able to avoid creating a new register for
         it (since it will be only used in these dependent invariants).  */
      && def_bb->loop_father == loop)
    data->cost += def_data->cost;

  dep = XNEW (struct depend);
  dep->stmt = def_stmt;
  dep->next = data->depends;
  data->depends = dep;

  return true;
}

/* Returns an estimate for the cost of statement STMT.  The values here
   are just ad-hoc constants, similar to costs for inlining.  */

static unsigned
stmt_cost (gimple stmt)
{
  /* Always try to create possibilities for unswitching.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_PHI)
    return LIM_EXPENSIVE;

  /* We should be hoisting calls if possible.  */
  if (is_gimple_call (stmt))
    {
      tree fndecl;

      /* Unless the call is a builtin_constant_p; this always folds to a
         constant, so moving it is useless.  */
      fndecl = gimple_call_fndecl (stmt);
      if (fndecl
          && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
          && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
        return 0;

      return LIM_EXPENSIVE;
    }

  /* Hoisting memory references out should almost surely be a win.  */
  if (gimple_references_memory_p (stmt))
    return LIM_EXPENSIVE;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return 1;

  switch (gimple_assign_rhs_code (stmt))
    {
    case MULT_EXPR:
    case WIDEN_MULT_EXPR:
    case WIDEN_MULT_PLUS_EXPR:
    case WIDEN_MULT_MINUS_EXPR:
    case DOT_PROD_EXPR:
    case FMA_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case TRUNC_MOD_EXPR:
    case RDIV_EXPR:
      /* Division and multiplication are usually expensive.  */
      return LIM_EXPENSIVE;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case WIDEN_LSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      /* Shifts and rotates are usually expensive.  */
      return LIM_EXPENSIVE;

    case CONSTRUCTOR:
      /* Make vector construction cost proportional to the number
         of elements.  */
      return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));

    case SSA_NAME:
    case PAREN_EXPR:
      /* Whether or not something is wrapped inside a PAREN_EXPR
         should not change move cost.  Nor should an intermediate
         unpropagated SSA name copy.  */
      return 0;

    default:
      return 1;
    }
}

/* Finds the outermost loop between OUTER and LOOP in which the memory
   reference REF is independent.  If REF is not independent in LOOP, NULL
   is returned instead.  */

static struct loop *
outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref)
{
  struct loop *aloop;

  if (bitmap_bit_p (ref->stored, loop->num))
    return NULL;

  for (aloop = outer;
       aloop != loop;
       aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
    if (!bitmap_bit_p (ref->stored, aloop->num)
        && ref_indep_loop_p (aloop, ref))
      return aloop;

  if (ref_indep_loop_p (loop, ref))
    return loop;
  else
    return NULL;
}
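
/* For instance (an illustrative reading), for a load in an inner loop L2 of
   an outer loop L1, outermost_indep_loop (L1, L2, REF) returns L1 when no
   store in L1 conflicts with REF, so the load can be hoisted out of both
   loops; if only L2 is free of conflicting stores, L2 is returned.  */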

/* If there is a simple load or store to a memory reference in STMT, returns
   the location of the memory reference, and sets IS_STORE according to
   whether it is a store or load.  Otherwise, returns NULL.  */

static tree *
simple_mem_ref_in_stmt (gimple stmt, bool *is_store)
{
  tree *lhs, *rhs;

  /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns.  */
  if (!gimple_assign_single_p (stmt))
    return NULL;

  lhs = gimple_assign_lhs_ptr (stmt);
  rhs = gimple_assign_rhs1_ptr (stmt);

  if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
    {
      *is_store = false;
      return rhs;
    }
  else if (gimple_vdef (stmt)
           && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
    {
      *is_store = true;
      return lhs;
    }
  else
    return NULL;
}

/* Returns the memory reference contained in STMT.  */

static mem_ref_p
mem_ref_in_stmt (gimple stmt)
{
  bool store;
  tree *mem = simple_mem_ref_in_stmt (stmt, &store);
  hashval_t hash;
  mem_ref_p ref;

  if (!mem)
    return NULL;
  gcc_assert (!store);

  hash = iterative_hash_expr (*mem, 0);
  ref = (mem_ref_p) htab_find_with_hash (memory_accesses.refs, *mem, hash);

  gcc_assert (ref != NULL);
  return ref;
}

/* From a controlling predicate in DOM determine the arguments from
   the PHI node PHI that are chosen if the predicate evaluates to
   true or false, and store them to *TRUE_ARG_P and *FALSE_ARG_P if
   they are non-NULL.  Returns true if the arguments can be determined,
   otherwise returns false.  */
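
/* For example (hypothetical SSA names), in the diamond

       if (cond)              <-- last statement of DOM
         x_1 = a;
       else
         x_2 = b;
       x_3 = PHI <x_1, x_2>

   *TRUE_ARG_P is set to x_1 and *FALSE_ARG_P to x_2, since the first PHI
   edge is dominated by the true edge of the predicate and the second by
   the false edge.  */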

static bool
extract_true_false_args_from_phi (basic_block dom, gimple phi,
                                  tree *true_arg_p, tree *false_arg_p)
{
  basic_block bb = gimple_bb (phi);
  edge true_edge, false_edge, tem;
  tree arg0 = NULL_TREE, arg1 = NULL_TREE;

  /* We have to verify that one edge into the PHI node is dominated
     by the true edge of the predicate block and the other edge
     dominated by the false edge.  This ensures that the PHI argument
     we are going to take is completely determined by the path we
     take from the predicate block.
     We can only use BB dominance checks below if the destination of
     the true/false edges are dominated by their edge, thus only
     have a single predecessor.  */
  extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
  tem = EDGE_PRED (bb, 0);
  if (tem == true_edge
      || (single_pred_p (true_edge->dest)
          && (tem->src == true_edge->dest
              || dominated_by_p (CDI_DOMINATORS,
                                 tem->src, true_edge->dest))))
    arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
  else if (tem == false_edge
           || (single_pred_p (false_edge->dest)
               && (tem->src == false_edge->dest
                   || dominated_by_p (CDI_DOMINATORS,
                                      tem->src, false_edge->dest))))
    arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
  else
    return false;
  tem = EDGE_PRED (bb, 1);
  if (tem == true_edge
      || (single_pred_p (true_edge->dest)
          && (tem->src == true_edge->dest
              || dominated_by_p (CDI_DOMINATORS,
                                 tem->src, true_edge->dest))))
    arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
  else if (tem == false_edge
           || (single_pred_p (false_edge->dest)
               && (tem->src == false_edge->dest
                   || dominated_by_p (CDI_DOMINATORS,
                                      tem->src, false_edge->dest))))
    arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
  else
    return false;
  if (!arg0 || !arg1)
    return false;

  if (true_arg_p)
    *true_arg_p = arg0;
  if (false_arg_p)
    *false_arg_p = arg1;

  return true;
}

/* Determine the outermost loop to which it is possible to hoist a statement
   STMT and store it to LIM_DATA (STMT)->max_loop.  To do this we determine
   the outermost loop in which the value computed by STMT is invariant.
   If MUST_PRESERVE_EXEC is true, additionally choose a loop such that we
   preserve the fact whether STMT is executed.  It also fills other related
   information to LIM_DATA (STMT).

   The function returns false if STMT cannot be hoisted outside of the loop
   it is defined in, and true otherwise.  */

static bool
determine_max_movement (gimple stmt, bool must_preserve_exec)
{
  basic_block bb = gimple_bb (stmt);
  struct loop *loop = bb->loop_father;
  struct loop *level;
  struct lim_aux_data *lim_data = get_lim_data (stmt);
  tree val;
  ssa_op_iter iter;

  if (must_preserve_exec)
    level = ALWAYS_EXECUTED_IN (bb);
  else
    level = superloop_at_depth (loop, 1);
  lim_data->max_loop = level;

  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      use_operand_p use_p;
      unsigned min_cost = UINT_MAX;
      unsigned total_cost = 0;
      struct lim_aux_data *def_data;

      /* We will end up promoting dependencies to be unconditionally
         evaluated.  For this reason the PHI cost (and thus the
         cost we remove from the loop by doing the invariant motion)
         is that of the cheapest PHI argument dependency chain.  */
      FOR_EACH_PHI_ARG (use_p, stmt, iter, SSA_OP_USE)
        {
          val = USE_FROM_PTR (use_p);
          if (TREE_CODE (val) != SSA_NAME)
            continue;
          if (!add_dependency (val, lim_data, loop, false))
            return false;
          def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
          if (def_data)
            {
              min_cost = MIN (min_cost, def_data->cost);
              total_cost += def_data->cost;
            }
        }

      lim_data->cost += min_cost;

      if (gimple_phi_num_args (stmt) > 1)
        {
          basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
          gimple cond;
          if (gsi_end_p (gsi_last_bb (dom)))
            return false;
          cond = gsi_stmt (gsi_last_bb (dom));
          if (gimple_code (cond) != GIMPLE_COND)
            return false;
          /* Verify that this is an extended form of a diamond and
             the PHI arguments are completely controlled by the
             predicate in DOM.  */
          if (!extract_true_false_args_from_phi (dom, stmt, NULL, NULL))
            return false;

          /* Fold in dependencies and cost of the condition.  */
          FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
            {
              if (!add_dependency (val, lim_data, loop, false))
                return false;
              def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
              if (def_data)
                total_cost += def_data->cost;
            }

          /* We want to avoid unconditionally executing very expensive
             operations.  As costs for our dependencies cannot be
             negative, just claim we are not invariant for this case.
             We also are not sure whether the control-flow inside the
             loop will vanish.  */
          if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
              && !(min_cost != 0
                   && total_cost / min_cost <= 2))
            return false;

          /* Assume that the control-flow in the loop will vanish.
             ??? We should verify this and not artificially increase
             the cost if that is not the case.  */
          lim_data->cost += stmt_cost (stmt);
        }

      return true;
    }
  else
    FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
      if (!add_dependency (val, lim_data, loop, true))
        return false;

  if (gimple_vuse (stmt))
    {
      mem_ref_p ref = mem_ref_in_stmt (stmt);

      if (ref)
        {
          lim_data->max_loop
            = outermost_indep_loop (lim_data->max_loop, loop, ref);
          if (!lim_data->max_loop)
            return false;
        }
      else
        {
          if ((val = gimple_vuse (stmt)) != NULL_TREE)
            {
              if (!add_dependency (val, lim_data, loop, false))
                return false;
            }
        }
    }

  lim_data->cost += stmt_cost (stmt);

  return true;
}
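
/* A worked example of the PHI cost check above, with hypothetical numbers
   and the default LIM_EXPENSIVE of 20: if the two argument dependency
   chains cost 1 and 100, then min_cost == 1 and total_cost == 101.  Since
   101 - 1 >= 40 and 101 / 1 > 2, the PHI is rejected; hoisting it would
   unconditionally execute the expensive chain that the loop may rarely
   take.  */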

/* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
   and that one of the operands of this statement is computed by STMT.
   Ensure that STMT (together with all the statements that define its
   operands) is hoisted at least out of the loop LEVEL.  */

static void
set_level (gimple stmt, struct loop *orig_loop, struct loop *level)
{
  struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
  struct depend *dep;
  struct lim_aux_data *lim_data;

  stmt_loop = find_common_loop (orig_loop, stmt_loop);
  lim_data = get_lim_data (stmt);
  if (lim_data != NULL && lim_data->tgt_loop != NULL)
    stmt_loop = find_common_loop (stmt_loop,
                                  loop_outer (lim_data->tgt_loop));
  if (flow_loop_nested_p (stmt_loop, level))
    return;

  gcc_assert (level == lim_data->max_loop
              || flow_loop_nested_p (lim_data->max_loop, level));

  lim_data->tgt_loop = level;
  for (dep = lim_data->depends; dep; dep = dep->next)
    set_level (dep->stmt, orig_loop, level);
}

/* Determines an outermost loop from which we want to hoist the statement
   STMT.  For now we choose the outermost possible loop.  TODO -- use
   profiling information to set it more sanely.  */

static void
set_profitable_level (gimple stmt)
{
  set_level (stmt, gimple_bb (stmt)->loop_father,
             get_lim_data (stmt)->max_loop);
}

/* Returns true if STMT is a call that has side effects.  */

static bool
nonpure_call_p (gimple stmt)
{
  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  return gimple_has_side_effects (stmt);
}

/* Rewrite a/b to a*(1/b).  Return the invariant statement to process.  */

static gimple
rewrite_reciprocal (gimple_stmt_iterator *bsi)
{
  gimple stmt, stmt1, stmt2;
  tree name, lhs, type;
  tree real_one;
  gimple_stmt_iterator gsi;

  stmt = gsi_stmt (*bsi);
  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);

  real_one = build_one_cst (type);

  name = make_temp_ssa_name (type, NULL, "reciptmp");
  stmt1 = gimple_build_assign_with_ops (RDIV_EXPR, name, real_one,
                                        gimple_assign_rhs2 (stmt));

  stmt2 = gimple_build_assign_with_ops (MULT_EXPR, lhs, name,
                                        gimple_assign_rhs1 (stmt));

  /* Replace division stmt with reciprocal and multiply stmts.
     The multiply stmt is not invariant, so update iterator
     and avoid rescanning.  */
  gsi = *bsi;
  gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
  gsi_replace (&gsi, stmt2, true);

  /* Continue processing with invariant reciprocal statement.  */
  return stmt1;
}
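
/* A sketch of the effect at the source level (illustrative variable names;
   requires -funsafe-math-optimizations, with b invariant in the loop):

     for (i = 0; i < n; i++)
       x[i] = a[i] / b;

   becomes

     reciptmp = 1.0 / b;       <-- invariant, hoisted by the pass
     for (i = 0; i < n; i++)
       x[i] = a[i] * reciptmp;

   so the expensive division executes once instead of on every iteration.  */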

/* Check if the pattern at *BSI is a bittest of the form
   (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0.  */

static gimple
rewrite_bittest (gimple_stmt_iterator *bsi)
{
  gimple stmt, use_stmt, stmt1, stmt2;
  tree lhs, name, t, a, b;
  use_operand_p use;

  stmt = gsi_stmt (*bsi);
  lhs = gimple_assign_lhs (stmt);

  /* Verify that the single use of lhs is a comparison against zero.  */
  if (TREE_CODE (lhs) != SSA_NAME
      || !single_imm_use (lhs, &use, &use_stmt)
      || gimple_code (use_stmt) != GIMPLE_COND)
    return stmt;
  if (gimple_cond_lhs (use_stmt) != lhs
      || (gimple_cond_code (use_stmt) != NE_EXPR
          && gimple_cond_code (use_stmt) != EQ_EXPR)
      || !integer_zerop (gimple_cond_rhs (use_stmt)))
    return stmt;

  /* Get at the operands of the shift.  The rhs is TMP1 & 1.  */
  stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
  if (gimple_code (stmt1) != GIMPLE_ASSIGN)
    return stmt;

  /* There may be a conversion in between, possibly inserted by fold.  */
  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
    {
      t = gimple_assign_rhs1 (stmt1);
      if (TREE_CODE (t) != SSA_NAME
          || !has_single_use (t))
        return stmt;
      stmt1 = SSA_NAME_DEF_STMT (t);
      if (gimple_code (stmt1) != GIMPLE_ASSIGN)
        return stmt;
    }

  /* Verify that B is loop invariant but A is not.  Verify that with
     all the stmt walking we are still in the same loop.  */
  if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
      || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
    return stmt;

  a = gimple_assign_rhs1 (stmt1);
  b = gimple_assign_rhs2 (stmt1);

  if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
      && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
    {
      gimple_stmt_iterator rsi;

      /* 1 << B */
      t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
                       build_int_cst (TREE_TYPE (a), 1), b);
      name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
      stmt1 = gimple_build_assign (name, t);

      /* A & (1 << B) */
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
      name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
      stmt2 = gimple_build_assign (name, t);

      /* Replace the SSA_NAME we compare against zero.  Adjust
         the type of zero accordingly.  */
      SET_USE (use, name);
      gimple_cond_set_rhs (use_stmt, build_int_cst_type (TREE_TYPE (name), 0));

      /* Don't use gsi_replace here, none of the new assignments sets
         the variable originally set in stmt.  Move bsi to stmt1, and
         then remove the original stmt, so that we get a chance to
         retain debug info for it.  */
      rsi = *bsi;
      gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
      gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
      gsi_remove (&rsi, true);

      return stmt1;
    }

  return stmt;
}
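
/* A sketch of the effect at the source level (illustrative names; b must
   be invariant in the loop while a varies):

     while (...)
       if ((a >> b) & 1)
         ...

   becomes

     shifttmp = 1 << b;        <-- invariant, hoisted by the pass
     while (...)
       if (a & shifttmp)
         ...

   so the shift is performed once outside the loop.  */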


/* Determine the outermost loops in which statements in basic block BB are
   invariant, and record them to the LIM_DATA associated with the statements.
   Callback for walk_dominator_tree.  */

static void
determine_invariantness_stmt (struct dom_walk_data *dw_data ATTRIBUTE_UNUSED,
                              basic_block bb)
{
  enum move_pos pos;
  gimple_stmt_iterator bsi;
  gimple stmt;
  bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
  struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
  struct lim_aux_data *lim_data;

  if (!loop_outer (bb->loop_father))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
             bb->index, bb->loop_father->num, loop_depth (bb->loop_father));

  /* Look at PHI nodes, but only if there are at most two.
     ??? We could relax this further by post-processing the inserted
     code and transforming adjacent cond-exprs with the same predicate
     to control flow again.  */
  bsi = gsi_start_phis (bb);
  if (!gsi_end_p (bsi)
      && ((gsi_next (&bsi), gsi_end_p (bsi))
          || (gsi_next (&bsi), gsi_end_p (bsi))))
    for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
      {
        stmt = gsi_stmt (bsi);

        pos = movement_possibility (stmt);
        if (pos == MOVE_IMPOSSIBLE)
          continue;

        lim_data = init_lim_data (stmt);
        lim_data->always_executed_in = outermost;

        if (!determine_max_movement (stmt, false))
          {
            lim_data->max_loop = NULL;
            continue;
          }

        if (dump_file && (dump_flags & TDF_DETAILS))
          {
            print_gimple_stmt (dump_file, stmt, 2, 0);
            fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
                     loop_depth (lim_data->max_loop),
                     lim_data->cost);
          }

        if (lim_data->cost >= LIM_EXPENSIVE)
          set_profitable_level (stmt);
      }

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      stmt = gsi_stmt (bsi);

      pos = movement_possibility (stmt);
      if (pos == MOVE_IMPOSSIBLE)
        {
          if (nonpure_call_p (stmt))
            {
              maybe_never = true;
              outermost = NULL;
            }
          /* Make sure to note always_executed_in for stores to make
             store-motion work.  */
          else if (stmt_makes_single_store (stmt))
            {
              struct lim_aux_data *lim_data = init_lim_data (stmt);
              lim_data->always_executed_in = outermost;
            }
          continue;
        }

      if (is_gimple_assign (stmt)
          && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
              == GIMPLE_BINARY_RHS))
        {
          tree op0 = gimple_assign_rhs1 (stmt);
          tree op1 = gimple_assign_rhs2 (stmt);
          struct loop *ol1
            = outermost_invariant_loop (op1, loop_containing_stmt (stmt));

          /* If the divisor is invariant, convert a/b to a*(1/b), allowing
             the reciprocal to be hoisted out of the loop, saving an
             expensive divide.  */
          if (pos == MOVE_POSSIBLE
              && gimple_assign_rhs_code (stmt) == RDIV_EXPR
              && flag_unsafe_math_optimizations
              && !flag_trapping_math
              && ol1 != NULL
              && outermost_invariant_loop (op0, ol1) == NULL)
            stmt = rewrite_reciprocal (&bsi);

          /* If the shift count is invariant, convert (A >> B) & 1 to
             A & (1 << B), allowing the bit mask to be hoisted out of the
             loop, saving an expensive shift.  */
          if (pos == MOVE_POSSIBLE
              && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
              && integer_onep (op1)
              && TREE_CODE (op0) == SSA_NAME
              && has_single_use (op0))
            stmt = rewrite_bittest (&bsi);
        }

      lim_data = init_lim_data (stmt);
      lim_data->always_executed_in = outermost;

      if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
        continue;

      if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
        {
          lim_data->max_loop = NULL;
          continue;
        }

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          print_gimple_stmt (dump_file, stmt, 2, 0);
          fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
                   loop_depth (lim_data->max_loop),
                   lim_data->cost);
        }

      if (lim_data->cost >= LIM_EXPENSIVE)
        set_profitable_level (stmt);
    }
}

/* For each statement determines the outermost loop in which it is invariant,
   the statements on whose motion it depends and the cost of the computation.
   This information is stored to the LIM_DATA structure associated with
   each statement.  */

static void
determine_invariantness (void)
{
  struct dom_walk_data walk_data;

  memset (&walk_data, 0, sizeof (struct dom_walk_data));
  walk_data.dom_direction = CDI_DOMINATORS;
  walk_data.before_dom_children = determine_invariantness_stmt;

  init_walk_dominator_tree (&walk_data);
  walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
  fini_walk_dominator_tree (&walk_data);
}

/* Hoist the statements in basic block BB out of the loops prescribed by
   data stored in LIM_DATA structures associated with each statement.
   Callback for walk_dominator_tree.  */

static void
move_computations_stmt (struct dom_walk_data *dw_data,
                        basic_block bb)
{
  struct loop *level;
  gimple_stmt_iterator bsi;
  gimple stmt;
  unsigned cost = 0;
  struct lim_aux_data *lim_data;

  if (!loop_outer (bb->loop_father))
    return;

  for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
    {
      gimple new_stmt;
      stmt = gsi_stmt (bsi);

      lim_data = get_lim_data (stmt);
      if (lim_data == NULL)
        {
          gsi_next (&bsi);
          continue;
        }

      cost = lim_data->cost;
      level = lim_data->tgt_loop;
      clear_lim_data (stmt);

      if (!level)
        {
          gsi_next (&bsi);
          continue;
        }

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Moving PHI node\n");
          print_gimple_stmt (dump_file, stmt, 0, 0);
          fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
                   cost, level->num);
        }

      if (gimple_phi_num_args (stmt) == 1)
        {
          tree arg = PHI_ARG_DEF (stmt, 0);
          new_stmt = gimple_build_assign_with_ops (TREE_CODE (arg),
                                                   gimple_phi_result (stmt),
                                                   arg, NULL_TREE);
          SSA_NAME_DEF_STMT (gimple_phi_result (stmt)) = new_stmt;
        }
      else
        {
          basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
          gimple cond = gsi_stmt (gsi_last_bb (dom));
          tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
          /* Get the PHI arguments corresponding to the true and false
             edges of COND.  */
          extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
          gcc_assert (arg0 && arg1);
          t = build2 (gimple_cond_code (cond), boolean_type_node,
                      gimple_cond_lhs (cond), gimple_cond_rhs (cond));
          new_stmt = gimple_build_assign_with_ops (COND_EXPR,
                                                   gimple_phi_result (stmt),
                                                   t, arg0, arg1);
          SSA_NAME_DEF_STMT (gimple_phi_result (stmt)) = new_stmt;
          *((unsigned int *) (dw_data->global_data)) |= TODO_cleanup_cfg;
        }
      gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
      remove_phi_node (&bsi, false);
    }

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
    {
      edge e;

      stmt = gsi_stmt (bsi);

      lim_data = get_lim_data (stmt);
      if (lim_data == NULL)
        {
          gsi_next (&bsi);
          continue;
        }

      cost = lim_data->cost;
      level = lim_data->tgt_loop;
      clear_lim_data (stmt);

      if (!level)
        {
          gsi_next (&bsi);
          continue;
        }

      /* We do not really want to move conditionals out of the loop; we just
         placed them here to force their operands to be moved if necessary.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Moving statement\n");
          print_gimple_stmt (dump_file, stmt, 0, 0);
          fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
                   cost, level->num);
        }

      e = loop_preheader_edge (level);
      gcc_assert (!gimple_vdef (stmt));
      if (gimple_vuse (stmt))
        {
          /* The new VUSE is the one from the virtual PHI in the loop
             header or the one already present.  */
          gimple_stmt_iterator gsi2;
          for (gsi2 = gsi_start_phis (e->dest);
               !gsi_end_p (gsi2); gsi_next (&gsi2))
            {
              gimple phi = gsi_stmt (gsi2);
              if (virtual_operand_p (gimple_phi_result (phi)))
                {
                  gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
                  break;
                }
            }
        }
      gsi_remove (&bsi, false);
      gsi_insert_on_edge (e, stmt);
    }
}

/* Hoist the statements out of the loops prescribed by data stored in
   LIM_DATA structures associated with each statement.  */

static unsigned int
move_computations (void)
{
  struct dom_walk_data walk_data;
  unsigned int todo = 0;

  memset (&walk_data, 0, sizeof (struct dom_walk_data));
  walk_data.global_data = &todo;
  walk_data.dom_direction = CDI_DOMINATORS;
  walk_data.before_dom_children = move_computations_stmt;

  init_walk_dominator_tree (&walk_data);
  walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
  fini_walk_dominator_tree (&walk_data);

  gsi_commit_edge_inserts ();
  if (need_ssa_update_p (cfun))
    rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);

  return todo;
}

/* Checks whether the statement defining variable *INDEX can be hoisted
   out of the loop passed in DATA.  Callback for for_each_index.  */

static bool
may_move_till (tree ref, tree *index, void *data)
{
  struct loop *loop = (struct loop *) data, *max_loop;

  /* If REF is an array reference, check also that the step and the lower
     bound are invariant in LOOP.  */
  if (TREE_CODE (ref) == ARRAY_REF)
    {
      tree step = TREE_OPERAND (ref, 3);
      tree lbound = TREE_OPERAND (ref, 2);

      max_loop = outermost_invariant_loop (step, loop);
      if (!max_loop)
        return false;

      max_loop = outermost_invariant_loop (lbound, loop);
      if (!max_loop)
        return false;
    }

  max_loop = outermost_invariant_loop (*index, loop);
  if (!max_loop)
    return false;

  return true;
}
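
/* A note on the ARRAY_REF case above: operands 2 and 3 of an ARRAY_REF are
   its lower bound and element size step.  They are usually implicit, but
   may be SSA names, e.g. for arrays with runtime bounds, in which case they
   must be hoistable along with the index itself.  */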

/* If OP is an SSA_NAME, force the statement that defines it to be
   moved out of LOOP.  ORIG_LOOP is the loop in which OP is used.  */

static void
force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
{
  gimple stmt;

  if (!op
      || is_gimple_min_invariant (op))
    return;

  gcc_assert (TREE_CODE (op) == SSA_NAME);

  stmt = SSA_NAME_DEF_STMT (op);
  if (gimple_nop_p (stmt))
    return;

  set_level (stmt, orig_loop, loop);
}

/* Forces statements defining invariants in REF (and *INDEX) to be moved out
   of LOOP.  The reference REF is used in the loop ORIG_LOOP.  Callback for
   for_each_index.  */

struct fmt_data
{
  struct loop *loop;
  struct loop *orig_loop;
};

static bool
force_move_till (tree ref, tree *index, void *data)
{
  struct fmt_data *fmt_data = (struct fmt_data *) data;

  if (TREE_CODE (ref) == ARRAY_REF)
    {
      tree step = TREE_OPERAND (ref, 3);
      tree lbound = TREE_OPERAND (ref, 2);

      force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
      force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
    }

  force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);

  return true;
}

/* A hash function for struct mem_ref object OBJ.  */

static hashval_t
memref_hash (const void *obj)
{
  const struct mem_ref *const mem = (const struct mem_ref *) obj;

  return mem->hash;
}

/* An equality function for struct mem_ref object OBJ1 with
   memory reference OBJ2.  */

static int
memref_eq (const void *obj1, const void *obj2)
{
  const struct mem_ref *const mem1 = (const struct mem_ref *) obj1;

  return operand_equal_p (mem1->mem, (const_tree) obj2, 0);
}

/* Releases the list of memory reference locations ACCS.  */

static void
free_mem_ref_locs (mem_ref_locs_p accs)
{
  unsigned i;
  mem_ref_loc_p loc;

  if (!accs)
    return;

  FOR_EACH_VEC_ELT (accs->locs, i, loc)
    free (loc);
  accs->locs.release ();
  free (accs);
}

/* A function to free the mem_ref object MEM.  */

static void
memref_free (struct mem_ref *mem)
{
  unsigned i;
  mem_ref_locs_p accs;

  FOR_EACH_VEC_ELT (mem->accesses_in_loop, i, accs)
    free_mem_ref_locs (accs);
  mem->accesses_in_loop.release ();

  free (mem);
}

/* Allocates and returns a memory reference description for MEM whose hash
   value is HASH and id is ID.  */

static mem_ref_p
mem_ref_alloc (tree mem, unsigned hash, unsigned id)
{
  mem_ref_p ref = XNEW (struct mem_ref);
  ref->mem = mem;
  ref->id = id;
  ref->hash = hash;
  ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
  ref->indep_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
  ref->dep_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
  ref->indep_ref = BITMAP_ALLOC (&lim_bitmap_obstack);
  ref->dep_ref = BITMAP_ALLOC (&lim_bitmap_obstack);
  ref->accesses_in_loop.create (0);

  return ref;
}

/* Allocates and returns the new list of locations.  */

static mem_ref_locs_p
mem_ref_locs_alloc (void)
{
  mem_ref_locs_p accs = XNEW (struct mem_ref_locs);
  accs->locs.create (0);
  return accs;
}

/* Records memory reference location *LOC in LOOP to the memory reference
   description REF.  The reference occurs in statement STMT.  */

static void
record_mem_ref_loc (mem_ref_p ref, struct loop *loop, gimple stmt, tree *loc)
{
  mem_ref_loc_p aref = XNEW (struct mem_ref_loc);
  mem_ref_locs_p accs;
  bitmap ril = memory_accesses.refs_in_loop[loop->num];

  if (ref->accesses_in_loop.length ()
      <= (unsigned) loop->num)
    ref->accesses_in_loop.safe_grow_cleared (loop->num + 1);
  accs = ref->accesses_in_loop[loop->num];
  if (!accs)
    {
      accs = mem_ref_locs_alloc ();
      ref->accesses_in_loop[loop->num] = accs;
    }

  aref->stmt = stmt;
  aref->ref = loc;

  accs->locs.safe_push (aref);
  bitmap_set_bit (ril, ref->id);
}

/* Marks reference REF as stored in LOOP.  */

static void
mark_ref_stored (mem_ref_p ref, struct loop *loop)
{
  for (;
       loop != current_loops->tree_root
       && !bitmap_bit_p (ref->stored, loop->num);
       loop = loop_outer (loop))
    bitmap_set_bit (ref->stored, loop->num);
}

/* Gathers memory references in statement STMT in LOOP, storing the
   information about them in the memory_accesses structure.  Marks
   the vops accessed through unrecognized statements there as
   well.  */

static void
gather_mem_refs_stmt (struct loop *loop, gimple stmt)
{
  tree *mem = NULL;
  hashval_t hash;
  PTR *slot;
  mem_ref_p ref;
  bool is_stored;
  unsigned id;

  if (!gimple_vuse (stmt))
    return;

  mem = simple_mem_ref_in_stmt (stmt, &is_stored);
  if (!mem)
    {
      id = memory_accesses.refs_list.length ();
      ref = mem_ref_alloc (error_mark_node, 0, id);
      memory_accesses.refs_list.safe_push (ref);
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
          print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
        }
      if (gimple_vdef (stmt))
        mark_ref_stored (ref, loop);
      record_mem_ref_loc (ref, loop, stmt, mem);
      return;
    }

  hash = iterative_hash_expr (*mem, 0);
  slot = htab_find_slot_with_hash (memory_accesses.refs, *mem, hash, INSERT);

  if (*slot)
    {
      ref = (mem_ref_p) *slot;
      id = ref->id;
    }
  else
    {
      id = memory_accesses.refs_list.length ();
      ref = mem_ref_alloc (*mem, hash, id);
      memory_accesses.refs_list.safe_push (ref);
      *slot = ref;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Memory reference %u: ", id);
          print_generic_expr (dump_file, ref->mem, TDF_SLIM);
          fprintf (dump_file, "\n");
        }
    }

  if (is_stored)
    mark_ref_stored (ref, loop);

  record_mem_ref_loc (ref, loop, stmt, mem);
  return;
}

/* Gathers memory references in loops.  */

static void
gather_mem_refs_in_loops (void)
{
  gimple_stmt_iterator bsi;
  basic_block bb;
  struct loop *loop;
  loop_iterator li;
  bitmap lrefs, alrefs, alrefso;

  FOR_EACH_BB (bb)
    {
      loop = bb->loop_father;
      if (loop == current_loops->tree_root)
        continue;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        gather_mem_refs_stmt (loop, gsi_stmt (bsi));
    }

  /* Propagate the information about accessed memory references up
     the loop hierarchy.  */
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      lrefs = memory_accesses.refs_in_loop[loop->num];
      alrefs = memory_accesses.all_refs_in_loop[loop->num];
      bitmap_ior_into (alrefs, lrefs);

      if (loop_outer (loop) == current_loops->tree_root)
        continue;

      alrefso = memory_accesses.all_refs_in_loop[loop_outer (loop)->num];
      bitmap_ior_into (alrefso, alrefs);
    }
}

/* Create a mapping from virtual operands to references that touch them
   in LOOP.  */

static void
create_vop_ref_mapping_loop (struct loop *loop)
{
  bitmap refs = memory_accesses.refs_in_loop[loop->num];
  struct loop *sloop;
  bitmap_iterator bi;
  unsigned i;
  mem_ref_p ref;

  EXECUTE_IF_SET_IN_BITMAP (refs, 0, i, bi)
    {
      ref = memory_accesses.refs_list[i];
      for (sloop = loop; sloop != current_loops->tree_root;
           sloop = loop_outer (sloop))
        if (bitmap_bit_p (ref->stored, loop->num))
          {
            bitmap refs_stored
              = memory_accesses.all_refs_stored_in_loop[sloop->num];
            bitmap_set_bit (refs_stored, ref->id);
          }
    }
}

/* For each non-clobbered virtual operand and each loop, record the memory
   references in this loop that touch the operand.  */

static void
create_vop_ref_mapping (void)
{
  loop_iterator li;
  struct loop *loop;

  FOR_EACH_LOOP (li, loop, 0)
    {
      create_vop_ref_mapping_loop (loop);
    }
}

/* Gathers information about memory accesses in the loops.  */

static void
analyze_memory_references (void)
{
  unsigned i;
  bitmap empty;

  memory_accesses.refs = htab_create (100, memref_hash, memref_eq, NULL);
  memory_accesses.refs_list.create (0);
  memory_accesses.refs_in_loop.create (number_of_loops ());
  memory_accesses.all_refs_in_loop.create (number_of_loops ());
  memory_accesses.all_refs_stored_in_loop.create (number_of_loops ());

  for (i = 0; i < number_of_loops (); i++)
    {
      empty = BITMAP_ALLOC (&lim_bitmap_obstack);
      memory_accesses.refs_in_loop.quick_push (empty);
      empty = BITMAP_ALLOC (&lim_bitmap_obstack);
      memory_accesses.all_refs_in_loop.quick_push (empty);
      empty = BITMAP_ALLOC (&lim_bitmap_obstack);
      memory_accesses.all_refs_stored_in_loop.quick_push (empty);
    }

  memory_accesses.ttae_cache = NULL;

  gather_mem_refs_in_loops ();
  create_vop_ref_mapping ();
}

/* Returns true if MEM1 and MEM2 may alias.  TTAE_CACHE is used as a cache in
   tree_to_aff_combination_expand.  */

static bool
mem_refs_may_alias_p (tree mem1, tree mem2, struct pointer_map_t **ttae_cache)
{
  /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
     object and their offsets differ in such a way that the locations cannot
     overlap, then they cannot alias.  */
  double_int size1, size2;
  aff_tree off1, off2;

  /* Perform basic offset and type-based disambiguation.  */
  if (!refs_may_alias_p (mem1, mem2))
    return false;

  /* The expansion of addresses may be a bit expensive, thus we only do
     the check at -O2 and higher optimization levels.  */
  if (optimize < 2)
    return true;

  get_inner_reference_aff (mem1, &off1, &size1);
  get_inner_reference_aff (mem2, &off2, &size2);
  aff_combination_expand (&off1, ttae_cache);
  aff_combination_expand (&off2, ttae_cache);
  aff_combination_scale (&off1, double_int_minus_one);
  aff_combination_add (&off2, &off1);

  if (aff_comb_cannot_overlap_p (&off2, size1, size2))
    return false;

  return true;
}
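
/* As a rough illustration (hypothetical layout): for a[i] and a[i + 1]
   with 4-byte elements, the expanded affine offsets are 4*i and 4*i + 4.
   After negating the first and adding it to the second, the difference is
   the constant 4, which is at least the access size, so
   aff_comb_cannot_overlap_p concludes that the two locations cannot
   overlap and the references do not alias.  */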

/* Rewrites location LOC by TMP_VAR.  */

static void
rewrite_mem_ref_loc (mem_ref_loc_p loc, tree tmp_var)
{
  *loc->ref = tmp_var;
  update_stmt (loc->stmt);
}

/* Adds all locations of REF in LOOP and its subloops to LOCS.  */

static void
get_all_locs_in_loop (struct loop *loop, mem_ref_p ref,
                      vec<mem_ref_loc_p> *locs)
{
  mem_ref_locs_p accs;
  unsigned i;
  mem_ref_loc_p loc;
  bitmap refs = memory_accesses.all_refs_in_loop[loop->num];
  struct loop *subloop;

  if (!bitmap_bit_p (refs, ref->id))
    return;

  if (ref->accesses_in_loop.length ()
      > (unsigned) loop->num)
    {
      accs = ref->accesses_in_loop[loop->num];
      if (accs)
        {
          FOR_EACH_VEC_ELT (accs->locs, i, loc)
            locs->safe_push (loc);
        }
    }

  for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
    get_all_locs_in_loop (subloop, ref, locs);
}

/* Rewrites all references to REF in LOOP by variable TMP_VAR.  */

static void
rewrite_mem_refs (struct loop *loop, mem_ref_p ref, tree tmp_var)
{
  unsigned i;
  mem_ref_loc_p loc;
  vec<mem_ref_loc_p> locs = vNULL;

  get_all_locs_in_loop (loop, ref, &locs);
  FOR_EACH_VEC_ELT (locs, i, loc)
    rewrite_mem_ref_loc (loc, tmp_var);
  locs.release ();
}

/* The name and the length of the currently generated variable
   for lsm.  */
#define MAX_LSM_NAME_LENGTH 40
static char lsm_tmp_name[MAX_LSM_NAME_LENGTH + 1];
static int lsm_tmp_name_length;

/* Adds S to lsm_tmp_name.  */

static void
lsm_tmp_name_add (const char *s)
{
  int l = strlen (s) + lsm_tmp_name_length;
  if (l > MAX_LSM_NAME_LENGTH)
    return;

  strcpy (lsm_tmp_name + lsm_tmp_name_length, s);
  lsm_tmp_name_length = l;
}

/* Stores the name of the temporary variable that replaces REF into
   lsm_tmp_name.  */

static void
gen_lsm_tmp_name (tree ref)
{
  const char *name;

  switch (TREE_CODE (ref))
    {
    case MEM_REF:
    case TARGET_MEM_REF:
      gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
      lsm_tmp_name_add ("_");
      break;

    case ADDR_EXPR:
      gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
      break;

    case BIT_FIELD_REF:
    case VIEW_CONVERT_EXPR:
    case ARRAY_RANGE_REF:
      gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
      break;

    case REALPART_EXPR:
      gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
      lsm_tmp_name_add ("_RE");
      break;

    case IMAGPART_EXPR:
      gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
      lsm_tmp_name_add ("_IM");
      break;

    case COMPONENT_REF:
      gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
      lsm_tmp_name_add ("_");
      name = get_name (TREE_OPERAND (ref, 1));
      if (!name)
        name = "F";
      lsm_tmp_name_add (name);
      break;

    case ARRAY_REF:
      gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
      lsm_tmp_name_add ("_I");
      break;

    case SSA_NAME:
    case VAR_DECL:
    case PARM_DECL:
      name = get_name (ref);
      if (!name)
        name = "D";
      lsm_tmp_name_add (name);
      break;

    case STRING_CST:
      lsm_tmp_name_add ("S");
      break;

    case RESULT_DECL:
      lsm_tmp_name_add ("R");
      break;

    case INTEGER_CST:
      /* Nothing.  */
      break;

    default:
      gcc_unreachable ();
    }
}

/* Determines the name for the temporary variable that replaces REF.
   The name is accumulated into the lsm_tmp_name variable.
   N is added to the name of the temporary.  */

char *
get_lsm_tmp_name (tree ref, unsigned n)
{
  char ns[2];

  lsm_tmp_name_length = 0;
  gen_lsm_tmp_name (ref);
  lsm_tmp_name_add ("_lsm");
  if (n < 10)
    {
      ns[0] = '0' + n;
      ns[1] = 0;
      lsm_tmp_name_add (ns);
    }
  return lsm_tmp_name;
}
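
/* For example, for the reference s.f (a COMPONENT_REF of VAR_DECL s and
   field f) and N == 0, the generated name is "s_f_lsm0"; unnamed fields
   fall back to "F" and unnamed bases to "D".  */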
1932
1933 struct prev_flag_edges {
1934 /* Edge to insert new flag comparison code. */
1935 edge append_cond_position;
1936
1937 /* Edge for fall through from previous flag comparison. */
1938 edge last_cond_fallthru;
1939 };
1940
1941 /* Helper function for execute_sm. Emit code to store TMP_VAR into
1942 MEM along edge EX.
1943
1944 The store is only done if MEM has changed. We do this so no
1945 changes to MEM occur on code paths that did not originally store
1946 into it.
1947
1948 The common case for execute_sm will transform:
1949
1950 for (...) {
1951 if (foo)
1952 stuff;
1953 else
1954 MEM = TMP_VAR;
1955 }
1956
1957 into:
1958
1959 lsm = MEM;
1960 for (...) {
1961 if (foo)
1962 stuff;
1963 else
1964 lsm = TMP_VAR;
1965 }
1966 MEM = lsm;
1967
1968 This function will generate:
1969
1970 lsm = MEM;
1971
1972 lsm_flag = false;
1973 ...
1974 for (...) {
1975 if (foo)
1976 stuff;
1977 else {
1978 lsm = TMP_VAR;
1979 lsm_flag = true;
1980 }
1981 }
1982 if (lsm_flag) <--
1983 MEM = lsm; <--
1984 */
1985
1986 static void
1987 execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag)
1988 {
1989 basic_block new_bb, then_bb, old_dest;
1990 bool loop_has_only_one_exit;
1991 edge then_old_edge, orig_ex = ex;
1992 gimple_stmt_iterator gsi;
1993 gimple stmt;
1994 struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
1995
1996 /* ?? Insert store after previous store if applicable. See note
1997 below. */
1998 if (prev_edges)
1999 ex = prev_edges->append_cond_position;
2000
2001 loop_has_only_one_exit = single_pred_p (ex->dest);
2002
2003 if (loop_has_only_one_exit)
2004 ex = split_block_after_labels (ex->dest);
2005
2006 old_dest = ex->dest;
2007 new_bb = split_edge (ex);
2008 then_bb = create_empty_bb (new_bb);
2009 if (current_loops && new_bb->loop_father)
2010 add_bb_to_loop (then_bb, new_bb->loop_father);
2011
2012 gsi = gsi_start_bb (new_bb);
2013 stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
2014 NULL_TREE, NULL_TREE);
2015 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2016
2017 gsi = gsi_start_bb (then_bb);
2018 /* Insert actual store. */
2019 stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
2020 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2021
2022 make_edge (new_bb, then_bb, EDGE_TRUE_VALUE);
2023 make_edge (new_bb, old_dest, EDGE_FALSE_VALUE);
2024 then_old_edge = make_edge (then_bb, old_dest, EDGE_FALLTHRU);
2025
2026 set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
2027
2028 if (prev_edges)
2029 {
2030 basic_block prevbb = prev_edges->last_cond_fallthru->src;
2031 redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
2032 set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
2033 set_immediate_dominator (CDI_DOMINATORS, old_dest,
2034 recompute_dominator (CDI_DOMINATORS, old_dest));
2035 }
2036
2037 /* ?? Because stores may alias, they must happen in the exact
2038 sequence in which they originally happened. Save the position right
2039 after the (_lsm) store we just created so we can continue appending
2040 after it and maintain the original order. */
2041 {
2042 struct prev_flag_edges *p;
2043
2044 if (orig_ex->aux)
2045 orig_ex->aux = NULL;
2046 alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
2047 p = (struct prev_flag_edges *) orig_ex->aux;
2048 p->append_cond_position = then_old_edge;
2049 p->last_cond_fallthru = find_edge (new_bb, old_dest);
2050 orig_ex->aux = (void *) p;
2051 }
2052
2053 if (!loop_has_only_one_exit)
2054 for (gsi = gsi_start_phis (old_dest); !gsi_end_p (gsi); gsi_next (&gsi))
2055 {
2056 gimple phi = gsi_stmt (gsi);
2057 unsigned i;
2058
2059 for (i = 0; i < gimple_phi_num_args (phi); i++)
2060 if (gimple_phi_arg_edge (phi, i)->src == new_bb)
2061 {
2062 tree arg = gimple_phi_arg_def (phi, i);
2063 add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
2064 update_stmt (phi);
2065 }
2066 }
2067 /* Remove the original fall through edge. This was the
2068 single_succ_edge (new_bb). */
2069 EDGE_SUCC (new_bb, 0)->flags &= ~EDGE_FALLTHRU;
2070 }
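
/* For illustration: when several references are sunk to the same exit,
   the bookkeeping above chains the conditional stores, so after two
   calls the exit looks roughly like

     if (lsm_flag1)
       MEM1 = lsm1;
     if (lsm_flag2)  <-- appended after the first conditional store
       MEM2 = lsm2;

   which preserves the original ordering of the stores. */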
2071
2072 /* Helper function for execute_sm. At every location where REF is
2073 stored to, emit code that sets the flag indicating the store. */
2074
2075 static tree
2076 execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref)
2077 {
2078 unsigned i;
2079 mem_ref_loc_p loc;
2080 tree flag;
2081 vec<mem_ref_loc_p> locs = vNULL;
2082 char *str = get_lsm_tmp_name (ref->mem, ~0);
2083
2084 lsm_tmp_name_add ("_flag");
2085 flag = create_tmp_reg (boolean_type_node, str);
2086 get_all_locs_in_loop (loop, ref, &locs);
2087 FOR_EACH_VEC_ELT (locs, i, loc)
2088 {
2089 gimple_stmt_iterator gsi;
2090 gimple stmt;
2091
2092 /* Only set the flag for writes. */
2093 if (is_gimple_assign (loc->stmt)
2094 && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
2095 {
2096 gsi = gsi_for_stmt (loc->stmt);
2097 stmt = gimple_build_assign (flag, boolean_true_node);
2098 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2099 }
2100 }
2101 locs.release ();
2102 return flag;
2103 }
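
/* For illustration: if the loop body contains the store "MEM = x;",
   the code above inserts an assignment to the flag right after it:

     MEM = x;
     MEM_lsm_flag = true;

   (the store itself is rewritten to use the lsm temporary afterwards,
   by rewrite_mem_refs in execute_sm). */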
2104
2105 /* Executes store motion of memory reference REF from LOOP.
2106 Exits from the LOOP are stored in EXITS. The initialization of the
2107 temporary variable is put in the preheader of the loop, and assignments
2108 to the reference from the temporary variable are emitted on the exits. */
2109
2110 static void
2111 execute_sm (struct loop *loop, vec<edge> exits, mem_ref_p ref)
2112 {
2113 tree tmp_var, store_flag;
2114 unsigned i;
2115 gimple load;
2116 struct fmt_data fmt_data;
2117 edge ex, latch_edge;
2118 struct lim_aux_data *lim_data;
2119 bool multi_threaded_model_p = false;
2120
2121 if (dump_file && (dump_flags & TDF_DETAILS))
2122 {
2123 fprintf (dump_file, "Executing store motion of ");
2124 print_generic_expr (dump_file, ref->mem, 0);
2125 fprintf (dump_file, " from loop %d\n", loop->num);
2126 }
2127
2128 tmp_var = create_tmp_reg (TREE_TYPE (ref->mem),
2129 get_lsm_tmp_name (ref->mem, ~0));
2130
2131 fmt_data.loop = loop;
2132 fmt_data.orig_loop = loop;
2133 for_each_index (&ref->mem, force_move_till, &fmt_data);
2134
2135 if (block_in_transaction (loop_preheader_edge (loop)->src)
2136 || !PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES))
2137 multi_threaded_model_p = true;
2138
2139 if (multi_threaded_model_p)
2140 store_flag = execute_sm_if_changed_flag_set (loop, ref);
2141
2142 rewrite_mem_refs (loop, ref, tmp_var);
2143
2144 /* Emit the load code on the latch edge, so that we are sure it will
2145 be processed after all dependencies. */
2146 latch_edge = loop_latch_edge (loop);
2147
2148 /* FIXME/TODO: For the multi-threaded variant, we could avoid this
2149 load altogether, since the store is predicated by a flag. We
2150 could do the load only if it was originally in the loop. */
2151 load = gimple_build_assign (tmp_var, unshare_expr (ref->mem));
2152 lim_data = init_lim_data (load);
2153 lim_data->max_loop = loop;
2154 lim_data->tgt_loop = loop;
2155 gsi_insert_on_edge (latch_edge, load);
2156
2157 if (multi_threaded_model_p)
2158 {
2159 load = gimple_build_assign (store_flag, boolean_false_node);
2160 lim_data = init_lim_data (load);
2161 lim_data->max_loop = loop;
2162 lim_data->tgt_loop = loop;
2163 gsi_insert_on_edge (latch_edge, load);
2164 }
2165
2166 /* Sink the store to every exit from the loop. */
2167 FOR_EACH_VEC_ELT (exits, i, ex)
2168 if (!multi_threaded_model_p)
2169 {
2170 gimple store;
2171 store = gimple_build_assign (unshare_expr (ref->mem), tmp_var);
2172 gsi_insert_on_edge (ex, store);
2173 }
2174 else
2175 execute_sm_if_changed (ex, ref->mem, tmp_var, store_flag);
2176 }
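
/* For illustration (a sketch; the temporary's name is made up): with
   the single-threaded model,

     for (...) MEM = f ();

   becomes roughly

     MEM_lsm = MEM;            <-- emitted on the latch edge and later
                                   moved out of the loop
     for (...) MEM_lsm = f ();
     MEM = MEM_lsm;            <-- emitted on every exit edge

   while with the multi-threaded model the exit stores go through
   execute_sm_if_changed above. */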
2177
2178 /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
2179 edges of the LOOP. */
2180
2181 static void
2182 hoist_memory_references (struct loop *loop, bitmap mem_refs,
2183 vec<edge> exits)
2184 {
2185 mem_ref_p ref;
2186 unsigned i;
2187 bitmap_iterator bi;
2188
2189 EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
2190 {
2191 ref = memory_accesses.refs_list[i];
2192 execute_sm (loop, exits, ref);
2193 }
2194 }
2195
2196 /* Returns true if REF is always accessed in LOOP. If STORED_P is true,
2197 also check that REF is always stored to in LOOP. */
2198
2199 static bool
2200 ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p)
2201 {
2202 vec<mem_ref_loc_p> locs = vNULL;
2203 unsigned i;
2204 mem_ref_loc_p loc;
2205 bool ret = false;
2206 struct loop *must_exec;
2207 tree base;
2208
2209 base = get_base_address (ref->mem);
2210 if (INDIRECT_REF_P (base)
2211 || TREE_CODE (base) == MEM_REF)
2212 base = TREE_OPERAND (base, 0);
2213
2214 get_all_locs_in_loop (loop, ref, &locs);
2215 FOR_EACH_VEC_ELT (locs, i, loc)
2216 {
2217 if (!get_lim_data (loc->stmt))
2218 continue;
2219
2220 /* If we require an always executed store, make sure the statement
2221 stores to the reference. */
2222 if (stored_p)
2223 {
2224 tree lhs;
2225 if (!gimple_get_lhs (loc->stmt))
2226 continue;
2227 lhs = get_base_address (gimple_get_lhs (loc->stmt));
2228 if (!lhs)
2229 continue;
2230 if (INDIRECT_REF_P (lhs)
2231 || TREE_CODE (lhs) == MEM_REF)
2232 lhs = TREE_OPERAND (lhs, 0);
2233 if (lhs != base)
2234 continue;
2235 }
2236
2237 must_exec = get_lim_data (loc->stmt)->always_executed_in;
2238 if (!must_exec)
2239 continue;
2240
2241 if (must_exec == loop
2242 || flow_loop_nested_p (must_exec, loop))
2243 {
2244 ret = true;
2245 break;
2246 }
2247 }
2248 locs.release ();
2249
2250 return ret;
2251 }
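
/* For instance, in

     for (...) { *p = x; if (c) y++; }

   the store to *p is always executed in the loop, whereas in

     for (...) { if (c) *p = x; }

   it is not, and this function returns false for *p with STORED_P. */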
2252
2253 /* Returns true if REF1 and REF2 are independent. */
2254
2255 static bool
2256 refs_independent_p (mem_ref_p ref1, mem_ref_p ref2)
2257 {
2258 if (ref1 == ref2
2259 || bitmap_bit_p (ref1->indep_ref, ref2->id))
2260 return true;
2261 if (bitmap_bit_p (ref1->dep_ref, ref2->id))
2262 return false;
2263 if (!MEM_ANALYZABLE (ref1)
2264 || !MEM_ANALYZABLE (ref2))
2265 return false;
2266
2267 if (dump_file && (dump_flags & TDF_DETAILS))
2268 fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2269 ref1->id, ref2->id);
2270
2271 if (mem_refs_may_alias_p (ref1->mem, ref2->mem,
2272 &memory_accesses.ttae_cache))
2273 {
2274 bitmap_set_bit (ref1->dep_ref, ref2->id);
2275 bitmap_set_bit (ref2->dep_ref, ref1->id);
2276 if (dump_file && (dump_flags & TDF_DETAILS))
2277 fprintf (dump_file, "dependent.\n");
2278 return false;
2279 }
2280 else
2281 {
2282 bitmap_set_bit (ref1->indep_ref, ref2->id);
2283 bitmap_set_bit (ref2->indep_ref, ref1->id);
2284 if (dump_file && (dump_flags & TDF_DETAILS))
2285 fprintf (dump_file, "independent.\n");
2286 return true;
2287 }
2288 }
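
/* For instance, accesses to the distinct locations a[0] and a[1] are
   independent, whereas *p and *q must be treated as dependent unless
   the oracle can show that p and q never point to the same object. */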
2289
2290 /* Records whether REF is independent of all other memory references
2291 in LOOP, according to INDEP. */
2292
2293 static void
2294 record_indep_loop (struct loop *loop, mem_ref_p ref, bool indep)
2295 {
2296 if (indep)
2297 bitmap_set_bit (ref->indep_loop, loop->num);
2298 else
2299 bitmap_set_bit (ref->dep_loop, loop->num);
2300 }
2301
2302 /* Returns true if REF is independent of all other memory references in
2303 LOOP. */
2304
2305 static bool
2306 ref_indep_loop_p_1 (struct loop *loop, mem_ref_p ref)
2307 {
2308 bitmap refs_to_check;
2309 unsigned i;
2310 bitmap_iterator bi;
2311 bool ret = true, stored = bitmap_bit_p (ref->stored, loop->num);
2312 mem_ref_p aref;
2313
2314 if (stored)
2315 refs_to_check = memory_accesses.all_refs_in_loop[loop->num];
2316 else
2317 refs_to_check = memory_accesses.all_refs_stored_in_loop[loop->num];
2318
2319 EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2320 {
2321 aref = memory_accesses.refs_list[i];
2322 if (!MEM_ANALYZABLE (aref)
2323 || !refs_independent_p (ref, aref))
2324 {
2325 ret = false;
2326 record_indep_loop (loop, aref, false);
2327 break;
2328 }
2329 }
2330
2331 return ret;
2332 }
2333
2334 /* Returns true if REF is independent of all other memory references in
2335 LOOP. Wrapper over ref_indep_loop_p_1, caching its results. */
2336
2337 static bool
2338 ref_indep_loop_p (struct loop *loop, mem_ref_p ref)
2339 {
2340 bool ret;
2341
2342 if (bitmap_bit_p (ref->indep_loop, loop->num))
2343 return true;
2344 if (bitmap_bit_p (ref->dep_loop, loop->num))
2345 return false;
2346
2347 ret = ref_indep_loop_p_1 (loop, ref);
2348
2349 if (dump_file && (dump_flags & TDF_DETAILS))
2350 fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2351 ref->id, loop->num, ret ? "independent" : "dependent");
2352
2353 record_indep_loop (loop, ref, ret);
2354
2355 return ret;
2356 }
2357
2358 /* Returns true if we can perform store motion of REF from LOOP. */
2359
2360 static bool
2361 can_sm_ref_p (struct loop *loop, mem_ref_p ref)
2362 {
2363 tree base;
2364
2365 /* Can't hoist unanalyzable refs. */
2366 if (!MEM_ANALYZABLE (ref))
2367 return false;
2368
2369 /* Unless the reference is stored in the loop, there is nothing to do. */
2370 if (!bitmap_bit_p (ref->stored, loop->num))
2371 return false;
2372
2373 /* It should be movable. */
2374 if (!is_gimple_reg_type (TREE_TYPE (ref->mem))
2375 || TREE_THIS_VOLATILE (ref->mem)
2376 || !for_each_index (&ref->mem, may_move_till, loop))
2377 return false;
2378
2379 /* If it can throw, fail; we do not properly update EH info. */
2380 if (tree_could_throw_p (ref->mem))
2381 return false;
2382
2383 /* If it can trap, it must always be executed in LOOP.
2384 Readonly memory locations may trap when storing to them, but
2385 tree_could_trap_p is a predicate for rvalues, so check that
2386 explicitly. */
2387 base = get_base_address (ref->mem);
2388 if ((tree_could_trap_p (ref->mem)
2389 || (DECL_P (base) && TREE_READONLY (base)))
2390 && !ref_always_accessed_p (loop, ref, true))
2391 return false;
2392
2393 /* And it must be independent of all other memory references
2394 in LOOP. */
2395 if (!ref_indep_loop_p (loop, ref))
2396 return false;
2397
2398 return true;
2399 }
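
/* For example, store motion is refused for

     volatile int v;
     for (...) v = x;       <-- volatile reference

   as well as for a reference like a[i] whose address is not invariant
   in LOOP because the index i changes inside it. */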
2400
2401 /* Marks in REFS_TO_SM the references in LOOP for which store motion
2402 should be performed. SM_EXECUTED is the set of references for which
2403 store motion was already performed in one of the outer loops. */
2404
2405 static void
2406 find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2407 {
2408 bitmap refs = memory_accesses.all_refs_in_loop[loop->num];
2409 unsigned i;
2410 bitmap_iterator bi;
2411 mem_ref_p ref;
2412
2413 EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2414 {
2415 ref = memory_accesses.refs_list[i];
2416 if (can_sm_ref_p (loop, ref))
2417 bitmap_set_bit (refs_to_sm, i);
2418 }
2419 }
2420
2421 /* Checks whether LOOP (with exits stored in the EXITS array) is suitable
2422 for a store motion optimization, i.e. whether we can insert statements
2423 on its exits. */
2424
2425 static bool
2426 loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
2427 vec<edge> exits)
2428 {
2429 unsigned i;
2430 edge ex;
2431
2432 FOR_EACH_VEC_ELT (exits, i, ex)
2433 if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2434 return false;
2435
2436 return true;
2437 }
2438
2439 /* Try to perform store motion for all memory references modified inside
2440 LOOP. SM_EXECUTED is the bitmap of the memory references for which
2441 store motion was already executed in one of the outer loops. */
2442
2443 static void
2444 store_motion_loop (struct loop *loop, bitmap sm_executed)
2445 {
2446 vec<edge> exits = get_loop_exit_edges (loop);
2447 struct loop *subloop;
2448 bitmap sm_in_loop = BITMAP_ALLOC (NULL);
2449
2450 if (loop_suitable_for_sm (loop, exits))
2451 {
2452 find_refs_for_sm (loop, sm_executed, sm_in_loop);
2453 hoist_memory_references (loop, sm_in_loop, exits);
2454 }
2455 exits.release ();
2456
2457 bitmap_ior_into (sm_executed, sm_in_loop);
2458 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2459 store_motion_loop (subloop, sm_executed);
2460 bitmap_and_compl_into (sm_executed, sm_in_loop);
2461 BITMAP_FREE (sm_in_loop);
2462 }
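
/* For instance, if a reference was already moved out of an outer loop,
   its bit is set in SM_EXECUTED while the subloops are processed, so
   find_refs_for_sm (via EXECUTE_IF_AND_COMPL_IN_BITMAP) does not
   consider it again for the inner loops. */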
2463
2464 /* Try to perform store motion for all memory references modified inside
2465 loops. */
2466
2467 static void
2468 store_motion (void)
2469 {
2470 struct loop *loop;
2471 bitmap sm_executed = BITMAP_ALLOC (NULL);
2472
2473 for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2474 store_motion_loop (loop, sm_executed);
2475
2476 BITMAP_FREE (sm_executed);
2477 gsi_commit_edge_inserts ();
2478 }
2479
2480 /* Fills in the ALWAYS_EXECUTED_IN information for the basic blocks of
2481 LOOP, i.e. for each such basic block BB records the outermost loop for
2482 which execution of the loop's header implies execution of BB.
2483 CONTAINS_CALL is the bitmap of blocks that contain a nonpure call. */
2484
2485 static void
2486 fill_always_executed_in (struct loop *loop, sbitmap contains_call)
2487 {
2488 basic_block bb = NULL, *bbs, last = NULL;
2489 unsigned i;
2490 edge e;
2491 struct loop *inn_loop = loop;
2492
2493 if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2494 {
2495 bbs = get_loop_body_in_dom_order (loop);
2496
2497 for (i = 0; i < loop->num_nodes; i++)
2498 {
2499 edge_iterator ei;
2500 bb = bbs[i];
2501
2502 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2503 last = bb;
2504
2505 if (bitmap_bit_p (contains_call, bb->index))
2506 break;
2507
2508 FOR_EACH_EDGE (e, ei, bb->succs)
2509 if (!flow_bb_inside_loop_p (loop, e->dest))
2510 break;
2511 if (e)
2512 break;
2513
2514 /* A loop might be infinite (TODO use simple loop analysis
2515 to disprove this if possible). */
2516 if (bb->flags & BB_IRREDUCIBLE_LOOP)
2517 break;
2518
2519 if (!flow_bb_inside_loop_p (inn_loop, bb))
2520 break;
2521
2522 if (bb->loop_father->header == bb)
2523 {
2524 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2525 break;
2526
2527 /* In a loop that is always entered we may proceed anyway.
2528 But record that we entered it and stop once we leave it. */
2529 inn_loop = bb->loop_father;
2530 }
2531 }
2532
2533 while (1)
2534 {
2535 SET_ALWAYS_EXECUTED_IN (last, loop);
2536 if (last == loop->header)
2537 break;
2538 last = get_immediate_dominator (CDI_DOMINATORS, last);
2539 }
2540
2541 free (bbs);
2542 }
2543
2544 for (loop = loop->inner; loop; loop = loop->next)
2545 fill_always_executed_in (loop, contains_call);
2546 }
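
/* For example, in

     for (...) { if (c) x = 1; y++; }

   the loop header and the block incrementing y dominate the latch and
   so are marked as always executed in the loop, while the block
   assigning to x is not. */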
2547
2548 /* Compute the global information needed by the loop invariant motion pass. */
2549
2550 static void
2551 tree_ssa_lim_initialize (void)
2552 {
2553 sbitmap contains_call = sbitmap_alloc (last_basic_block);
2554 gimple_stmt_iterator bsi;
2555 struct loop *loop;
2556 basic_block bb;
2557
2558 bitmap_obstack_initialize (&lim_bitmap_obstack);
2559
2560 bitmap_clear (contains_call);
2561 FOR_EACH_BB (bb)
2562 {
2563 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2564 {
2565 if (nonpure_call_p (gsi_stmt (bsi)))
2566 break;
2567 }
2568
2569 if (!gsi_end_p (bsi))
2570 bitmap_set_bit (contains_call, bb->index);
2571 }
2572
2573 for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2574 fill_always_executed_in (loop, contains_call);
2575
2576 sbitmap_free (contains_call);
2577
2578 lim_aux_data_map = pointer_map_create ();
2579
2580 if (flag_tm)
2581 compute_transaction_bits ();
2582
2583 alloc_aux_for_edges (0);
2584 }
2585
2586 /* Cleans up after the invariant motion pass. */
2587
2588 static void
2589 tree_ssa_lim_finalize (void)
2590 {
2591 basic_block bb;
2592 unsigned i;
2593 mem_ref_p ref;
2594
2595 free_aux_for_edges ();
2596
2597 FOR_EACH_BB (bb)
2598 SET_ALWAYS_EXECUTED_IN (bb, NULL);
2599
2600 bitmap_obstack_release (&lim_bitmap_obstack);
2601 pointer_map_destroy (lim_aux_data_map);
2602
2603 htab_delete (memory_accesses.refs);
2604
2605 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
2606 memref_free (ref);
2607 memory_accesses.refs_list.release ();
2608
2609 memory_accesses.refs_in_loop.release ();
2610 memory_accesses.all_refs_in_loop.release ();
2611 memory_accesses.all_refs_stored_in_loop.release ();
2612
2613 if (memory_accesses.ttae_cache)
2614 free_affine_expand_cache (&memory_accesses.ttae_cache);
2615 }
2616
2617 /* Moves invariants from loops. Only "expensive" invariants are moved out --
2618 i.e. those that are likely to be a win regardless of the register pressure. */
2619
2620 unsigned int
2621 tree_ssa_lim (void)
2622 {
2623 unsigned int todo;
2624
2625 tree_ssa_lim_initialize ();
2626
2627 /* Gather information about memory accesses in the loops. */
2628 analyze_memory_references ();
2629
2630 /* For each statement, determine the outermost loop in which it is
2631 invariant and the cost of computing the invariant. */
2632 determine_invariantness ();
2633
2634 /* Execute store motion. Force the necessary invariants to be moved
2635 out of the loops as well. */
2636 store_motion ();
2637
2638 /* Move the expressions that are expensive enough. */
2639 todo = move_computations ();
2640
2641 tree_ssa_lim_finalize ();
2642
2643 return todo;
2644 }