1 /* Loop invariant motion.
2 Copyright (C) 2003-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "tm_p.h"
26 #include "basic-block.h"
27 #include "gimple-pretty-print.h"
28 #include "gimple.h"
29 #include "gimplify.h"
30 #include "gimple-iterator.h"
31 #include "gimplify-me.h"
32 #include "gimple-ssa.h"
33 #include "tree-cfg.h"
34 #include "tree-phinodes.h"
35 #include "ssa-iterators.h"
36 #include "tree-ssanames.h"
37 #include "tree-ssa-loop-manip.h"
38 #include "tree-ssa-loop.h"
39 #include "tree-into-ssa.h"
40 #include "cfgloop.h"
41 #include "domwalk.h"
42 #include "params.h"
43 #include "tree-pass.h"
44 #include "flags.h"
45 #include "hash-table.h"
46 #include "tree-affine.h"
47 #include "pointer-set.h"
48 #include "tree-ssa-propagate.h"
49
50 /* TODO: Support for predicated code motion. I.e.
51
52 while (1)
53 {
54 if (cond)
55 {
56 a = inv;
57 something;
58 }
59 }
60
61 Where COND and INV are invariants, but evaluating INV may trap or be
62 invalid for some other reason if !COND. This may be transformed to
63
64 if (cond)
65 a = inv;
66 while (1)
67 {
68 if (cond)
69 something;
70 } */
71
72 /* The auxiliary data kept for each statement. */
73
74 struct lim_aux_data
75 {
76 struct loop *max_loop; /* The outermost loop in which the statement
77 is invariant. */
78
79 struct loop *tgt_loop; /* The loop out of which we want to move the
80 invariant. */
81
82 struct loop *always_executed_in;
83 /* The outermost loop for which we are sure
84 the statement is executed if the loop
85 is entered. */
86
87 unsigned cost; /* Cost of the computation performed by the
88 statement. */
89
90 vec<gimple> depends; /* Vector of statements that must also be
91 hoisted out of the loop when this statement
92 is hoisted; i.e. those that define the
93 operands of the statement and are inside of
94 the MAX_LOOP loop. */
95 };
96
97 /* Maps statements to their lim_aux_data. */
98
99 static struct pointer_map_t *lim_aux_data_map;
100
101 /* Description of a memory reference location. */
102
103 typedef struct mem_ref_loc
104 {
105 tree *ref; /* The reference itself. */
106 gimple stmt; /* The statement in which it occurs. */
107 } *mem_ref_loc_p;
108
109
110 /* Description of a memory reference. */
111
112 typedef struct mem_ref
113 {
114 unsigned id; /* ID assigned to the memory reference
115 (its index in memory_accesses.refs_list) */
116 hashval_t hash; /* Its hash value. */
117
118 /* The memory access itself and associated caching of alias-oracle
119 query meta-data. */
120 ao_ref mem;
121
122 bitmap_head stored; /* The set of loops in which this memory location
123 is stored to. */
124 vec<vec<mem_ref_loc> > accesses_in_loop;
125 /* The locations of the accesses. Vector
126 indexed by the loop number. */
127
128 /* The following sets are computed on demand. We keep both set and
129 its complement, so that we know whether the information was
130 already computed or not. */
131 bitmap_head indep_loop; /* The set of loops in which the memory
132 reference is independent, meaning:
133 If it is stored in the loop, this store
134 is independent of all other loads and
135 stores.
136 If it is only loaded, then it is independent
137 of all stores in the loop. */
138 bitmap_head dep_loop; /* The complement of INDEP_LOOP. */
139 } *mem_ref_p;
140
141 /* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
142 to record (in)dependence against stores in the loop and its subloops, the
143 second to record (in)dependence against all references in the loop
144 and its subloops. */
145 #define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
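/* For example, loop number 3 uses bits 6 and 7: LOOP_DEP_BIT (3, false) == 6
   is the bit recording (in)dependence against the stores in the loop and its
   subloops, and LOOP_DEP_BIT (3, true) == 7 the bit recording (in)dependence
   against all references in the loop and its subloops.  */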
146
147 /* Mem_ref hashtable helpers. */
148
149 struct mem_ref_hasher : typed_noop_remove <mem_ref>
150 {
151 typedef mem_ref value_type;
152 typedef tree_node compare_type;
153 static inline hashval_t hash (const value_type *);
154 static inline bool equal (const value_type *, const compare_type *);
155 };
156
157 /* A hash function for struct mem_ref object OBJ. */
158
159 inline hashval_t
160 mem_ref_hasher::hash (const value_type *mem)
161 {
162 return mem->hash;
163 }
164
165 /* An equality function for struct mem_ref object MEM1 with
166 memory reference OBJ2. */
167
168 inline bool
169 mem_ref_hasher::equal (const value_type *mem1, const compare_type *obj2)
170 {
171 return operand_equal_p (mem1->mem.ref, (const_tree) obj2, 0);
172 }
173
174
175 /* Description of memory accesses in loops. */
176
177 static struct
178 {
179 /* The hash table of memory references accessed in loops. */
180 hash_table <mem_ref_hasher> refs;
181
182 /* The list of memory references. */
183 vec<mem_ref_p> refs_list;
184
185 /* The set of memory references accessed in each loop. */
186 vec<bitmap_head> refs_in_loop;
187
188 /* The set of memory references stored in each loop. */
189 vec<bitmap_head> refs_stored_in_loop;
190
191 /* The set of memory references stored in each loop, including subloops. */
192 vec<bitmap_head> all_refs_stored_in_loop;
193
194 /* Cache for expanding memory addresses. */
195 struct pointer_map_t *ttae_cache;
196 } memory_accesses;
197
198 /* Obstack for the bitmaps in the above data structures. */
199 static bitmap_obstack lim_bitmap_obstack;
200
201 static bool ref_indep_loop_p (struct loop *, mem_ref_p);
202
203 /* Minimum cost of an expensive expression. */
204 #define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
205
206 /* The outermost loop for which execution of the header guarantees that the
207 block will be executed. */
208 #define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
209 #define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
210
211 /* ID of the shared unanalyzable mem. */
212 #define UNANALYZABLE_MEM_ID 0
213
214 /* Whether the reference was analyzable. */
215 #define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
216
217 static struct lim_aux_data *
218 init_lim_data (gimple stmt)
219 {
220 void **p = pointer_map_insert (lim_aux_data_map, stmt);
221
222 *p = XCNEW (struct lim_aux_data);
223 return (struct lim_aux_data *) *p;
224 }
225
226 static struct lim_aux_data *
227 get_lim_data (gimple stmt)
228 {
229 void **p = pointer_map_contains (lim_aux_data_map, stmt);
230 if (!p)
231 return NULL;
232
233 return (struct lim_aux_data *) *p;
234 }
235
236 /* Releases the memory occupied by DATA. */
237
238 static void
239 free_lim_aux_data (struct lim_aux_data *data)
240 {
241 data->depends.release ();
242 free (data);
243 }
244
245 static void
246 clear_lim_data (gimple stmt)
247 {
248 void **p = pointer_map_contains (lim_aux_data_map, stmt);
249 if (!p)
250 return;
251
252 free_lim_aux_data ((struct lim_aux_data *) *p);
253 *p = NULL;
254 }
255
256
257 /* The possibilities of statement movement. */
258 enum move_pos
259 {
260 MOVE_IMPOSSIBLE, /* No movement -- side effect expression. */
261 MOVE_PRESERVE_EXECUTION, /* Must not cause the non-executed statement
262 to become executed -- memory accesses, ... */
263 MOVE_POSSIBLE /* Unlimited movement. */
264 };
265
266
267 /* If it is possible to hoist the statement STMT unconditionally,
268 returns MOVE_POSSIBLE.
269 If it is possible to hoist the statement STMT, but we must avoid making
270 it executed if it would not be executed in the original program (e.g.
271 because it may trap), return MOVE_PRESERVE_EXECUTION.
272 Otherwise return MOVE_IMPOSSIBLE. */
273
274 enum move_pos
275 movement_possibility (gimple stmt)
276 {
277 tree lhs;
278 enum move_pos ret = MOVE_POSSIBLE;
279
280 if (flag_unswitch_loops
281 && gimple_code (stmt) == GIMPLE_COND)
282 {
283 /* If we perform unswitching, force the operands of the invariant
284 condition to be moved out of the loop. */
285 return MOVE_POSSIBLE;
286 }
287
288 if (gimple_code (stmt) == GIMPLE_PHI
289 && gimple_phi_num_args (stmt) <= 2
290 && !virtual_operand_p (gimple_phi_result (stmt))
291 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
292 return MOVE_POSSIBLE;
293
294 if (gimple_get_lhs (stmt) == NULL_TREE)
295 return MOVE_IMPOSSIBLE;
296
297 if (gimple_vdef (stmt))
298 return MOVE_IMPOSSIBLE;
299
300 if (stmt_ends_bb_p (stmt)
301 || gimple_has_volatile_ops (stmt)
302 || gimple_has_side_effects (stmt)
303 || stmt_could_throw_p (stmt))
304 return MOVE_IMPOSSIBLE;
305
306 if (is_gimple_call (stmt))
307 {
308 /* While a pure or const call is guaranteed to have no side effects, we
309 cannot move it arbitrarily. Consider code like
310
311 char *s = something ();
312
313 while (1)
314 {
315 if (s)
316 t = strlen (s);
317 else
318 t = 0;
319 }
320
321 Here the strlen call cannot be moved out of the loop, even though
322 s is invariant. In addition to possibly creating a call with
323 invalid arguments, moving out a function call that is not executed
324 may cause performance regressions in case the call is costly and
325 not executed at all. */
326 ret = MOVE_PRESERVE_EXECUTION;
327 lhs = gimple_call_lhs (stmt);
328 }
329 else if (is_gimple_assign (stmt))
330 lhs = gimple_assign_lhs (stmt);
331 else
332 return MOVE_IMPOSSIBLE;
333
334 if (TREE_CODE (lhs) == SSA_NAME
335 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
336 return MOVE_IMPOSSIBLE;
337
338 if (TREE_CODE (lhs) != SSA_NAME
339 || gimple_could_trap_p (stmt))
340 return MOVE_PRESERVE_EXECUTION;
341
342 /* Non local loads in a transaction cannot be hoisted out. Well,
343 unless the load happens on every path out of the loop, but we
344 don't take this into account yet. */
345 if (flag_tm
346 && gimple_in_transaction (stmt)
347 && gimple_assign_single_p (stmt))
348 {
349 tree rhs = gimple_assign_rhs1 (stmt);
350 if (DECL_P (rhs) && is_global_var (rhs))
351 {
352 if (dump_file)
353 {
354 fprintf (dump_file, "Cannot hoist conditional load of ");
355 print_generic_expr (dump_file, rhs, TDF_SLIM);
356 fprintf (dump_file, " because it is in a transaction.\n");
357 }
358 return MOVE_IMPOSSIBLE;
359 }
360 }
361
362 return ret;
363 }
364
365 /* Suppose that operand DEF is used inside the LOOP. Returns the outermost
366 loop to which we could move the expression using DEF if it did not have
367 other operands, i.e. the outermost loop enclosing LOOP in which the value
368 of DEF is invariant. */
369
370 static struct loop *
371 outermost_invariant_loop (tree def, struct loop *loop)
372 {
373 gimple def_stmt;
374 basic_block def_bb;
375 struct loop *max_loop;
376 struct lim_aux_data *lim_data;
377
378 if (!def)
379 return superloop_at_depth (loop, 1);
380
381 if (TREE_CODE (def) != SSA_NAME)
382 {
383 gcc_assert (is_gimple_min_invariant (def));
384 return superloop_at_depth (loop, 1);
385 }
386
387 def_stmt = SSA_NAME_DEF_STMT (def);
388 def_bb = gimple_bb (def_stmt);
389 if (!def_bb)
390 return superloop_at_depth (loop, 1);
391
392 max_loop = find_common_loop (loop, def_bb->loop_father);
393
394 lim_data = get_lim_data (def_stmt);
395 if (lim_data != NULL && lim_data->max_loop != NULL)
396 max_loop = find_common_loop (max_loop,
397 loop_outer (lim_data->max_loop));
398 if (max_loop == loop)
399 return NULL;
400 max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);
401
402 return max_loop;
403 }
404
405 /* DATA is a structure containing information associated with a statement
406 inside LOOP. DEF is one of the operands of this statement.
407
408 Find the outermost loop enclosing LOOP in which the value of DEF is invariant
409 and record this in the DATA->max_loop field. If DEF itself is defined inside
410 this loop as well (i.e. we need to hoist it out of the loop if we want
411 to hoist the statement represented by DATA), record the statement in which
412 DEF is defined in the DATA->depends list. Additionally, if ADD_COST is true,
413 add the cost of the computation of DEF to DATA->cost.
414
415 If DEF is not invariant in LOOP, return false. Otherwise return true. */
416
417 static bool
418 add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
419 bool add_cost)
420 {
421 gimple def_stmt = SSA_NAME_DEF_STMT (def);
422 basic_block def_bb = gimple_bb (def_stmt);
423 struct loop *max_loop;
424 struct lim_aux_data *def_data;
425
426 if (!def_bb)
427 return true;
428
429 max_loop = outermost_invariant_loop (def, loop);
430 if (!max_loop)
431 return false;
432
433 if (flow_loop_nested_p (data->max_loop, max_loop))
434 data->max_loop = max_loop;
435
436 def_data = get_lim_data (def_stmt);
437 if (!def_data)
438 return true;
439
440 if (add_cost
441 /* Only add the cost if the statement defining DEF is inside LOOP,
442 i.e. if it is likely that by moving the invariants dependent
443 on it, we will be able to avoid creating a new register for
444 it (since it will be only used in these dependent invariants). */
445 && def_bb->loop_father == loop)
446 data->cost += def_data->cost;
447
448 data->depends.safe_push (def_stmt);
449
450 return true;
451 }
452
453 /* Returns an estimate of the cost of statement STMT. The values here
454 are just ad-hoc constants, similar to costs for inlining. */
455
456 static unsigned
457 stmt_cost (gimple stmt)
458 {
459 /* Always try to create possibilities for unswitching. */
460 if (gimple_code (stmt) == GIMPLE_COND
461 || gimple_code (stmt) == GIMPLE_PHI)
462 return LIM_EXPENSIVE;
463
464 /* We should be hoisting calls if possible. */
465 if (is_gimple_call (stmt))
466 {
467 tree fndecl;
468
469 /* Unless the call is a builtin_constant_p; this always folds to a
470 constant, so moving it is useless. */
471 fndecl = gimple_call_fndecl (stmt);
472 if (fndecl
473 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
474 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
475 return 0;
476
477 return LIM_EXPENSIVE;
478 }
479
480 /* Hoisting memory references out should almost surely be a win. */
481 if (gimple_references_memory_p (stmt))
482 return LIM_EXPENSIVE;
483
484 if (gimple_code (stmt) != GIMPLE_ASSIGN)
485 return 1;
486
487 switch (gimple_assign_rhs_code (stmt))
488 {
489 case MULT_EXPR:
490 case WIDEN_MULT_EXPR:
491 case WIDEN_MULT_PLUS_EXPR:
492 case WIDEN_MULT_MINUS_EXPR:
493 case DOT_PROD_EXPR:
494 case FMA_EXPR:
495 case TRUNC_DIV_EXPR:
496 case CEIL_DIV_EXPR:
497 case FLOOR_DIV_EXPR:
498 case ROUND_DIV_EXPR:
499 case EXACT_DIV_EXPR:
500 case CEIL_MOD_EXPR:
501 case FLOOR_MOD_EXPR:
502 case ROUND_MOD_EXPR:
503 case TRUNC_MOD_EXPR:
504 case RDIV_EXPR:
505 /* Division and multiplication are usually expensive. */
506 return LIM_EXPENSIVE;
507
508 case LSHIFT_EXPR:
509 case RSHIFT_EXPR:
510 case WIDEN_LSHIFT_EXPR:
511 case LROTATE_EXPR:
512 case RROTATE_EXPR:
513 /* Shifts and rotates are usually expensive. */
514 return LIM_EXPENSIVE;
515
516 case CONSTRUCTOR:
517 /* Make vector construction cost proportional to the number
518 of elements. */
519 return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));
520
521 case SSA_NAME:
522 case PAREN_EXPR:
523 /* Whether or not something is wrapped inside a PAREN_EXPR
524 should not change move cost. Nor should an intermediate
525 unpropagated SSA name copy. */
526 return 0;
527
528 default:
529 return 1;
530 }
531 }
532
533 /* Finds the outermost loop between OUTER and LOOP in which the memory reference
534 REF is independent. If REF is not independent in LOOP, NULL is returned
535 instead. */
536
537 static struct loop *
538 outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref)
539 {
540 struct loop *aloop;
541
542 if (bitmap_bit_p (&ref->stored, loop->num))
543 return NULL;
544
545 for (aloop = outer;
546 aloop != loop;
547 aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
548 if (!bitmap_bit_p (&ref->stored, aloop->num)
549 && ref_indep_loop_p (aloop, ref))
550 return aloop;
551
552 if (ref_indep_loop_p (loop, ref))
553 return loop;
554 else
555 return NULL;
556 }
557
558 /* If there is a simple load or store to a memory reference in STMT, returns
559 the location of the memory reference, and sets IS_STORE according to whether
560 it is a store or load. Otherwise, returns NULL. */
561
562 static tree *
563 simple_mem_ref_in_stmt (gimple stmt, bool *is_store)
564 {
565 tree *lhs, *rhs;
566
567 /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns. */
568 if (!gimple_assign_single_p (stmt))
569 return NULL;
570
571 lhs = gimple_assign_lhs_ptr (stmt);
572 rhs = gimple_assign_rhs1_ptr (stmt);
573
574 if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
575 {
576 *is_store = false;
577 return rhs;
578 }
579 else if (gimple_vdef (stmt)
580 && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
581 {
582 *is_store = true;
583 return lhs;
584 }
585 else
586 return NULL;
587 }
588
589 /* Returns the memory reference contained in STMT. */
590
591 static mem_ref_p
592 mem_ref_in_stmt (gimple stmt)
593 {
594 bool store;
595 tree *mem = simple_mem_ref_in_stmt (stmt, &store);
596 hashval_t hash;
597 mem_ref_p ref;
598
599 if (!mem)
600 return NULL;
601 gcc_assert (!store);
602
603 hash = iterative_hash_expr (*mem, 0);
604 ref = memory_accesses.refs.find_with_hash (*mem, hash);
605
606 gcc_assert (ref != NULL);
607 return ref;
608 }
609
610 /* From a controlling predicate in DOM determine the arguments from
611 the PHI node PHI that are chosen if the predicate evaluates to
612 true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
613 they are non-NULL. Returns true if the arguments can be determined,
614 else return false. */
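/* Illustrative CFG shape this code handles (an extended diamond):

       DOM:  if (cond)
             /        \
       true_edge    false_edge
            |            |
           ...          ...
             \          /
         BB:  x = PHI <arg0, arg1>

   where arg0 flows in along the path dominated by the true edge and
   arg1 along the path dominated by the false edge.  */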
615
616 static bool
617 extract_true_false_args_from_phi (basic_block dom, gimple phi,
618 tree *true_arg_p, tree *false_arg_p)
619 {
620 basic_block bb = gimple_bb (phi);
621 edge true_edge, false_edge, tem;
622 tree arg0 = NULL_TREE, arg1 = NULL_TREE;
623
624 /* We have to verify that one edge into the PHI node is dominated
625 by the true edge of the predicate block and the other edge
626 dominated by the false edge. This ensures that the PHI argument
627 we are going to take is completely determined by the path we
628 take from the predicate block.
629 We can only use BB dominance checks below if the destination of
630 the true/false edges are dominated by their edge, thus only
631 have a single predecessor. */
632 extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
633 tem = EDGE_PRED (bb, 0);
634 if (tem == true_edge
635 || (single_pred_p (true_edge->dest)
636 && (tem->src == true_edge->dest
637 || dominated_by_p (CDI_DOMINATORS,
638 tem->src, true_edge->dest))))
639 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
640 else if (tem == false_edge
641 || (single_pred_p (false_edge->dest)
642 && (tem->src == false_edge->dest
643 || dominated_by_p (CDI_DOMINATORS,
644 tem->src, false_edge->dest))))
645 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
646 else
647 return false;
648 tem = EDGE_PRED (bb, 1);
649 if (tem == true_edge
650 || (single_pred_p (true_edge->dest)
651 && (tem->src == true_edge->dest
652 || dominated_by_p (CDI_DOMINATORS,
653 tem->src, true_edge->dest))))
654 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
655 else if (tem == false_edge
656 || (single_pred_p (false_edge->dest)
657 && (tem->src == false_edge->dest
658 || dominated_by_p (CDI_DOMINATORS,
659 tem->src, false_edge->dest))))
660 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
661 else
662 return false;
663 if (!arg0 || !arg1)
664 return false;
665
666 if (true_arg_p)
667 *true_arg_p = arg0;
668 if (false_arg_p)
669 *false_arg_p = arg1;
670
671 return true;
672 }
673
674 /* Determine the outermost loop to which it is possible to hoist a statement
675 STMT and store it to LIM_DATA (STMT)->max_loop. To do this we determine
676 the outermost loop in which the value computed by STMT is invariant.
677 If MUST_PRESERVE_EXEC is true, additionally choose the loop so that
678 we preserve whether STMT is executed. The function also fills in other
679 related information in LIM_DATA (STMT).
680
681 The function returns false if STMT cannot be hoisted outside of the loop it
682 is defined in, and true otherwise. */
683
684 static bool
685 determine_max_movement (gimple stmt, bool must_preserve_exec)
686 {
687 basic_block bb = gimple_bb (stmt);
688 struct loop *loop = bb->loop_father;
689 struct loop *level;
690 struct lim_aux_data *lim_data = get_lim_data (stmt);
691 tree val;
692 ssa_op_iter iter;
693
694 if (must_preserve_exec)
695 level = ALWAYS_EXECUTED_IN (bb);
696 else
697 level = superloop_at_depth (loop, 1);
698 lim_data->max_loop = level;
699
700 if (gimple_code (stmt) == GIMPLE_PHI)
701 {
702 use_operand_p use_p;
703 unsigned min_cost = UINT_MAX;
704 unsigned total_cost = 0;
705 struct lim_aux_data *def_data;
706
707 /* We will end up promoting dependencies to be unconditionally
708 evaluated. For this reason the PHI cost (and thus the
709 cost we remove from the loop by doing the invariant motion)
710 is that of the cheapest PHI argument dependency chain. */
711 FOR_EACH_PHI_ARG (use_p, stmt, iter, SSA_OP_USE)
712 {
713 val = USE_FROM_PTR (use_p);
714 if (TREE_CODE (val) != SSA_NAME)
715 continue;
716 if (!add_dependency (val, lim_data, loop, false))
717 return false;
718 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
719 if (def_data)
720 {
721 min_cost = MIN (min_cost, def_data->cost);
722 total_cost += def_data->cost;
723 }
724 }
725
726 lim_data->cost += min_cost;
727
728 if (gimple_phi_num_args (stmt) > 1)
729 {
730 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
731 gimple cond;
732 if (gsi_end_p (gsi_last_bb (dom)))
733 return false;
734 cond = gsi_stmt (gsi_last_bb (dom));
735 if (gimple_code (cond) != GIMPLE_COND)
736 return false;
737 /* Verify that this is an extended form of a diamond and
738 the PHI arguments are completely controlled by the
739 predicate in DOM. */
740 if (!extract_true_false_args_from_phi (dom, stmt, NULL, NULL))
741 return false;
742
743 /* Fold in dependencies and cost of the condition. */
744 FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
745 {
746 if (!add_dependency (val, lim_data, loop, false))
747 return false;
748 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
749 if (def_data)
750 total_cost += def_data->cost;
751 }
752
753 /* We want to avoid unconditionally executing very expensive
754 operations. As costs for our dependencies cannot be
755 negative, just claim we are not invariant in this case.
756 We also are not sure whether the control-flow inside the
757 loop will vanish. */
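/* That is, give up when the cost of the dependencies beyond the cheapest
   chain reaches 2 * LIM_EXPENSIVE, unless the total cost is still at most
   about twice the cheapest dependency chain.  */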
758 if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
759 && !(min_cost != 0
760 && total_cost / min_cost <= 2))
761 return false;
762
763 /* Assume that the control-flow in the loop will vanish.
764 ??? We should verify this and not artificially increase
765 the cost if that is not the case. */
766 lim_data->cost += stmt_cost (stmt);
767 }
768
769 return true;
770 }
771 else
772 FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
773 if (!add_dependency (val, lim_data, loop, true))
774 return false;
775
776 if (gimple_vuse (stmt))
777 {
778 mem_ref_p ref = mem_ref_in_stmt (stmt);
779
780 if (ref)
781 {
782 lim_data->max_loop
783 = outermost_indep_loop (lim_data->max_loop, loop, ref);
784 if (!lim_data->max_loop)
785 return false;
786 }
787 else
788 {
789 if ((val = gimple_vuse (stmt)) != NULL_TREE)
790 {
791 if (!add_dependency (val, lim_data, loop, false))
792 return false;
793 }
794 }
795 }
796
797 lim_data->cost += stmt_cost (stmt);
798
799 return true;
800 }
801
802 /* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
803 and that one of the operands of this statement is computed by STMT.
804 Ensure that STMT (together with all the statements that define its
805 operands) is hoisted at least out of the loop LEVEL. */
806
807 static void
808 set_level (gimple stmt, struct loop *orig_loop, struct loop *level)
809 {
810 struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
811 struct lim_aux_data *lim_data;
812 gimple dep_stmt;
813 unsigned i;
814
815 stmt_loop = find_common_loop (orig_loop, stmt_loop);
816 lim_data = get_lim_data (stmt);
817 if (lim_data != NULL && lim_data->tgt_loop != NULL)
818 stmt_loop = find_common_loop (stmt_loop,
819 loop_outer (lim_data->tgt_loop));
820 if (flow_loop_nested_p (stmt_loop, level))
821 return;
822
823 gcc_assert (level == lim_data->max_loop
824 || flow_loop_nested_p (lim_data->max_loop, level));
825
826 lim_data->tgt_loop = level;
827 FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
828 set_level (dep_stmt, orig_loop, level);
829 }
830
831 /* Determines an outermost loop from which we want to hoist the statement STMT.
832 For now we choose the outermost possible loop. TODO -- use profiling
833 information to set it more sanely. */
834
835 static void
836 set_profitable_level (gimple stmt)
837 {
838 set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
839 }
840
841 /* Returns true if STMT is a call that has side effects. */
842
843 static bool
844 nonpure_call_p (gimple stmt)
845 {
846 if (gimple_code (stmt) != GIMPLE_CALL)
847 return false;
848
849 return gimple_has_side_effects (stmt);
850 }
851
852 /* Rewrite a/b to a*(1/b). Return the invariant stmt to process. */
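/* Illustrative example (GIMPLE-like pseudocode; the names are made up):

     x_1 = a_2 / b_3;      becomes      reciptmp_4 = 1.0 / b_3;
                                        x_1 = a_2 * reciptmp_4;

   Only the reciprocal statement is invariant and is returned for
   further processing; the multiply stays in place.  */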
853
854 static gimple
855 rewrite_reciprocal (gimple_stmt_iterator *bsi)
856 {
857 gimple stmt, stmt1, stmt2;
858 tree name, lhs, type;
859 tree real_one;
860 gimple_stmt_iterator gsi;
861
862 stmt = gsi_stmt (*bsi);
863 lhs = gimple_assign_lhs (stmt);
864 type = TREE_TYPE (lhs);
865
866 real_one = build_one_cst (type);
867
868 name = make_temp_ssa_name (type, NULL, "reciptmp");
869 stmt1 = gimple_build_assign_with_ops (RDIV_EXPR, name, real_one,
870 gimple_assign_rhs2 (stmt));
871
872 stmt2 = gimple_build_assign_with_ops (MULT_EXPR, lhs, name,
873 gimple_assign_rhs1 (stmt));
874
875 /* Replace division stmt with reciprocal and multiply stmts.
876 The multiply stmt is not invariant, so update iterator
877 and avoid rescanning. */
878 gsi = *bsi;
879 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
880 gsi_replace (&gsi, stmt2, true);
881
882 /* Continue processing with invariant reciprocal statement. */
883 return stmt1;
884 }
885
886 /* Check if the pattern at *BSI is a bittest of the form
887 (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0. */
888
889 static gimple
890 rewrite_bittest (gimple_stmt_iterator *bsi)
891 {
892 gimple stmt, use_stmt, stmt1, stmt2;
893 tree lhs, name, t, a, b;
894 use_operand_p use;
895
896 stmt = gsi_stmt (*bsi);
897 lhs = gimple_assign_lhs (stmt);
898
899 /* Verify that the single use of lhs is a comparison against zero. */
900 if (TREE_CODE (lhs) != SSA_NAME
901 || !single_imm_use (lhs, &use, &use_stmt)
902 || gimple_code (use_stmt) != GIMPLE_COND)
903 return stmt;
904 if (gimple_cond_lhs (use_stmt) != lhs
905 || (gimple_cond_code (use_stmt) != NE_EXPR
906 && gimple_cond_code (use_stmt) != EQ_EXPR)
907 || !integer_zerop (gimple_cond_rhs (use_stmt)))
908 return stmt;
909
910 /* Get at the operands of the shift. The rhs is TMP1 & 1. */
911 stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
912 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
913 return stmt;
914
915 /* There is a conversion in between possibly inserted by fold. */
916 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
917 {
918 t = gimple_assign_rhs1 (stmt1);
919 if (TREE_CODE (t) != SSA_NAME
920 || !has_single_use (t))
921 return stmt;
922 stmt1 = SSA_NAME_DEF_STMT (t);
923 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
924 return stmt;
925 }
926
927 /* Verify that B is loop invariant but A is not. Verify that with
928 all the stmt walking we are still in the same loop. */
929 if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
930 || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
931 return stmt;
932
933 a = gimple_assign_rhs1 (stmt1);
934 b = gimple_assign_rhs2 (stmt1);
935
936 if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
937 && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
938 {
939 gimple_stmt_iterator rsi;
940
941 /* 1 << B */
942 t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
943 build_int_cst (TREE_TYPE (a), 1), b);
944 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
945 stmt1 = gimple_build_assign (name, t);
946
947 /* A & (1 << B) */
948 t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
949 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
950 stmt2 = gimple_build_assign (name, t);
951
952 /* Replace the SSA_NAME we compare against zero. Adjust
953 the type of zero accordingly. */
954 SET_USE (use, name);
955 gimple_cond_set_rhs (use_stmt, build_int_cst_type (TREE_TYPE (name), 0));
956
957 /* Don't use gsi_replace here, none of the new assignments sets
958 the variable originally set in stmt. Move bsi to stmt1, and
959 then remove the original stmt, so that we get a chance to
960 retain debug info for it. */
961 rsi = *bsi;
962 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
963 gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
964 gsi_remove (&rsi, true);
965
966 return stmt1;
967 }
968
969 return stmt;
970 }
971
972 /* For each statement, determine the outermost loop in which it is invariant,
973 the statements on whose motion it depends, and the cost of the computation.
974 This information is stored in the LIM_DATA structure associated with
975 each statement. */
976 class invariantness_dom_walker : public dom_walker
977 {
978 public:
979 invariantness_dom_walker (cdi_direction direction)
980 : dom_walker (direction) {}
981
982 virtual void before_dom_children (basic_block);
983 };
984
985 /* Determine the outermost loops in which statements in basic block BB are
986 invariant, and record them to the LIM_DATA associated with the statements.
987 Callback for dom_walker. */
988
989 void
990 invariantness_dom_walker::before_dom_children (basic_block bb)
991 {
992 enum move_pos pos;
993 gimple_stmt_iterator bsi;
994 gimple stmt;
995 bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
996 struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
997 struct lim_aux_data *lim_data;
998
999 if (!loop_outer (bb->loop_father))
1000 return;
1001
1002 if (dump_file && (dump_flags & TDF_DETAILS))
1003 fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
1004 bb->index, bb->loop_father->num, loop_depth (bb->loop_father));
1005
1006 /* Look at PHI nodes, but only if there are at most two.
1007 ??? We could relax this further by post-processing the inserted
1008 code and transforming adjacent cond-exprs with the same predicate
1009 to control flow again. */
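/* The condition below uses comma expressions to count without a loop: it is
   true iff the PHI sequence is non-empty and the iterator reaches its end
   after advancing once (exactly one PHI) or twice (exactly two PHIs).  */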
1010 bsi = gsi_start_phis (bb);
1011 if (!gsi_end_p (bsi)
1012 && ((gsi_next (&bsi), gsi_end_p (bsi))
1013 || (gsi_next (&bsi), gsi_end_p (bsi))))
1014 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1015 {
1016 stmt = gsi_stmt (bsi);
1017
1018 pos = movement_possibility (stmt);
1019 if (pos == MOVE_IMPOSSIBLE)
1020 continue;
1021
1022 lim_data = init_lim_data (stmt);
1023 lim_data->always_executed_in = outermost;
1024
1025 if (!determine_max_movement (stmt, false))
1026 {
1027 lim_data->max_loop = NULL;
1028 continue;
1029 }
1030
1031 if (dump_file && (dump_flags & TDF_DETAILS))
1032 {
1033 print_gimple_stmt (dump_file, stmt, 2, 0);
1034 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1035 loop_depth (lim_data->max_loop),
1036 lim_data->cost);
1037 }
1038
1039 if (lim_data->cost >= LIM_EXPENSIVE)
1040 set_profitable_level (stmt);
1041 }
1042
1043 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1044 {
1045 stmt = gsi_stmt (bsi);
1046
1047 pos = movement_possibility (stmt);
1048 if (pos == MOVE_IMPOSSIBLE)
1049 {
1050 if (nonpure_call_p (stmt))
1051 {
1052 maybe_never = true;
1053 outermost = NULL;
1054 }
1055 /* Make sure to note always_executed_in for stores to make
1056 store-motion work. */
1057 else if (stmt_makes_single_store (stmt))
1058 {
1059 struct lim_aux_data *lim_data = init_lim_data (stmt);
1060 lim_data->always_executed_in = outermost;
1061 }
1062 continue;
1063 }
1064
1065 if (is_gimple_assign (stmt)
1066 && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
1067 == GIMPLE_BINARY_RHS))
1068 {
1069 tree op0 = gimple_assign_rhs1 (stmt);
1070 tree op1 = gimple_assign_rhs2 (stmt);
1071 struct loop *ol1 = outermost_invariant_loop (op1,
1072 loop_containing_stmt (stmt));
1073
1074 /* If divisor is invariant, convert a/b to a*(1/b), allowing reciprocal
1075 to be hoisted out of loop, saving expensive divide. */
1076 if (pos == MOVE_POSSIBLE
1077 && gimple_assign_rhs_code (stmt) == RDIV_EXPR
1078 && flag_unsafe_math_optimizations
1079 && !flag_trapping_math
1080 && ol1 != NULL
1081 && outermost_invariant_loop (op0, ol1) == NULL)
1082 stmt = rewrite_reciprocal (&bsi);
1083
1084 /* If the shift count is invariant, convert (A >> B) & 1 to
1085 A & (1 << B) allowing the bit mask to be hoisted out of the loop
1086 saving an expensive shift. */
1087 if (pos == MOVE_POSSIBLE
1088 && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
1089 && integer_onep (op1)
1090 && TREE_CODE (op0) == SSA_NAME
1091 && has_single_use (op0))
1092 stmt = rewrite_bittest (&bsi);
1093 }
1094
1095 lim_data = init_lim_data (stmt);
1096 lim_data->always_executed_in = outermost;
1097
1098 if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
1099 continue;
1100
1101 if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
1102 {
1103 lim_data->max_loop = NULL;
1104 continue;
1105 }
1106
1107 if (dump_file && (dump_flags & TDF_DETAILS))
1108 {
1109 print_gimple_stmt (dump_file, stmt, 2, 0);
1110 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1111 loop_depth (lim_data->max_loop),
1112 lim_data->cost);
1113 }
1114
1115 if (lim_data->cost >= LIM_EXPENSIVE)
1116 set_profitable_level (stmt);
1117 }
1118 }
1119
1120 class move_computations_dom_walker : public dom_walker
1121 {
1122 public:
1123 move_computations_dom_walker (cdi_direction direction)
1124 : dom_walker (direction), todo_ (0) {}
1125
1126 virtual void before_dom_children (basic_block);
1127
1128 unsigned int todo_;
1129 };
1130
1131 /* Return true if CODE is an operation that when operating on signed
1132 integer types involves undefined behavior on overflow and the
1133 operation can be expressed with unsigned arithmetic. */
1134
1135 static bool
1136 arith_code_with_undefined_signed_overflow (tree_code code)
1137 {
1138 switch (code)
1139 {
1140 case PLUS_EXPR:
1141 case MINUS_EXPR:
1142 case MULT_EXPR:
1143 case NEGATE_EXPR:
1144 case POINTER_PLUS_EXPR:
1145 return true;
1146 default:
1147 return false;
1148 }
1149 }
1150
1151 /* Rewrite STMT, an assignment with a signed integer or pointer arithmetic
1152 operation that can be transformed to unsigned arithmetic by converting
1153 its operand, carrying out the operation in the corresponding unsigned
1154 type and converting the result back to the original type.
1155
1156 Returns a sequence of statements that replace STMT and also contain
1157 a modified form of STMT itself. */
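/* Illustrative example (GIMPLE-like pseudocode; the names are made up):
   for a signed addition  b_1 = a_2 + 7;  the generated sequence is roughly

     tmp_3 = (unsigned int) a_2;
     tmp_4 = tmp_3 + 7;
     b_1 = (int) tmp_4;

   so that the addition itself is carried out in the unsigned type.  */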
1158
1159 static gimple_seq
1160 rewrite_to_defined_overflow (gimple stmt)
1161 {
1162 if (dump_file && (dump_flags & TDF_DETAILS))
1163 {
1164 fprintf (dump_file, "rewriting stmt with undefined signed "
1165 "overflow ");
1166 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1167 }
1168
1169 tree lhs = gimple_assign_lhs (stmt);
1170 tree type = unsigned_type_for (TREE_TYPE (lhs));
1171 gimple_seq stmts = NULL;
1172 for (unsigned i = 1; i < gimple_num_ops (stmt); ++i)
1173 {
1174 gimple_seq stmts2 = NULL;
1175 gimple_set_op (stmt, i,
1176 force_gimple_operand (fold_convert (type,
1177 gimple_op (stmt, i)),
1178 &stmts2, true, NULL_TREE));
1179 gimple_seq_add_seq (&stmts, stmts2);
1180 }
1181 gimple_assign_set_lhs (stmt, make_ssa_name (type, stmt));
1182 if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR)
1183 gimple_assign_set_rhs_code (stmt, PLUS_EXPR);
1184 gimple_seq_add_stmt (&stmts, stmt);
1185 gimple cvt = gimple_build_assign_with_ops
1186 (NOP_EXPR, lhs, gimple_assign_lhs (stmt), NULL_TREE);
1187 gimple_seq_add_stmt (&stmts, cvt);
1188
1189 return stmts;
1190 }
1191
1192 /* Hoist the statements in basic block BB out of the loops prescribed by
1193 data stored in LIM_DATA structures associated with each statement. Callback
1194 for walk_dominator_tree. */
1195
1196 void
1197 move_computations_dom_walker::before_dom_children (basic_block bb)
1198 {
1199 struct loop *level;
1200 gimple_stmt_iterator bsi;
1201 gimple stmt;
1202 unsigned cost = 0;
1203 struct lim_aux_data *lim_data;
1204
1205 if (!loop_outer (bb->loop_father))
1206 return;
1207
1208 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
1209 {
1210 gimple new_stmt;
1211 stmt = gsi_stmt (bsi);
1212
1213 lim_data = get_lim_data (stmt);
1214 if (lim_data == NULL)
1215 {
1216 gsi_next (&bsi);
1217 continue;
1218 }
1219
1220 cost = lim_data->cost;
1221 level = lim_data->tgt_loop;
1222 clear_lim_data (stmt);
1223
1224 if (!level)
1225 {
1226 gsi_next (&bsi);
1227 continue;
1228 }
1229
1230 if (dump_file && (dump_flags & TDF_DETAILS))
1231 {
1232 fprintf (dump_file, "Moving PHI node\n");
1233 print_gimple_stmt (dump_file, stmt, 0, 0);
1234 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1235 cost, level->num);
1236 }
1237
1238 if (gimple_phi_num_args (stmt) == 1)
1239 {
1240 tree arg = PHI_ARG_DEF (stmt, 0);
1241 new_stmt = gimple_build_assign_with_ops (TREE_CODE (arg),
1242 gimple_phi_result (stmt),
1243 arg, NULL_TREE);
1244 }
1245 else
1246 {
1247 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
1248 gimple cond = gsi_stmt (gsi_last_bb (dom));
1249 tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
1250 /* Get the PHI arguments corresponding to the true and false
1251 edges of COND. */
1252 extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
1253 gcc_assert (arg0 && arg1);
1254 t = build2 (gimple_cond_code (cond), boolean_type_node,
1255 gimple_cond_lhs (cond), gimple_cond_rhs (cond));
1256 new_stmt = gimple_build_assign_with_ops (COND_EXPR,
1257 gimple_phi_result (stmt),
1258 t, arg0, arg1);
1259 todo_ |= TODO_cleanup_cfg;
1260 }
1261 gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
1262 remove_phi_node (&bsi, false);
1263 }
1264
1265 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
1266 {
1267 edge e;
1268
1269 stmt = gsi_stmt (bsi);
1270
1271 lim_data = get_lim_data (stmt);
1272 if (lim_data == NULL)
1273 {
1274 gsi_next (&bsi);
1275 continue;
1276 }
1277
1278 cost = lim_data->cost;
1279 level = lim_data->tgt_loop;
1280 clear_lim_data (stmt);
1281
1282 if (!level)
1283 {
1284 gsi_next (&bsi);
1285 continue;
1286 }
1287
1288 /* We do not really want to move conditionals out of the loop; we just
1289 placed them here to force their operands to be moved if necessary. */
1290 if (gimple_code (stmt) == GIMPLE_COND)
1291 continue;
1292
1293 if (dump_file && (dump_flags & TDF_DETAILS))
1294 {
1295 fprintf (dump_file, "Moving statement\n");
1296 print_gimple_stmt (dump_file, stmt, 0, 0);
1297 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1298 cost, level->num);
1299 }
1300
1301 e = loop_preheader_edge (level);
1302 gcc_assert (!gimple_vdef (stmt));
1303 if (gimple_vuse (stmt))
1304 {
1305 /* The new VUSE is the one from the virtual PHI in the loop
1306 header or the one already present. */
1307 gimple_stmt_iterator gsi2;
1308 for (gsi2 = gsi_start_phis (e->dest);
1309 !gsi_end_p (gsi2); gsi_next (&gsi2))
1310 {
1311 gimple phi = gsi_stmt (gsi2);
1312 if (virtual_operand_p (gimple_phi_result (phi)))
1313 {
1314 gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
1315 break;
1316 }
1317 }
1318 }
1319 gsi_remove (&bsi, false);
1320 /* In case this is a stmt that is not unconditionally executed
1321 when the target loop header is executed and the stmt may
1322 invoke undefined integer or pointer overflow, rewrite it to
1323 unsigned arithmetic. */
1324 if (is_gimple_assign (stmt)
1325 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
1326 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
1327 && arith_code_with_undefined_signed_overflow
1328 (gimple_assign_rhs_code (stmt))
1329 && (!ALWAYS_EXECUTED_IN (bb)
1330 || !(ALWAYS_EXECUTED_IN (bb) == level
1331 || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1332 gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
1333 else
1334 gsi_insert_on_edge (e, stmt);
1335 }
1336 }
1337
1338 /* Hoist the statements out of the loops prescribed by data stored in
1339 LIM_DATA structures associated with each statement. */
1340
1341 static unsigned int
1342 move_computations (void)
1343 {
1344 move_computations_dom_walker walker (CDI_DOMINATORS);
1345 walker.walk (cfun->cfg->x_entry_block_ptr);
1346
1347 gsi_commit_edge_inserts ();
1348 if (need_ssa_update_p (cfun))
1349 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1350
1351 return walker.todo_;
1352 }
1353
1354 /* Checks whether the statement defining variable *INDEX can be hoisted
1355 out of the loop passed in DATA. Callback for for_each_index. */
1356
1357 static bool
1358 may_move_till (tree ref, tree *index, void *data)
1359 {
1360 struct loop *loop = (struct loop *) data, *max_loop;
1361
1362 /* If REF is an array reference, check also that the step and the lower
1363 bound are invariant in LOOP. */
1364 if (TREE_CODE (ref) == ARRAY_REF)
1365 {
1366 tree step = TREE_OPERAND (ref, 3);
1367 tree lbound = TREE_OPERAND (ref, 2);
1368
1369 max_loop = outermost_invariant_loop (step, loop);
1370 if (!max_loop)
1371 return false;
1372
1373 max_loop = outermost_invariant_loop (lbound, loop);
1374 if (!max_loop)
1375 return false;
1376 }
1377
1378 max_loop = outermost_invariant_loop (*index, loop);
1379 if (!max_loop)
1380 return false;
1381
1382 return true;
1383 }
1384
1385 /* If OP is an SSA_NAME, force the statement that defines it to be
1386 moved out of the LOOP. ORIG_LOOP is the loop in which OP is used. */
1387
1388 static void
1389 force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
1390 {
1391 gimple stmt;
1392
1393 if (!op
1394 || is_gimple_min_invariant (op))
1395 return;
1396
1397 gcc_assert (TREE_CODE (op) == SSA_NAME);
1398
1399 stmt = SSA_NAME_DEF_STMT (op);
1400 if (gimple_nop_p (stmt))
1401 return;
1402
1403 set_level (stmt, orig_loop, loop);
1404 }
1405
1406 /* Forces statements defining invariants in REF (and *INDEX) to be moved out of
1407 the LOOP. The reference REF is used in the loop ORIG_LOOP. Callback for
1408 for_each_index. */
1409
1410 struct fmt_data
1411 {
1412 struct loop *loop;
1413 struct loop *orig_loop;
1414 };
1415
1416 static bool
1417 force_move_till (tree ref, tree *index, void *data)
1418 {
1419 struct fmt_data *fmt_data = (struct fmt_data *) data;
1420
1421 if (TREE_CODE (ref) == ARRAY_REF)
1422 {
1423 tree step = TREE_OPERAND (ref, 3);
1424 tree lbound = TREE_OPERAND (ref, 2);
1425
1426 force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
1427 force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
1428 }
1429
1430 force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);
1431
1432 return true;
1433 }
1434
1435 /* A function to free the mem_ref object MEM. */
1436
1437 static void
1438 memref_free (struct mem_ref *mem)
1439 {
1440 unsigned i;
1441 vec<mem_ref_loc> *accs;
1442
1443 FOR_EACH_VEC_ELT (mem->accesses_in_loop, i, accs)
1444 accs->release ();
1445 mem->accesses_in_loop.release ();
1446
1447 free (mem);
1448 }
1449
1450 /* Allocates and returns a memory reference description for MEM whose hash
1451 value is HASH and id is ID. */
1452
1453 static mem_ref_p
1454 mem_ref_alloc (tree mem, unsigned hash, unsigned id)
1455 {
1456 mem_ref_p ref = XNEW (struct mem_ref);
1457 ao_ref_init (&ref->mem, mem);
1458 ref->id = id;
1459 ref->hash = hash;
1460 bitmap_initialize (&ref->stored, &lim_bitmap_obstack);
1461 bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
1462 bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
1463 ref->accesses_in_loop.create (0);
1464
1465 return ref;
1466 }
1467
1468 /* Records memory reference location *LOC in LOOP to the memory reference
1469 description REF. The reference occurs in statement STMT. */
1470
1471 static void
1472 record_mem_ref_loc (mem_ref_p ref, struct loop *loop, gimple stmt, tree *loc)
1473 {
1474 mem_ref_loc aref;
1475
1476 if (ref->accesses_in_loop.length ()
1477 <= (unsigned) loop->num)
1478 ref->accesses_in_loop.safe_grow_cleared (loop->num + 1);
1479
1480 aref.stmt = stmt;
1481 aref.ref = loc;
1482 ref->accesses_in_loop[loop->num].safe_push (aref);
1483 }
1484
1485 /* Marks reference REF as stored in LOOP. */
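/* The loop below walks outwards from LOOP towards the root of the loop tree
   and stops early as soon as a loop already has its bit set, since all of
   its enclosing loops are then marked as well.  */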
1486
1487 static void
1488 mark_ref_stored (mem_ref_p ref, struct loop *loop)
1489 {
1490 while (loop != current_loops->tree_root
1491 && bitmap_set_bit (&ref->stored, loop->num))
1492 loop = loop_outer (loop);
1493 }
1494
1495 /* Gathers memory references in statement STMT in LOOP, storing the
1496 information about them in the memory_accesses structure. Marks
1497 the vops accessed through unrecognized statements there as
1498 well. */
1499
1500 static void
1501 gather_mem_refs_stmt (struct loop *loop, gimple stmt)
1502 {
1503 tree *mem = NULL;
1504 hashval_t hash;
1505 mem_ref **slot;
1506 mem_ref_p ref;
1507 bool is_stored;
1508 unsigned id;
1509
1510 if (!gimple_vuse (stmt))
1511 return;
1512
1513 mem = simple_mem_ref_in_stmt (stmt, &is_stored);
1514 if (!mem)
1515 {
1516 /* We use the shared mem_ref for all unanalyzable refs. */
1517 id = UNANALYZABLE_MEM_ID;
1518 ref = memory_accesses.refs_list[id];
1519 if (dump_file && (dump_flags & TDF_DETAILS))
1520 {
1521 fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
1522 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1523 }
1524 is_stored = gimple_vdef (stmt);
1525 }
1526 else
1527 {
1528 hash = iterative_hash_expr (*mem, 0);
1529 slot = memory_accesses.refs.find_slot_with_hash (*mem, hash, INSERT);
1530 if (*slot)
1531 {
1532 ref = (mem_ref_p) *slot;
1533 id = ref->id;
1534 }
1535 else
1536 {
1537 id = memory_accesses.refs_list.length ();
1538 ref = mem_ref_alloc (*mem, hash, id);
1539 memory_accesses.refs_list.safe_push (ref);
1540 *slot = ref;
1541
1542 if (dump_file && (dump_flags & TDF_DETAILS))
1543 {
1544 fprintf (dump_file, "Memory reference %u: ", id);
1545 print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
1546 fprintf (dump_file, "\n");
1547 }
1548 }
1549
1550 record_mem_ref_loc (ref, loop, stmt, mem);
1551 }
1552 bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
1553 if (is_stored)
1554 {
1555 bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id);
1556 mark_ref_stored (ref, loop);
1557 }
1558 return;
1559 }
1560
1561 static unsigned *bb_loop_postorder;
1562
1563 /* qsort comparison function to sort blocks by their loop father's postorder number. */
1564
1565 static int
1566 sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_)
1567 {
1568 basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_);
1569 basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_);
1570 struct loop *loop1 = bb1->loop_father;
1571 struct loop *loop2 = bb2->loop_father;
1572 if (loop1->num == loop2->num)
1573 return 0;
1574 return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1575 }
1576
1577 /* Gathers memory references in loops. */
1578
1579 static void
1580 analyze_memory_references (void)
1581 {
1582 gimple_stmt_iterator bsi;
1583 basic_block bb, *bbs;
1584 struct loop *loop, *outer;
1585 loop_iterator li;
1586 unsigned i, n;
1587
1588 /* Initialize bb_loop_postorder with a mapping from loop->num to
1589 its postorder index. */
1590 i = 0;
1591 bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
1592 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1593 bb_loop_postorder[loop->num] = i++;
1594 /* Collect all basic-blocks in loops and sort them by their
1595 loop's postorder number. */
1596 i = 0;
1597 bbs = XNEWVEC (basic_block, n_basic_blocks - NUM_FIXED_BLOCKS);
1598 FOR_EACH_BB (bb)
1599 if (bb->loop_father != current_loops->tree_root)
1600 bbs[i++] = bb;
1601 n = i;
1602 qsort (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp);
1603 free (bb_loop_postorder);
1604
1605 /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
1606 That results in better locality for all the bitmaps. */
1607 for (i = 0; i < n; ++i)
1608 {
1609 basic_block bb = bbs[i];
1610 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1611 gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
1612 }
1613
1614 free (bbs);
1615
1616 /* Propagate the information about accessed memory references up
1617 the loop hierarchy. */
1618 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1619 {
1620 /* Finalize the overall touched references (including subloops). */
1621 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
1622 &memory_accesses.refs_stored_in_loop[loop->num]);
1623
1624 /* Propagate the information about accessed memory references up
1625 the loop hierarchy. */
1626 outer = loop_outer (loop);
1627 if (outer == current_loops->tree_root)
1628 continue;
1629
1630 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
1631 &memory_accesses.all_refs_stored_in_loop[loop->num]);
1632 }
1633 }
1634
1635 /* Returns true if MEM1 and MEM2 may alias. TTAE_CACHE is used as a cache in
1636 tree_to_aff_combination_expand. */
1637
1638 static bool
1639 mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2,
1640 struct pointer_map_t **ttae_cache)
1641 {
1642 /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
1643 object and their offsets differ in such a way that the locations cannot
1644 overlap, then they cannot alias. */
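/* For example, accesses p->a and p->b through the same pointer share a base
   but have non-overlapping offset ranges, so the affine expansion below can
   conclude that they do not alias.  */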
1645 double_int size1, size2;
1646 aff_tree off1, off2;
1647
1648 /* Perform basic offset and type-based disambiguation. */
1649 if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
1650 return false;
1651
1652 /* The expansion of addresses may be a bit expensive, thus we only do
1653 the check at -O2 and higher optimization levels. */
1654 if (optimize < 2)
1655 return true;
1656
1657 get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
1658 get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
1659 aff_combination_expand (&off1, ttae_cache);
1660 aff_combination_expand (&off2, ttae_cache);
1661 aff_combination_scale (&off1, double_int_minus_one);
1662 aff_combination_add (&off2, &off1);
1663
1664 if (aff_comb_cannot_overlap_p (&off2, size1, size2))
1665 return false;
1666
1667 return true;
1668 }
1669
1670 /* Iterates over all locations of REF in LOOP and its subloops calling
1671 fn.operator() with the location as argument. When that operator
1672 returns true the iteration is stopped and true is returned.
1673 Otherwise false is returned. */
1674
1675 template <typename FN>
1676 static bool
1677 for_all_locs_in_loop (struct loop *loop, mem_ref_p ref, FN fn)
1678 {
1679 unsigned i;
1680 mem_ref_loc_p loc;
1681 struct loop *subloop;
1682
1683 if (ref->accesses_in_loop.length () > (unsigned) loop->num)
1684 FOR_EACH_VEC_ELT (ref->accesses_in_loop[loop->num], i, loc)
1685 if (fn (loc))
1686 return true;
1687
1688 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
1689 if (for_all_locs_in_loop (subloop, ref, fn))
1690 return true;
1691
1692 return false;
1693 }
1694
1695 /* Rewrites location LOC by TMP_VAR. */
1696
1697 struct rewrite_mem_ref_loc
1698 {
1699 rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
1700 bool operator () (mem_ref_loc_p loc);
1701 tree tmp_var;
1702 };
1703
1704 bool
1705 rewrite_mem_ref_loc::operator () (mem_ref_loc_p loc)
1706 {
1707 *loc->ref = tmp_var;
1708 update_stmt (loc->stmt);
1709 return false;
1710 }
1711
1712 /* Rewrites all references to REF in LOOP by variable TMP_VAR. */
1713
1714 static void
1715 rewrite_mem_refs (struct loop *loop, mem_ref_p ref, tree tmp_var)
1716 {
1717 for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
1718 }
1719
1720 /* Stores the first reference location in LOCP. */
1721
1722 struct first_mem_ref_loc_1
1723 {
1724 first_mem_ref_loc_1 (mem_ref_loc_p *locp_) : locp (locp_) {}
1725 bool operator () (mem_ref_loc_p loc);
1726 mem_ref_loc_p *locp;
1727 };
1728
1729 bool
1730 first_mem_ref_loc_1::operator () (mem_ref_loc_p loc)
1731 {
1732 *locp = loc;
1733 return true;
1734 }
1735
1736 /* Returns the first reference location to REF in LOOP. */
1737
1738 static mem_ref_loc_p
1739 first_mem_ref_loc (struct loop *loop, mem_ref_p ref)
1740 {
1741 mem_ref_loc_p locp = NULL;
1742 for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
1743 return locp;
1744 }
1745
1746 struct prev_flag_edges {
1747 /* Edge to insert new flag comparison code. */
1748 edge append_cond_position;
1749
1750 /* Edge for fall through from previous flag comparison. */
1751 edge last_cond_fallthru;
1752 };
1753
1754 /* Helper function for execute_sm. Emit code to store TMP_VAR into
1755 MEM along edge EX.
1756
1757 The store is only done if MEM has changed. We do this so no
1758 changes to MEM occur on code paths that did not originally store
1759 into it.
1760
1761 The common case for execute_sm will transform:
1762
1763 for (...) {
1764 if (foo)
1765 stuff;
1766 else
1767 MEM = TMP_VAR;
1768 }
1769
1770 into:
1771
1772 lsm = MEM;
1773 for (...) {
1774 if (foo)
1775 stuff;
1776 else
1777 lsm = TMP_VAR;
1778 }
1779 MEM = lsm;
1780
1781 This function will generate:
1782
1783 lsm = MEM;
1784
1785 lsm_flag = false;
1786 ...
1787 for (...) {
1788 if (foo)
1789 stuff;
1790 else {
1791 lsm = TMP_VAR;
1792 lsm_flag = true;
1793 }
1794 }
1795 if (lsm_flag) <--
1796 MEM = lsm; <--
1797 */
1798
1799 static void
1800 execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag)
1801 {
1802 basic_block new_bb, then_bb, old_dest;
1803 bool loop_has_only_one_exit;
1804 edge then_old_edge, orig_ex = ex;
1805 gimple_stmt_iterator gsi;
1806 gimple stmt;
1807 struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
1808
1809 /* ?? Insert store after previous store if applicable. See note
1810 below. */
1811 if (prev_edges)
1812 ex = prev_edges->append_cond_position;
1813
1814 loop_has_only_one_exit = single_pred_p (ex->dest);
1815
1816 if (loop_has_only_one_exit)
1817 ex = split_block_after_labels (ex->dest);
1818
1819 old_dest = ex->dest;
1820 new_bb = split_edge (ex);
1821 then_bb = create_empty_bb (new_bb);
1822 if (current_loops && new_bb->loop_father)
1823 add_bb_to_loop (then_bb, new_bb->loop_father);
1824
1825 gsi = gsi_start_bb (new_bb);
1826 stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
1827 NULL_TREE, NULL_TREE);
1828 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1829
1830 gsi = gsi_start_bb (then_bb);
1831 /* Insert actual store. */
1832 stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
1833 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1834
1835 make_edge (new_bb, then_bb, EDGE_TRUE_VALUE);
1836 make_edge (new_bb, old_dest, EDGE_FALSE_VALUE);
1837 then_old_edge = make_edge (then_bb, old_dest, EDGE_FALLTHRU);
1838
1839 set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
1840
1841 if (prev_edges)
1842 {
1843 basic_block prevbb = prev_edges->last_cond_fallthru->src;
1844 redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
1845 set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
1846 set_immediate_dominator (CDI_DOMINATORS, old_dest,
1847 recompute_dominator (CDI_DOMINATORS, old_dest));
1848 }
1849
1850 /* ?? Because stores may alias, they must happen in the exact
1851 sequence they originally happened. Save the position right after
1852 the (_lsm) store we just created so we can continue appending after
1853 it and maintain the original order. */
1854 {
1855 struct prev_flag_edges *p;
1856
1857 if (orig_ex->aux)
1858 orig_ex->aux = NULL;
1859 alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
1860 p = (struct prev_flag_edges *) orig_ex->aux;
1861 p->append_cond_position = then_old_edge;
1862 p->last_cond_fallthru = find_edge (new_bb, old_dest);
1863 orig_ex->aux = (void *) p;
1864 }
1865
1866 if (!loop_has_only_one_exit)
1867 for (gsi = gsi_start_phis (old_dest); !gsi_end_p (gsi); gsi_next (&gsi))
1868 {
1869 gimple phi = gsi_stmt (gsi);
1870 unsigned i;
1871
1872 for (i = 0; i < gimple_phi_num_args (phi); i++)
1873 if (gimple_phi_arg_edge (phi, i)->src == new_bb)
1874 {
1875 tree arg = gimple_phi_arg_def (phi, i);
1876 add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
1877 update_stmt (phi);
1878 }
1879 }
1880 /* Clear EDGE_FALLTHRU on the original fall through edge out of NEW_BB;
1881 it was the single_succ_edge (new_bb) and is now the false edge. */
1882 EDGE_SUCC (new_bb, 0)->flags &= ~EDGE_FALLTHRU;
1883 }
1884
1885 /* When the reference is stored to at the location, set the flag indicating the store. */
1886
1887 struct sm_set_flag_if_changed
1888 {
1889 sm_set_flag_if_changed (tree flag_) : flag (flag_) {}
1890 bool operator () (mem_ref_loc_p loc);
1891 tree flag;
1892 };
1893
1894 bool
1895 sm_set_flag_if_changed::operator () (mem_ref_loc_p loc)
1896 {
1897 /* Only set the flag for writes. */
1898 if (is_gimple_assign (loc->stmt)
1899 && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
1900 {
1901 gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
1902 gimple stmt = gimple_build_assign (flag, boolean_true_node);
1903 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1904 }
1905 return false;
1906 }
1907
1908 /* Helper function for execute_sm. On every location where REF is
1909 stored to, set an appropriate flag indicating the store. */
1910
1911 static tree
1912 execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref)
1913 {
1914 tree flag;
1915 char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
1916 flag = create_tmp_reg (boolean_type_node, str);
1917 for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag));
1918 return flag;
1919 }
1920
1921 /* Executes store motion of memory reference REF from LOOP.
1922 Exits from the LOOP are stored in EXITS. The initialization of the
1923 temporary variable is put in the preheader of the loop, and assignments
1924 to the reference from the temporary variable are emitted on the exits. */
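
/* A sketch (not from the original sources) of why the flag-based variant
   built by execute_sm_if_changed can be required. Given

     for (...)
       if (foo)
	 MEM = TMP_VAR;

   unconditionally emitting "MEM = lsm;" on the exits would store to MEM
   even on executions where FOO never held. Another thread, or the
   transactional-memory runtime when the preheader is in a transaction,
   could observe that extra store, so when store data races must not be
   introduced the exit store is predicated by the LSM_FLAG variable as
   shown above execute_sm_if_changed.  */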
1925
1926 static void
1927 execute_sm (struct loop *loop, vec<edge> exits, mem_ref_p ref)
1928 {
1929 tree tmp_var, store_flag;
1930 unsigned i;
1931 gimple load;
1932 struct fmt_data fmt_data;
1933 edge ex;
1934 struct lim_aux_data *lim_data;
1935 bool multi_threaded_model_p = false;
1936 gimple_stmt_iterator gsi;
1937
1938 if (dump_file && (dump_flags & TDF_DETAILS))
1939 {
1940 fprintf (dump_file, "Executing store motion of ");
1941 print_generic_expr (dump_file, ref->mem.ref, 0);
1942 fprintf (dump_file, " from loop %d\n", loop->num);
1943 }
1944
1945 tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
1946 get_lsm_tmp_name (ref->mem.ref, ~0));
1947
1948 fmt_data.loop = loop;
1949 fmt_data.orig_loop = loop;
1950 for_each_index (&ref->mem.ref, force_move_till, &fmt_data);
1951
1952 if (bb_in_transaction (loop_preheader_edge (loop)->src)
1953 || !PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES))
1954 multi_threaded_model_p = true;
1955
1956 if (multi_threaded_model_p)
1957 store_flag = execute_sm_if_changed_flag_set (loop, ref);
1958
1959 rewrite_mem_refs (loop, ref, tmp_var);
1960
1961 /* Emit the load code at the position of the first occurrence of the
1962 memory reference in the loop, so that we are sure it will be processed
1963 by move_computations after all its dependencies. */
1964 gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);
1965
1966 /* FIXME/TODO: For the multi-threaded variant, we could avoid this
1967 load altogether, since the store is predicated by a flag. We
1968 could do the load only if it was originally present in the loop. */
1969 load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
1970 lim_data = init_lim_data (load);
1971 lim_data->max_loop = loop;
1972 lim_data->tgt_loop = loop;
1973 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
1974
1975 if (multi_threaded_model_p)
1976 {
1977 load = gimple_build_assign (store_flag, boolean_false_node);
1978 lim_data = init_lim_data (load);
1979 lim_data->max_loop = loop;
1980 lim_data->tgt_loop = loop;
1981 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
1982 }
1983
1984 /* Sink the store to every exit from the loop. */
1985 FOR_EACH_VEC_ELT (exits, i, ex)
1986 if (!multi_threaded_model_p)
1987 {
1988 gimple store;
1989 store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
1990 gsi_insert_on_edge (ex, store);
1991 }
1992 else
1993 execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag);
1994 }
1995
1996 /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
1997 edges of the LOOP. */
1998
1999 static void
2000 hoist_memory_references (struct loop *loop, bitmap mem_refs,
2001 vec<edge> exits)
2002 {
2003 mem_ref_p ref;
2004 unsigned i;
2005 bitmap_iterator bi;
2006
2007 EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
2008 {
2009 ref = memory_accesses.refs_list[i];
2010 execute_sm (loop, exits, ref);
2011 }
2012 }
2013
2014 struct ref_always_accessed
2015 {
2016 ref_always_accessed (struct loop *loop_, tree base_, bool stored_p_)
2017 : loop (loop_), base (base_), stored_p (stored_p_) {}
2018 bool operator () (mem_ref_loc_p loc);
2019 struct loop *loop;
2020 tree base;
2021 bool stored_p;
2022 };
2023
2024 bool
2025 ref_always_accessed::operator () (mem_ref_loc_p loc)
2026 {
2027 struct loop *must_exec;
2028
2029 if (!get_lim_data (loc->stmt))
2030 return false;
2031
2032 /* If we require an always executed store, make sure the statement
2033 stores to the reference. */
2034 if (stored_p)
2035 {
2036 tree lhs;
2037 if (!gimple_get_lhs (loc->stmt))
2038 return false;
2039 lhs = get_base_address (gimple_get_lhs (loc->stmt));
2040 if (!lhs)
2041 return false;
2042 if (INDIRECT_REF_P (lhs)
2043 || TREE_CODE (lhs) == MEM_REF)
2044 lhs = TREE_OPERAND (lhs, 0);
2045 if (lhs != base)
2046 return false;
2047 }
2048
2049 must_exec = get_lim_data (loc->stmt)->always_executed_in;
2050 if (!must_exec)
2051 return false;
2052
2053 if (must_exec == loop
2054 || flow_loop_nested_p (must_exec, loop))
2055 return true;
2056
2057 return false;
2058 }
2059
2060 /* Returns true if REF is always accessed in LOOP. If STORED_P is true,
2061 make sure REF is always stored to in LOOP. */
2062
2063 static bool
2064 ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p)
2065 {
2066 tree base = ao_ref_base (&ref->mem);
2067 if (TREE_CODE (base) == MEM_REF)
2068 base = TREE_OPERAND (base, 0);
2069
2070 return for_all_locs_in_loop (loop, ref,
2071 ref_always_accessed (loop, base, stored_p));
2072 }
2073
2074 /* Returns true if REF1 and REF2 are independent. */
2075
2076 static bool
2077 refs_independent_p (mem_ref_p ref1, mem_ref_p ref2)
2078 {
2079 if (ref1 == ref2)
2080 return true;
2081
2082 if (dump_file && (dump_flags & TDF_DETAILS))
2083 fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2084 ref1->id, ref2->id);
2085
2086 if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
2087 {
2088 if (dump_file && (dump_flags & TDF_DETAILS))
2089 fprintf (dump_file, "dependent.\n");
2090 return false;
2091 }
2092 else
2093 {
2094 if (dump_file && (dump_flags & TDF_DETAILS))
2095 fprintf (dump_file, "independent.\n");
2096 return true;
2097 }
2098 }
2099
2100 /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
2101 and its super-loops. */
2102
2103 static void
2104 record_dep_loop (struct loop *loop, mem_ref_p ref, bool stored_p)
2105 {
2106 /* We can propagate dependent-in-loop bits up the loop
2107 hierarchy to all outer loops. */
2108 while (loop != current_loops->tree_root
2109 && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2110 loop = loop_outer (loop);
2111 }
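
/* An illustrative sketch (not from the original sources; the loop numbers
   are made up): if REF turns out to be dependent inside loop 3, which is
   nested directly in the outermost loop 1, then

     record_dep_loop (loop_3, ref, false);

   sets the dep_loop bits LOOP_DEP_BIT (3, false) and LOOP_DEP_BIT (1, false),
   stopping at the loop tree root or as soon as a bit is already set, so a
   later query for any enclosing loop finds the cached "dependent" answer
   immediately.  */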
2112
2113 /* Returns true if REF is independent of all other memory references in
2114 LOOP. */
2115
2116 static bool
2117 ref_indep_loop_p_1 (struct loop *loop, mem_ref_p ref, bool stored_p)
2118 {
2119 bitmap refs_to_check;
2120 unsigned i;
2121 bitmap_iterator bi;
2122 mem_ref_p aref;
2123
2124 if (stored_p)
2125 refs_to_check = &memory_accesses.refs_in_loop[loop->num];
2126 else
2127 refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];
2128
2129 if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
2130 return false;
2131
2132 EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2133 {
2134 aref = memory_accesses.refs_list[i];
2135 if (!refs_independent_p (ref, aref))
2136 return false;
2137 }
2138
2139 return true;
2140 }
2141
2142 /* Returns true if REF is independent of all other memory references in
2143 LOOP. Wrapper over ref_indep_loop_p_1, caching its results. */
2144
2145 static bool
2146 ref_indep_loop_p_2 (struct loop *loop, mem_ref_p ref, bool stored_p)
2147 {
2148 stored_p |= bitmap_bit_p (&ref->stored, loop->num);
2149
2150 if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2151 return true;
2152 if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2153 return false;
2154
2155 struct loop *inner = loop->inner;
2156 while (inner)
2157 {
2158 if (!ref_indep_loop_p_2 (inner, ref, stored_p))
2159 return false;
2160 inner = inner->next;
2161 }
2162
2163 bool indep_p = ref_indep_loop_p_1 (loop, ref, stored_p);
2164
2165 if (dump_file && (dump_flags & TDF_DETAILS))
2166 fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2167 ref->id, loop->num, indep_p ? "independent" : "dependent");
2168
2169 /* Record the computed result in the cache. */
2170 if (indep_p)
2171 {
2172 if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
2173 && stored_p)
2174 {
2175 /* If it's independent of all refs then it's independent
2176 of stores, too. */
2177 bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
2178 }
2179 }
2180 else
2181 {
2182 record_dep_loop (loop, ref, stored_p);
2183 if (!stored_p)
2184 {
2185 /* If it's dependent on stores it's dependent on
2186 all refs, too. */
2187 record_dep_loop (loop, ref, true);
2188 }
2189 }
2190
2191 return indep_p;
2192 }
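
/* A consolidating note (illustrative, not from the original sources): the
   cache maintained above obeys

     indep_loop (L, stored_p = true)   =>  indep_loop (L, stored_p = false)
     dep_loop   (L, stored_p = false)  =>  dep_loop   (L, stored_p = true)

   because stored_p = true means REF was checked against all references in
   loop L, while stored_p = false means it was checked only against the
   stored ones.  */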
2193
2194 /* Returns true if REF is independent of all other memory references in
2195 LOOP. */
2196
2197 static bool
2198 ref_indep_loop_p (struct loop *loop, mem_ref_p ref)
2199 {
2200 gcc_checking_assert (MEM_ANALYZABLE (ref));
2201
2202 return ref_indep_loop_p_2 (loop, ref, false);
2203 }
2204
2205 /* Returns true if we can perform store motion of REF from LOOP. */
2206
2207 static bool
2208 can_sm_ref_p (struct loop *loop, mem_ref_p ref)
2209 {
2210 tree base;
2211
2212 /* Can't hoist unanalyzable refs. */
2213 if (!MEM_ANALYZABLE (ref))
2214 return false;
2215
2216 /* It should be movable. */
2217 if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
2218 || TREE_THIS_VOLATILE (ref->mem.ref)
2219 || !for_each_index (&ref->mem.ref, may_move_till, loop))
2220 return false;
2221
2222 /* If it can throw, fail; we do not properly update EH info. */
2223 if (tree_could_throw_p (ref->mem.ref))
2224 return false;
2225
2226 /* If it can trap, it must always be executed in LOOP.
2227 Readonly memory locations may trap when storing to them, but
2228 tree_could_trap_p is a predicate for rvalues, so check that
2229 explicitly. */
2230 base = get_base_address (ref->mem.ref);
2231 if ((tree_could_trap_p (ref->mem.ref)
2232 || (DECL_P (base) && TREE_READONLY (base)))
2233 && !ref_always_accessed_p (loop, ref, true))
2234 return false;
2235
2236 /* And it must be independent of all other memory references
2237 in LOOP. */
2238 if (!ref_indep_loop_p (loop, ref))
2239 return false;
2240
2241 return true;
2242 }
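
/* An illustrative example (not from the original sources; p, q and n are
   made-up names) of a reference that can_sm_ref_p rejects:

     for (i = 0; i < n; i++)
       if (q[i])
	 *p = q[i];

   Here *p may trap and is only stored to when q[i] is nonzero, so
   ref_always_accessed_p fails and store motion is refused; hoisting would
   otherwise introduce a preheader load of *p (and an exit store) on
   executions that never accessed it.  */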
2243
2244 /* Marks in REFS_TO_SM the references in LOOP for which store motion
2245 should be performed. SM_EXECUTED is the set of references for which
2246 store motion was already performed in one of the outer loops. */
2247
2248 static void
2249 find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2250 {
2251 bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
2252 unsigned i;
2253 bitmap_iterator bi;
2254 mem_ref_p ref;
2255
2256 EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2257 {
2258 ref = memory_accesses.refs_list[i];
2259 if (can_sm_ref_p (loop, ref))
2260 bitmap_set_bit (refs_to_sm, i);
2261 }
2262 }
2263
2264 /* Checks whether LOOP (with exits stored in the EXITS array) is suitable
2265 for a store motion optimization (i.e. whether we can insert statements
2266 on its exits). */
2267
2268 static bool
2269 loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
2270 vec<edge> exits)
2271 {
2272 unsigned i;
2273 edge ex;
2274
2275 FOR_EACH_VEC_ELT (exits, i, ex)
2276 if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2277 return false;
2278
2279 return true;
2280 }
2281
2282 /* Try to perform store motion for all memory references modified inside
2283 LOOP. SM_EXECUTED is the bitmap of the memory references for which
2284 store motion was already executed in one of the outer loops. */
2285
2286 static void
2287 store_motion_loop (struct loop *loop, bitmap sm_executed)
2288 {
2289 vec<edge> exits = get_loop_exit_edges (loop);
2290 struct loop *subloop;
2291 bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
2292
2293 if (loop_suitable_for_sm (loop, exits))
2294 {
2295 find_refs_for_sm (loop, sm_executed, sm_in_loop);
2296 hoist_memory_references (loop, sm_in_loop, exits);
2297 }
2298 exits.release ();
2299
2300 bitmap_ior_into (sm_executed, sm_in_loop);
2301 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2302 store_motion_loop (subloop, sm_executed);
2303 bitmap_and_compl_into (sm_executed, sm_in_loop);
2304 BITMAP_FREE (sm_in_loop);
2305 }
2306
2307 /* Try to perform store motion for all memory references modified inside
2308 loops. */
2309
2310 static void
2311 store_motion (void)
2312 {
2313 struct loop *loop;
2314 bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
2315
2316 for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2317 store_motion_loop (loop, sm_executed);
2318
2319 BITMAP_FREE (sm_executed);
2320 gsi_commit_edge_inserts ();
2321 }
2322
2323 /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
2324 for each such basic block bb records the outermost loop for which execution
2325 of its header implies execution of bb. CONTAINS_CALL is the bitmap of
2326 blocks that contain a nonpure call. */
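
/* An illustrative example (a sketch, not from the original sources): for a
   bottom-test loop

     do
       {
	 a = ...;	<- header block
	 if (cond)
	   b = ...;	<- conditionally executed block
	 c = ...;	<- latch block, also holds the exit test
       }
     while (...);

   the scan in fill_always_executed_in_1 records the last block in dominance
   order that dominates the latch before it stops at a block with an exit
   edge or a nonpure call; here that is the latch block itself, so the latch
   and (walking immediate dominators) the header get ALWAYS_EXECUTED_IN set
   to the loop, while the conditionally executed "b = ..." block does not.  */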
2327
2328 static void
2329 fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
2330 {
2331 basic_block bb = NULL, *bbs, last = NULL;
2332 unsigned i;
2333 edge e;
2334 struct loop *inn_loop = loop;
2335
2336 if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2337 {
2338 bbs = get_loop_body_in_dom_order (loop);
2339
2340 for (i = 0; i < loop->num_nodes; i++)
2341 {
2342 edge_iterator ei;
2343 bb = bbs[i];
2344
2345 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2346 last = bb;
2347
2348 if (bitmap_bit_p (contains_call, bb->index))
2349 break;
2350
2351 FOR_EACH_EDGE (e, ei, bb->succs)
2352 if (!flow_bb_inside_loop_p (loop, e->dest))
2353 break;
2354 if (e)
2355 break;
2356
2357 /* A loop might be infinite (TODO use simple loop analysis
2358 to disprove this if possible). */
2359 if (bb->flags & BB_IRREDUCIBLE_LOOP)
2360 break;
2361
2362 if (!flow_bb_inside_loop_p (inn_loop, bb))
2363 break;
2364
2365 if (bb->loop_father->header == bb)
2366 {
2367 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2368 break;
2369
2370 /* In a loop that is always entered we may proceed anyway.
2371 But record that we entered it and stop once we leave it. */
2372 inn_loop = bb->loop_father;
2373 }
2374 }
2375
2376 while (1)
2377 {
2378 SET_ALWAYS_EXECUTED_IN (last, loop);
2379 if (last == loop->header)
2380 break;
2381 last = get_immediate_dominator (CDI_DOMINATORS, last);
2382 }
2383
2384 free (bbs);
2385 }
2386
2387 for (loop = loop->inner; loop; loop = loop->next)
2388 fill_always_executed_in_1 (loop, contains_call);
2389 }
2390
2391 /* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
2392 for each such basic block bb records the outermost loop for which execution
2393 of its header implies execution of bb. */
2394
2395 static void
2396 fill_always_executed_in (void)
2397 {
2398 sbitmap contains_call = sbitmap_alloc (last_basic_block);
2399 basic_block bb;
2400 struct loop *loop;
2401
2402 bitmap_clear (contains_call);
2403 FOR_EACH_BB (bb)
2404 {
2405 gimple_stmt_iterator gsi;
2406 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2407 {
2408 if (nonpure_call_p (gsi_stmt (gsi)))
2409 break;
2410 }
2411
2412 if (!gsi_end_p (gsi))
2413 bitmap_set_bit (contains_call, bb->index);
2414 }
2415
2416 for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2417 fill_always_executed_in_1 (loop, contains_call);
2418
2419 sbitmap_free (contains_call);
2420 }
2421
2422
2423 /* Compute the global information needed by the loop invariant motion pass. */
2424
2425 static void
2426 tree_ssa_lim_initialize (void)
2427 {
2428 unsigned i;
2429
2430 bitmap_obstack_initialize (&lim_bitmap_obstack);
2431 lim_aux_data_map = pointer_map_create ();
2432
2433 if (flag_tm)
2434 compute_transaction_bits ();
2435
2436 alloc_aux_for_edges (0);
2437
2438 memory_accesses.refs.create (100);
2439 memory_accesses.refs_list.create (100);
2440 /* Allocate a special, unanalyzable mem-ref with ID zero. */
2441 memory_accesses.refs_list.quick_push
2442 (mem_ref_alloc (error_mark_node, 0, UNANALYZABLE_MEM_ID));
2443
2444 memory_accesses.refs_in_loop.create (number_of_loops (cfun));
2445 memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
2446 memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
2447 memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2448 memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
2449 memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2450
2451 for (i = 0; i < number_of_loops (cfun); i++)
2452 {
2453 bitmap_initialize (&memory_accesses.refs_in_loop[i],
2454 &lim_bitmap_obstack);
2455 bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
2456 &lim_bitmap_obstack);
2457 bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
2458 &lim_bitmap_obstack);
2459 }
2460
2461 memory_accesses.ttae_cache = NULL;
2462 }
2463
2464 /* Cleans up after the invariant motion pass. */
2465
2466 static void
2467 tree_ssa_lim_finalize (void)
2468 {
2469 basic_block bb;
2470 unsigned i;
2471 mem_ref_p ref;
2472
2473 free_aux_for_edges ();
2474
2475 FOR_EACH_BB (bb)
2476 SET_ALWAYS_EXECUTED_IN (bb, NULL);
2477
2478 bitmap_obstack_release (&lim_bitmap_obstack);
2479 pointer_map_destroy (lim_aux_data_map);
2480
2481 memory_accesses.refs.dispose ();
2482
2483 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
2484 memref_free (ref);
2485 memory_accesses.refs_list.release ();
2486
2487 memory_accesses.refs_in_loop.release ();
2488 memory_accesses.refs_stored_in_loop.release ();
2489 memory_accesses.all_refs_stored_in_loop.release ();
2490
2491 if (memory_accesses.ttae_cache)
2492 free_affine_expand_cache (&memory_accesses.ttae_cache);
2493 }
2494
2495 /* Moves invariants from loops. Only "expensive" invariants are moved out --
2496 i.e. those that are likely to be a win regardless of register pressure. */
2497
2498 unsigned int
2499 tree_ssa_lim (void)
2500 {
2501 unsigned int todo;
2502
2503 tree_ssa_lim_initialize ();
2504
2505 /* Gathers information about memory accesses in the loops. */
2506 analyze_memory_references ();
2507
2508 /* Fills ALWAYS_EXECUTED_IN information for basic blocks. */
2509 fill_always_executed_in ();
2510
2511 /* For each statement determine the outermost loop in which it is
2512 invariant and the cost of computing the invariant. */
2513 invariantness_dom_walker (CDI_DOMINATORS)
2514 .walk (cfun->cfg->x_entry_block_ptr);
2515
2516 /* Execute store motion. Force the necessary invariants to be moved
2517 out of the loops as well. */
2518 store_motion ();
2519
2520 /* Move the expressions that are expensive enough. */
2521 todo = move_computations ();
2522
2523 tree_ssa_lim_finalize ();
2524
2525 return todo;
2526 }
2527
2528 /* Loop invariant motion pass. */
2529
2530 static unsigned int
2531 tree_ssa_loop_im (void)
2532 {
2533 if (number_of_loops (cfun) <= 1)
2534 return 0;
2535
2536 return tree_ssa_lim ();
2537 }
2538
2539 static bool
2540 gate_tree_ssa_loop_im (void)
2541 {
2542 return flag_tree_loop_im != 0;
2543 }
2544
2545 namespace {
2546
2547 const pass_data pass_data_lim =
2548 {
2549 GIMPLE_PASS, /* type */
2550 "lim", /* name */
2551 OPTGROUP_LOOP, /* optinfo_flags */
2552 true, /* has_gate */
2553 true, /* has_execute */
2554 TV_LIM, /* tv_id */
2555 PROP_cfg, /* properties_required */
2556 0, /* properties_provided */
2557 0, /* properties_destroyed */
2558 0, /* todo_flags_start */
2559 0, /* todo_flags_finish */
2560 };
2561
2562 class pass_lim : public gimple_opt_pass
2563 {
2564 public:
2565 pass_lim (gcc::context *ctxt)
2566 : gimple_opt_pass (pass_data_lim, ctxt)
2567 {}
2568
2569 /* opt_pass methods: */
2570 opt_pass * clone () { return new pass_lim (m_ctxt); }
2571 bool gate () { return gate_tree_ssa_loop_im (); }
2572 unsigned int execute () { return tree_ssa_loop_im (); }
2573
2574 }; // class pass_lim
2575
2576 } // anon namespace
2577
2578 gimple_opt_pass *
2579 make_pass_lim (gcc::context *ctxt)
2580 {
2581 return new pass_lim (ctxt);
2582 }
2583
2584