gcc/tree-ssa-loop-im.c
1 /* Loop invariant motion.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "tree.h"
25 #include "gimple.h"
26 #include "hard-reg-set.h"
27 #include "ssa.h"
28 #include "alias.h"
29 #include "fold-const.h"
30 #include "tm_p.h"
31 #include "cfganal.h"
32 #include "gimple-pretty-print.h"
33 #include "internal-fn.h"
34 #include "tree-eh.h"
35 #include "gimplify.h"
36 #include "gimple-iterator.h"
37 #include "tree-cfg.h"
38 #include "tree-ssa-loop-manip.h"
39 #include "tree-ssa-loop.h"
40 #include "tree-into-ssa.h"
41 #include "cfgloop.h"
42 #include "domwalk.h"
43 #include "params.h"
44 #include "tree-pass.h"
45 #include "flags.h"
46 #include "tree-affine.h"
47 #include "tree-ssa-propagate.h"
48 #include "trans-mem.h"
49 #include "gimple-fold.h"
50
51 /* TODO: Support for predicated code motion. I.e.
52
53 while (1)
54 {
55 if (cond)
56 {
57 a = inv;
58 something;
59 }
60 }
61
62 Where COND and INV are invariants, but evaluating INV may trap or be
63 invalid for some other reason if !COND. This may be transformed to
64
65 if (cond)
66 a = inv;
67 while (1)
68 {
69 if (cond)
70 something;
71 } */
72
73 /* The auxiliary data kept for each statement. */
74
75 struct lim_aux_data
76 {
77 struct loop *max_loop; /* The outermost loop in which the statement
78 is invariant. */
79
80 struct loop *tgt_loop; /* The loop out of which we want to move the
81 invariant. */
82
83 struct loop *always_executed_in;
84 /* The outermost loop for which we are sure
85 the statement is executed if the loop
86 is entered. */
87
88 unsigned cost; /* Cost of the computation performed by the
89 statement. */
90
91 vec<gimple> depends; /* Vector of statements that must also be
92 hoisted out of the loop when this statement
93 is hoisted; i.e. those that define the
94 operands of the statement and are inside
95 the MAX_LOOP loop. */
96 };
97
98 /* Maps statements to their lim_aux_data. */
99
100 static hash_map<gimple, lim_aux_data *> *lim_aux_data_map;
101
102 /* Description of a memory reference location. */
103
104 typedef struct mem_ref_loc
105 {
106 tree *ref; /* The reference itself. */
107 gimple stmt; /* The statement in which it occurs. */
108 } *mem_ref_loc_p;
109
110
111 /* Description of a memory reference. */
112
113 typedef struct im_mem_ref
114 {
115 unsigned id; /* ID assigned to the memory reference
116 (its index in memory_accesses.refs_list) */
117 hashval_t hash; /* Its hash value. */
118
119 /* The memory access itself and associated caching of alias-oracle
120 query meta-data. */
121 ao_ref mem;
122
123 bitmap stored; /* The set of loops in which this memory location
124 is stored to. */
125 vec<mem_ref_loc> accesses_in_loop;
126 /* The locations of the accesses, sorted by the
127 postorder number of their loop father. */
128
129 /* The following sets are computed on demand. We keep both set and
130 its complement, so that we know whether the information was
131 already computed or not. */
132 bitmap_head indep_loop; /* The set of loops in which the memory
133 reference is independent, meaning:
134 If it is stored in the loop, this store
135 is independent of all other loads and
136 stores.
137 If it is only loaded, then it is independent
138 of all stores in the loop. */
139 bitmap_head dep_loop; /* The complement of INDEP_LOOP. */
140 } *mem_ref_p;
141
142 /* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
143 to record (in)dependence against stores in the loop and its subloops, the
144 second to record (in)dependence against all references in the loop
145 and its subloops. */
146 #define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
147
148 /* Mem_ref hashtable helpers. */
149
150 struct mem_ref_hasher : nofree_ptr_hash <im_mem_ref>
151 {
152 typedef tree_node *compare_type;
153 static inline hashval_t hash (const im_mem_ref *);
154 static inline bool equal (const im_mem_ref *, const tree_node *);
155 };
156
157 /* A hash function for struct im_mem_ref object OBJ. */
158
159 inline hashval_t
160 mem_ref_hasher::hash (const im_mem_ref *mem)
161 {
162 return mem->hash;
163 }
164
165 /* An equality function for struct im_mem_ref object MEM1 with
166 memory reference OBJ2. */
167
168 inline bool
169 mem_ref_hasher::equal (const im_mem_ref *mem1, const tree_node *obj2)
170 {
171 return operand_equal_p (mem1->mem.ref, (const_tree) obj2, 0);
172 }
173
174
175 /* Description of memory accesses in loops. */
176
177 static struct
178 {
179 /* The hash table of memory references accessed in loops. */
180 hash_table<mem_ref_hasher> *refs;
181
182 /* The list of memory references. */
183 vec<mem_ref_p> refs_list;
184
185 /* The set of memory references accessed in each loop. */
186 vec<bitmap_head> refs_in_loop;
187
188 /* The set of memory references stored in each loop. */
189 vec<bitmap_head> refs_stored_in_loop;
190
191 /* The set of memory references stored in each loop, including subloops. */
192 vec<bitmap_head> all_refs_stored_in_loop;
193
194 /* Cache for expanding memory addresses. */
195 hash_map<tree, name_expansion *> *ttae_cache;
196 } memory_accesses;
197
198 /* Obstack for the bitmaps in the above data structures. */
199 static bitmap_obstack lim_bitmap_obstack;
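/* Obstack from which the im_mem_ref structures themselves are allocated
   (see mem_ref_alloc below). */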
200 static obstack mem_ref_obstack;
201
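/* Forward declaration of the dependence test used by outermost_indep_loop
   below; the function itself is defined later in this file. */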
202 static bool ref_indep_loop_p (struct loop *, mem_ref_p);
203
204 /* Minimum cost of an expensive expression. */
205 #define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
206
207 /* The outermost loop for which execution of the header guarantees that the
208 block will be executed. */
209 #define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
210 #define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
211
212 /* ID of the shared unanalyzable mem. */
213 #define UNANALYZABLE_MEM_ID 0
214
215 /* Whether the reference was analyzable. */
216 #define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
217
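/* Allocates zero-initialized lim_aux_data for statement STMT, records it in
   LIM_AUX_DATA_MAP and returns it. */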
218 static struct lim_aux_data *
219 init_lim_data (gimple stmt)
220 {
221 lim_aux_data *p = XCNEW (struct lim_aux_data);
222 lim_aux_data_map->put (stmt, p);
223
224 return p;
225 }
226
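/* Returns the lim_aux_data previously recorded for STMT, or NULL if there
   is none. */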
227 static struct lim_aux_data *
228 get_lim_data (gimple stmt)
229 {
230 lim_aux_data **p = lim_aux_data_map->get (stmt);
231 if (!p)
232 return NULL;
233
234 return *p;
235 }
236
237 /* Releases the memory occupied by DATA. */
238
239 static void
240 free_lim_aux_data (struct lim_aux_data *data)
241 {
242 data->depends.release ();
243 free (data);
244 }
245
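/* Releases the lim_aux_data recorded for STMT, if any, and clears the
   corresponding map entry. */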
246 static void
247 clear_lim_data (gimple stmt)
248 {
249 lim_aux_data **p = lim_aux_data_map->get (stmt);
250 if (!p)
251 return;
252
253 free_lim_aux_data (*p);
254 *p = NULL;
255 }
256
257
258 /* The possibilities of statement movement. */
259 enum move_pos
260 {
261 MOVE_IMPOSSIBLE, /* No movement -- side effect expression. */
262 MOVE_PRESERVE_EXECUTION, /* Must not cause a non-executed statement
263 to become executed -- memory accesses, ... */
264 MOVE_POSSIBLE /* Unlimited movement. */
265 };
266
267
268 /* If it is possible to hoist the statement STMT unconditionally,
269 returns MOVE_POSSIBLE.
270 If it is possible to hoist the statement STMT, but we must avoid making
271 it executed if it would not be executed in the original program (e.g.
272 because it may trap), return MOVE_PRESERVE_EXECUTION.
273 Otherwise return MOVE_IMPOSSIBLE. */
274
275 enum move_pos
276 movement_possibility (gimple stmt)
277 {
278 tree lhs;
279 enum move_pos ret = MOVE_POSSIBLE;
280
281 if (flag_unswitch_loops
282 && gimple_code (stmt) == GIMPLE_COND)
283 {
284 /* If we perform unswitching, force the operands of the invariant
285 condition to be moved out of the loop. */
286 return MOVE_POSSIBLE;
287 }
288
289 if (gimple_code (stmt) == GIMPLE_PHI
290 && gimple_phi_num_args (stmt) <= 2
291 && !virtual_operand_p (gimple_phi_result (stmt))
292 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
293 return MOVE_POSSIBLE;
294
295 if (gimple_get_lhs (stmt) == NULL_TREE)
296 return MOVE_IMPOSSIBLE;
297
298 if (gimple_vdef (stmt))
299 return MOVE_IMPOSSIBLE;
300
301 if (stmt_ends_bb_p (stmt)
302 || gimple_has_volatile_ops (stmt)
303 || gimple_has_side_effects (stmt)
304 || stmt_could_throw_p (stmt))
305 return MOVE_IMPOSSIBLE;
306
307 if (is_gimple_call (stmt))
308 {
309 /* While a pure or const call is guaranteed to have no side effects, we
310 cannot move it arbitrarily. Consider code like
311
312 char *s = something ();
313
314 while (1)
315 {
316 if (s)
317 t = strlen (s);
318 else
319 t = 0;
320 }
321
322 Here the strlen call cannot be moved out of the loop, even though
323 s is invariant. In addition to possibly creating a call with
324 invalid arguments, moving out a function call that is not executed
325 may cause performance regressions in case the call is costly and
326 not executed at all. */
327 ret = MOVE_PRESERVE_EXECUTION;
328 lhs = gimple_call_lhs (stmt);
329 }
330 else if (is_gimple_assign (stmt))
331 lhs = gimple_assign_lhs (stmt);
332 else
333 return MOVE_IMPOSSIBLE;
334
335 if (TREE_CODE (lhs) == SSA_NAME
336 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
337 return MOVE_IMPOSSIBLE;
338
339 if (TREE_CODE (lhs) != SSA_NAME
340 || gimple_could_trap_p (stmt))
341 return MOVE_PRESERVE_EXECUTION;
342
343 /* Non-local loads in a transaction cannot be hoisted out. Well,
344 unless the load happens on every path out of the loop, but we
345 don't take this into account yet. */
346 if (flag_tm
347 && gimple_in_transaction (stmt)
348 && gimple_assign_single_p (stmt))
349 {
350 tree rhs = gimple_assign_rhs1 (stmt);
351 if (DECL_P (rhs) && is_global_var (rhs))
352 {
353 if (dump_file)
354 {
355 fprintf (dump_file, "Cannot hoist conditional load of ");
356 print_generic_expr (dump_file, rhs, TDF_SLIM);
357 fprintf (dump_file, " because it is in a transaction.\n");
358 }
359 return MOVE_IMPOSSIBLE;
360 }
361 }
362
363 return ret;
364 }
365
366 /* Suppose that operand DEF is used inside LOOP. Returns the outermost
367 loop to which we could move the expression using DEF if it did not have
368 other operands, i.e. the outermost loop enclosing LOOP in which the value
369 of DEF is invariant. */
370
371 static struct loop *
372 outermost_invariant_loop (tree def, struct loop *loop)
373 {
374 gimple def_stmt;
375 basic_block def_bb;
376 struct loop *max_loop;
377 struct lim_aux_data *lim_data;
378
379 if (!def)
380 return superloop_at_depth (loop, 1);
381
382 if (TREE_CODE (def) != SSA_NAME)
383 {
384 gcc_assert (is_gimple_min_invariant (def));
385 return superloop_at_depth (loop, 1);
386 }
387
388 def_stmt = SSA_NAME_DEF_STMT (def);
389 def_bb = gimple_bb (def_stmt);
390 if (!def_bb)
391 return superloop_at_depth (loop, 1);
392
393 max_loop = find_common_loop (loop, def_bb->loop_father);
394
395 lim_data = get_lim_data (def_stmt);
396 if (lim_data != NULL && lim_data->max_loop != NULL)
397 max_loop = find_common_loop (max_loop,
398 loop_outer (lim_data->max_loop));
399 if (max_loop == loop)
400 return NULL;
401 max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);
402
403 return max_loop;
404 }
405
406 /* DATA is a structure containing information associated with a statement
407 inside LOOP. DEF is one of the operands of this statement.
408
409 Find the outermost loop enclosing LOOP in which the value of DEF is invariant
410 and record this in the DATA->max_loop field. If DEF itself is defined inside
411 this loop as well (i.e. we need to hoist it out of the loop if we want
412 to hoist the statement represented by DATA), record the statement in which
413 DEF is defined in the DATA->depends list. Additionally, if ADD_COST is true,
414 add the cost of the computation of DEF to DATA->cost.
415
416 If DEF is not invariant in LOOP, return false. Otherwise return true. */
417
418 static bool
419 add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
420 bool add_cost)
421 {
422 gimple def_stmt = SSA_NAME_DEF_STMT (def);
423 basic_block def_bb = gimple_bb (def_stmt);
424 struct loop *max_loop;
425 struct lim_aux_data *def_data;
426
427 if (!def_bb)
428 return true;
429
430 max_loop = outermost_invariant_loop (def, loop);
431 if (!max_loop)
432 return false;
433
434 if (flow_loop_nested_p (data->max_loop, max_loop))
435 data->max_loop = max_loop;
436
437 def_data = get_lim_data (def_stmt);
438 if (!def_data)
439 return true;
440
441 if (add_cost
442 /* Only add the cost if the statement defining DEF is inside LOOP,
443 i.e. if it is likely that by moving the invariants dependent
444 on it, we will be able to avoid creating a new register for
445 it (since it will be only used in these dependent invariants). */
446 && def_bb->loop_father == loop)
447 data->cost += def_data->cost;
448
449 data->depends.safe_push (def_stmt);
450
451 return true;
452 }
453
454 /* Returns an estimate of the cost of statement STMT. The values here
455 are just ad-hoc constants, similar to costs for inlining. */
456
457 static unsigned
458 stmt_cost (gimple stmt)
459 {
460 /* Always try to create possibilities for unswitching. */
461 if (gimple_code (stmt) == GIMPLE_COND
462 || gimple_code (stmt) == GIMPLE_PHI)
463 return LIM_EXPENSIVE;
464
465 /* We should be hoisting calls if possible. */
466 if (is_gimple_call (stmt))
467 {
468 tree fndecl;
469
470 /* Unless the call is a builtin_constant_p; this always folds to a
471 constant, so moving it is useless. */
472 fndecl = gimple_call_fndecl (stmt);
473 if (fndecl
474 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
475 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
476 return 0;
477
478 return LIM_EXPENSIVE;
479 }
480
481 /* Hoisting memory references out should almost surely be a win. */
482 if (gimple_references_memory_p (stmt))
483 return LIM_EXPENSIVE;
484
485 if (gimple_code (stmt) != GIMPLE_ASSIGN)
486 return 1;
487
488 switch (gimple_assign_rhs_code (stmt))
489 {
490 case MULT_EXPR:
491 case WIDEN_MULT_EXPR:
492 case WIDEN_MULT_PLUS_EXPR:
493 case WIDEN_MULT_MINUS_EXPR:
494 case DOT_PROD_EXPR:
495 case FMA_EXPR:
496 case TRUNC_DIV_EXPR:
497 case CEIL_DIV_EXPR:
498 case FLOOR_DIV_EXPR:
499 case ROUND_DIV_EXPR:
500 case EXACT_DIV_EXPR:
501 case CEIL_MOD_EXPR:
502 case FLOOR_MOD_EXPR:
503 case ROUND_MOD_EXPR:
504 case TRUNC_MOD_EXPR:
505 case RDIV_EXPR:
506 /* Division and multiplication are usually expensive. */
507 return LIM_EXPENSIVE;
508
509 case LSHIFT_EXPR:
510 case RSHIFT_EXPR:
511 case WIDEN_LSHIFT_EXPR:
512 case LROTATE_EXPR:
513 case RROTATE_EXPR:
514 /* Shifts and rotates are usually expensive. */
515 return LIM_EXPENSIVE;
516
517 case CONSTRUCTOR:
518 /* Make vector construction cost proportional to the number
519 of elements. */
520 return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));
521
522 case SSA_NAME:
523 case PAREN_EXPR:
524 /* Whether or not something is wrapped inside a PAREN_EXPR
525 should not change move cost. Nor should an intermediate
526 unpropagated SSA name copy. */
527 return 0;
528
529 default:
530 return 1;
531 }
532 }
533
534 /* Finds the outermost loop between OUTER and LOOP in which the memory reference
535 REF is independent. If REF is not independent in LOOP, NULL is returned
536 instead. */
537
538 static struct loop *
539 outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref)
540 {
541 struct loop *aloop;
542
543 if (ref->stored && bitmap_bit_p (ref->stored, loop->num))
544 return NULL;
545
546 for (aloop = outer;
547 aloop != loop;
548 aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
549 if ((!ref->stored || !bitmap_bit_p (ref->stored, aloop->num))
550 && ref_indep_loop_p (aloop, ref))
551 return aloop;
552
553 if (ref_indep_loop_p (loop, ref))
554 return loop;
555 else
556 return NULL;
557 }
558
559 /* If there is a simple load or store to a memory reference in STMT, returns
560 the location of the memory reference, and sets IS_STORE according to whether
561 it is a store or load. Otherwise, returns NULL. */
562
563 static tree *
564 simple_mem_ref_in_stmt (gimple stmt, bool *is_store)
565 {
566 tree *lhs, *rhs;
567
568 /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns. */
569 if (!gimple_assign_single_p (stmt))
570 return NULL;
571
572 lhs = gimple_assign_lhs_ptr (stmt);
573 rhs = gimple_assign_rhs1_ptr (stmt);
574
575 if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
576 {
577 *is_store = false;
578 return rhs;
579 }
580 else if (gimple_vdef (stmt)
581 && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
582 {
583 *is_store = true;
584 return lhs;
585 }
586 else
587 return NULL;
588 }
589
590 /* Returns the memory reference contained in STMT. */
591
592 static mem_ref_p
593 mem_ref_in_stmt (gimple stmt)
594 {
595 bool store;
596 tree *mem = simple_mem_ref_in_stmt (stmt, &store);
597 hashval_t hash;
598 mem_ref_p ref;
599
600 if (!mem)
601 return NULL;
602 gcc_assert (!store);
603
604 hash = iterative_hash_expr (*mem, 0);
605 ref = memory_accesses.refs->find_with_hash (*mem, hash);
606
607 gcc_assert (ref != NULL);
608 return ref;
609 }
610
611 /* From a controlling predicate in DOM determine the arguments from
612 the PHI node PHI that are chosen if the predicate evaluates to
613 true or false, and store them in *TRUE_ARG_P and *FALSE_ARG_P if
614 they are non-NULL. Returns true if the arguments can be determined,
615 false otherwise. */
616
617 static bool
618 extract_true_false_args_from_phi (basic_block dom, gphi *phi,
619 tree *true_arg_p, tree *false_arg_p)
620 {
621 basic_block bb = gimple_bb (phi);
622 edge true_edge, false_edge, tem;
623 tree arg0 = NULL_TREE, arg1 = NULL_TREE;
624
625 /* We have to verify that one edge into the PHI node is dominated
626 by the true edge of the predicate block and the other edge
627 dominated by the false edge. This ensures that the PHI argument
628 we are going to take is completely determined by the path we
629 take from the predicate block.
630 We can only use BB dominance checks below if the destination of
631 the true/false edges are dominated by their edge, thus only
632 have a single predecessor. */
633 extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
634 tem = EDGE_PRED (bb, 0);
635 if (tem == true_edge
636 || (single_pred_p (true_edge->dest)
637 && (tem->src == true_edge->dest
638 || dominated_by_p (CDI_DOMINATORS,
639 tem->src, true_edge->dest))))
640 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
641 else if (tem == false_edge
642 || (single_pred_p (false_edge->dest)
643 && (tem->src == false_edge->dest
644 || dominated_by_p (CDI_DOMINATORS,
645 tem->src, false_edge->dest))))
646 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
647 else
648 return false;
649 tem = EDGE_PRED (bb, 1);
650 if (tem == true_edge
651 || (single_pred_p (true_edge->dest)
652 && (tem->src == true_edge->dest
653 || dominated_by_p (CDI_DOMINATORS,
654 tem->src, true_edge->dest))))
655 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
656 else if (tem == false_edge
657 || (single_pred_p (false_edge->dest)
658 && (tem->src == false_edge->dest
659 || dominated_by_p (CDI_DOMINATORS,
660 tem->src, false_edge->dest))))
661 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
662 else
663 return false;
664 if (!arg0 || !arg1)
665 return false;
666
667 if (true_arg_p)
668 *true_arg_p = arg0;
669 if (false_arg_p)
670 *false_arg_p = arg1;
671
672 return true;
673 }
674
675 /* Determine the outermost loop to which it is possible to hoist statement
676 STMT and store it in LIM_DATA (STMT)->max_loop. To do this we determine
677 the outermost loop in which the value computed by STMT is invariant.
678 If MUST_PRESERVE_EXEC is true, additionally choose a loop such that
679 we preserve whether STMT is executed. The function also fills in other
680 related information in LIM_DATA (STMT).
681
682 The function returns false if STMT cannot be hoisted outside of the loop it
683 is defined in, and true otherwise. */
684
685 static bool
686 determine_max_movement (gimple stmt, bool must_preserve_exec)
687 {
688 basic_block bb = gimple_bb (stmt);
689 struct loop *loop = bb->loop_father;
690 struct loop *level;
691 struct lim_aux_data *lim_data = get_lim_data (stmt);
692 tree val;
693 ssa_op_iter iter;
694
695 if (must_preserve_exec)
696 level = ALWAYS_EXECUTED_IN (bb);
697 else
698 level = superloop_at_depth (loop, 1);
699 lim_data->max_loop = level;
700
701 if (gphi *phi = dyn_cast <gphi *> (stmt))
702 {
703 use_operand_p use_p;
704 unsigned min_cost = UINT_MAX;
705 unsigned total_cost = 0;
706 struct lim_aux_data *def_data;
707
708 /* We will end up promoting dependencies to be unconditionally
709 evaluated. For this reason the PHI cost (and thus the
710 cost we remove from the loop by doing the invariant motion)
711 is that of the cheapest PHI argument dependency chain. */
712 FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
713 {
714 val = USE_FROM_PTR (use_p);
715
716 if (TREE_CODE (val) != SSA_NAME)
717 {
718 /* Assign cost 1 to constants. */
719 min_cost = MIN (min_cost, 1);
720 total_cost += 1;
721 continue;
722 }
723 if (!add_dependency (val, lim_data, loop, false))
724 return false;
725
726 gimple def_stmt = SSA_NAME_DEF_STMT (val);
727 if (gimple_bb (def_stmt)
728 && gimple_bb (def_stmt)->loop_father == loop)
729 {
730 def_data = get_lim_data (def_stmt);
731 if (def_data)
732 {
733 min_cost = MIN (min_cost, def_data->cost);
734 total_cost += def_data->cost;
735 }
736 }
737 }
738
739 min_cost = MIN (min_cost, total_cost);
740 lim_data->cost += min_cost;
741
742 if (gimple_phi_num_args (phi) > 1)
743 {
744 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
745 gimple cond;
746 if (gsi_end_p (gsi_last_bb (dom)))
747 return false;
748 cond = gsi_stmt (gsi_last_bb (dom));
749 if (gimple_code (cond) != GIMPLE_COND)
750 return false;
751 /* Verify that this is an extended form of a diamond and
752 the PHI arguments are completely controlled by the
753 predicate in DOM. */
754 if (!extract_true_false_args_from_phi (dom, phi, NULL, NULL))
755 return false;
756
757 /* Fold in dependencies and cost of the condition. */
758 FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
759 {
760 if (!add_dependency (val, lim_data, loop, false))
761 return false;
762 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
763 if (def_data)
764 total_cost += def_data->cost;
765 }
766
767 /* We want to avoid unconditionally executing very expensive
768 operations. As costs for our dependencies cannot be
769 negative, just claim we are not invariant in this case.
770 We are also not sure whether the control flow inside the
771 loop will vanish. */
772 if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
773 && !(min_cost != 0
774 && total_cost / min_cost <= 2))
775 return false;
776
777 /* Assume that the control-flow in the loop will vanish.
778 ??? We should verify this and not artificially increase
779 the cost if that is not the case. */
780 lim_data->cost += stmt_cost (stmt);
781 }
782
783 return true;
784 }
785 else
786 FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
787 if (!add_dependency (val, lim_data, loop, true))
788 return false;
789
790 if (gimple_vuse (stmt))
791 {
792 mem_ref_p ref = mem_ref_in_stmt (stmt);
793
794 if (ref)
795 {
796 lim_data->max_loop
797 = outermost_indep_loop (lim_data->max_loop, loop, ref);
798 if (!lim_data->max_loop)
799 return false;
800 }
801 else
802 {
803 if ((val = gimple_vuse (stmt)) != NULL_TREE)
804 {
805 if (!add_dependency (val, lim_data, loop, false))
806 return false;
807 }
808 }
809 }
810
811 lim_data->cost += stmt_cost (stmt);
812
813 return true;
814 }
815
816 /* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
817 and that one of the operands of this statement is computed by STMT.
818 Ensure that STMT (together with all the statements that define its
819 operands) is hoisted at least out of the loop LEVEL. */
820
821 static void
822 set_level (gimple stmt, struct loop *orig_loop, struct loop *level)
823 {
824 struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
825 struct lim_aux_data *lim_data;
826 gimple dep_stmt;
827 unsigned i;
828
829 stmt_loop = find_common_loop (orig_loop, stmt_loop);
830 lim_data = get_lim_data (stmt);
831 if (lim_data != NULL && lim_data->tgt_loop != NULL)
832 stmt_loop = find_common_loop (stmt_loop,
833 loop_outer (lim_data->tgt_loop));
834 if (flow_loop_nested_p (stmt_loop, level))
835 return;
836
837 gcc_assert (level == lim_data->max_loop
838 || flow_loop_nested_p (lim_data->max_loop, level));
839
840 lim_data->tgt_loop = level;
841 FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
842 set_level (dep_stmt, orig_loop, level);
843 }
844
845 /* Determines the outermost loop out of which we want to hoist the statement STMT.
846 For now we choose the outermost possible loop. TODO -- use profiling
847 information to set it more sanely. */
848
849 static void
850 set_profitable_level (gimple stmt)
851 {
852 set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
853 }
854
855 /* Returns true if STMT is a call that has side effects. */
856
857 static bool
858 nonpure_call_p (gimple stmt)
859 {
860 if (gimple_code (stmt) != GIMPLE_CALL)
861 return false;
862
863 return gimple_has_side_effects (stmt);
864 }
865
866 /* Rewrite a/b to a*(1/b). Return the invariant stmt to process. */
867
868 static gimple
869 rewrite_reciprocal (gimple_stmt_iterator *bsi)
870 {
871 gassign *stmt, *stmt1, *stmt2;
872 tree name, lhs, type;
873 tree real_one;
874 gimple_stmt_iterator gsi;
875
876 stmt = as_a <gassign *> (gsi_stmt (*bsi));
877 lhs = gimple_assign_lhs (stmt);
878 type = TREE_TYPE (lhs);
879
880 real_one = build_one_cst (type);
881
882 name = make_temp_ssa_name (type, NULL, "reciptmp");
883 stmt1 = gimple_build_assign (name, RDIV_EXPR, real_one,
884 gimple_assign_rhs2 (stmt));
885 stmt2 = gimple_build_assign (lhs, MULT_EXPR, name,
886 gimple_assign_rhs1 (stmt));
887
888 /* Replace division stmt with reciprocal and multiply stmts.
889 The multiply stmt is not invariant, so update iterator
890 and avoid rescanning. */
891 gsi = *bsi;
892 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
893 gsi_replace (&gsi, stmt2, true);
894
895 /* Continue processing with invariant reciprocal statement. */
896 return stmt1;
897 }
898
899 /* Check if the pattern at *BSI is a bittest of the form
900 (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0. */
901
902 static gimple
903 rewrite_bittest (gimple_stmt_iterator *bsi)
904 {
905 gassign *stmt;
906 gimple stmt1;
907 gassign *stmt2;
908 gimple use_stmt;
909 gcond *cond_stmt;
910 tree lhs, name, t, a, b;
911 use_operand_p use;
912
913 stmt = as_a <gassign *> (gsi_stmt (*bsi));
914 lhs = gimple_assign_lhs (stmt);
915
916 /* Verify that the single use of lhs is a comparison against zero. */
917 if (TREE_CODE (lhs) != SSA_NAME
918 || !single_imm_use (lhs, &use, &use_stmt))
919 return stmt;
920 cond_stmt = dyn_cast <gcond *> (use_stmt);
921 if (!cond_stmt)
922 return stmt;
923 if (gimple_cond_lhs (cond_stmt) != lhs
924 || (gimple_cond_code (cond_stmt) != NE_EXPR
925 && gimple_cond_code (cond_stmt) != EQ_EXPR)
926 || !integer_zerop (gimple_cond_rhs (cond_stmt)))
927 return stmt;
928
929 /* Get at the operands of the shift. The rhs is TMP1 & 1. */
930 stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
931 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
932 return stmt;
933
934 /* There is a conversion in between possibly inserted by fold. */
935 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
936 {
937 t = gimple_assign_rhs1 (stmt1);
938 if (TREE_CODE (t) != SSA_NAME
939 || !has_single_use (t))
940 return stmt;
941 stmt1 = SSA_NAME_DEF_STMT (t);
942 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
943 return stmt;
944 }
945
946 /* Verify that B is loop invariant but A is not. Verify that with
947 all the stmt walking we are still in the same loop. */
948 if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
949 || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
950 return stmt;
951
952 a = gimple_assign_rhs1 (stmt1);
953 b = gimple_assign_rhs2 (stmt1);
954
955 if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
956 && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
957 {
958 gimple_stmt_iterator rsi;
959
960 /* 1 << B */
961 t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
962 build_int_cst (TREE_TYPE (a), 1), b);
963 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
964 stmt1 = gimple_build_assign (name, t);
965
966 /* A & (1 << B) */
967 t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
968 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
969 stmt2 = gimple_build_assign (name, t);
970
971 /* Replace the SSA_NAME we compare against zero. Adjust
972 the type of zero accordingly. */
973 SET_USE (use, name);
974 gimple_cond_set_rhs (cond_stmt,
975 build_int_cst_type (TREE_TYPE (name),
976 0));
977
978 /* Don't use gsi_replace here, none of the new assignments sets
979 the variable originally set in stmt. Move bsi to stmt1, and
980 then remove the original stmt, so that we get a chance to
981 retain debug info for it. */
982 rsi = *bsi;
983 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
984 gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
985 gsi_remove (&rsi, true);
986
987 return stmt1;
988 }
989
990 return stmt;
991 }
992
993 /* For each statement determines the outermost loop in which it is invariant,
994 the statements on whose motion it depends, and the cost of the computation.
995 This information is stored in the LIM_DATA structure associated with
996 each statement. */
997 class invariantness_dom_walker : public dom_walker
998 {
999 public:
1000 invariantness_dom_walker (cdi_direction direction)
1001 : dom_walker (direction) {}
1002
1003 virtual void before_dom_children (basic_block);
1004 };
1005
1006 /* Determine the outermost loops in which the statements in basic block BB are
1007 invariant, and record them in the LIM_DATA associated with the statements.
1008 Callback for dom_walker. */
1009
1010 void
1011 invariantness_dom_walker::before_dom_children (basic_block bb)
1012 {
1013 enum move_pos pos;
1014 gimple_stmt_iterator bsi;
1015 gimple stmt;
1016 bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
1017 struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
1018 struct lim_aux_data *lim_data;
1019
1020 if (!loop_outer (bb->loop_father))
1021 return;
1022
1023 if (dump_file && (dump_flags & TDF_DETAILS))
1024 fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
1025 bb->index, bb->loop_father->num, loop_depth (bb->loop_father));
1026
1027 /* Look at PHI nodes, but only if there are at most two.
1028 ??? We could relax this further by post-processing the inserted
1029 code and transforming adjacent cond-exprs with the same predicate
1030 to control flow again. */
1031 bsi = gsi_start_phis (bb);
1032 if (!gsi_end_p (bsi)
1033 && ((gsi_next (&bsi), gsi_end_p (bsi))
1034 || (gsi_next (&bsi), gsi_end_p (bsi))))
1035 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1036 {
1037 stmt = gsi_stmt (bsi);
1038
1039 pos = movement_possibility (stmt);
1040 if (pos == MOVE_IMPOSSIBLE)
1041 continue;
1042
1043 lim_data = init_lim_data (stmt);
1044 lim_data->always_executed_in = outermost;
1045
1046 if (!determine_max_movement (stmt, false))
1047 {
1048 lim_data->max_loop = NULL;
1049 continue;
1050 }
1051
1052 if (dump_file && (dump_flags & TDF_DETAILS))
1053 {
1054 print_gimple_stmt (dump_file, stmt, 2, 0);
1055 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1056 loop_depth (lim_data->max_loop),
1057 lim_data->cost);
1058 }
1059
1060 if (lim_data->cost >= LIM_EXPENSIVE)
1061 set_profitable_level (stmt);
1062 }
1063
1064 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1065 {
1066 stmt = gsi_stmt (bsi);
1067
1068 pos = movement_possibility (stmt);
1069 if (pos == MOVE_IMPOSSIBLE)
1070 {
1071 if (nonpure_call_p (stmt))
1072 {
1073 maybe_never = true;
1074 outermost = NULL;
1075 }
1076 /* Make sure to note always_executed_in for stores to make
1077 store-motion work. */
1078 else if (stmt_makes_single_store (stmt))
1079 {
1080 struct lim_aux_data *lim_data = init_lim_data (stmt);
1081 lim_data->always_executed_in = outermost;
1082 }
1083 continue;
1084 }
1085
1086 if (is_gimple_assign (stmt)
1087 && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
1088 == GIMPLE_BINARY_RHS))
1089 {
1090 tree op0 = gimple_assign_rhs1 (stmt);
1091 tree op1 = gimple_assign_rhs2 (stmt);
1092 struct loop *ol1 = outermost_invariant_loop (op1,
1093 loop_containing_stmt (stmt));
1094
1095 /* If the divisor is invariant, convert a/b to a*(1/b), allowing the
1096 reciprocal to be hoisted out of the loop, saving an expensive divide. */
1097 if (pos == MOVE_POSSIBLE
1098 && gimple_assign_rhs_code (stmt) == RDIV_EXPR
1099 && flag_unsafe_math_optimizations
1100 && !flag_trapping_math
1101 && ol1 != NULL
1102 && outermost_invariant_loop (op0, ol1) == NULL)
1103 stmt = rewrite_reciprocal (&bsi);
1104
1105 /* If the shift count is invariant, convert (A >> B) & 1 to
1106 A & (1 << B), allowing the bit mask to be hoisted out of the loop,
1107 saving an expensive shift. */
1108 if (pos == MOVE_POSSIBLE
1109 && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
1110 && integer_onep (op1)
1111 && TREE_CODE (op0) == SSA_NAME
1112 && has_single_use (op0))
1113 stmt = rewrite_bittest (&bsi);
1114 }
1115
1116 lim_data = init_lim_data (stmt);
1117 lim_data->always_executed_in = outermost;
1118
1119 if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
1120 continue;
1121
1122 if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
1123 {
1124 lim_data->max_loop = NULL;
1125 continue;
1126 }
1127
1128 if (dump_file && (dump_flags & TDF_DETAILS))
1129 {
1130 print_gimple_stmt (dump_file, stmt, 2, 0);
1131 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1132 loop_depth (lim_data->max_loop),
1133 lim_data->cost);
1134 }
1135
1136 if (lim_data->cost >= LIM_EXPENSIVE)
1137 set_profitable_level (stmt);
1138 }
1139 }
1140
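/* Dominator-tree walker that performs the code motion decided on above;
   the TODO flags it accumulates in todo_ are returned by move_computations. */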
1141 class move_computations_dom_walker : public dom_walker
1142 {
1143 public:
1144 move_computations_dom_walker (cdi_direction direction)
1145 : dom_walker (direction), todo_ (0) {}
1146
1147 virtual void before_dom_children (basic_block);
1148
1149 unsigned int todo_;
1150 };
1151
1152 /* Hoist the statements in basic block BB out of the loops prescribed by
1153 data stored in LIM_DATA structures associated with each statement. Callback
1154 for walk_dominator_tree. */
1155
1156 void
1157 move_computations_dom_walker::before_dom_children (basic_block bb)
1158 {
1159 struct loop *level;
1160 unsigned cost = 0;
1161 struct lim_aux_data *lim_data;
1162
1163 if (!loop_outer (bb->loop_father))
1164 return;
1165
1166 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
1167 {
1168 gassign *new_stmt;
1169 gphi *stmt = bsi.phi ();
1170
1171 lim_data = get_lim_data (stmt);
1172 if (lim_data == NULL)
1173 {
1174 gsi_next (&bsi);
1175 continue;
1176 }
1177
1178 cost = lim_data->cost;
1179 level = lim_data->tgt_loop;
1180 clear_lim_data (stmt);
1181
1182 if (!level)
1183 {
1184 gsi_next (&bsi);
1185 continue;
1186 }
1187
1188 if (dump_file && (dump_flags & TDF_DETAILS))
1189 {
1190 fprintf (dump_file, "Moving PHI node\n");
1191 print_gimple_stmt (dump_file, stmt, 0, 0);
1192 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1193 cost, level->num);
1194 }
1195
1196 if (gimple_phi_num_args (stmt) == 1)
1197 {
1198 tree arg = PHI_ARG_DEF (stmt, 0);
1199 new_stmt = gimple_build_assign (gimple_phi_result (stmt),
1200 TREE_CODE (arg), arg);
1201 }
1202 else
1203 {
1204 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
1205 gimple cond = gsi_stmt (gsi_last_bb (dom));
1206 tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
1207 /* Get the PHI arguments corresponding to the true and false
1208 edges of COND. */
1209 extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
1210 gcc_assert (arg0 && arg1);
1211 t = build2 (gimple_cond_code (cond), boolean_type_node,
1212 gimple_cond_lhs (cond), gimple_cond_rhs (cond));
1213 new_stmt = gimple_build_assign (gimple_phi_result (stmt),
1214 COND_EXPR, t, arg0, arg1);
1215 todo_ |= TODO_cleanup_cfg;
1216 }
1217 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (new_stmt)))
1218 && (!ALWAYS_EXECUTED_IN (bb)
1219 || (ALWAYS_EXECUTED_IN (bb) != level
1220 && !flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1221 {
1222 tree lhs = gimple_assign_lhs (new_stmt);
1223 SSA_NAME_RANGE_INFO (lhs) = NULL;
1224 SSA_NAME_ANTI_RANGE_P (lhs) = 0;
1225 }
1226 gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
1227 remove_phi_node (&bsi, false);
1228 }
1229
1230 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
1231 {
1232 edge e;
1233
1234 gimple stmt = gsi_stmt (bsi);
1235
1236 lim_data = get_lim_data (stmt);
1237 if (lim_data == NULL)
1238 {
1239 gsi_next (&bsi);
1240 continue;
1241 }
1242
1243 cost = lim_data->cost;
1244 level = lim_data->tgt_loop;
1245 clear_lim_data (stmt);
1246
1247 if (!level)
1248 {
1249 gsi_next (&bsi);
1250 continue;
1251 }
1252
1253 /* We do not really want to move conditionals out of the loop; they were just
1254 placed here to force their operands to be moved if necessary. */
1255 if (gimple_code (stmt) == GIMPLE_COND)
1256 continue;
1257
1258 if (dump_file && (dump_flags & TDF_DETAILS))
1259 {
1260 fprintf (dump_file, "Moving statement\n");
1261 print_gimple_stmt (dump_file, stmt, 0, 0);
1262 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1263 cost, level->num);
1264 }
1265
1266 e = loop_preheader_edge (level);
1267 gcc_assert (!gimple_vdef (stmt));
1268 if (gimple_vuse (stmt))
1269 {
1270 /* The new VUSE is the one from the virtual PHI in the loop
1271 header or the one already present. */
1272 gphi_iterator gsi2;
1273 for (gsi2 = gsi_start_phis (e->dest);
1274 !gsi_end_p (gsi2); gsi_next (&gsi2))
1275 {
1276 gphi *phi = gsi2.phi ();
1277 if (virtual_operand_p (gimple_phi_result (phi)))
1278 {
1279 gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
1280 break;
1281 }
1282 }
1283 }
1284 gsi_remove (&bsi, false);
1285 if (gimple_has_lhs (stmt)
1286 && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME
1287 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_get_lhs (stmt)))
1288 && (!ALWAYS_EXECUTED_IN (bb)
1289 || !(ALWAYS_EXECUTED_IN (bb) == level
1290 || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1291 {
1292 tree lhs = gimple_get_lhs (stmt);
1293 SSA_NAME_RANGE_INFO (lhs) = NULL;
1294 SSA_NAME_ANTI_RANGE_P (lhs) = 0;
1295 }
1296 /* In case this is a stmt that is not unconditionally executed
1297 when the target loop header is executed and the stmt may
1298 invoke undefined integer or pointer overflow, rewrite it to
1299 unsigned arithmetic. */
1300 if (is_gimple_assign (stmt)
1301 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
1302 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
1303 && arith_code_with_undefined_signed_overflow
1304 (gimple_assign_rhs_code (stmt))
1305 && (!ALWAYS_EXECUTED_IN (bb)
1306 || !(ALWAYS_EXECUTED_IN (bb) == level
1307 || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1308 gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
1309 else
1310 gsi_insert_on_edge (e, stmt);
1311 }
1312 }
1313
1314 /* Hoist the statements out of the loops prescribed by data stored in
1315 LIM_DATA structures associated with each statement. */
1316
1317 static unsigned int
1318 move_computations (void)
1319 {
1320 move_computations_dom_walker walker (CDI_DOMINATORS);
1321 walker.walk (cfun->cfg->x_entry_block_ptr);
1322
1323 gsi_commit_edge_inserts ();
1324 if (need_ssa_update_p (cfun))
1325 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1326
1327 return walker.todo_;
1328 }
1329
1330 /* Checks whether the statement defining variable *INDEX can be hoisted
1331 out of the loop passed in DATA. Callback for for_each_index. */
1332
1333 static bool
1334 may_move_till (tree ref, tree *index, void *data)
1335 {
1336 struct loop *loop = (struct loop *) data, *max_loop;
1337
1338 /* If REF is an array reference, also check that the step and the lower
1339 bound are invariant in LOOP. */
1340 if (TREE_CODE (ref) == ARRAY_REF)
1341 {
1342 tree step = TREE_OPERAND (ref, 3);
1343 tree lbound = TREE_OPERAND (ref, 2);
1344
1345 max_loop = outermost_invariant_loop (step, loop);
1346 if (!max_loop)
1347 return false;
1348
1349 max_loop = outermost_invariant_loop (lbound, loop);
1350 if (!max_loop)
1351 return false;
1352 }
1353
1354 max_loop = outermost_invariant_loop (*index, loop);
1355 if (!max_loop)
1356 return false;
1357
1358 return true;
1359 }
1360
1361 /* If OP is an SSA_NAME, force the statement that defines it to be
1362 moved out of LOOP. ORIG_LOOP is the loop in which OP is used. */
1363
1364 static void
1365 force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
1366 {
1367 gimple stmt;
1368
1369 if (!op
1370 || is_gimple_min_invariant (op))
1371 return;
1372
1373 gcc_assert (TREE_CODE (op) == SSA_NAME);
1374
1375 stmt = SSA_NAME_DEF_STMT (op);
1376 if (gimple_nop_p (stmt))
1377 return;
1378
1379 set_level (stmt, orig_loop, loop);
1380 }
1381
1382 /* Forces the statements defining invariants in REF (and *INDEX) to be moved
1383 out of LOOP. The reference REF is used in the loop ORIG_LOOP. Callback for
1384 for_each_index. */
1385
1386 struct fmt_data
1387 {
1388 struct loop *loop;
1389 struct loop *orig_loop;
1390 };
1391
1392 static bool
1393 force_move_till (tree ref, tree *index, void *data)
1394 {
1395 struct fmt_data *fmt_data = (struct fmt_data *) data;
1396
1397 if (TREE_CODE (ref) == ARRAY_REF)
1398 {
1399 tree step = TREE_OPERAND (ref, 3);
1400 tree lbound = TREE_OPERAND (ref, 2);
1401
1402 force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
1403 force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
1404 }
1405
1406 force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);
1407
1408 return true;
1409 }
1410
1411 /* A function to free the mem_ref object MEM. */
1412
1413 static void
1414 memref_free (struct im_mem_ref *mem)
1415 {
1416 mem->accesses_in_loop.release ();
1417 }
1418
1419 /* Allocates and returns a memory reference description for MEM whose hash
1420 value is HASH and id is ID. */
1421
1422 static mem_ref_p
1423 mem_ref_alloc (tree mem, unsigned hash, unsigned id)
1424 {
1425 mem_ref_p ref = XOBNEW (&mem_ref_obstack, struct im_mem_ref);
1426 ao_ref_init (&ref->mem, mem);
1427 ref->id = id;
1428 ref->hash = hash;
1429 ref->stored = NULL;
1430 bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
1431 bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
1432 ref->accesses_in_loop.create (1);
1433
1434 return ref;
1435 }
1436
1437 /* Records the memory reference location *LOC in the memory reference
1438 description REF. The reference occurs in statement STMT. */
1439
1440 static void
1441 record_mem_ref_loc (mem_ref_p ref, gimple stmt, tree *loc)
1442 {
1443 mem_ref_loc aref;
1444 aref.stmt = stmt;
1445 aref.ref = loc;
1446 ref->accesses_in_loop.safe_push (aref);
1447 }
1448
1449 /* Set the bit for LOOP in REF's stored bitmap, allocating the bitmap if
1450 necessary. Return whether the bit changed. */
1451
1452 static bool
1453 set_ref_stored_in_loop (mem_ref_p ref, struct loop *loop)
1454 {
1455 if (!ref->stored)
1456 ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
1457 return bitmap_set_bit (ref->stored, loop->num);
1458 }
1459
1460 /* Marks reference REF as stored in LOOP. */
1461
1462 static void
1463 mark_ref_stored (mem_ref_p ref, struct loop *loop)
1464 {
1465 while (loop != current_loops->tree_root
1466 && set_ref_stored_in_loop (ref, loop))
1467 loop = loop_outer (loop);
1468 }
1469
1470 /* Gathers memory references in statement STMT in LOOP, storing the
1471 information about them in the memory_accesses structure. Marks
1472 the vops accessed through unrecognized statements there as
1473 well. */
1474
1475 static void
1476 gather_mem_refs_stmt (struct loop *loop, gimple stmt)
1477 {
1478 tree *mem = NULL;
1479 hashval_t hash;
1480 im_mem_ref **slot;
1481 mem_ref_p ref;
1482 bool is_stored;
1483 unsigned id;
1484
1485 if (!gimple_vuse (stmt))
1486 return;
1487
1488 mem = simple_mem_ref_in_stmt (stmt, &is_stored);
1489 if (!mem)
1490 {
1491 /* We use the shared mem_ref for all unanalyzable refs. */
1492 id = UNANALYZABLE_MEM_ID;
1493 ref = memory_accesses.refs_list[id];
1494 if (dump_file && (dump_flags & TDF_DETAILS))
1495 {
1496 fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
1497 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1498 }
1499 is_stored = gimple_vdef (stmt);
1500 }
1501 else
1502 {
1503 hash = iterative_hash_expr (*mem, 0);
1504 slot = memory_accesses.refs->find_slot_with_hash (*mem, hash, INSERT);
1505 if (*slot)
1506 {
1507 ref = (mem_ref_p) *slot;
1508 id = ref->id;
1509 }
1510 else
1511 {
1512 id = memory_accesses.refs_list.length ();
1513 ref = mem_ref_alloc (*mem, hash, id);
1514 memory_accesses.refs_list.safe_push (ref);
1515 *slot = ref;
1516
1517 if (dump_file && (dump_flags & TDF_DETAILS))
1518 {
1519 fprintf (dump_file, "Memory reference %u: ", id);
1520 print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
1521 fprintf (dump_file, "\n");
1522 }
1523 }
1524
1525 record_mem_ref_loc (ref, stmt, mem);
1526 }
1527 bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
1528 if (is_stored)
1529 {
1530 bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id);
1531 mark_ref_stored (ref, loop);
1532 }
1533 return;
1534 }
1535
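/* Postorder index of each loop (indexed by loop number) in the loop tree;
   used by the sorting and searching comparison functions below. */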
1536 static unsigned *bb_loop_postorder;
1537
1538 /* qsort comparison function to sort blocks by their loop father's postorder number. */
1539
1540 static int
1541 sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_)
1542 {
1543 basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_);
1544 basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_);
1545 struct loop *loop1 = bb1->loop_father;
1546 struct loop *loop2 = bb2->loop_father;
1547 if (loop1->num == loop2->num)
1548 return 0;
1549 return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1550 }
1551
1552 /* qsort comparison function to sort ref locs by their loop father's postorder number. */
1553
1554 static int
1555 sort_locs_in_loop_postorder_cmp (const void *loc1_, const void *loc2_)
1556 {
1557 mem_ref_loc *loc1 = (mem_ref_loc *)const_cast<void *>(loc1_);
1558 mem_ref_loc *loc2 = (mem_ref_loc *)const_cast<void *>(loc2_);
1559 struct loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
1560 struct loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
1561 if (loop1->num == loop2->num)
1562 return 0;
1563 return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1564 }
1565
1566 /* Gathers memory references in loops. */
1567
1568 static void
1569 analyze_memory_references (void)
1570 {
1571 gimple_stmt_iterator bsi;
1572 basic_block bb, *bbs;
1573 struct loop *loop, *outer;
1574 unsigned i, n;
1575
1576 /* Collect all basic blocks in loops and sort them by the postorder
1577 number of their loop father. */
1578 i = 0;
1579 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
1580 FOR_EACH_BB_FN (bb, cfun)
1581 if (bb->loop_father != current_loops->tree_root)
1582 bbs[i++] = bb;
1583 n = i;
1584 qsort (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp);
1585
1586 /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
1587 That results in better locality for all the bitmaps. */
1588 for (i = 0; i < n; ++i)
1589 {
1590 basic_block bb = bbs[i];
1591 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1592 gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
1593 }
1594
1595 /* Sort the location list of gathered memory references by their
1596 loop postorder number. */
1597 im_mem_ref *ref;
1598 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
1599 ref->accesses_in_loop.qsort (sort_locs_in_loop_postorder_cmp);
1600
1601 free (bbs);
1602 // free (bb_loop_postorder);
1603
1604 /* Propagate the information about accessed memory references up
1605 the loop hierarchy. */
1606 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1607 {
1608 /* Finalize the overall touched references (including subloops). */
1609 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
1610 &memory_accesses.refs_stored_in_loop[loop->num]);
1611
1612 /* Propagate the information about accessed memory references up
1613 the loop hierarchy. */
1614 outer = loop_outer (loop);
1615 if (outer == current_loops->tree_root)
1616 continue;
1617
1618 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
1619 &memory_accesses.all_refs_stored_in_loop[loop->num]);
1620 }
1621 }
1622
1623 /* Returns true if MEM1 and MEM2 may alias. TTAE_CACHE is used as a cache in
1624 tree_to_aff_combination_expand. */
1625
1626 static bool
1627 mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2,
1628 hash_map<tree, name_expansion *> **ttae_cache)
1629 {
1630 /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
1631 object and their offsets differ in such a way that the locations cannot
1632 overlap, then they cannot alias. */
1633 widest_int size1, size2;
1634 aff_tree off1, off2;
1635
1636 /* Perform basic offset and type-based disambiguation. */
1637 if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
1638 return false;
1639
1640 /* The expansion of addresses may be a bit expensive, thus we only do
1641 the check at -O2 and higher optimization levels. */
1642 if (optimize < 2)
1643 return true;
1644
1645 get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
1646 get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
1647 aff_combination_expand (&off1, ttae_cache);
1648 aff_combination_expand (&off2, ttae_cache);
1649 aff_combination_scale (&off1, -1);
1650 aff_combination_add (&off2, &off1);
1651
1652 if (aff_comb_cannot_overlap_p (&off2, size1, size2))
1653 return false;
1654
1655 return true;
1656 }
1657
1658 /* Compare function for bsearch searching for reference locations
1659 in a loop. */
1660
1661 static int
1662 find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_)
1663 {
1664 struct loop *loop = (struct loop *)const_cast<void *>(loop_);
1665 mem_ref_loc *loc = (mem_ref_loc *)const_cast<void *>(loc_);
1666 struct loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
1667 if (loop->num == loc_loop->num
1668 || flow_loop_nested_p (loop, loc_loop))
1669 return 0;
1670 return (bb_loop_postorder[loop->num] < bb_loop_postorder[loc_loop->num]
1671 ? -1 : 1);
1672 }
1673
1674 /* Iterates over all locations of REF in LOOP and its subloops calling
1675 fn.operator() with the location as argument. When that operator
1676 returns true the iteration is stopped and true is returned.
1677 Otherwise false is returned. */
1678
1679 template <typename FN>
1680 static bool
1681 for_all_locs_in_loop (struct loop *loop, mem_ref_p ref, FN fn)
1682 {
1683 unsigned i;
1684 mem_ref_loc_p loc;
1685
1686 /* Search for the cluster of locs in the accesses_in_loop vector
1687 which is sorted by the postorder index of the loop father. */
1688 loc = ref->accesses_in_loop.bsearch (loop, find_ref_loc_in_loop_cmp);
1689 if (!loc)
1690 return false;
1691
1692 /* We have found one location inside loop or its sub-loops. Iterate
1693 both forward and backward to cover the whole cluster. */
1694 i = loc - ref->accesses_in_loop.address ();
1695 while (i > 0)
1696 {
1697 --i;
1698 mem_ref_loc_p l = &ref->accesses_in_loop[i];
1699 if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
1700 break;
1701 if (fn (l))
1702 return true;
1703 }
1704 for (i = loc - ref->accesses_in_loop.address ();
1705 i < ref->accesses_in_loop.length (); ++i)
1706 {
1707 mem_ref_loc_p l = &ref->accesses_in_loop[i];
1708 if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
1709 break;
1710 if (fn (l))
1711 return true;
1712 }
1713
1714 return false;
1715 }
1716
1717 /* Rewrites the reference at location LOC to use TMP_VAR. */
1718
1719 struct rewrite_mem_ref_loc
1720 {
1721 rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
1722 bool operator () (mem_ref_loc_p loc);
1723 tree tmp_var;
1724 };
1725
1726 bool
1727 rewrite_mem_ref_loc::operator () (mem_ref_loc_p loc)
1728 {
1729 *loc->ref = tmp_var;
1730 update_stmt (loc->stmt);
1731 return false;
1732 }
1733
1734 /* Rewrites all references to REF in LOOP by variable TMP_VAR. */
1735
1736 static void
1737 rewrite_mem_refs (struct loop *loop, mem_ref_p ref, tree tmp_var)
1738 {
1739 for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
1740 }
1741
1742 /* Stores the first reference location in LOCP. */
1743
1744 struct first_mem_ref_loc_1
1745 {
1746 first_mem_ref_loc_1 (mem_ref_loc_p *locp_) : locp (locp_) {}
1747 bool operator () (mem_ref_loc_p loc);
1748 mem_ref_loc_p *locp;
1749 };
1750
1751 bool
1752 first_mem_ref_loc_1::operator () (mem_ref_loc_p loc)
1753 {
1754 *locp = loc;
1755 return true;
1756 }
1757
1758 /* Returns the first reference location to REF in LOOP. */
1759
1760 static mem_ref_loc_p
1761 first_mem_ref_loc (struct loop *loop, mem_ref_p ref)
1762 {
1763 mem_ref_loc_p locp = NULL;
1764 for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
1765 return locp;
1766 }
1767
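/* Bookkeeping, kept in the exit edge's aux field, for chaining the
   conditional stores emitted by execute_sm_if_changed. */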
1768 struct prev_flag_edges {
1769 /* Edge to insert new flag comparison code. */
1770 edge append_cond_position;
1771
1772 /* Edge for fall through from previous flag comparison. */
1773 edge last_cond_fallthru;
1774 };
1775
1776 /* Helper function for execute_sm. Emit code to store TMP_VAR into
1777 MEM along edge EX.
1778
1779 The store is only done if MEM has changed. We do this so no
1780 changes to MEM occur on code paths that did not originally store
1781 into it.
1782
1783 The common case for execute_sm will transform:
1784
1785 for (...) {
1786 if (foo)
1787 stuff;
1788 else
1789 MEM = TMP_VAR;
1790 }
1791
1792 into:
1793
1794 lsm = MEM;
1795 for (...) {
1796 if (foo)
1797 stuff;
1798 else
1799 lsm = TMP_VAR;
1800 }
1801 MEM = lsm;
1802
1803 This function will generate:
1804
1805 lsm = MEM;
1806
1807 lsm_flag = false;
1808 ...
1809 for (...) {
1810 if (foo)
1811 stuff;
1812 else {
1813 lsm = TMP_VAR;
1814 lsm_flag = true;
1815 }
1816 }
1817 if (lsm_flag) <--
1818 MEM = lsm; <--
1819 */
1820
1821 static void
1822 execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag)
1823 {
1824 basic_block new_bb, then_bb, old_dest;
1825 bool loop_has_only_one_exit;
1826 edge then_old_edge, orig_ex = ex;
1827 gimple_stmt_iterator gsi;
1828 gimple stmt;
1829 struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
1830 bool irr = ex->flags & EDGE_IRREDUCIBLE_LOOP;
1831
1832 /* ?? Insert store after previous store if applicable. See note
1833 below. */
1834 if (prev_edges)
1835 ex = prev_edges->append_cond_position;
1836
1837 loop_has_only_one_exit = single_pred_p (ex->dest);
1838
1839 if (loop_has_only_one_exit)
1840 ex = split_block_after_labels (ex->dest);
1841
1842 old_dest = ex->dest;
1843 new_bb = split_edge (ex);
1844 then_bb = create_empty_bb (new_bb);
1845 if (irr)
1846 then_bb->flags = BB_IRREDUCIBLE_LOOP;
1847 add_bb_to_loop (then_bb, new_bb->loop_father);
1848
1849 gsi = gsi_start_bb (new_bb);
1850 stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
1851 NULL_TREE, NULL_TREE);
1852 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1853
1854 gsi = gsi_start_bb (then_bb);
1855 /* Insert actual store. */
1856 stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
1857 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1858
1859 make_edge (new_bb, then_bb,
1860 EDGE_TRUE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1861 make_edge (new_bb, old_dest,
1862 EDGE_FALSE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1863 then_old_edge = make_edge (then_bb, old_dest,
1864 EDGE_FALLTHRU | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1865
1866 set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
1867
1868 if (prev_edges)
1869 {
1870 basic_block prevbb = prev_edges->last_cond_fallthru->src;
1871 redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
1872 set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
1873 set_immediate_dominator (CDI_DOMINATORS, old_dest,
1874 recompute_dominator (CDI_DOMINATORS, old_dest));
1875 }
1876
1877 /* ?? Because stores may alias, they must happen in the exact
1878 sequence in which they originally happened. Save the position right after
1879 the (_lsm) store we just created so we can continue appending after
1880 it and maintain the original order. */
1881 {
1882 struct prev_flag_edges *p;
1883
1884 if (orig_ex->aux)
1885 orig_ex->aux = NULL;
1886 alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
1887 p = (struct prev_flag_edges *) orig_ex->aux;
1888 p->append_cond_position = then_old_edge;
1889 p->last_cond_fallthru = find_edge (new_bb, old_dest);
1890 orig_ex->aux = (void *) p;
1891 }
1892
1893 if (!loop_has_only_one_exit)
1894 for (gphi_iterator gpi = gsi_start_phis (old_dest);
1895 !gsi_end_p (gpi); gsi_next (&gpi))
1896 {
1897 gphi *phi = gpi.phi ();
1898 unsigned i;
1899
1900 for (i = 0; i < gimple_phi_num_args (phi); i++)
1901 if (gimple_phi_arg_edge (phi, i)->src == new_bb)
1902 {
1903 tree arg = gimple_phi_arg_def (phi, i);
1904 add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
1905 update_stmt (phi);
1906 }
1907 }
1908 /* Remove the original fall through edge. This was the
1909 single_succ_edge (new_bb). */
1910 EDGE_SUCC (new_bb, 0)->flags &= ~EDGE_FALLTHRU;
1911 }
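
/* Illustration of the chaining done via prev_flag_edges above: if a second
   flagged store is emitted on the same exit edge, it is appended after the
   first one so that the original store order is preserved.  This is only a
   sketch; A, B and their flags are made-up names:

     if (A_flag)        <-- first call, inserted on the exit edge
       A = A_lsm;
     if (B_flag)        <-- second call, appended via append_cond_position
       B = B_lsm;
     <original exit destination>  */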
1912
1913 /* If the location is a store to REF, set the flag indicating the store. */
1914
1915 struct sm_set_flag_if_changed
1916 {
1917 sm_set_flag_if_changed (tree flag_) : flag (flag_) {}
1918 bool operator () (mem_ref_loc_p loc);
1919 tree flag;
1920 };
1921
1922 bool
1923 sm_set_flag_if_changed::operator () (mem_ref_loc_p loc)
1924 {
1925 /* Only set the flag for writes. */
1926 if (is_gimple_assign (loc->stmt)
1927 && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
1928 {
1929 gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
1930 gimple stmt = gimple_build_assign (flag, boolean_true_node);
1931 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1932 }
1933 return false;
1934 }
1935
1936 /* Helper function for execute_sm. On every location where REF is
1937 stored to, set an appropriate flag indicating the store. */
1938
1939 static tree
1940 execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref)
1941 {
1942 tree flag;
1943 char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
1944 flag = create_tmp_reg (boolean_type_node, str);
1945 for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag));
1946 return flag;
1947 }
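
/* For illustration only (MEM, x and MEM_flag are placeholder names): after
   the walk above, every store to the reference inside LOOP

       MEM = x;

   is followed by

       MEM = x;
       MEM_flag = true;

   where MEM_flag is the boolean temporary created by this function.  */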
1948
1949 /* Executes store motion of memory reference REF from LOOP.
1950 Exits from the LOOP are stored in EXITS. The initialization of the
1951 temporary variable is put into the preheader of the loop, and assignments
1952 to the reference from the temporary variable are emitted on the exits. */
1953
1954 static void
1955 execute_sm (struct loop *loop, vec<edge> exits, mem_ref_p ref)
1956 {
1957 tree tmp_var, store_flag = NULL_TREE;
1958 unsigned i;
1959 gassign *load;
1960 struct fmt_data fmt_data;
1961 edge ex;
1962 struct lim_aux_data *lim_data;
1963 bool multi_threaded_model_p = false;
1964 gimple_stmt_iterator gsi;
1965
1966 if (dump_file && (dump_flags & TDF_DETAILS))
1967 {
1968 fprintf (dump_file, "Executing store motion of ");
1969 print_generic_expr (dump_file, ref->mem.ref, 0);
1970 fprintf (dump_file, " from loop %d\n", loop->num);
1971 }
1972
1973 tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
1974 get_lsm_tmp_name (ref->mem.ref, ~0));
1975
1976 fmt_data.loop = loop;
1977 fmt_data.orig_loop = loop;
1978 for_each_index (&ref->mem.ref, force_move_till, &fmt_data);
1979
1980 if (bb_in_transaction (loop_preheader_edge (loop)->src)
1981 || !PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES))
1982 multi_threaded_model_p = true;
1983
1984 if (multi_threaded_model_p)
1985 store_flag = execute_sm_if_changed_flag_set (loop, ref);
1986
1987 rewrite_mem_refs (loop, ref, tmp_var);
1988
1989 /* Emit the load code before the first occurrence of the memory reference
1990 in the loop, so that we are sure it will be processed by
1991 move_computations after all dependencies. */
1992 gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);
1993
1994 /* FIXME/TODO: For the multi-threaded variant, we could avoid this
1995 load altogether, since the store is predicated by a flag. We
1996 could do the load only if it was originally in the loop. */
1997 load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
1998 lim_data = init_lim_data (load);
1999 lim_data->max_loop = loop;
2000 lim_data->tgt_loop = loop;
2001 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
2002
2003 if (multi_threaded_model_p)
2004 {
2005 load = gimple_build_assign (store_flag, boolean_false_node);
2006 lim_data = init_lim_data (load);
2007 lim_data->max_loop = loop;
2008 lim_data->tgt_loop = loop;
2009 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
2010 }
2011
2012 /* Sink the store to every exit from the loop. */
2013 FOR_EACH_VEC_ELT (exits, i, ex)
2014 if (!multi_threaded_model_p)
2015 {
2016 gassign *store;
2017 store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
2018 gsi_insert_on_edge (ex, store);
2019 }
2020 else
2021 execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag);
2022 }
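
/* For illustration, the single-threaded variant of the transformation done
   by execute_sm (a sketch; MEM and x are placeholders):

     for (...)
       MEM = x;

   becomes, after move_computations has hoisted the emitted load,

     lsm = MEM;
     for (...)
       lsm = x;
     MEM = lsm;       <-- emitted on every exit edge

   When store data races must be avoided, the flag-based scheme of
   execute_sm_if_changed is used instead.  */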
2023
2024 /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
2025 edges of the LOOP. */
2026
2027 static void
2028 hoist_memory_references (struct loop *loop, bitmap mem_refs,
2029 vec<edge> exits)
2030 {
2031 mem_ref_p ref;
2032 unsigned i;
2033 bitmap_iterator bi;
2034
2035 EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
2036 {
2037 ref = memory_accesses.refs_list[i];
2038 execute_sm (loop, exits, ref);
2039 }
2040 }
2041
2042 struct ref_always_accessed
2043 {
2044 ref_always_accessed (struct loop *loop_, bool stored_p_)
2045 : loop (loop_), stored_p (stored_p_) {}
2046 bool operator () (mem_ref_loc_p loc);
2047 struct loop *loop;
2048 bool stored_p;
2049 };
2050
2051 bool
2052 ref_always_accessed::operator () (mem_ref_loc_p loc)
2053 {
2054 struct loop *must_exec;
2055
2056 if (!get_lim_data (loc->stmt))
2057 return false;
2058
2059 /* If we require an always executed store make sure the statement
2060 stores to the reference. */
2061 if (stored_p)
2062 {
2063 tree lhs = gimple_get_lhs (loc->stmt);
2064 if (!lhs
2065 || lhs != *loc->ref)
2066 return false;
2067 }
2068
2069 must_exec = get_lim_data (loc->stmt)->always_executed_in;
2070 if (!must_exec)
2071 return false;
2072
2073 if (must_exec == loop
2074 || flow_loop_nested_p (must_exec, loop))
2075 return true;
2076
2077 return false;
2078 }
2079
2080 /* Returns true if REF is always accessed in LOOP. If STORED_P is true,
2081 make sure REF is always stored to in LOOP. */
2082
2083 static bool
2084 ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p)
2085 {
2086 return for_all_locs_in_loop (loop, ref,
2087 ref_always_accessed (loop, stored_p));
2088 }
2089
2090 /* Returns true if REF1 and REF2 are independent. */
2091
2092 static bool
2093 refs_independent_p (mem_ref_p ref1, mem_ref_p ref2)
2094 {
2095 if (ref1 == ref2)
2096 return true;
2097
2098 if (dump_file && (dump_flags & TDF_DETAILS))
2099 fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2100 ref1->id, ref2->id);
2101
2102 if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
2103 {
2104 if (dump_file && (dump_flags & TDF_DETAILS))
2105 fprintf (dump_file, "dependent.\n");
2106 return false;
2107 }
2108 else
2109 {
2110 if (dump_file && (dump_flags & TDF_DETAILS))
2111 fprintf (dump_file, "independent.\n");
2112 return true;
2113 }
2114 }
2115
2116 /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
2117 and its super-loops. */
2118
2119 static void
2120 record_dep_loop (struct loop *loop, mem_ref_p ref, bool stored_p)
2121 {
2122 /* We can propagate dependent-in-loop bits up the loop
2123 hierarchy to all outer loops. */
2124 while (loop != current_loops->tree_root
2125 && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2126 loop = loop_outer (loop);
2127 }
2128
2129 /* Returns true if REF is independent of all other memory references in
2130 LOOP. */
2131
2132 static bool
2133 ref_indep_loop_p_1 (struct loop *loop, mem_ref_p ref, bool stored_p)
2134 {
2135 bitmap refs_to_check;
2136 unsigned i;
2137 bitmap_iterator bi;
2138 mem_ref_p aref;
2139
2140 if (stored_p)
2141 refs_to_check = &memory_accesses.refs_in_loop[loop->num];
2142 else
2143 refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];
2144
2145 if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
2146 return false;
2147
2148 EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2149 {
2150 aref = memory_accesses.refs_list[i];
2151 if (!refs_independent_p (ref, aref))
2152 return false;
2153 }
2154
2155 return true;
2156 }
2157
2158 /* Returns true if REF is independent of all other memory references in
2159 LOOP. Wrapper over ref_indep_loop_p_1, caching its results. */
2160
2161 static bool
2162 ref_indep_loop_p_2 (struct loop *loop, mem_ref_p ref, bool stored_p)
2163 {
2164 stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num));
2165
2166 if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2167 return true;
2168 if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2169 return false;
2170
2171 struct loop *inner = loop->inner;
2172 while (inner)
2173 {
2174 if (!ref_indep_loop_p_2 (inner, ref, stored_p))
2175 return false;
2176 inner = inner->next;
2177 }
2178
2179 bool indep_p = ref_indep_loop_p_1 (loop, ref, stored_p);
2180
2181 if (dump_file && (dump_flags & TDF_DETAILS))
2182 fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2183 ref->id, loop->num, indep_p ? "independent" : "dependent");
2184
2185 /* Record the computed result in the cache. */
2186 if (indep_p)
2187 {
2188 if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
2189 && stored_p)
2190 {
2191 /* If it's independent of all refs then it's independent of
2192 stores, too. */
2193 bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
2194 }
2195 }
2196 else
2197 {
2198 record_dep_loop (loop, ref, stored_p);
2199 if (!stored_p)
2200 {
2201 /* If it's dependent on stores it's dependent on
2202 all refs, too. */
2203 record_dep_loop (loop, ref, true);
2204 }
2205 }
2206
2207 return indep_p;
2208 }
2209
2210 /* Returns true if REF is independent of all other memory references in
2211 LOOP. */
2212
2213 static bool
2214 ref_indep_loop_p (struct loop *loop, mem_ref_p ref)
2215 {
2216 gcc_checking_assert (MEM_ANALYZABLE (ref));
2217
2218 return ref_indep_loop_p_2 (loop, ref, false);
2219 }
2220
2221 /* Returns true if we can perform store motion of REF from LOOP. */
2222
2223 static bool
2224 can_sm_ref_p (struct loop *loop, mem_ref_p ref)
2225 {
2226 tree base;
2227
2228 /* Can't hoist unanalyzable refs. */
2229 if (!MEM_ANALYZABLE (ref))
2230 return false;
2231
2232 /* It should be movable. */
2233 if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
2234 || TREE_THIS_VOLATILE (ref->mem.ref)
2235 || !for_each_index (&ref->mem.ref, may_move_till, loop))
2236 return false;
2237
2238 /* If it can throw, fail; we do not properly update EH info. */
2239 if (tree_could_throw_p (ref->mem.ref))
2240 return false;
2241
2242 /* If it can trap, it must be always executed in LOOP.
2243 Readonly memory locations may trap when storing to them, but
2244 tree_could_trap_p is a predicate for rvalues, so check that
2245 explicitly. */
2246 base = get_base_address (ref->mem.ref);
2247 if ((tree_could_trap_p (ref->mem.ref)
2248 || (DECL_P (base) && TREE_READONLY (base)))
2249 && !ref_always_accessed_p (loop, ref, true))
2250 return false;
2251
2252 /* And it must be independent of all other memory references
2253 in LOOP. */
2254 if (!ref_indep_loop_p (loop, ref))
2255 return false;
2256
2257 return true;
2258 }
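
/* For illustration (a sketch; p, cond and x are placeholders): a reference
   such as *p in

     for (...)
       if (cond)
         *p = x;

   is rejected here if *p may trap and the store is not always executed in
   the loop, because the transformation would introduce a load before the
   loop and a store on its exits that the original code might never have
   executed, possibly trapping or writing to read-only memory.  */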
2259
2260 /* Marks in REFS_TO_SM the references in LOOP for which store motion should
2261 be performed. SM_EXECUTED is the set of references for which store
2262 motion was already performed in one of the outer loops. */
2263
2264 static void
2265 find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2266 {
2267 bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
2268 unsigned i;
2269 bitmap_iterator bi;
2270 mem_ref_p ref;
2271
2272 EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2273 {
2274 ref = memory_accesses.refs_list[i];
2275 if (can_sm_ref_p (loop, ref))
2276 bitmap_set_bit (refs_to_sm, i);
2277 }
2278 }
2279
2280 /* Checks whether LOOP (with exits stored in EXITS array) is suitable
2281 for a store motion optimization (i.e. whether we can insert statements
2282 on its exits). */
2283
2284 static bool
2285 loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
2286 vec<edge> exits)
2287 {
2288 unsigned i;
2289 edge ex;
2290
2291 FOR_EACH_VEC_ELT (exits, i, ex)
2292 if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2293 return false;
2294
2295 return true;
2296 }
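
/* For illustration: a loop whose only way out is an exception edge (EDGE_EH)
   or an abnormal edge offers no exit on which a plain "MEM = lsm;" store
   could be inserted, so such loops are skipped by store motion.  */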
2297
2298 /* Try to perform store motion for all memory references modified inside
2299 LOOP. SM_EXECUTED is the bitmap of the memory references for which
2300 store motion was already executed in one of the outer loops. */
2301
2302 static void
2303 store_motion_loop (struct loop *loop, bitmap sm_executed)
2304 {
2305 vec<edge> exits = get_loop_exit_edges (loop);
2306 struct loop *subloop;
2307 bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
2308
2309 if (loop_suitable_for_sm (loop, exits))
2310 {
2311 find_refs_for_sm (loop, sm_executed, sm_in_loop);
2312 hoist_memory_references (loop, sm_in_loop, exits);
2313 }
2314 exits.release ();
2315
2316 bitmap_ior_into (sm_executed, sm_in_loop);
2317 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2318 store_motion_loop (subloop, sm_executed);
2319 bitmap_and_compl_into (sm_executed, sm_in_loop);
2320 BITMAP_FREE (sm_in_loop);
2321 }
2322
2323 /* Try to perform store motion for all memory references modified inside
2324 loops. */
2325
2326 static void
2327 store_motion (void)
2328 {
2329 struct loop *loop;
2330 bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
2331
2332 for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2333 store_motion_loop (loop, sm_executed);
2334
2335 BITMAP_FREE (sm_executed);
2336 gsi_commit_edge_inserts ();
2337 }
2338
2339 /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
2340 for each such basic block bb records the outermost loop for which execution
2341 of its header implies execution of bb. CONTAINS_CALL is the bitmap of
2342 blocks that contain a nonpure call. */
2343
2344 static void
2345 fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
2346 {
2347 basic_block bb = NULL, *bbs, last = NULL;
2348 unsigned i;
2349 edge e;
2350 struct loop *inn_loop = loop;
2351
2352 if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2353 {
2354 bbs = get_loop_body_in_dom_order (loop);
2355
2356 for (i = 0; i < loop->num_nodes; i++)
2357 {
2358 edge_iterator ei;
2359 bb = bbs[i];
2360
2361 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2362 last = bb;
2363
2364 if (bitmap_bit_p (contains_call, bb->index))
2365 break;
2366
2367 FOR_EACH_EDGE (e, ei, bb->succs)
2368 if (!flow_bb_inside_loop_p (loop, e->dest))
2369 break;
2370 if (e)
2371 break;
2372
2373 /* A loop might be infinite (TODO use simple loop analysis
2374 to disprove this if possible). */
2375 if (bb->flags & BB_IRREDUCIBLE_LOOP)
2376 break;
2377
2378 if (!flow_bb_inside_loop_p (inn_loop, bb))
2379 break;
2380
2381 if (bb->loop_father->header == bb)
2382 {
2383 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2384 break;
2385
2386 /* In a loop that is always entered we may proceed anyway.
2387 But record that we entered it and stop once we leave it. */
2388 inn_loop = bb->loop_father;
2389 }
2390 }
2391
2392 while (1)
2393 {
2394 SET_ALWAYS_EXECUTED_IN (last, loop);
2395 if (last == loop->header)
2396 break;
2397 last = get_immediate_dominator (CDI_DOMINATORS, last);
2398 }
2399
2400 free (bbs);
2401 }
2402
2403 for (loop = loop->inner; loop; loop = loop->next)
2404 fill_always_executed_in_1 (loop, contains_call);
2405 }
2406
2407 /* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
2408 for each such basic block bb records the outermost loop for which execution
2409 of its header implies execution of bb. */
2410
2411 static void
2412 fill_always_executed_in (void)
2413 {
2414 sbitmap contains_call = sbitmap_alloc (last_basic_block_for_fn (cfun));
2415 basic_block bb;
2416 struct loop *loop;
2417
2418 bitmap_clear (contains_call);
2419 FOR_EACH_BB_FN (bb, cfun)
2420 {
2421 gimple_stmt_iterator gsi;
2422 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2423 {
2424 if (nonpure_call_p (gsi_stmt (gsi)))
2425 break;
2426 }
2427
2428 if (!gsi_end_p (gsi))
2429 bitmap_set_bit (contains_call, bb->index);
2430 }
2431
2432 for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2433 fill_always_executed_in_1 (loop, contains_call);
2434
2435 sbitmap_free (contains_call);
2436 }
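
/* For illustration (a sketch; cond is a placeholder): in

     for (...)
       {
         if (cond)
           x = 1;     <-- not always executed
         y = 2;       <-- executed whenever the loop header executes
       }

   the header and the block containing "y = 2;" dominate the latch and get
   ALWAYS_EXECUTED_IN set to the loop, while the conditional block does
   not.  */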
2437
2438
2439 /* Compute the global information needed by the loop invariant motion pass. */
2440
2441 static void
2442 tree_ssa_lim_initialize (void)
2443 {
2444 struct loop *loop;
2445 unsigned i;
2446
2447 bitmap_obstack_initialize (&lim_bitmap_obstack);
2448 gcc_obstack_init (&mem_ref_obstack);
2449 lim_aux_data_map = new hash_map<gimple, lim_aux_data *>;
2450
2451 if (flag_tm)
2452 compute_transaction_bits ();
2453
2454 alloc_aux_for_edges (0);
2455
2456 memory_accesses.refs = new hash_table<mem_ref_hasher> (100);
2457 memory_accesses.refs_list.create (100);
2458 /* Allocate a special, unanalyzable mem-ref with ID zero. */
2459 memory_accesses.refs_list.quick_push
2460 (mem_ref_alloc (error_mark_node, 0, UNANALYZABLE_MEM_ID));
2461
2462 memory_accesses.refs_in_loop.create (number_of_loops (cfun));
2463 memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
2464 memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
2465 memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2466 memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
2467 memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2468
2469 for (i = 0; i < number_of_loops (cfun); i++)
2470 {
2471 bitmap_initialize (&memory_accesses.refs_in_loop[i],
2472 &lim_bitmap_obstack);
2473 bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
2474 &lim_bitmap_obstack);
2475 bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
2476 &lim_bitmap_obstack);
2477 }
2478
2479 memory_accesses.ttae_cache = NULL;
2480
2481 /* Initialize bb_loop_postorder with a mapping from loop->num to
2482 its postorder index. */
2483 i = 0;
2484 bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
2485 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
2486 bb_loop_postorder[loop->num] = i++;
2487 }
2488
2489 /* Cleans up after the invariant motion pass. */
2490
2491 static void
2492 tree_ssa_lim_finalize (void)
2493 {
2494 basic_block bb;
2495 unsigned i;
2496 mem_ref_p ref;
2497
2498 free_aux_for_edges ();
2499
2500 FOR_EACH_BB_FN (bb, cfun)
2501 SET_ALWAYS_EXECUTED_IN (bb, NULL);
2502
2503 bitmap_obstack_release (&lim_bitmap_obstack);
2504 delete lim_aux_data_map;
2505
2506 delete memory_accesses.refs;
2507 memory_accesses.refs = NULL;
2508
2509 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
2510 memref_free (ref);
2511 memory_accesses.refs_list.release ();
2512 obstack_free (&mem_ref_obstack, NULL);
2513
2514 memory_accesses.refs_in_loop.release ();
2515 memory_accesses.refs_stored_in_loop.release ();
2516 memory_accesses.all_refs_stored_in_loop.release ();
2517
2518 if (memory_accesses.ttae_cache)
2519 free_affine_expand_cache (&memory_accesses.ttae_cache);
2520
2521 free (bb_loop_postorder);
2522 }
2523
2524 /* Moves invariants from loops. Only "expensive" invariants are moved out --
2525 i.e. those that are likely to be a win regardless of the register pressure. */
2526
2527 unsigned int
2528 tree_ssa_lim (void)
2529 {
2530 unsigned int todo;
2531
2532 tree_ssa_lim_initialize ();
2533
2534 /* Gathers information about memory accesses in the loops. */
2535 analyze_memory_references ();
2536
2537 /* Fills ALWAYS_EXECUTED_IN information for basic blocks. */
2538 fill_always_executed_in ();
2539
2540 /* For each statement determine the outermost loop in which it is
2541 invariant and the cost of computing the invariant. */
2542 invariantness_dom_walker (CDI_DOMINATORS)
2543 .walk (cfun->cfg->x_entry_block_ptr);
2544
2545 /* Execute store motion. Force the necessary invariants to be moved
2546 out of the loops as well. */
2547 store_motion ();
2548
2549 /* Move the expressions that are expensive enough. */
2550 todo = move_computations ();
2551
2552 tree_ssa_lim_finalize ();
2553
2554 return todo;
2555 }
2556
2557 /* Loop invariant motion pass. */
2558
2559 namespace {
2560
2561 const pass_data pass_data_lim =
2562 {
2563 GIMPLE_PASS, /* type */
2564 "lim", /* name */
2565 OPTGROUP_LOOP, /* optinfo_flags */
2566 TV_LIM, /* tv_id */
2567 PROP_cfg, /* properties_required */
2568 0, /* properties_provided */
2569 0, /* properties_destroyed */
2570 0, /* todo_flags_start */
2571 0, /* todo_flags_finish */
2572 };
2573
2574 class pass_lim : public gimple_opt_pass
2575 {
2576 public:
2577 pass_lim (gcc::context *ctxt)
2578 : gimple_opt_pass (pass_data_lim, ctxt)
2579 {}
2580
2581 /* opt_pass methods: */
2582 opt_pass * clone () { return new pass_lim (m_ctxt); }
2583 virtual bool gate (function *) { return flag_tree_loop_im != 0; }
2584 virtual unsigned int execute (function *);
2585
2586 }; // class pass_lim
2587
2588 unsigned int
2589 pass_lim::execute (function *fun)
2590 {
2591 if (number_of_loops (fun) <= 1)
2592 return 0;
2593
2594 return tree_ssa_lim ();
2595 }
2596
2597 } // anon namespace
2598
2599 gimple_opt_pass *
2600 make_pass_lim (gcc::context *ctxt)
2601 {
2602 return new pass_lim (ctxt);
2603 }
2604
2605