1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 88, 89, 91-6, 1997 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
29 It also finds cases where
30 a register is set within the loop by zero-extending a narrower value
31 and changes these to zero the entire register once before the loop
32 and merely copy the low part within the loop.
34 Most of the complexity is in heuristics to decide when it is
35 worthwhile to do these things. */
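/* A minimal illustration of the transformations described above (this
   example is not part of the original sources): given

       for (i = 0; i < n; i++)
         a[i] = x * y;

   the invariant product x * y is hoisted in front of the loop, i is a
   basic induction variable, and the address a + i is a general induction
   variable.  Strength reduction rewrites the address as a pointer that is
   bumped each iteration, and induction variable elimination can then
   remove i if it has no other use, giving roughly

       t = x * y;
       for (p = a; p < a + n; p++)
         *p = t;

   The heuristics below decide when such rewrites actually pay off.  */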
42 #include "insn-config.h"
43 #include "insn-flags.h"
45 #include "hard-reg-set.h"
52 /* Vector mapping INSN_UIDs to luids.
53 The luids are like uids but increase monotonically always.
54 We use them to see whether a jump comes from outside a given loop. */
58 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
59 number the insn is contained in. */
63 /* 1 + largest uid of any insn. */
67 /* 1 + luid of last insn. */
71 /* Number of loops detected in current function. Used as index to the
74 static int max_loop_num;
76 /* Indexed by loop number, contains the first and last insn of each loop. */
78 static rtx *loop_number_loop_starts, *loop_number_loop_ends;
80 /* For each loop, gives the containing loop number, -1 if none. */
85 /* The main output of analyze_loop_iterations is placed here */
87 int *loop_can_insert_bct;
89 /* For each loop, determines whether some of its inner loops have used
90    the count register. */
92 int *loop_used_count_register;
94 /* loop parameters for arithmetic loops. These loops have a loop variable
95 which is initialized to loop_start_value, incremented in each iteration
96 by "loop_increment". At the end of the iteration the loop variable is
97 compared to the loop_comparison_value (using loop_comparison_code). */
100 rtx *loop_comparison_value;
101 rtx *loop_start_value;
102 enum rtx_code *loop_comparison_code;
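/* As an illustration of how the arrays above (together with loop_increment)
   are meant to be read -- this sketch is not from the original sources:
   for a counted loop of the form

       for (i = init; i < limit; i += step)
         ...

   loop_start_value[loopno] would hold init, loop_increment[loopno] step,
   loop_comparison_value[loopno] limit, and loop_comparison_code[loopno]
   the rtx_code LT.  */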
105 /* For each loop, keep track of its unrolling factor.
109 -1: completely unrolled
110 >0: holds the unroll exact factor. */
111 int *loop_unroll_factor;
113 /* Indexed by loop number, contains a nonzero value if the "loop" isn't
114 really a loop (an insn outside the loop branches into it). */
116 static char *loop_invalid;
118 /* Indexed by loop number, links together all LABEL_REFs which refer to
119 code labels outside the loop. Used by routines that need to know all
120 loop exits, such as final_biv_value and final_giv_value.
122 This does not include loop exits due to return instructions. This is
123 because all bivs and givs are pseudos, and hence must be dead after a
124 return, so the presence of a return does not affect any of the
125 optimizations that use this info. It is simpler to just not include return
126 instructions on this list. */
128 rtx *loop_number_exit_labels;
130 /* Indexed by loop number, counts the number of LABEL_REFs on
131 loop_number_exit_labels for this loop and all loops nested inside it. */
133 int *loop_number_exit_count;
135 /* Holds the number of loop iterations. It is zero if the number could not be
136 calculated. Must be unsigned since the number of iterations can
137 be as high as 2^wordsize-1. For loops with a wider iterator, this number
138 will be zero if the number of loop iterations is too large for an
139 unsigned integer to hold. */
141 unsigned HOST_WIDE_INT loop_n_iterations;
143 /* Nonzero if there is a subroutine call in the current loop. */
145 static int loop_has_call;
147 /* Nonzero if there is a volatile memory reference in the current
148    loop. */
150 static int loop_has_volatile;
152 /* Added loop_continue which is the NOTE_INSN_LOOP_CONT of the
153 current loop. A continue statement will generate a branch to
154 NEXT_INSN (loop_continue). */
156 static rtx loop_continue;
158 /* Indexed by register number, contains the number of times the reg
159 is set during the loop being scanned.
160 During code motion, a negative value indicates a reg that has been
161 made a candidate; in particular -2 means that it is a candidate that
162 we know is equal to a constant and -1 means that it is a candidate
163 not known equal to a constant.
164 After code motion, regs moved have 0 (which is accurate now)
165 while the failed candidates have the original number of times set.
167 Therefore, at all times, == 0 indicates an invariant register;
168 < 0 a conditionally invariant one. */
170 static int *n_times_set;
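/* Worked example of the encoding above (illustrative only): a pseudo that
   is set once in the loop starts with n_times_set[] == 1; if that set is
   picked as a motion candidate the entry becomes -2 when the value is
   known constant and -1 otherwise; after move_movables it is 0 if the
   set was actually moved, or back to 1 if the candidate failed.  */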
172 /* Original value of n_times_set; same except that this value
173 is not set negative for a reg whose sets have been made candidates
174 and not set to 0 for a reg that is moved. */
176 static int *n_times_used;
178 /* Indexed by register number, 1 indicates that the register
179 cannot be moved or strength reduced. */
181 static char *may_not_optimize;
183 /* Nonzero means reg N has already been moved out of one loop.
184 This reduces the desire to move it out of another. */
186 static char *moved_once;
188 /* Array of MEMs that are stored in this loop. If there are too many to fit
189 here, we just turn on unknown_address_altered. */
191 #define NUM_STORES 30
192 static rtx loop_store_mems[NUM_STORES];
194 /* Index of first available slot in above array. */
195 static int loop_store_mems_idx;
197 /* Nonzero if we don't know what MEMs were changed in the current loop.
198 This happens if the loop contains a call (in which case `loop_has_call'
199 will also be set) or if we store into more than NUM_STORES MEMs. */
201 static int unknown_address_altered;
203 /* Count of movable (i.e. invariant) instructions discovered in the loop. */
204 static int num_movables;
206 /* Count of memory write instructions discovered in the loop. */
207 static int num_mem_sets;
209 /* Number of loops contained within the current one, including itself. */
210 static int loops_enclosed;
212 /* Bound on pseudo register number before loop optimization.
213 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
214 int max_reg_before_loop;
216 /* This obstack is used in product_cheap_p to allocate its rtl. It
217 may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
218 If we used the same obstack that it did, we would be deallocating
221 static struct obstack temp_obstack;
223 /* This is where the pointer to the obstack being used for RTL is stored. */
225 extern struct obstack *rtl_obstack;
227 #define obstack_chunk_alloc xmalloc
228 #define obstack_chunk_free free
230 extern char *oballoc ();
232 /* During the analysis of a loop, a chain of `struct movable's
233 is made to record all the movable insns found.
234 Then the entire chain can be scanned to decide which to move. */
236 struct movable
237 {
238   rtx insn;                   /* A movable insn */
239   rtx set_src;                /* The expression this reg is set from. */
240   rtx set_dest;               /* The destination of this SET. */
241   rtx dependencies;           /* When INSN is a libcall, this is an EXPR_LIST
242                                  of any registers used within the LIBCALL. */
243   int consec;                 /* Number of consecutive following insns
244                                  that must be moved with this one. */
245   int regno;                  /* The register it sets */
246   short lifetime;             /* lifetime of that register;
247                                  may be adjusted when matching movables
248                                  that load the same value are found. */
249   short savings;              /* Number of insns we can move for this reg,
250                                  including other movables that force this
251                                  or match this one. */
252   unsigned int cond : 1;      /* 1 if only conditionally movable */
253   unsigned int force : 1;     /* 1 means MUST move this insn */
254   unsigned int global : 1;    /* 1 means reg is live outside this loop */
255                 /* If PARTIAL is 1, GLOBAL means something different:
256                    that the reg is live outside the range from where it is set
257                    to the following label. */
258   unsigned int done : 1;      /* 1 inhibits further processing of this */
260   unsigned int partial : 1;   /* 1 means this reg is used for zero-extending.
261                                  In particular, moving it does not make it
262                                  invariant. */
263   unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
264                                  load SRC, rather than copying INSN. */
265   unsigned int is_equiv : 1;  /* 1 means a REG_EQUIV is present on INSN. */
266   enum machine_mode savemode; /* Nonzero means it is a mode for a low part
267                                  that we should avoid changing when clearing
268                                  the rest of the reg. */
269   struct movable *match;      /* First entry for same value */
270   struct movable *forces;     /* An insn that must be moved if this is */
271   struct movable *next;
272 };
274 FILE *loop_dump_stream;
276 /* Forward declarations. */
278 static void find_and_verify_loops ();
279 static void mark_loop_jump ();
280 static void prescan_loop ();
281 static int reg_in_basic_block_p ();
282 static int consec_sets_invariant_p ();
283 static rtx libcall_other_reg ();
284 static int labels_in_range_p ();
285 static void count_loop_regs_set ();
286 static void note_addr_stored ();
287 static int loop_reg_used_before_p ();
288 static void scan_loop ();
290 static void replace_call_address ();
292 static rtx skip_consec_insns ();
293 static int libcall_benefit ();
294 static void ignore_some_movables ();
295 static void force_movables ();
296 static void combine_movables ();
297 static int rtx_equal_for_loop_p ();
298 static void move_movables ();
299 static void strength_reduce ();
300 static int valid_initial_value_p ();
301 static void find_mem_givs ();
302 static void record_biv ();
303 static void check_final_value ();
304 static void record_giv ();
305 static void update_giv_derive ();
306 static int basic_induction_var ();
307 static rtx simplify_giv_expr ();
308 static int general_induction_var ();
309 static int consec_sets_giv ();
310 static int check_dbra_loop ();
311 static rtx express_from ();
312 static int combine_givs_p ();
313 static void combine_givs ();
314 static int product_cheap_p ();
315 static int maybe_eliminate_biv ();
316 static int maybe_eliminate_biv_1 ();
317 static int last_use_this_basic_block ();
318 static void record_initial ();
319 static void update_reg_last_use ();
322 /* This is extern from unroll.c */
323 void iteration_info ();
325 /* Two main functions for implementing bct:
326 first - to be called before loop unrolling, and the second - after */
327 static void analyze_loop_iterations ();
328 static void insert_bct ();
330 /* Auxiliary function that inserts the bct pattern into the loop */
331 static void instrument_loop_bct ();
334 /* Indirect_jump_in_function is computed once per function. */
335 int indirect_jump_in_function = 0;
336 static int indirect_jump_in_function_p ();
339 /* Relative gain of eliminating various kinds of operations. */
346 /* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
347 copy the value of the strength reduced giv to its original register. */
353 char *free_point = (char *) oballoc (1);
354 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
356 add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);
358 /* We multiply by 2 to reconcile the difference in scale between
359 these two ways of computing costs. Otherwise the cost of a copy
360 will be far less than the cost of an add. */
364 /* Free the objects we just allocated. */
367 /* Initialize the obstack used for rtl in product_cheap_p. */
368 gcc_obstack_init (&temp_obstack);
371 /* Entry point of this file. Perform loop optimization
372 on the current function. F is the first insn of the function
373 and DUMPFILE is a stream for output of a trace of actions taken
374 (or 0 if none should be output). */
377 loop_optimize (f, dumpfile, unroll_p)
378 /* f is the first instruction of a chain of insns for one function */
387 loop_dump_stream = dumpfile;
389 init_recog_no_volatile ();
390 init_alias_analysis ();
392 max_reg_before_loop = max_reg_num ();
394 moved_once = (char *) alloca (max_reg_before_loop);
395 bzero (moved_once, max_reg_before_loop);
399 /* Count the number of loops. */
402 for (insn = f; insn; insn = NEXT_INSN (insn))
404   if (GET_CODE (insn) == NOTE
405       && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
406     max_loop_num++;
409 /* Don't waste time if no loops. */
410 if (max_loop_num == 0)
413 /* Get size to use for tables indexed by uids.
414 Leave some space for labels allocated by find_and_verify_loops. */
415 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
417 uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
418 uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));
420 bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
421 bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));
423 /* Allocate tables for recording each loop.  We set each entry, so they need
424    not be zeroed. */
425 loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
426 loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
427 loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
428 loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
429 loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
430 loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));
432 /* This is initialized by the unrolling code, so we go ahead
433    and clear them just in case we are not performing loop
434    unrolling. */
435 loop_unroll_factor = (int *) alloca (max_loop_num * sizeof (int));
436 bzero ((char *) loop_unroll_factor, max_loop_num * sizeof (int));
439 /* Allocate for BCT optimization */
440 loop_can_insert_bct = (int *) alloca (max_loop_num * sizeof (int));
441 bzero ((char *) loop_can_insert_bct, max_loop_num * sizeof (int));
443 loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
444 bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));
446 loop_increment = (rtx *) alloca (max_loop_num * sizeof (rtx));
447 loop_comparison_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
448 loop_start_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
449 bzero ((char *) loop_increment, max_loop_num * sizeof (rtx));
450 bzero ((char *) loop_comparison_value, max_loop_num * sizeof (rtx));
451 bzero ((char *) loop_start_value, max_loop_num * sizeof (rtx));
453 loop_comparison_code
454   = (enum rtx_code *) alloca (max_loop_num * sizeof (enum rtx_code));
455 bzero ((char *) loop_comparison_code, max_loop_num * sizeof (enum rtx_code));
458 /* Find and process each loop.
459 First, find them, and record them in order of their beginnings. */
460 find_and_verify_loops (f);
462 /* Now find all register lifetimes. This must be done after
463    find_and_verify_loops, because it might reorder the insns in the
464    function. */
465 reg_scan (f, max_reg_num (), 1);
467 /* See if we went too far. */
468 if (get_max_uid () > max_uid_for_loop)
471 /* Compute the mapping from uids to luids.
472 LUIDs are numbers assigned to insns, like uids,
473 except that luids increase monotonically through the code.
474 Don't assign luids to line-number NOTEs, so that the distance in luids
475 between two insns is not affected by -g. */
477 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
480   if (GET_CODE (insn) != NOTE
481       || NOTE_LINE_NUMBER (insn) <= 0)
482     uid_luid[INSN_UID (insn)] = ++i;
483   else
484     /* Give a line number note the same luid as preceding insn. */
485     uid_luid[INSN_UID (insn)] = i;
490 /* Don't leave gaps in uid_luid for insns that have been
491 deleted. It is possible that the first or last insn
492 using some register has been deleted by cross-jumping.
493 Make sure that uid_luid for that former insn's uid
494 points to the general area where that insn used to be. */
495 for (i = 0; i < max_uid_for_loop; i++)
496   {
497     uid_luid[0] = uid_luid[i];
498     if (uid_luid[0] != 0)
499       break;
500   }
501 for (i = 0; i < max_uid_for_loop; i++)
502   if (uid_luid[i] == 0)
503     uid_luid[i] = uid_luid[i - 1];
505 /* Create a mapping from loops to BLOCK tree nodes. */
506 if (unroll_p && write_symbols != NO_DEBUG)
507   find_loop_tree_blocks ();
509 /* Determine if the function has indirect jump. On some systems
510 this prevents low overhead loop instructions from being used. */
511 indirect_jump_in_function = indirect_jump_in_function_p (f);
513 /* Now scan the loops, last ones first, since this means inner ones are done
514 before outer ones. */
515 for (i = max_loop_num - 1; i >= 0; i--)
516   if (! loop_invalid[i] && loop_number_loop_ends[i])
517     scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
518                max_reg_num (), unroll_p);
520 /* If debugging and unrolling loops, we must replicate the tree nodes
521 corresponding to the blocks inside the loop, so that the original one
522 to one mapping will remain. */
523 if (unroll_p && write_symbols != NO_DEBUG)
524   unroll_block_trees ();
527 /* Optimize one loop whose start is LOOP_START and end is END.
528 LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
529 NOTE_INSN_LOOP_END. */
531 /* ??? Could also move memory writes out of loops if the destination address
532 is invariant, the source is invariant, the memory write is not volatile,
533 and if we can prove that no read inside the loop can read this address
534 before the write occurs. If there is a read of this address after the
535 write, then we can also mark the memory read as invariant. */
538 scan_loop (loop_start, end, nregs, unroll_p)
545 /* 1 if we are scanning insns that could be executed zero times. */
547 /* 1 if we are scanning insns that might never be executed
548 due to a subroutine call which might exit before they are reached. */
550 /* For a rotated loop that is entered near the bottom,
551 this is the label at the top. Otherwise it is zero. */
553 /* Jump insn that enters the loop, or 0 if control drops in. */
554 rtx loop_entry_jump = 0;
555 /* Place in the loop where control enters. */
557 /* Number of insns in the loop. */
562 /* The SET from an insn, if it is the only SET in the insn. */
564 /* Chain describing insns movable in current loop. */
565 struct movable *movables = 0;
566 /* Last element in `movables' -- so we can add elements at the end. */
567 struct movable *last_movable = 0;
568 /* Ratio of extra register life span we can justify
569 for saving an instruction. More if loop doesn't call subroutines
570 since in that case saving an insn makes more difference
571 and more registers are available. */
573 /* If we have calls, contains the insn in which a register was used
574 if it was used exactly once; contains const0_rtx if it was used more
576 rtx *reg_single_usage = 0;
577 /* Nonzero if we are scanning instructions in a sub-loop. */
580 n_times_set = (int *) alloca (nregs * sizeof (int));
581 n_times_used = (int *) alloca (nregs * sizeof (int));
582 may_not_optimize = (char *) alloca (nregs);
584 /* Determine whether this loop starts with a jump down to a test at
585 the end. This will occur for a small number of loops with a test
586 that is too complex to duplicate in front of the loop.
588 We search for the first insn or label in the loop, skipping NOTEs.
589 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
590 (because we might have a loop executed only once that contains a
591 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
592 (in case we have a degenerate loop).
594 Note that if we mistakenly think that a loop is entered at the top
595 when, in fact, it is entered at the exit test, the only effect will be
596 slightly poorer optimization. Making the opposite error can generate
597 incorrect code. Since very few loops now start with a jump to the
598 exit test, the code here to detect that case is very conservative. */
600 for (p
= NEXT_INSN (loop_start
);
602 && GET_CODE (p
) != CODE_LABEL
&& GET_RTX_CLASS (GET_CODE (p
)) != 'i'
603 && (GET_CODE (p
) != NOTE
604 || (NOTE_LINE_NUMBER (p
) != NOTE_INSN_LOOP_BEG
605 && NOTE_LINE_NUMBER (p
) != NOTE_INSN_LOOP_END
));
611 /* Set up variables describing this loop. */
612 prescan_loop (loop_start
, end
);
613 threshold
= (loop_has_call
? 1 : 2) * (1 + n_non_fixed_regs
);
615 /* If loop has a jump before the first label,
616 the true entry is the target of that jump.
617 Start scan from there.
618 But record in LOOP_TOP the place where the end-test jumps
619 back to so we can scan that after the end of the loop. */
620 if (GET_CODE (p
) == JUMP_INSN
)
624 /* Loop entry must be unconditional jump (and not a RETURN) */
626 && JUMP_LABEL (p
) != 0
627 /* Check to see whether the jump actually
628 jumps out of the loop (meaning it's no loop).
629 This case can happen for things like
630 do {..} while (0). If this label was generated previously
631 by loop, we can't tell anything about it and have to reject
632 the loop. */
633 && INSN_UID (JUMP_LABEL (p
)) < max_uid_for_loop
634 && INSN_LUID (JUMP_LABEL (p
)) >= INSN_LUID (loop_start
)
635 && INSN_LUID (JUMP_LABEL (p
)) < INSN_LUID (end
))
637 loop_top
= next_label (scan_start
);
638 scan_start
= JUMP_LABEL (p
);
642 /* If SCAN_START was an insn created by loop, we don't know its luid
643 as required by loop_reg_used_before_p. So skip such loops. (This
644 test may never be true, but it's best to play it safe.)
646 Also, skip loops where we do not start scanning at a label. This
647 test also rejects loops starting with a JUMP_INSN that failed the
648 test above. */
650 if (INSN_UID (scan_start
) >= max_uid_for_loop
651 || GET_CODE (scan_start
) != CODE_LABEL
)
653 if (loop_dump_stream
)
654 fprintf (loop_dump_stream
, "\nLoop from %d to %d is phony.\n\n",
655 INSN_UID (loop_start
), INSN_UID (end
));
659 /* Count number of times each reg is set during this loop.
660 Set may_not_optimize[I] if it is not safe to move out
661 the setting of register I. If this loop has calls, set
662 reg_single_usage[I]. */
664 bzero ((char *) n_times_set
, nregs
* sizeof (int));
665 bzero (may_not_optimize
, nregs
);
669 reg_single_usage
= (rtx
*) alloca (nregs
* sizeof (rtx
));
670 bzero ((char *) reg_single_usage
, nregs
* sizeof (rtx
));
673 count_loop_regs_set (loop_top
? loop_top
: loop_start
, end
,
674 may_not_optimize
, reg_single_usage
, &insn_count
, nregs
);
676 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
677 may_not_optimize
[i
] = 1, n_times_set
[i
] = 1;
678 bcopy ((char *) n_times_set
, (char *) n_times_used
, nregs
* sizeof (int));
680 if (loop_dump_stream
)
682 fprintf (loop_dump_stream
, "\nLoop from %d to %d: %d real insns.\n",
683 INSN_UID (loop_start
), INSN_UID (end
), insn_count
);
685 fprintf (loop_dump_stream
, "Continue at insn %d.\n",
686 INSN_UID (loop_continue
));
689 /* Scan through the loop finding insns that are safe to move.
690 Set n_times_set negative for the reg being set, so that
691 this reg will be considered invariant for subsequent insns.
692 We consider whether subsequent insns use the reg
693 in deciding whether it is worth actually moving.
695 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
696 and therefore it is possible that the insns we are scanning
697 would never be executed. At such times, we must make sure
698 that it is safe to execute the insn once instead of zero times.
699 When MAYBE_NEVER is 0, all insns will be executed at least once
700 so that is not a problem. */
706 /* At end of a straight-in loop, we are done.
707 At end of a loop entered at the bottom, scan the top. */
720 if (GET_RTX_CLASS (GET_CODE (p
)) == 'i'
721 && find_reg_note (p
, REG_LIBCALL
, NULL_RTX
))
723 else if (GET_RTX_CLASS (GET_CODE (p
)) == 'i'
724 && find_reg_note (p
, REG_RETVAL
, NULL_RTX
))
727 if (GET_CODE (p
) == INSN
728 && (set
= single_set (p
))
729 && GET_CODE (SET_DEST (set
)) == REG
730 && ! may_not_optimize
[REGNO (SET_DEST (set
))])
735 rtx src
= SET_SRC (set
);
736 rtx dependencies
= 0;
738 /* Figure out what to use as a source of this insn. If a REG_EQUIV
739 note is given or if a REG_EQUAL note with a constant operand is
740 specified, use it as the source and mark that we should move
741 this insn by calling emit_move_insn rather than duplicating the
742 insn.
744 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
745 is present. */
746 temp
= find_reg_note (p
, REG_EQUIV
, NULL_RTX
);
748 src
= XEXP (temp
, 0), move_insn
= 1;
751 temp
= find_reg_note (p
, REG_EQUAL
, NULL_RTX
);
752 if (temp
&& CONSTANT_P (XEXP (temp
, 0)))
753 src
= XEXP (temp
, 0), move_insn
= 1;
754 if (temp
&& find_reg_note (p
, REG_RETVAL
, NULL_RTX
))
756 src
= XEXP (temp
, 0);
757 /* A libcall block can use regs that don't appear in
758 the equivalent expression. To move the libcall,
759 we must move those regs too. */
760 dependencies
= libcall_other_reg (p
, src
);
764 /* Don't try to optimize a register that was made
765 by loop-optimization for an inner loop.
766 We don't know its life-span, so we can't compute the benefit. */
767 if (REGNO (SET_DEST (set
)) >= max_reg_before_loop
)
769 /* In order to move a register, we need to have one of three cases:
770 (1) it is used only in the same basic block as the set
771 (2) it is not a user variable and it is not used in the
772 exit test (this can cause the variable to be used
773 before it is set just like a user-variable).
774 (3) the set is guaranteed to be executed once the loop starts,
775 and the reg is not used until after that. */
776 else if (! ((! maybe_never
777 && ! loop_reg_used_before_p (set
, p
, loop_start
,
779 || (! REG_USERVAR_P (SET_DEST (set
))
780 && ! REG_LOOP_TEST_P (SET_DEST (set
)))
781 || reg_in_basic_block_p (p
, SET_DEST (set
))))
783 else if ((tem
= invariant_p (src
))
784 && (dependencies
== 0
785 || (tem2
= invariant_p (dependencies
)) != 0)
786 && (n_times_set
[REGNO (SET_DEST (set
))] == 1
788 = consec_sets_invariant_p (SET_DEST (set
),
789 n_times_set
[REGNO (SET_DEST (set
))],
791 /* If the insn can cause a trap (such as divide by zero),
792 can't move it unless it's guaranteed to be executed
793 once loop is entered. Even a function call might
794 prevent the trap insn from being reached
795 (since it might exit!) */
796 && ! ((maybe_never
|| call_passed
)
797 && may_trap_p (src
)))
799 register struct movable
*m
;
800 register int regno
= REGNO (SET_DEST (set
));
802 /* A potential lossage is where we have a case where two insns
803 can be combined as long as they are both in the loop, but
804 we move one of them outside the loop. For large loops,
805 this can lose. The most common case of this is the address
806 of a function being called.
808 Therefore, if this register is marked as being used exactly
809 once if we are in a loop with calls (a "large loop"), see if
810 we can replace the usage of this register with the source
811 of this SET. If we can, delete this insn.
813 Don't do this if P has a REG_RETVAL note or if we have
814 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
816 if (reg_single_usage
&& reg_single_usage
[regno
] != 0
817 && reg_single_usage
[regno
] != const0_rtx
818 && REGNO_FIRST_UID (regno
) == INSN_UID (p
)
819 && (REGNO_LAST_UID (regno
)
820 == INSN_UID (reg_single_usage
[regno
]))
821 && n_times_set
[REGNO (SET_DEST (set
))] == 1
822 && ! side_effects_p (SET_SRC (set
))
823 && ! find_reg_note (p
, REG_RETVAL
, NULL_RTX
)
824 && (! SMALL_REGISTER_CLASSES
825 || (! (GET_CODE (SET_SRC (set
)) == REG
826 && REGNO (SET_SRC (set
)) < FIRST_PSEUDO_REGISTER
)))
827 /* This test is not redundant; SET_SRC (set) might be
828 a call-clobbered register and the life of REGNO
829 might span a call. */
830 && ! modified_between_p (SET_SRC (set
), p
,
831 reg_single_usage
[regno
])
832 && no_labels_between_p (p
, reg_single_usage
[regno
])
833 && validate_replace_rtx (SET_DEST (set
), SET_SRC (set
),
834 reg_single_usage
[regno
]))
836 /* Replace any usage in a REG_EQUAL note. Must copy the
837 new source, so that we don't get rtx sharing between the
838 SET_SOURCE and REG_NOTES of insn p. */
839 REG_NOTES (reg_single_usage
[regno
])
840 = replace_rtx (REG_NOTES (reg_single_usage
[regno
]),
841 SET_DEST (set
), copy_rtx (SET_SRC (set
)));
844 NOTE_LINE_NUMBER (p
) = NOTE_INSN_DELETED
;
845 NOTE_SOURCE_FILE (p
) = 0;
846 n_times_set
[regno
] = 0;
850 m
= (struct movable
*) alloca (sizeof (struct movable
));
854 m
->dependencies
= dependencies
;
855 m
->set_dest
= SET_DEST (set
);
857 m
->consec
= n_times_set
[REGNO (SET_DEST (set
))] - 1;
861 m
->move_insn
= move_insn
;
862 m
->is_equiv
= (find_reg_note (p
, REG_EQUIV
, NULL_RTX
) != 0);
863 m
->savemode
= VOIDmode
;
865 /* Set M->cond if either invariant_p or consec_sets_invariant_p
866 returned 2 (only conditionally invariant). */
867 m
->cond
= ((tem
| tem1
| tem2
) > 1);
868 m
->global
= (uid_luid
[REGNO_LAST_UID (regno
)] > INSN_LUID (end
)
869 || uid_luid
[REGNO_FIRST_UID (regno
)] < INSN_LUID (loop_start
));
871 m
->lifetime
= (uid_luid
[REGNO_LAST_UID (regno
)]
872 - uid_luid
[REGNO_FIRST_UID (regno
)]);
873 m
->savings
= n_times_used
[regno
];
874 if (find_reg_note (p
, REG_RETVAL
, NULL_RTX
))
875 m
->savings
+= libcall_benefit (p
);
876 n_times_set
[regno
] = move_insn
? -2 : -1;
877 /* Add M to the end of the chain MOVABLES. */
881 last_movable
->next
= m
;
886 /* Skip this insn, not checking REG_LIBCALL notes. */
887 p
= next_nonnote_insn (p
);
888 /* Skip the consecutive insns, if there are any. */
889 p
= skip_consec_insns (p
, m
->consec
);
890 /* Back up to the last insn of the consecutive group. */
891 p
= prev_nonnote_insn (p
);
893 /* We must now reset m->move_insn, m->is_equiv, and possibly
894 m->set_src to correspond to the effects of all the
895 insns. */
896 temp
= find_reg_note (p
, REG_EQUIV
, NULL_RTX
);
898 m
->set_src
= XEXP (temp
, 0), m
->move_insn
= 1;
901 temp
= find_reg_note (p
, REG_EQUAL
, NULL_RTX
);
902 if (temp
&& CONSTANT_P (XEXP (temp
, 0)))
903 m
->set_src
= XEXP (temp
, 0), m
->move_insn
= 1;
908 m
->is_equiv
= (find_reg_note (p
, REG_EQUIV
, NULL_RTX
) != 0);
911 /* If this register is always set within a STRICT_LOW_PART
912 or set to zero, then its high bytes are constant.
913 So clear them outside the loop and within the loop
914 just load the low bytes.
915 We must check that the machine has an instruction to do so.
916 Also, if the value loaded into the register
917 depends on the same register, this cannot be done. */
918 else if (SET_SRC (set
) == const0_rtx
919 && GET_CODE (NEXT_INSN (p
)) == INSN
920 && (set1
= single_set (NEXT_INSN (p
)))
921 && GET_CODE (set1
) == SET
922 && (GET_CODE (SET_DEST (set1
)) == STRICT_LOW_PART
)
923 && (GET_CODE (XEXP (SET_DEST (set1
), 0)) == SUBREG
)
924 && (SUBREG_REG (XEXP (SET_DEST (set1
), 0))
926 && !reg_mentioned_p (SET_DEST (set
), SET_SRC (set1
)))
928 register int regno
= REGNO (SET_DEST (set
));
929 if (n_times_set
[regno
] == 2)
931 register struct movable
*m
;
932 m
= (struct movable
*) alloca (sizeof (struct movable
));
935 m
->set_dest
= SET_DEST (set
);
943 /* If the insn may not be executed on some cycles,
944 we can't clear the whole reg; clear just high part.
945 Not even if the reg is used only within this loop.
952 Clearing x before the inner loop could clobber a value
953 being saved from the last time around the outer loop.
954 However, if the reg is not used outside this loop
955 and all uses of the register are in the same
956 basic block as the store, there is no problem.
958 If this insn was made by loop, we don't know its
959 INSN_LUID and hence must make a conservative
960 assumption. */
961 m
->global
= (INSN_UID (p
) >= max_uid_for_loop
962 || (uid_luid
[REGNO_LAST_UID (regno
)]
964 || (uid_luid
[REGNO_FIRST_UID (regno
)]
966 || (labels_in_range_p
967 (p
, uid_luid
[REGNO_FIRST_UID (regno
)])));
968 if (maybe_never
&& m
->global
)
969 m
->savemode
= GET_MODE (SET_SRC (set1
));
971 m
->savemode
= VOIDmode
;
975 m
->lifetime
= (uid_luid
[REGNO_LAST_UID (regno
)]
976 - uid_luid
[REGNO_FIRST_UID (regno
)]);
978 n_times_set
[regno
] = -1;
979 /* Add M to the end of the chain MOVABLES. */
983 last_movable
->next
= m
;
988 /* Past a call insn, we get to insns which might not be executed
989 because the call might exit. This matters for insns that trap.
990 Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
991 so they don't count. */
992 else if (GET_CODE (p
) == CALL_INSN
&& ! in_libcall
)
994 /* Past a label or a jump, we get to insns for which we
995 can't count on whether or how many times they will be
996 executed during each iteration. Therefore, we can
997 only move out sets of trivial variables
998 (those not used after the loop). */
999 /* Similar code appears twice in strength_reduce. */
1000 else if ((GET_CODE (p
) == CODE_LABEL
|| GET_CODE (p
) == JUMP_INSN
)
1001 /* If we enter the loop in the middle, and scan around to the
1002 beginning, don't set maybe_never for that. This must be an
1003 unconditional jump, otherwise the code at the top of the
1004 loop might never be executed. Unconditional jumps are
1005 followed by a barrier and then the loop end. */
1006 && ! (GET_CODE (p
) == JUMP_INSN
&& JUMP_LABEL (p
) == loop_top
1007 && NEXT_INSN (NEXT_INSN (p
)) == end
1008 && simplejump_p (p
)))
1010 else if (GET_CODE (p
) == NOTE
)
1012 /* At the virtual top of a converted loop, insns are again known to
1013 be executed: logically, the loop begins here even though the exit
1014 code has been duplicated. */
1015 if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_VTOP
&& loop_depth
== 0)
1016 maybe_never
= call_passed
= 0;
1017 else if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_BEG
)
1019 else if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_END
)
1024 /* If one movable subsumes another, ignore that other. */
1026 ignore_some_movables (movables
);
1028 /* For each movable insn, see if the reg that it loads
1029 leads, when it dies, right into another conditionally movable insn.
1030 If so, record that the second insn "forces" the first one,
1031 since the second can be moved only if the first is. */
1033 force_movables (movables
);
1035 /* See if there are multiple movable insns that load the same value.
1036 If there are, make all but the first point at the first one
1037 through the `match' field, and add the priorities of them
1038 all together as the priority of the first. */
1040 combine_movables (movables
, nregs
);
1042 /* Now consider each movable insn to decide whether it is worth moving.
1043 Store 0 in n_times_set for each reg that is moved. */
1045 move_movables (movables
, threshold
,
1046 insn_count
, loop_start
, end
, nregs
);
1048 /* Now candidates that still are negative are those not moved.
1049 Change n_times_set to indicate that those are not actually invariant. */
1050 for (i
= 0; i
< nregs
; i
++)
1051 if (n_times_set
[i
] < 0)
1052 n_times_set
[i
] = n_times_used
[i
];
1054 if (flag_strength_reduce
)
1055 strength_reduce (scan_start
, end
, loop_top
,
1056 insn_count
, loop_start
, end
, unroll_p
);
1059 /* Add elements to *OUTPUT to record all the pseudo-regs
1060 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1063 record_excess_regs (in_this
, not_in_this
, output
)
1064 rtx in_this
, not_in_this
;
1071 code
= GET_CODE (in_this
);
1085 if (REGNO (in_this
) >= FIRST_PSEUDO_REGISTER
1086 && ! reg_mentioned_p (in_this
, not_in_this
))
1087 *output
= gen_rtx_EXPR_LIST (VOIDmode
, in_this
, *output
);
1094 fmt
= GET_RTX_FORMAT (code
);
1095 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1102 for (j
= 0; j
< XVECLEN (in_this
, i
); j
++)
1103 record_excess_regs (XVECEXP (in_this
, i
, j
), not_in_this
, output
);
1107 record_excess_regs (XEXP (in_this
, i
), not_in_this
, output
);
1113 /* Check what regs are referred to in the libcall block ending with INSN,
1114 aside from those mentioned in the equivalent value.
1115 If there are none, return 0.
1116 If there are one or more, return an EXPR_LIST containing all of them. */
1119 libcall_other_reg (insn
, equiv
)
1122 rtx note
= find_reg_note (insn
, REG_RETVAL
, NULL_RTX
);
1123 rtx p
= XEXP (note
, 0);
1126 /* First, find all the regs used in the libcall block
1127 that are not mentioned as inputs to the result. */
1131 if (GET_CODE (p
) == INSN
|| GET_CODE (p
) == JUMP_INSN
1132 || GET_CODE (p
) == CALL_INSN
)
1133 record_excess_regs (PATTERN (p
), equiv
, &output
);
1140 /* Return 1 if all uses of REG
1141 are between INSN and the end of the basic block. */
1144 reg_in_basic_block_p (insn
, reg
)
1147 int regno
= REGNO (reg
);
1150 if (REGNO_FIRST_UID (regno
) != INSN_UID (insn
))
1153 /* Search this basic block for the already recorded last use of the reg. */
1154 for (p
= insn
; p
; p
= NEXT_INSN (p
))
1156 switch (GET_CODE (p
))
1163 /* Ordinary insn: if this is the last use, we win. */
1164 if (REGNO_LAST_UID (regno
) == INSN_UID (p
))
1169 /* Jump insn: if this is the last use, we win. */
1170 if (REGNO_LAST_UID (regno
) == INSN_UID (p
))
1172 /* Otherwise, it's the end of the basic block, so we lose. */
1177 /* It's the end of the basic block, so we lose. */
1185 /* The "last use" doesn't follow the "first use"?? */
1189 /* Compute the benefit of eliminating the insns in the block whose
1190 last insn is LAST. This may be a group of insns used to compute a
1191 value directly or can contain a library call. */
1194 libcall_benefit (last
)
1200 for (insn
= XEXP (find_reg_note (last
, REG_RETVAL
, NULL_RTX
), 0);
1201 insn
!= last
; insn
= NEXT_INSN (insn
))
1203 if (GET_CODE (insn
) == CALL_INSN
)
1204 benefit
+= 10; /* Assume at least this many insns in a library
1206 else if (GET_CODE (insn
) == INSN
1207 && GET_CODE (PATTERN (insn
)) != USE
1208 && GET_CODE (PATTERN (insn
)) != CLOBBER
)
1215 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1218 skip_consec_insns (insn
, count
)
1222 for (; count
> 0; count
--)
1226 /* If first insn of libcall sequence, skip to end. */
1227 /* Do this at start of loop, since INSN is guaranteed to
1228    be an insn here. */
1229 if (GET_CODE (insn
) != NOTE
1230 && (temp
= find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
)))
1231 insn
= XEXP (temp
, 0);
1233 do insn
= NEXT_INSN (insn
);
1234 while (GET_CODE (insn
) == NOTE
);
1240 /* Ignore any movable whose insn falls within a libcall
1241 which is part of another movable.
1242 We make use of the fact that the movable for the libcall value
1243 was made later and so appears later on the chain. */
1246 ignore_some_movables (movables
)
1247 struct movable
*movables
;
1249 register struct movable
*m
, *m1
;
1251 for (m
= movables
; m
; m
= m
->next
)
1253 /* Is this a movable for the value of a libcall? */
1254 rtx note
= find_reg_note (m
->insn
, REG_RETVAL
, NULL_RTX
);
1258 /* Check for earlier movables inside that range,
1259 and mark them invalid. We cannot use LUIDs here because
1260 insns created by loop.c for prior loops don't have LUIDs.
1261 Rather than reject all such insns from movables, we just
1262 explicitly check each insn in the libcall (since invariant
1263 libcalls aren't that common). */
1264 for (insn
= XEXP (note
, 0); insn
!= m
->insn
; insn
= NEXT_INSN (insn
))
1265 for (m1
= movables
; m1
!= m
; m1
= m1
->next
)
1266 if (m1
->insn
== insn
)
1272 /* For each movable insn, see if the reg that it loads
1273 leads, when it dies, right into another conditionally movable insn.
1274 If so, record that the second insn "forces" the first one,
1275 since the second can be moved only if the first is. */
1278 force_movables (movables
)
1279 struct movable
*movables
;
1281 register struct movable
*m
, *m1
;
1282 for (m1
= movables
; m1
; m1
= m1
->next
)
1283 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1284 if (!m1
->partial
&& !m1
->done
)
1286 int regno
= m1
->regno
;
1287 for (m
= m1
->next
; m
; m
= m
->next
)
1288 /* ??? Could this be a bug? What if CSE caused the
1289 register of M1 to be used after this insn?
1290 Since CSE does not update regno_last_uid,
1291 this insn M->insn might not be where it dies.
1292 But very likely this doesn't matter; what matters is
1293 that M's reg is computed from M1's reg. */
1294 if (INSN_UID (m
->insn
) == REGNO_LAST_UID (regno
)
1297 if (m
!= 0 && m
->set_src
== m1
->set_dest
1298 /* If m->consec, m->set_src isn't valid. */
1302 /* Increase the priority of moving the first insn
1303 since it permits the second to be moved as well. */
1307 m1->lifetime += m->lifetime;
1308 m1->savings += m->savings;
1313 /* Find invariant expressions that are equal and can be combined into
1314    one register. */
1317 combine_movables (movables
, nregs
)
1318 struct movable
*movables
;
1321 register struct movable
*m
;
1322 char *matched_regs
= (char *) alloca (nregs
);
1323 enum machine_mode mode
;
1325 /* Regs that are set more than once are not allowed to match
1326 or be matched. I'm no longer sure why not. */
1327 /* Perhaps testing m->consec_sets would be more appropriate here? */
1329 for (m
= movables
; m
; m
= m
->next
)
1330 if (m
->match
== 0 && n_times_used
[m
->regno
] == 1 && !m
->partial
)
1332 register struct movable
*m1
;
1333 int regno
= m
->regno
;
1335 bzero (matched_regs
, nregs
);
1336 matched_regs
[regno
] = 1;
1338 /* We want later insns to match the first one. Don't make the first
1339 one match any later ones. So start this loop at m->next. */
1340 for (m1
= m
->next
; m1
; m1
= m1
->next
)
1341 if (m
!= m1
&& m1
->match
== 0 && n_times_used
[m1
->regno
] == 1
1342 /* A reg used outside the loop mustn't be eliminated. */
1344 /* A reg used for zero-extending mustn't be eliminated. */
1346 && (matched_regs
[m1
->regno
]
1349 /* Can combine regs with different modes loaded from the
1350 same constant only if the modes are the same or
1351 if both are integer modes with M wider or the same
1352 width as M1. The check for integer is redundant, but
1353 safe, since the only case of differing destination
1354 modes with equal sources is when both sources are
1355 VOIDmode, i.e., CONST_INT. */
1356 (GET_MODE (m
->set_dest
) == GET_MODE (m1
->set_dest
)
1357 || (GET_MODE_CLASS (GET_MODE (m
->set_dest
)) == MODE_INT
1358 && GET_MODE_CLASS (GET_MODE (m1
->set_dest
)) == MODE_INT
1359 && (GET_MODE_BITSIZE (GET_MODE (m
->set_dest
))
1360 >= GET_MODE_BITSIZE (GET_MODE (m1
->set_dest
)))))
1361 /* See if the source of M1 says it matches M. */
1362 && ((GET_CODE (m1
->set_src
) == REG
1363 && matched_regs
[REGNO (m1
->set_src
)])
1364 || rtx_equal_for_loop_p (m
->set_src
, m1
->set_src
,
1366 && ((m
->dependencies
== m1
->dependencies
)
1367 || rtx_equal_p (m
->dependencies
, m1
->dependencies
)))
1369 m
->lifetime
+= m1
->lifetime
;
1370 m
->savings
+= m1
->savings
;
1373 matched_regs
[m1
->regno
] = 1;
1377 /* Now combine the regs used for zero-extension.
1378 This can be done for those not marked `global'
1379 provided their lives don't overlap. */
1381 for (mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
); mode
!= VOIDmode
;
1382 mode
= GET_MODE_WIDER_MODE (mode
))
1384 register struct movable
*m0
= 0;
1386 /* Combine all the registers for extension from mode MODE.
1387 Don't combine any that are used outside this loop. */
1388 for (m
= movables
; m
; m
= m
->next
)
1389 if (m
->partial
&& ! m
->global
1390 && mode
== GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m
->insn
)))))
1392 register struct movable
*m1
;
1393 int first
= uid_luid
[REGNO_FIRST_UID (m
->regno
)];
1394 int last
= uid_luid
[REGNO_LAST_UID (m
->regno
)];
1398 /* First one: don't check for overlap, just record it. */
1403 /* Make sure they extend to the same mode.
1404 (Almost always true.) */
1405 if (GET_MODE (m
->set_dest
) != GET_MODE (m0
->set_dest
))
1408 /* We already have one: check for overlap with those
1409 already combined together. */
1410 for (m1
= movables
; m1
!= m
; m1
= m1
->next
)
1411 if (m1
== m0
|| (m1
->partial
&& m1
->match
== m0
))
1412 if (! (uid_luid
[REGNO_FIRST_UID (m1
->regno
)] > last
1413 || uid_luid
[REGNO_LAST_UID (m1
->regno
)] < first
))
1416 /* No overlap: we can combine this with the others. */
1417 m0
->lifetime
+= m
->lifetime
;
1418 m0
->savings
+= m
->savings
;
1427 /* Return 1 if regs X and Y will become the same if moved. */
1430 regs_match_p (x
, y
, movables
)
1432 struct movable
*movables
;
1436 struct movable
*mx
, *my
;
1438 for (mx
= movables
; mx
; mx
= mx
->next
)
1439 if (mx
->regno
== xn
)
1442 for (my
= movables
; my
; my
= my
->next
)
1443 if (my
->regno
== yn
)
1447 && ((mx
->match
== my
->match
&& mx
->match
!= 0)
1449 || mx
== my
->match
));
1452 /* Return 1 if X and Y are identical-looking rtx's.
1453 This is the Lisp function EQUAL for rtx arguments.
1455 If two registers are matching movables or a movable register and an
1456 equivalent constant, consider them equal. */
1459 rtx_equal_for_loop_p (x
, y
, movables
)
1461 struct movable
*movables
;
1465 register struct movable
*m
;
1466 register enum rtx_code code
;
1471 if (x
== 0 || y
== 0)
1474 code
= GET_CODE (x
);
1476 /* If we have a register and a constant, they may sometimes be
1477    equal. */
1478 if (GET_CODE (x
) == REG
&& n_times_set
[REGNO (x
)] == -2
1480 for (m
= movables
; m
; m
= m
->next
)
1481 if (m
->move_insn
&& m
->regno
== REGNO (x
)
1482 && rtx_equal_p (m
->set_src
, y
))
1485 else if (GET_CODE (y
) == REG
&& n_times_set
[REGNO (y
)] == -2
1487 for (m
= movables
; m
; m
= m
->next
)
1488 if (m
->move_insn
&& m
->regno
== REGNO (y
)
1489 && rtx_equal_p (m
->set_src
, x
))
1492 /* Otherwise, rtx's of different codes cannot be equal. */
1493 if (code
!= GET_CODE (y
))
1496 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1497 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1499 if (GET_MODE (x
) != GET_MODE (y
))
1502 /* These three types of rtx's can be compared nonrecursively. */
1504 return (REGNO (x
) == REGNO (y
) || regs_match_p (x
, y
, movables
));
1506 if (code
== LABEL_REF
)
1507 return XEXP (x
, 0) == XEXP (y
, 0);
1508 if (code
== SYMBOL_REF
)
1509 return XSTR (x
, 0) == XSTR (y
, 0);
1511 /* Compare the elements. If any pair of corresponding elements
1512 fail to match, return 0 for the whole thing. */
1514 fmt
= GET_RTX_FORMAT (code
);
1515 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1520 if (XWINT (x
, i
) != XWINT (y
, i
))
1525 if (XINT (x
, i
) != XINT (y
, i
))
1530 /* Two vectors must have the same length. */
1531 if (XVECLEN (x
, i
) != XVECLEN (y
, i
))
1534 /* And the corresponding elements must match. */
1535 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
1536 if (rtx_equal_for_loop_p (XVECEXP (x
, i
, j
), XVECEXP (y
, i
, j
), movables
) == 0)
1541 if (rtx_equal_for_loop_p (XEXP (x
, i
), XEXP (y
, i
), movables
) == 0)
1546 if (strcmp (XSTR (x
, i
), XSTR (y
, i
)))
1551 /* These are just backpointers, so they don't matter. */
1557 /* It is believed that rtx's at this level will never
1558 contain anything but integers and other rtx's,
1559 except for within LABEL_REFs and SYMBOL_REFs. */
1567 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1568 insns in INSNS which use that reference. */
1571 add_label_notes (x
, insns
)
1575 enum rtx_code code
= GET_CODE (x
);
1580 if (code
== LABEL_REF
&& !LABEL_REF_NONLOCAL_P (x
))
1582 rtx next
= next_real_insn (XEXP (x
, 0));
1584 /* Don't record labels that refer to dispatch tables.
1585 This is not necessary, since the tablejump references the same label.
1586 And if we did record them, flow.c would make worse code. */
1588 || ! (GET_CODE (next
) == JUMP_INSN
1589 && (GET_CODE (PATTERN (next
)) == ADDR_VEC
1590 || GET_CODE (PATTERN (next
)) == ADDR_DIFF_VEC
)))
1592 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
1593 if (reg_mentioned_p (XEXP (x
, 0), insn
))
1594 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_LABEL
, XEXP (x
, 0),
1600 fmt
= GET_RTX_FORMAT (code
);
1601 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1604 add_label_notes (XEXP (x
, i
), insns
);
1605 else if (fmt
[i
] == 'E')
1606 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
1607 add_label_notes (XVECEXP (x
, i
, j
), insns
);
1611 /* Scan MOVABLES, and move the insns that deserve to be moved.
1612 If two matching movables are combined, replace one reg with the
1613 other throughout. */
1616 move_movables (movables
, threshold
, insn_count
, loop_start
, end
, nregs
)
1617 struct movable
*movables
;
1625 register struct movable
*m
;
1627 /* Map of pseudo-register replacements to handle combining
1628 when we move several insns that load the same value
1629 into different pseudo-registers. */
1630 rtx
*reg_map
= (rtx
*) alloca (nregs
* sizeof (rtx
));
1631 char *already_moved
= (char *) alloca (nregs
);
1633 bzero (already_moved
, nregs
);
1634 bzero ((char *) reg_map
, nregs
* sizeof (rtx
));
1638 for (m
= movables
; m
; m
= m
->next
)
1640 /* Describe this movable insn. */
1642 if (loop_dump_stream
)
1644 fprintf (loop_dump_stream
, "Insn %d: regno %d (life %d), ",
1645 INSN_UID (m
->insn
), m
->regno
, m
->lifetime
);
1647 fprintf (loop_dump_stream
, "consec %d, ", m
->consec
);
1649 fprintf (loop_dump_stream
, "cond ");
1651 fprintf (loop_dump_stream
, "force ");
1653 fprintf (loop_dump_stream
, "global ");
1655 fprintf (loop_dump_stream
, "done ");
1657 fprintf (loop_dump_stream
, "move-insn ");
1659 fprintf (loop_dump_stream
, "matches %d ",
1660 INSN_UID (m
->match
->insn
));
1662 fprintf (loop_dump_stream
, "forces %d ",
1663 INSN_UID (m
->forces
->insn
));
1666 /* Count movables. Value used in heuristics in strength_reduce. */
1669 /* Ignore the insn if it's already done (it matched something else).
1670 Otherwise, see if it is now safe to move. */
1674 || (1 == invariant_p (m
->set_src
)
1675 && (m
->dependencies
== 0
1676 || 1 == invariant_p (m
->dependencies
))
1678 || 1 == consec_sets_invariant_p (m
->set_dest
,
1681 && (! m
->forces
|| m
->forces
->done
))
1685 int savings
= m
->savings
;
1687 /* We have an insn that is safe to move.
1688 Compute its desirability. */
1693 if (loop_dump_stream
)
1694 fprintf (loop_dump_stream
, "savings %d ", savings
);
1696 if (moved_once
[regno
])
1700 if (loop_dump_stream
)
1701 fprintf (loop_dump_stream
, "halved since already moved ");
1704 /* An insn MUST be moved if we already moved something else
1705 which is safe only if this one is moved too: that is,
1706 if already_moved[REGNO] is nonzero. */
1708 /* An insn is desirable to move if the new lifetime of the
1709 register is no more than THRESHOLD times the old lifetime.
1710 If it's not desirable, it means the loop is so big
1711 that moving won't speed things up much,
1712 and it is liable to make register usage worse. */
1714 /* It is also desirable to move if it can be moved at no
1715 extra cost because something else was already moved. */
1717 if (already_moved
[regno
]
1718 || flag_move_all_movables
1719 || (threshold
* savings
* m
->lifetime
) >= insn_count
1720 || (m
->forces
&& m
->forces
->done
1721 && n_times_used
[m
->forces
->regno
] == 1))
1724 register struct movable
*m1
;
1727 /* Now move the insns that set the reg. */
1729 if (m
->partial
&& m
->match
)
1733 /* Find the end of this chain of matching regs.
1734 Thus, we load each reg in the chain from that one reg.
1735 And that reg is loaded with 0 directly,
1736 since it has ->match == 0. */
1737 for (m1
= m
; m1
->match
; m1
= m1
->match
);
1738 newpat
= gen_move_insn (SET_DEST (PATTERN (m
->insn
)),
1739 SET_DEST (PATTERN (m1
->insn
)));
1740 i1
= emit_insn_before (newpat
, loop_start
);
1742 /* Mark the moved, invariant reg as being allowed to
1743 share a hard reg with the other matching invariant. */
1744 REG_NOTES (i1
) = REG_NOTES (m
->insn
);
1745 r1
= SET_DEST (PATTERN (m
->insn
));
1746 r2
= SET_DEST (PATTERN (m1
->insn
));
1748 = gen_rtx_EXPR_LIST (VOIDmode
, r1
,
1749 gen_rtx_EXPR_LIST (VOIDmode
, r2
,
1751 delete_insn (m
->insn
);
1756 if (loop_dump_stream
)
1757 fprintf (loop_dump_stream
, " moved to %d", INSN_UID (i1
));
1759 /* If we are to re-generate the item being moved with a
1760 new move insn, first delete what we have and then emit
1761 the move insn before the loop. */
1762 else if (m
->move_insn
)
1766 for (count
= m
->consec
; count
>= 0; count
--)
1768 /* If this is the first insn of a library call sequence,
1769    skip to the end. */
1770 if (GET_CODE (p
) != NOTE
1771 && (temp
= find_reg_note (p
, REG_LIBCALL
, NULL_RTX
)))
1774 /* If this is the last insn of a libcall sequence, then
1775 delete every insn in the sequence except the last.
1776 The last insn is handled in the normal manner. */
1777 if (GET_CODE (p
) != NOTE
1778 && (temp
= find_reg_note (p
, REG_RETVAL
, NULL_RTX
)))
1780 temp
= XEXP (temp
, 0);
1782 temp
= delete_insn (temp
);
1785 p
= delete_insn (p
);
1786 while (p
&& GET_CODE (p
) == NOTE
)
1791 emit_move_insn (m
->set_dest
, m
->set_src
);
1792 temp
= get_insns ();
1795 add_label_notes (m
->set_src
, temp
);
1797 i1
= emit_insns_before (temp
, loop_start
);
1798 if (! find_reg_note (i1
, REG_EQUAL
, NULL_RTX
))
1800 = gen_rtx_EXPR_LIST (m
->is_equiv
? REG_EQUIV
: REG_EQUAL
,
1801 m
->set_src
, REG_NOTES (i1
));
1803 if (loop_dump_stream
)
1804 fprintf (loop_dump_stream
, " moved to %d", INSN_UID (i1
));
1806 /* The more regs we move, the less we like moving them. */
1811 for (count
= m
->consec
; count
>= 0; count
--)
1815 /* If first insn of libcall sequence, skip to end. */
1816 /* Do this at start of loop, since p is guaranteed to
1817    be an insn here. */
1818 if (GET_CODE (p
) != NOTE
1819 && (temp
= find_reg_note (p
, REG_LIBCALL
, NULL_RTX
)))
1822 /* If last insn of libcall sequence, move all
1823 insns except the last before the loop. The last
1824 insn is handled in the normal manner. */
1825 if (GET_CODE (p
) != NOTE
1826 && (temp
= find_reg_note (p
, REG_RETVAL
, NULL_RTX
)))
1830 rtx fn_address_insn
= 0;
1833 for (temp
= XEXP (temp
, 0); temp
!= p
;
1834 temp
= NEXT_INSN (temp
))
1840 if (GET_CODE (temp
) == NOTE
)
1843 body
= PATTERN (temp
);
1845 /* Find the next insn after TEMP,
1846 not counting USE or NOTE insns. */
1847 for (next
= NEXT_INSN (temp
); next
!= p
;
1848 next
= NEXT_INSN (next
))
1849 if (! (GET_CODE (next
) == INSN
1850 && GET_CODE (PATTERN (next
)) == USE
)
1851 && GET_CODE (next
) != NOTE
)
1854 /* If that is the call, this may be the insn
1855 that loads the function address.
1857 Extract the function address from the insn
1858 that loads it into a register.
1859 If this insn was cse'd, we get incorrect code.
1861 So emit a new move insn that copies the
1862 function address into the register that the
1863 call insn will use. flow.c will delete any
1864 redundant stores that we have created. */
1865 if (GET_CODE (next
) == CALL_INSN
1866 && GET_CODE (body
) == SET
1867 && GET_CODE (SET_DEST (body
)) == REG
1868 && (n
= find_reg_note (temp
, REG_EQUAL
,
1871 fn_reg
= SET_SRC (body
);
1872 if (GET_CODE (fn_reg
) != REG
)
1873 fn_reg
= SET_DEST (body
);
1874 fn_address
= XEXP (n
, 0);
1875 fn_address_insn
= temp
;
1877 /* We have the call insn.
1878 If it uses the register we suspect it might,
1879 load it with the correct address directly. */
1880 if (GET_CODE (temp
) == CALL_INSN
1882 && reg_referenced_p (fn_reg
, body
))
1883 emit_insn_after (gen_move_insn (fn_reg
,
1887 if (GET_CODE (temp
) == CALL_INSN
)
1889 i1
= emit_call_insn_before (body
, loop_start
);
1890 /* Because the USAGE information potentially
1891 contains objects other than hard registers
1892 we need to copy it. */
1893 if (CALL_INSN_FUNCTION_USAGE (temp
))
1894 CALL_INSN_FUNCTION_USAGE (i1
)
1895 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp
));
1898 i1
= emit_insn_before (body
, loop_start
);
1901 if (temp
== fn_address_insn
)
1902 fn_address_insn
= i1
;
1903 REG_NOTES (i1
) = REG_NOTES (temp
);
1907 if (m
->savemode
!= VOIDmode
)
1909 /* P sets REG to zero; but we should clear only
1910 the bits that are not covered by the mode
1911 m->savemode. */
1912 rtx reg
= m
->set_dest
;
1918 (GET_MODE (reg
), and_optab
, reg
,
1919 GEN_INT ((((HOST_WIDE_INT
) 1
1920 << GET_MODE_BITSIZE (m
->savemode
)))
1922 reg
, 1, OPTAB_LIB_WIDEN
);
1926 emit_move_insn (reg
, tem
);
1927 sequence
= gen_sequence ();
1929 i1
= emit_insn_before (sequence
, loop_start
);
1931 else if (GET_CODE (p
) == CALL_INSN
)
1933 i1
= emit_call_insn_before (PATTERN (p
), loop_start
);
1934 /* Because the USAGE information potentially
1935 contains objects other than hard registers
1936 we need to copy it. */
1937 if (CALL_INSN_FUNCTION_USAGE (p
))
1938 CALL_INSN_FUNCTION_USAGE (i1
)
1939 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p
));
1942 i1
= emit_insn_before (PATTERN (p
), loop_start
);
1944 REG_NOTES (i1
) = REG_NOTES (p
);
		  /* If there is a REG_EQUAL note present whose value is
		     not loop invariant, then delete it, since it may
		     cause problems with later optimization passes.
		     It is possible for cse to create such notes
		     like this as a result of record_jump_cond.  */

		  if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
		      && ! invariant_p (XEXP (temp, 0)))
		    remove_note (i1, temp);
		  if (loop_dump_stream)
		    fprintf (loop_dump_stream, " moved to %d",
			     INSN_UID (i1));

		  /* This isn't needed because REG_NOTES is copied
		     below and is wrong since P might be a PARALLEL.  */
		  if (REG_NOTES (i1) == 0
		      && ! m->partial /* But not if it's a zero-extend clr.  */
		      && ! m->global  /* and not if used outside the loop
					 (since it might get set outside).  */
		      && CONSTANT_P (SET_SRC (PATTERN (p))))
		    REG_NOTES (i1)
		      = gen_rtx_EXPR_LIST (REG_EQUAL,
					   SET_SRC (PATTERN (p)),
					   REG_NOTES (i1));
		  /* If library call, now fix the REG_NOTES that contain
		     insn pointers, namely REG_LIBCALL on FIRST
		     and REG_RETVAL on I1.  */
		  if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
		    {
		      XEXP (temp, 0) = first;
		      temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
		      XEXP (temp, 0) = i1;
		    }

		  do
		    p = NEXT_INSN (p);
		  while (p && GET_CODE (p) == NOTE);
	      /* The more regs we move, the less we like moving them.  */

	      /* Any other movable that loads the same register
		 MUST be moved.  */
	      already_moved[regno] = 1;

	      /* This reg has been moved out of one loop.  */
	      moved_once[regno] = 1;

	      /* The reg set here is now invariant.  */
	      n_times_set[regno] = 0;

	      /* Change the length-of-life info for the register
		 to say it lives at least the full length of this loop.
		 This will help guide optimizations in outer loops.  */

	      if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
		/* This is the old insn before all the moved insns.
		   We can't use the moved insn because it is out of range
		   in uid_luid.  Only the old insns have luids.  */
		REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
	      if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
		REGNO_LAST_UID (regno) = INSN_UID (end);
	      /* Combine with this moved insn any other matching movables.  */

	      for (m1 = movables; m1; m1 = m1->next)
		if (m1->match == m)
		  {
		    /* Schedule the reg loaded by M1
		       for replacement so that it shares the reg of M.
		       If the modes differ (only possible in restricted
		       circumstances), make a SUBREG.  */
		    if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
		      reg_map[m1->regno] = m->set_dest;
		    else
		      reg_map[m1->regno]
			= gen_lowpart_common (GET_MODE (m1->set_dest),
					      m->set_dest);
		    /* Get rid of the matching insn
		       and prevent further processing of it.  */
		    m1->done = 1;

		    /* If library call, delete all insns except the last,
		       which is deleted below.  */
		    if ((temp = find_reg_note (m1->insn, REG_RETVAL,
					       NULL_RTX)))
		      for (temp = XEXP (temp, 0); temp != m1->insn;
			   temp = NEXT_INSN (temp))
			delete_insn (temp);

		    delete_insn (m1->insn);

		    /* Any other movable that loads the same register
		       MUST be moved.  */
		    already_moved[m1->regno] = 1;

		    /* The reg merged here is now invariant,
		       if the reg it matches is invariant.  */
		    n_times_set[m1->regno] = 0;
		  }
	    else if (loop_dump_stream)
	      fprintf (loop_dump_stream, "not desirable");
	  }
	else if (loop_dump_stream && !m->match)
	  fprintf (loop_dump_stream, "not safe");

	if (loop_dump_stream)
	  fprintf (loop_dump_stream, "\n");
  new_start = loop_start;

  /* Go through all the instructions in the loop, making
     all the register substitutions scheduled in REG_MAP.  */
  for (p = new_start; p != end; p = NEXT_INSN (p))
    if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	|| GET_CODE (p) == CALL_INSN)
      {
	replace_regs (PATTERN (p), reg_map, nregs, 0);
	replace_regs (REG_NOTES (p), reg_map, nregs, 0);
      }
}
/* Scan X and replace the address of any MEM in it with ADDR.
   REG is the address that MEM should have before the replacement.  */

static void
replace_call_address (x, reg, addr)
     rtx x, reg, addr;
{
  register enum rtx_code code;
  register int i;
  register char *fmt;

  if (x == 0)
    return;
  code = GET_CODE (x);
  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
    case REG:
      return;

    case SET:
      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 1), reg, addr);
      return;

    case CALL:
      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 0), reg, addr);
      return;

    case MEM:
      /* If this MEM uses a reg other than the one we expected,
	 something is wrong.  */
      if (XEXP (x, 0) != reg)
	abort ();
      XEXP (x, 0) = addr;
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	replace_call_address (XEXP (x, i), reg, addr);
      if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    replace_call_address (XVECEXP (x, i, j), reg, addr);
	}
    }
}
/* Return the number of memory refs to addresses that vary
   in the rest of this function.  */

static int
count_nonfixed_reads (x)
     rtx x;
{
  register enum rtx_code code;
  register int i;
  register char *fmt;
  int value;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return ((invariant_p (XEXP (x, 0)) != 1)
	    + count_nonfixed_reads (XEXP (x, 0)));

  value = 0;
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	value += count_nonfixed_reads (XEXP (x, i));
      if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    value += count_nonfixed_reads (XVECEXP (x, i, j));
	}
    }

  return value;
}
/* P is an instruction that sets a register to the result of a ZERO_EXTEND.
   Replace it with an instruction to load just the low bytes
   if the machine supports such an instruction,
   and insert above LOOP_START an instruction to clear the register.  */
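/* Illustrative sketch only (hypothetical RTL, not emitted by this file
   verbatim): assuming a target with a byte move and SImode registers, a
   loop body containing

	(set (reg:SI 100) (zero_extend:SI (mem:QI addr)))

   would, when this transformation applies, become

	(set (reg:SI 100) (const_int 0))   ;; emitted once, before the loop
	...
	(set (strict_low_part (subreg:QI (reg:SI 100) 0)) (mem:QI addr))

   so only the low byte is loaded each iteration while the upper bits stay
   zero.  The exact patterns depend on what the target's insns accept.  */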
static void
constant_high_bytes (p, loop_start)
     rtx p, loop_start;
{
  register rtx new;
  register int insn_code_number;

  /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
     to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...).  */

  new = gen_rtx_SET (VOIDmode,
		     gen_rtx_STRICT_LOW_PART (VOIDmode,
			 gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
					 SET_DEST (PATTERN (p)),
					 0)),
		     XEXP (SET_SRC (PATTERN (p)), 0));
  insn_code_number = recog (new, p);

  if (insn_code_number)
    {
      /* Clear destination register before the loop.  */
      emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
				     const0_rtx),
			loop_start);

      /* Inside the loop, just load the low part.  */
      PATTERN (p) = new;
    }
}
/* Scan a loop setting the variables `unknown_address_altered',
   `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
   and `loop_has_volatile'.
   Also, fill in the array `loop_store_mems'.  */
static void
prescan_loop (start, end)
     rtx start, end;
{
  register int level = 1;
  register rtx insn;

  unknown_address_altered = 0;
  loop_has_call = 0;
  loop_has_volatile = 0;
  loop_store_mems_idx = 0;

  for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
       insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE)
	{
	  if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	    {
	      ++level;
	      /* Count number of loops contained in this one.  */
	      loops_enclosed++;
	    }
	  else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
	    --level;
	  else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
	    {
	      if (level == 1)
		loop_continue = insn;
	    }
	}
      else if (GET_CODE (insn) == CALL_INSN)
	{
	  if (! CONST_CALL_P (insn))
	    unknown_address_altered = 1;
	  loop_has_call = 1;
	}
      else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
	{
	  if (volatile_refs_p (PATTERN (insn)))
	    loop_has_volatile = 1;

	  note_stores (PATTERN (insn), note_addr_stored);
	}
    }
}
/* Scan the function looking for loops.  Record the start and end of each loop.
   Also mark as invalid loops any loops that contain a setjmp or are branched
   to from outside the loop.  */
static void
find_and_verify_loops (f)
     rtx f;
{
  rtx insn, label;
  int current_loop = -1;
  int next_loop = -1;
  int loop;

  /* If there are jumps to undefined labels,
     treat them as jumps out of any/all loops.
     This also avoids writing past end of tables when there are no loops.  */
  uid_loop_num[0] = -1;

  /* Find boundaries of loops, mark which loops are contained within
     loops, and invalidate loops that have setjmp.  */

  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE)
	switch (NOTE_LINE_NUMBER (insn))
	  {
	  case NOTE_INSN_LOOP_BEG:
	    loop_number_loop_starts[++next_loop] = insn;
	    loop_number_loop_ends[next_loop] = 0;
	    loop_outer_loop[next_loop] = current_loop;
	    loop_invalid[next_loop] = 0;
	    loop_number_exit_labels[next_loop] = 0;
	    loop_number_exit_count[next_loop] = 0;
	    current_loop = next_loop;
	    break;
	  case NOTE_INSN_SETJMP:
	    /* In this case, we must invalidate our current loop and any
	       enclosing loop.  */
	    for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
	      {
		loop_invalid[loop] = 1;
		if (loop_dump_stream)
		  fprintf (loop_dump_stream,
			   "\nLoop at %d ignored due to setjmp.\n",
			   INSN_UID (loop_number_loop_starts[loop]));
	      }
	    break;

	  case NOTE_INSN_LOOP_END:
	    if (current_loop == -1)
	      abort ();

	    loop_number_loop_ends[current_loop] = insn;
	    current_loop = loop_outer_loop[current_loop];
	    break;

	  default:
	    break;
	  }

      /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
	 enclosing loop, but this doesn't matter.  */
      uid_loop_num[INSN_UID (insn)] = current_loop;
    }
  /* Any loop containing a label used in an initializer must be invalidated,
     because it can be jumped into from anywhere.  */

  for (label = forced_labels; label; label = XEXP (label, 1))
    {
      int loop_num;

      for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
	   loop_num != -1;
	   loop_num = loop_outer_loop[loop_num])
	loop_invalid[loop_num] = 1;
    }

  /* Any loop containing a label used for an exception handler must be
     invalidated, because it can be jumped into from anywhere.  */

  for (label = exception_handler_labels; label; label = XEXP (label, 1))
    {
      int loop_num;

      for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
	   loop_num != -1;
	   loop_num = loop_outer_loop[loop_num])
	loop_invalid[loop_num] = 1;
    }
  /* Now scan all insn's in the function.  If any JUMP_INSN branches into a
     loop that it is not contained within, that loop is marked invalid.
     If any INSN or CALL_INSN uses a label's address, then the loop containing
     that label is marked invalid, because it could be jumped into from
     anywhere.

     Also look for blocks of code ending in an unconditional branch that
     exits the loop.  If such a block is surrounded by a conditional
     branch around the block, move the block elsewhere (see below) and
     invert the jump to point to the code block.  This may eliminate a
     label in our loop and will simplify processing by both us and a
     possible second cse pass.  */
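/* For illustration only (a hypothetical shape, not produced literally by
   this file), the transformation described above turns

	loop:	...
		conditional jump to L1	;; branch around the block
		jump OUT		;; unconditional exit from the loop
	L1:	...

   into

	loop:	...
		inverted conditional jump to NEW
	L1:	...
	...
	NEW:	jump OUT		;; block now lives outside the loop

   removing a label (and a rarely executed block) from the loop body.  The
   code below handles the details, such as finding a BARRIER at the right
   loop depth before moving anything.  */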
  for (insn = f; insn; insn = NEXT_INSN (insn))
    if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
      {
	int this_loop_num = uid_loop_num[INSN_UID (insn)];

	if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
	  {
	    rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
	    if (note)
	      {
		int loop_num;

		for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
		     loop_num != -1;
		     loop_num = loop_outer_loop[loop_num])
		  loop_invalid[loop_num] = 1;
	      }
	  }

	if (GET_CODE (insn) != JUMP_INSN)
	  continue;

	mark_loop_jump (PATTERN (insn), this_loop_num);

	/* See if this is an unconditional branch outside the loop.  */
	if (this_loop_num != -1
	    && (GET_CODE (PATTERN (insn)) == RETURN
		|| (simplejump_p (insn)
		    && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
			!= this_loop_num)))
	    && get_max_uid () < max_uid_for_loop)
2450 rtx our_next
= next_real_insn (insn
);
2452 int outer_loop
= -1;
2454 /* Go backwards until we reach the start of the loop, a label,
2456 for (p
= PREV_INSN (insn
);
2457 GET_CODE (p
) != CODE_LABEL
2458 && ! (GET_CODE (p
) == NOTE
2459 && NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_BEG
)
2460 && GET_CODE (p
) != JUMP_INSN
;
2464 /* Check for the case where we have a jump to an inner nested
2465 loop, and do not perform the optimization in that case. */
2467 if (JUMP_LABEL (insn
))
2469 dest_loop
= uid_loop_num
[INSN_UID (JUMP_LABEL (insn
))];
2470 if (dest_loop
!= -1)
2472 for (outer_loop
= dest_loop
; outer_loop
!= -1;
2473 outer_loop
= loop_outer_loop
[outer_loop
])
2474 if (outer_loop
== this_loop_num
)
2479 /* Make sure that the target of P is within the current loop. */
2481 if (GET_CODE (p
) == JUMP_INSN
&& JUMP_LABEL (p
)
2482 && uid_loop_num
[INSN_UID (JUMP_LABEL (p
))] != this_loop_num
)
2483 outer_loop
= this_loop_num
;
	  /* If we stopped on a JUMP_INSN to the next insn after INSN,
	     we have a block of code to try to move.

	     We look backward and then forward from the target of INSN
	     to find a BARRIER at the same loop depth as the target.
	     If we find such a BARRIER, we make a new label for the start
	     of the block, invert the jump in P and point it to that label,
	     and move the block of code to the spot we found.  */
2494 if (outer_loop
== -1
2495 && GET_CODE (p
) == JUMP_INSN
2496 && JUMP_LABEL (p
) != 0
2497 /* Just ignore jumps to labels that were never emitted.
2498 These always indicate compilation errors. */
2499 && INSN_UID (JUMP_LABEL (p
)) != 0
2501 && ! simplejump_p (p
)
2502 && next_real_insn (JUMP_LABEL (p
)) == our_next
)
2505 = JUMP_LABEL (insn
) ? JUMP_LABEL (insn
) : get_last_insn ();
2506 int target_loop_num
= uid_loop_num
[INSN_UID (target
)];
2509 for (loc
= target
; loc
; loc
= PREV_INSN (loc
))
2510 if (GET_CODE (loc
) == BARRIER
2511 && uid_loop_num
[INSN_UID (loc
)] == target_loop_num
)
2515 for (loc
= target
; loc
; loc
= NEXT_INSN (loc
))
2516 if (GET_CODE (loc
) == BARRIER
2517 && uid_loop_num
[INSN_UID (loc
)] == target_loop_num
)
2522 rtx cond_label
= JUMP_LABEL (p
);
2523 rtx new_label
= get_label_after (p
);
2525 /* Ensure our label doesn't go away. */
2526 LABEL_NUSES (cond_label
)++;
2528 /* Verify that uid_loop_num is large enough and that
2530 if (invert_jump (p
, new_label
))
		      /* If no suitable BARRIER was found, create a suitable
			 one before TARGET.  Since TARGET is a fall-through
			 path, we'll need to insert a jump around our block
			 and add a BARRIER before TARGET.

			 This creates an extra unconditional jump outside
			 the loop.  However, the benefit of removing rarely
			 executed instructions from inside the loop usually
			 outweighs the cost of the extra unconditional jump
			 outside the loop.  */
2548 temp
= gen_jump (JUMP_LABEL (insn
));
2549 temp
= emit_jump_insn_before (temp
, target
);
2550 JUMP_LABEL (temp
) = JUMP_LABEL (insn
);
2551 LABEL_NUSES (JUMP_LABEL (insn
))++;
2552 loc
= emit_barrier_before (target
);
2555 /* Include the BARRIER after INSN and copy the
2557 new_label
= squeeze_notes (new_label
, NEXT_INSN (insn
));
2558 reorder_insns (new_label
, NEXT_INSN (insn
), loc
);
2560 /* All those insns are now in TARGET_LOOP_NUM. */
2561 for (q
= new_label
; q
!= NEXT_INSN (NEXT_INSN (insn
));
2563 uid_loop_num
[INSN_UID (q
)] = target_loop_num
;
2565 /* The label jumped to by INSN is no longer a loop exit.
2566 Unless INSN does not have a label (e.g., it is a
2567 RETURN insn), search loop_number_exit_labels to find
2568 its label_ref, and remove it. Also turn off
2569 LABEL_OUTSIDE_LOOP_P bit. */
2570 if (JUMP_LABEL (insn
))
2575 r
= loop_number_exit_labels
[this_loop_num
];
2576 r
; q
= r
, r
= LABEL_NEXTREF (r
))
2577 if (XEXP (r
, 0) == JUMP_LABEL (insn
))
2579 LABEL_OUTSIDE_LOOP_P (r
) = 0;
2581 LABEL_NEXTREF (q
) = LABEL_NEXTREF (r
);
2583 loop_number_exit_labels
[this_loop_num
]
2584 = LABEL_NEXTREF (r
);
2588 for (loop_num
= this_loop_num
;
2589 loop_num
!= -1 && loop_num
!= target_loop_num
;
2590 loop_num
= loop_outer_loop
[loop_num
])
2591 loop_number_exit_count
[loop_num
]--;
2593 /* If we didn't find it, then something is wrong. */
2598 /* P is now a jump outside the loop, so it must be put
2599 in loop_number_exit_labels, and marked as such.
2600 The easiest way to do this is to just call
2601 mark_loop_jump again for P. */
2602 mark_loop_jump (PATTERN (p
), this_loop_num
);
2604 /* If INSN now jumps to the insn after it,
2606 if (JUMP_LABEL (insn
) != 0
2607 && (next_real_insn (JUMP_LABEL (insn
))
2608 == next_real_insn (insn
)))
2612 /* Continue the loop after where the conditional
2613 branch used to jump, since the only branch insn
2614 in the block (if it still remains) is an inter-loop
2615 branch and hence needs no processing. */
2616 insn
= NEXT_INSN (cond_label
);
2618 if (--LABEL_NUSES (cond_label
) == 0)
2619 delete_insn (cond_label
);
2621 /* This loop will be continued with NEXT_INSN (insn). */
2622 insn
= PREV_INSN (insn
);
/* If any label in X jumps to a loop different from LOOP_NUM and any of the
   loops it is contained in, mark the target loop invalid.

   For speed, we assume that X is part of a pattern of a JUMP_INSN.  */
2635 mark_loop_jump (x
, loop_num
)
2643 switch (GET_CODE (x
))
2656 /* There could be a label reference in here. */
2657 mark_loop_jump (XEXP (x
, 0), loop_num
);
2663 mark_loop_jump (XEXP (x
, 0), loop_num
);
2664 mark_loop_jump (XEXP (x
, 1), loop_num
);
2669 mark_loop_jump (XEXP (x
, 0), loop_num
);
2673 dest_loop
= uid_loop_num
[INSN_UID (XEXP (x
, 0))];
2675 /* Link together all labels that branch outside the loop. This
2676 is used by final_[bg]iv_value and the loop unrolling code. Also
2677 mark this LABEL_REF so we know that this branch should predict
2680 /* A check to make sure the label is not in an inner nested loop,
2681 since this does not count as a loop exit. */
2682 if (dest_loop
!= -1)
2684 for (outer_loop
= dest_loop
; outer_loop
!= -1;
2685 outer_loop
= loop_outer_loop
[outer_loop
])
2686 if (outer_loop
== loop_num
)
2692 if (loop_num
!= -1 && outer_loop
== -1)
2694 LABEL_OUTSIDE_LOOP_P (x
) = 1;
2695 LABEL_NEXTREF (x
) = loop_number_exit_labels
[loop_num
];
2696 loop_number_exit_labels
[loop_num
] = x
;
2698 for (outer_loop
= loop_num
;
2699 outer_loop
!= -1 && outer_loop
!= dest_loop
;
2700 outer_loop
= loop_outer_loop
[outer_loop
])
2701 loop_number_exit_count
[outer_loop
]++;
2704 /* If this is inside a loop, but not in the current loop or one enclosed
2705 by it, it invalidates at least one loop. */
2707 if (dest_loop
== -1)
2710 /* We must invalidate every nested loop containing the target of this
2711 label, except those that also contain the jump insn. */
2713 for (; dest_loop
!= -1; dest_loop
= loop_outer_loop
[dest_loop
])
2715 /* Stop when we reach a loop that also contains the jump insn. */
2716 for (outer_loop
= loop_num
; outer_loop
!= -1;
2717 outer_loop
= loop_outer_loop
[outer_loop
])
2718 if (dest_loop
== outer_loop
)
2721 /* If we get here, we know we need to invalidate a loop. */
2722 if (loop_dump_stream
&& ! loop_invalid
[dest_loop
])
2723 fprintf (loop_dump_stream
,
2724 "\nLoop at %d ignored due to multiple entry points.\n",
2725 INSN_UID (loop_number_loop_starts
[dest_loop
]));
2727 loop_invalid
[dest_loop
] = 1;
2732 /* If this is not setting pc, ignore. */
2733 if (SET_DEST (x
) == pc_rtx
)
2734 mark_loop_jump (SET_SRC (x
), loop_num
);
2738 mark_loop_jump (XEXP (x
, 1), loop_num
);
2739 mark_loop_jump (XEXP (x
, 2), loop_num
);
2744 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
2745 mark_loop_jump (XVECEXP (x
, 0, i
), loop_num
);
2749 for (i
= 0; i
< XVECLEN (x
, 1); i
++)
2750 mark_loop_jump (XVECEXP (x
, 1, i
), loop_num
);
2754 /* Treat anything else (such as a symbol_ref)
2755 as a branch out of this loop, but not into any loop. */
2760 LABEL_OUTSIDE_LOOP_P (x
) = 1;
2761 LABEL_NEXTREF (x
) = loop_number_exit_labels
[loop_num
];
2764 loop_number_exit_labels
[loop_num
] = x
;
2766 for (outer_loop
= loop_num
; outer_loop
!= -1;
2767 outer_loop
= loop_outer_loop
[outer_loop
])
2768 loop_number_exit_count
[outer_loop
]++;
/* Return nonzero if there is a label in the range from
   insn INSN to and including the insn whose luid is END.
   INSN must have an assigned luid (i.e., it must not have
   been previously created by loop.c).  */

static int
labels_in_range_p (insn, end)
     rtx insn;
     int end;
{
  while (insn && INSN_LUID (insn) <= end)
    {
      if (GET_CODE (insn) == CODE_LABEL)
	return 1;
      insn = NEXT_INSN (insn);
    }

  return 0;
}
/* Record that a memory reference X is being set.  */

static void
note_addr_stored (x)
     rtx x;
{
  register int i;

  if (x == 0 || GET_CODE (x) != MEM)
    return;

  /* Count number of memory writes.
     This affects heuristics in strength_reduce.  */
  num_mem_sets++;

  /* BLKmode MEM means all memory is clobbered.  */
  if (GET_MODE (x) == BLKmode)
    unknown_address_altered = 1;

  if (unknown_address_altered)
    return;

  for (i = 0; i < loop_store_mems_idx; i++)
    if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
	&& MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
      {
	/* We are storing at the same address as previously noted.  Save
	   the wider reference.  */
	if (GET_MODE_SIZE (GET_MODE (x))
	    > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
	  loop_store_mems[i] = x;
	break;
      }

  if (i == NUM_STORES)
    unknown_address_altered = 1;
  else if (i == loop_store_mems_idx)
    loop_store_mems[loop_store_mems_idx++] = x;
}
/* Return nonzero if the rtx X is invariant over the current loop.

   The value is 2 if we refer to something only conditionally invariant.

   If `unknown_address_altered' is nonzero, no memory ref is invariant.
   Otherwise, a memory ref is invariant if it does not conflict with
   anything stored in `loop_store_mems'.  */
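/* For illustration (hypothetical source loop): in

	for (i = 0; i < n; i++)
	  a[i] = x * y + b[i];

   the expression x * y is loop invariant provided x and y are not set
   inside the loop and no aliasing store invalidates them; invariant_p
   returns nonzero for the corresponding RTL, which is what lets the move
   pass compute x * y once before the loop.  A MEM such as b[i] is not
   invariant because its address varies with i.  */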
int
invariant_p (x)
     register rtx x;
{
  register int i;
  register enum rtx_code code;
  register char *fmt;
  int conditional = 0;

  if (x == 0)
    return 1;
  code = GET_CODE (x);
  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
      return 1;

    case LABEL_REF:
      /* A LABEL_REF is normally invariant, however, if we are unrolling
	 loops, and this label is inside the loop, then it isn't invariant.
	 This is because each unrolled copy of the loop body will have
	 a copy of this label.  If this was invariant, then an insn loading
	 the address of this label into a register might get moved outside
	 the loop, and then each loop body would end up using the same label.

	 We don't know the loop bounds here though, so just fail for all
	 labels.  */
      if (flag_unroll_loops)
	return 0;
      else
	return 1;

    case PC:
    case CC0:
    case UNSPEC_VOLATILE:
      return 0;

    case REG:
      /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
	 since the reg might be set by initialization within the loop.  */

      if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	   || x == arg_pointer_rtx)
	  && ! current_function_has_nonlocal_goto)
	return 1;

      if (loop_has_call
	  && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
	return 0;

      if (n_times_set[REGNO (x)] < 0)
	return 2;

      return n_times_set[REGNO (x)] == 0;

    case MEM:
      /* Volatile memory references must be rejected.  Do this before
	 checking for read-only items, so that volatile read-only items
	 will be rejected also.  */
      if (MEM_VOLATILE_P (x))
	return 0;

      /* Read-only items (such as constants in a constant pool) are
	 invariant if their address is.  */
      if (RTX_UNCHANGING_P (x))
	break;

      /* If we filled the table (or had a subroutine call), any location
	 in memory could have been clobbered.  */
      if (unknown_address_altered)
	return 0;

      /* See if there is any dependence between a store and this load.  */
      for (i = loop_store_mems_idx - 1; i >= 0; i--)
	if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
	  return 0;

      /* It's not invalidated by a store in memory
	 but we must still verify the address is invariant.  */
      break;

    case ASM_OPERANDS:
      /* Don't mess with insns declared volatile.  */
      if (MEM_VOLATILE_P (x))
	return 0;
      break;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  int tem = invariant_p (XEXP (x, i));
	  if (tem == 0)
	    return 0;
	  if (tem == 2)
	    conditional = 1;
	}
      else if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    {
	      int tem = invariant_p (XVECEXP (x, i, j));
	      if (tem == 0)
		return 0;
	      if (tem == 2)
		conditional = 1;
	    }
	}
    }

  return 1 + conditional;
}
/* Return nonzero if all the insns in the loop that set REG
   are INSN and the immediately following insns,
   and if each of those insns sets REG in an invariant way
   (not counting uses of REG in them).

   The value is 2 if some of these insns are only conditionally invariant.

   We assume that INSN itself is the first set of REG
   and that its source is invariant.  */
static int
consec_sets_invariant_p (reg, n_sets, insn)
     int n_sets;
     rtx reg, insn;
{
  register rtx p = insn;
  register int regno = REGNO (reg);
  rtx temp;
  /* Number of sets we have to insist on finding after INSN.  */
  int count = n_sets - 1;
  int old = n_times_set[regno];
  int value = 0;
  int this;

  /* If N_SETS hit the limit, we can't rely on its value.  */
  if (n_sets == 127)
    return 0;

  n_times_set[regno] = 0;

  while (count > 0)
    {
      register enum rtx_code code;
      rtx set;

      p = NEXT_INSN (p);
      code = GET_CODE (p);

      /* If library call, skip to end of it.  */
      if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
	p = XEXP (temp, 0);

      this = 0;
      if (code == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && REGNO (SET_DEST (set)) == regno)
	{
	  this = invariant_p (SET_SRC (set));
	  if (this != 0)
	    value |= this;
	  else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
	    {
	      /* If this is a libcall, then any invariant REG_EQUAL note is OK.
		 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
		 notes are OK.  */
	      this = (CONSTANT_P (XEXP (temp, 0))
		      || (find_reg_note (p, REG_RETVAL, NULL_RTX)
			  && invariant_p (XEXP (temp, 0))));
	      value |= this;
	    }
	}
      if (this != 0)
	count--;
      else if (code != NOTE)
	{
	  n_times_set[regno] = old;
	  return 0;
	}
    }

  n_times_set[regno] = old;
  /* If invariant_p ever returned 2, we return 2.  */
  return 1 + (value & 2);
}
/* I don't think this condition is sufficient to allow INSN
   to be moved, so we no longer test it.  */

/* Return 1 if all insns in the basic block of INSN and following INSN
   that set REG are invariant according to TABLE.  */
static int
all_sets_invariant_p (reg, insn, table)
     rtx reg, insn;
     short *table;
{
  register rtx p = insn;
  register int regno = REGNO (reg);

  while (1)
    {
      register enum rtx_code code;
      p = NEXT_INSN (p);
      code = GET_CODE (p);
      if (code == CODE_LABEL || code == JUMP_INSN)
	return 1;
      if (code == INSN && GET_CODE (PATTERN (p)) == SET
	  && GET_CODE (SET_DEST (PATTERN (p))) == REG
	  && REGNO (SET_DEST (PATTERN (p))) == regno)
	{
	  if (! invariant_p (SET_SRC (PATTERN (p)), table))
	    return 0;
	}
    }
}
/* Look at all uses (not sets) of registers in X.  For each, if it is
   the single use, set USAGE[REGNO] to INSN; if there was a previous use in
   a different insn, set USAGE[REGNO] to const0_rtx.  */
static void
find_single_use_in_loop (insn, x, usage)
     rtx insn;
     rtx x;
     rtx *usage;
{
  enum rtx_code code = GET_CODE (x);
  char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    usage[REGNO (x)]
      = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
	? const0_rtx : insn;

  else if (code == SET)
    {
      /* Don't count SET_DEST if it is a REG; otherwise count things
	 in SET_DEST because if a register is partially modified, it won't
	 show up as a potential movable so we don't care how USAGE is set
	 for it.  */
      if (GET_CODE (SET_DEST (x)) != REG)
	find_single_use_in_loop (insn, SET_DEST (x), usage);
      find_single_use_in_loop (insn, SET_SRC (x), usage);
    }
  else
    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e' && XEXP (x, i) != 0)
	  find_single_use_in_loop (insn, XEXP (x, i), usage);
	else if (fmt[i] == 'E')
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
      }
}
/* Increment N_TIMES_SET at the index of each register
   that is modified by an insn between FROM and TO.
   If the value of an element of N_TIMES_SET becomes 127 or more,
   stop incrementing it, to avoid overflow.

   Store in SINGLE_USAGE[I] the single insn in which register I is
   used, if it is only used once.  Otherwise, it is set to 0 (for no
   uses) or const0_rtx for more than one use.  This parameter may be zero,
   in which case this processing is not done.

   Store in *COUNT_PTR the number of actual instructions
   in the loop.  We use this to decide what is worth moving out.  */

/* last_set[n] is nonzero iff reg n has been set in the current basic block.
   In that case, it is the insn that last set reg n.  */
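/* For illustration (hypothetical loop body, pseudo register numbers made
   up): if a basic block inside the loop contains

	r5 = r5 + 1;
	r6 = r5 * 4;
	r5 = r6 + r7;

   then after this scan n_times_set[5] == 2 and n_times_set[6] == 1, and
   SINGLE_USAGE[7] records the one insn that uses r7 (it would instead be
   const0_rtx if r7 were also used elsewhere).  Counts saturate at 127, so
   heavily rewritten registers never overflow the counters.  */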
static void
count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
     register rtx from, to;
     char *may_not_move;
     rtx *single_usage;
     int *count_ptr;
     int nregs;
{
  register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
  register rtx insn;
  register int count = 0;
  register rtx dest;

  bzero ((char *) last_set, nregs * sizeof (rtx));
  for (insn = from; insn != to; insn = NEXT_INSN (insn))
    {
      if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
	{
	  ++count;

	  /* If requested, record registers that have exactly one use.  */
	  if (single_usage)
	    {
	      find_single_use_in_loop (insn, PATTERN (insn), single_usage);

	      /* Include uses in REG_EQUAL notes.  */
	      if (REG_NOTES (insn))
		find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
	    }

	  if (GET_CODE (PATTERN (insn)) == CLOBBER
	      && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
	    /* Don't move a reg that has an explicit clobber.
	       We might do so sometimes, but it's not worth the pain.  */
	    may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;

	  if (GET_CODE (PATTERN (insn)) == SET
	      || GET_CODE (PATTERN (insn)) == CLOBBER)
	    {
	      dest = SET_DEST (PATTERN (insn));
	      while (GET_CODE (dest) == SUBREG
		     || GET_CODE (dest) == ZERO_EXTRACT
		     || GET_CODE (dest) == SIGN_EXTRACT
		     || GET_CODE (dest) == STRICT_LOW_PART)
		dest = XEXP (dest, 0);
	      if (GET_CODE (dest) == REG)
		{
		  register int regno = REGNO (dest);
		  /* If this is the first setting of this reg
		     in current basic block, and it was set before,
		     it must be set in two basic blocks, so it cannot
		     be moved out of the loop.  */
		  if (n_times_set[regno] > 0 && last_set[regno] == 0)
		    may_not_move[regno] = 1;
		  /* If this is not first setting in current basic block,
		     see if reg was used in between previous one and this.
		     If so, neither one can be moved.  */
		  if (last_set[regno] != 0
		      && reg_used_between_p (dest, last_set[regno], insn))
		    may_not_move[regno] = 1;
		  if (n_times_set[regno] < 127)
		    ++n_times_set[regno];
		  last_set[regno] = insn;
		}
	    }
3198 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
3201 for (i
= XVECLEN (PATTERN (insn
), 0) - 1; i
>= 0; i
--)
3203 register rtx x
= XVECEXP (PATTERN (insn
), 0, i
);
3204 if (GET_CODE (x
) == CLOBBER
&& GET_CODE (XEXP (x
, 0)) == REG
)
3205 /* Don't move a reg that has an explicit clobber.
3206 It's not worth the pain to try to do it correctly. */
3207 may_not_move
[REGNO (XEXP (x
, 0))] = 1;
3209 if (GET_CODE (x
) == SET
|| GET_CODE (x
) == CLOBBER
)
3211 dest
= SET_DEST (x
);
3212 while (GET_CODE (dest
) == SUBREG
3213 || GET_CODE (dest
) == ZERO_EXTRACT
3214 || GET_CODE (dest
) == SIGN_EXTRACT
3215 || GET_CODE (dest
) == STRICT_LOW_PART
)
3216 dest
= XEXP (dest
, 0);
3217 if (GET_CODE (dest
) == REG
)
3219 register int regno
= REGNO (dest
);
3220 if (n_times_set
[regno
] > 0 && last_set
[regno
] == 0)
3221 may_not_move
[regno
] = 1;
3222 if (last_set
[regno
] != 0
3223 && reg_used_between_p (dest
, last_set
[regno
], insn
))
3224 may_not_move
[regno
] = 1;
3225 if (n_times_set
[regno
] < 127)
3226 ++n_times_set
[regno
];
3227 last_set
[regno
] = insn
;
      if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
	bzero ((char *) last_set, nregs * sizeof (rtx));
    }
  *count_ptr = count;
}
/* Given a loop that is bounded by LOOP_START and LOOP_END
   and that is entered at SCAN_START,
   return 1 if the register set in SET contained in insn INSN is used by
   any insn that precedes INSN in cyclic order starting
   from the loop entry point.

   We don't want to use INSN_LUID here because if we restrict INSN to those
   that have a valid INSN_LUID, it means we cannot move an invariant out
   from an inner loop past two loops.  */
static int
loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
     rtx set, insn, loop_start, scan_start, loop_end;
{
  rtx reg = SET_DEST (set);
  rtx p;

  /* Scan forward checking for register usage.  If we hit INSN, we
     are done.  Otherwise, if we hit LOOP_END, wrap around to LOOP_START.  */
  for (p = scan_start; p != insn; p = NEXT_INSN (p))
    {
      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
	  && reg_overlap_mentioned_p (reg, PATTERN (p)))
	return 1;

      if (p == loop_end)
	p = loop_start;
    }

  return 0;
}
3272 /* A "basic induction variable" or biv is a pseudo reg that is set
3273 (within this loop) only by incrementing or decrementing it. */
3274 /* A "general induction variable" or giv is a pseudo reg whose
3275 value is a linear function of a biv. */
3277 /* Bivs are recognized by `basic_induction_var';
3278 Givs by `general_induct_var'. */
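/* For illustration (hypothetical source loop): in

	for (i = 0; i < n; i++)
	  s += a[4 * i + 8];

   the register holding i is a biv, since it only changes by adding a
   constant each iteration, while a register computed as 4 * i + 8 is a
   giv: a linear function mult * biv + add of the biv, here with mult == 4
   and add == 8.  */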
/* Indexed by register number, indicates whether or not register is an
   induction variable, and if so what type.  */

enum iv_mode *reg_iv_type;

/* Indexed by register number, contains pointer to `struct induction'
   if register is an induction variable.  This holds general info for
   all induction variables.  */

struct induction **reg_iv_info;

/* Indexed by register number, contains pointer to `struct iv_class'
   if register is a basic induction variable.  This holds info describing
   the class (a related group) of induction variables that the biv belongs
   to.  */

struct iv_class **reg_biv_class;

/* The head of a list which links together (via the next field)
   every iv class for the current loop.  */

struct iv_class *loop_iv_list;

/* Communication with routines called via `note_stores'.  */

static rtx note_insn;

/* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs.  */

static rtx addr_placeholder;
/* ??? Unfinished optimizations, and possible future optimizations,
   for the strength reduction code.  */

/* ??? There is one more optimization you might be interested in doing: to
   allocate pseudo registers for frequently-accessed memory locations.
   If the same memory location is referenced each time around, it might
   be possible to copy it into a register before and out after.
   This is especially useful when the memory location is a variable which
   is in a stack slot because somewhere its address is taken.  If the
   loop doesn't contain a function call and the variable isn't volatile,
   it is safe to keep the value in a register for the duration of the
   loop.  One tricky thing is that the copying of the value back from the
   register has to be done on all exits from the loop.  You need to check that
   all the exits from the loop go to the same place.  */
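/* For illustration of the idea above (hypothetical source):

	int x;			   /* address taken elsewhere, so in memory */
	...
	for (i = 0; i < n; i++)
	  x += a[i];

   x could be copied into a register before the loop and stored back on
   every loop exit, provided the loop makes no function calls, x is not
   volatile, and all exits reach the single store-back point.  */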
/* ??? The interaction of biv elimination, and recognition of 'constant'
   bivs, may cause problems.  */

/* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
   performance problems.

   Perhaps don't eliminate things that can be combined with an addressing
   mode.  Find all givs that have the same biv, mult_val, and add_val;
   then for each giv, check to see if its only use dies in a following
   memory address.  If so, generate a new memory address and check to see
   if it is valid.  If it is valid, then store the modified memory address,
   otherwise, mark the giv as not done so that it will get its own iv.  */

/* ??? Could try to optimize branches when it is known that a biv is always
   positive.  */

/* ??? When replacing a biv in a compare insn, we should replace with the
   closest giv so that an optimized branch can still be recognized by the
   combiner, e.g. the VAX acb insn.  */

/* ??? Many of the checks involving uid_luid could be simplified if regscan
   was rerun in loop_optimize whenever a register was added or moved.
   Also, some of the optimizations could be a little less conservative.  */
/* Perform strength reduction and induction variable elimination.  */

/* Pseudo registers created during this function will be beyond the last
   valid index in several tables including n_times_set and regno_last_uid.
   This does not cause a problem here, because the added registers cannot be
   givs outside of their loop, and hence will never be reconsidered.
   But scan_loop must check regnos to make sure they are in bounds.  */
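/* For illustration (hypothetical source loop): strength reduction rewrites

	for (i = 0; i < n; i++)
	  a[i] = 0;		/* address recomputed as a + 4*i each time */

   so that a new induction variable p starts at a and is bumped by the
   element size every iteration, replacing the repeated address arithmetic
   with a cheap add:

	for (p = a; p < a + n; p++)
	  *p = 0;

   Induction variable elimination can then remove i entirely once the loop
   test is expressed in terms of p.  */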
static void
strength_reduce (scan_start, end, loop_top, insn_count,
		 loop_start, loop_end, unroll_p)
     rtx scan_start;
     rtx end;
     rtx loop_top;
     int insn_count;
     rtx loop_start;
     rtx loop_end;
     int unroll_p;
{
  /* This is 1 if current insn is not executed at least once for every loop
     iteration.  */
  int not_every_iteration = 0;
  /* This is 1 if current insn may be executed more than once for every
     loop iteration.  */
  int maybe_multiple = 0;
  /* Temporary list pointers for traversing loop_iv_list.  */
  struct iv_class *bl, **backbl;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  /* ??? could set this to last value of threshold in move_movables */
  int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
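  /* A worked example of the formula above: with, say, 10 non-fixed
     registers available, a loop containing a call gets
     threshold = 1 * (3 + 10) = 13, while a call-free loop gets
     threshold = 2 * (3 + 10) = 26, i.e. roughly twice as much extra
     register lifetime is accepted per saved insn when no call clobbers
     registers inside the loop.  (The register count is only illustrative;
     n_non_fixed_regs depends on the target.)  */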
  /* Map of pseudo-register replacements.  */
  rtx *reg_map;
  rtx end_insert_before;

  reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
					 * sizeof (enum iv_mode *));
  bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode *));
  reg_iv_info = (struct induction **)
    alloca (max_reg_before_loop * sizeof (struct induction *));
  bzero ((char *) reg_iv_info, (max_reg_before_loop
				* sizeof (struct induction *)));
  reg_biv_class = (struct iv_class **)
    alloca (max_reg_before_loop * sizeof (struct iv_class *));
  bzero ((char *) reg_biv_class, (max_reg_before_loop
				  * sizeof (struct iv_class *)));

  addr_placeholder = gen_reg_rtx (Pmode);
  /* Save insn immediately after the loop_end.  Insns inserted after loop_end
     must be put before this insn, so that they will appear in the right
     order (i.e. loop order).

     If loop_end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set end_insert_before to the
     new note.  */
  if (NEXT_INSN (loop_end) != 0)
    end_insert_before = NEXT_INSN (loop_end);
  else
    end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3422 /* Scan through loop to find all possible bivs. */
3428 /* At end of a straight-in loop, we are done.
3429 At end of a loop entered at the bottom, scan the top. */
3430 if (p
== scan_start
)
3438 if (p
== scan_start
)
3442 if (GET_CODE (p
) == INSN
3443 && (set
= single_set (p
))
3444 && GET_CODE (SET_DEST (set
)) == REG
)
3446 dest_reg
= SET_DEST (set
);
3447 if (REGNO (dest_reg
) < max_reg_before_loop
3448 && REGNO (dest_reg
) >= FIRST_PSEUDO_REGISTER
3449 && reg_iv_type
[REGNO (dest_reg
)] != NOT_BASIC_INDUCT
)
3451 if (basic_induction_var (SET_SRC (set
), GET_MODE (SET_SRC (set
)),
3452 dest_reg
, p
, &inc_val
, &mult_val
))
3454 /* It is a possible basic induction variable.
3455 Create and initialize an induction structure for it. */
3458 = (struct induction
*) alloca (sizeof (struct induction
));
3460 record_biv (v
, p
, dest_reg
, inc_val
, mult_val
,
3461 not_every_iteration
, maybe_multiple
);
3462 reg_iv_type
[REGNO (dest_reg
)] = BASIC_INDUCT
;
3464 else if (REGNO (dest_reg
) < max_reg_before_loop
)
3465 reg_iv_type
[REGNO (dest_reg
)] = NOT_BASIC_INDUCT
;
      /* Past CODE_LABEL, we get to insns that may be executed multiple
	 times.  The only way we can be sure that they can't is if every
	 jump insn between here and the end of the loop either
	 returns, exits the loop, is a forward jump, or is a jump
	 to the loop start.  */

      if (GET_CODE (p) == CODE_LABEL)
3483 insn
= NEXT_INSN (insn
);
3484 if (insn
== scan_start
)
3492 if (insn
== scan_start
)
3496 if (GET_CODE (insn
) == JUMP_INSN
3497 && GET_CODE (PATTERN (insn
)) != RETURN
3498 && (! condjump_p (insn
)
3499 || (JUMP_LABEL (insn
) != 0
3500 && JUMP_LABEL (insn
) != scan_start
3501 && (INSN_UID (JUMP_LABEL (insn
)) >= max_uid_for_loop
3502 || INSN_UID (insn
) >= max_uid_for_loop
3503 || (INSN_LUID (JUMP_LABEL (insn
))
3504 < INSN_LUID (insn
))))))
3512 /* Past a jump, we get to insns for which we can't count
3513 on whether they will be executed during each iteration. */
3514 /* This code appears twice in strength_reduce. There is also similar
3515 code in scan_loop. */
3516 if (GET_CODE (p
) == JUMP_INSN
3517 /* If we enter the loop in the middle, and scan around to the
3518 beginning, don't set not_every_iteration for that.
3519 This can be any kind of jump, since we want to know if insns
3520 will be executed if the loop is executed. */
3521 && ! (JUMP_LABEL (p
) == loop_top
3522 && ((NEXT_INSN (NEXT_INSN (p
)) == loop_end
&& simplejump_p (p
))
3523 || (NEXT_INSN (p
) == loop_end
&& condjump_p (p
)))))
3527 /* If this is a jump outside the loop, then it also doesn't
3528 matter. Check to see if the target of this branch is on the
3529 loop_number_exits_labels list. */
3531 for (label
= loop_number_exit_labels
[uid_loop_num
[INSN_UID (loop_start
)]];
3533 label
= LABEL_NEXTREF (label
))
3534 if (XEXP (label
, 0) == JUMP_LABEL (p
))
3538 not_every_iteration
= 1;
3541 else if (GET_CODE (p
) == NOTE
)
3543 /* At the virtual top of a converted loop, insns are again known to
3544 be executed each iteration: logically, the loop begins here
3545 even though the exit code has been duplicated. */
3546 if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_VTOP
&& loop_depth
== 0)
3547 not_every_iteration
= 0;
3548 else if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_BEG
)
3550 else if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_END
)
      /* Unlike in the code motion pass where MAYBE_NEVER indicates that
	 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
	 or not an insn is known to be executed each iteration of the
	 loop, whether or not any iterations are known to occur.

	 Therefore, if we have just passed a label and have no more labels
	 between here and the test insn of the loop, we know these insns
	 will be executed each iteration.  */

      if (not_every_iteration && GET_CODE (p) == CODE_LABEL
	  && no_labels_between_p (p, loop_end))
	not_every_iteration = 0;
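  /* For illustration (hypothetical loop): in

	for (i = 0; i < n; i++)
	  {
	    b[i] = i;		/* executed every iteration */
	    if (c[i])
	      j += 2;		/* past the branch: not every iteration */
	  }

     insns after the conditional jump are scanned with not_every_iteration
     set, because the increment of j may be skipped in some iterations even
     though the loop itself always runs.  */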
3568 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3569 Make a sanity check against n_times_set. */
3570 for (backbl
= &loop_iv_list
, bl
= *backbl
; bl
; bl
= bl
->next
)
3572 if (reg_iv_type
[bl
->regno
] != BASIC_INDUCT
3573 /* Above happens if register modified by subreg, etc. */
3574 /* Make sure it is not recognized as a basic induction var: */
3575 || n_times_set
[bl
->regno
] != bl
->biv_count
3576 /* If never incremented, it is invariant that we decided not to
3577 move. So leave it alone. */
3578 || ! bl
->incremented
)
3580 if (loop_dump_stream
)
3581 fprintf (loop_dump_stream
, "Reg %d: biv discarded, %s\n",
3583 (reg_iv_type
[bl
->regno
] != BASIC_INDUCT
3584 ? "not induction variable"
3585 : (! bl
->incremented
? "never incremented"
3588 reg_iv_type
[bl
->regno
] = NOT_BASIC_INDUCT
;
3595 if (loop_dump_stream
)
3596 fprintf (loop_dump_stream
, "Reg %d: biv verified\n", bl
->regno
);
3600 /* Exit if there are no bivs. */
3603 /* Can still unroll the loop anyways, but indicate that there is no
3604 strength reduction info available. */
3606 unroll_loop (loop_end
, insn_count
, loop_start
, end_insert_before
, 0);
3611 /* Find initial value for each biv by searching backwards from loop_start,
3612 halting at first label. Also record any test condition. */
3615 for (p
= loop_start
; p
&& GET_CODE (p
) != CODE_LABEL
; p
= PREV_INSN (p
))
3619 if (GET_CODE (p
) == CALL_INSN
)
3622 if (GET_CODE (p
) == INSN
|| GET_CODE (p
) == JUMP_INSN
3623 || GET_CODE (p
) == CALL_INSN
)
3624 note_stores (PATTERN (p
), record_initial
);
3626 /* Record any test of a biv that branches around the loop if no store
3627 between it and the start of loop. We only care about tests with
3628 constants and registers and only certain of those. */
3629 if (GET_CODE (p
) == JUMP_INSN
3630 && JUMP_LABEL (p
) != 0
3631 && next_real_insn (JUMP_LABEL (p
)) == next_real_insn (loop_end
)
3632 && (test
= get_condition_for_loop (p
)) != 0
3633 && GET_CODE (XEXP (test
, 0)) == REG
3634 && REGNO (XEXP (test
, 0)) < max_reg_before_loop
3635 && (bl
= reg_biv_class
[REGNO (XEXP (test
, 0))]) != 0
3636 && valid_initial_value_p (XEXP (test
, 1), p
, call_seen
, loop_start
)
3637 && bl
->init_insn
== 0)
3639 /* If an NE test, we have an initial value! */
3640 if (GET_CODE (test
) == NE
)
3643 bl
->init_set
= gen_rtx_SET (VOIDmode
,
3644 XEXP (test
, 0), XEXP (test
, 1));
3647 bl
->initial_test
= test
;
  /* Look at each biv and see if we can say anything better about its
     initial value from any initializing insns set up above.  (This is done
     in two passes to avoid missing SETs in a PARALLEL.)  */
3654 for (bl
= loop_iv_list
; bl
; bl
= bl
->next
)
3659 if (! bl
->init_insn
)
3662 /* IF INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3663 is a constant, use the value of that. */
3664 if (((note
= find_reg_note (bl
->init_insn
, REG_EQUAL
, 0)) != NULL
3665 && CONSTANT_P (XEXP (note
, 0)))
3666 || ((note
= find_reg_note (bl
->init_insn
, REG_EQUIV
, 0)) != NULL
3667 && CONSTANT_P (XEXP (note
, 0))))
3668 src
= XEXP (note
, 0);
3670 src
= SET_SRC (bl
->init_set
);
3672 if (loop_dump_stream
)
3673 fprintf (loop_dump_stream
,
3674 "Biv %d initialized at insn %d: initial value ",
3675 bl
->regno
, INSN_UID (bl
->init_insn
));
3677 if ((GET_MODE (src
) == GET_MODE (regno_reg_rtx
[bl
->regno
])
3678 || GET_MODE (src
) == VOIDmode
)
3679 && valid_initial_value_p (src
, bl
->init_insn
, call_seen
, loop_start
))
3681 bl
->initial_value
= src
;
3683 if (loop_dump_stream
)
3685 if (GET_CODE (src
) == CONST_INT
)
3686 fprintf (loop_dump_stream
, "%d\n", INTVAL (src
));
3689 print_rtl (loop_dump_stream
, src
);
3690 fprintf (loop_dump_stream
, "\n");
3696 /* Biv initial value is not simple move,
3697 so let it keep initial value of "itself". */
3699 if (loop_dump_stream
)
3700 fprintf (loop_dump_stream
, "is complex\n");
  /* Search the loop for general induction variables.  */

  /* A register is a giv if: it is only set once, it is a function of a
     biv and a constant (or invariant), and it is not a biv.  */

  not_every_iteration = 0;
3715 /* At end of a straight-in loop, we are done.
3716 At end of a loop entered at the bottom, scan the top. */
3717 if (p
== scan_start
)
3725 if (p
== scan_start
)
3729 /* Look for a general induction variable in a register. */
3730 if (GET_CODE (p
) == INSN
3731 && (set
= single_set (p
))
3732 && GET_CODE (SET_DEST (set
)) == REG
3733 && ! may_not_optimize
[REGNO (SET_DEST (set
))])
3741 dest_reg
= SET_DEST (set
);
3742 if (REGNO (dest_reg
) < FIRST_PSEUDO_REGISTER
)
3745 if (/* SET_SRC is a giv. */
3746 ((benefit
= general_induction_var (SET_SRC (set
),
3749 /* Equivalent expression is a giv. */
3750 || ((regnote
= find_reg_note (p
, REG_EQUAL
, NULL_RTX
))
3751 && (benefit
= general_induction_var (XEXP (regnote
, 0),
3753 &add_val
, &mult_val
))))
3754 /* Don't try to handle any regs made by loop optimization.
3755 We have nothing on them in regno_first_uid, etc. */
3756 && REGNO (dest_reg
) < max_reg_before_loop
3757 /* Don't recognize a BASIC_INDUCT_VAR here. */
3758 && dest_reg
!= src_reg
3759 /* This must be the only place where the register is set. */
3760 && (n_times_set
[REGNO (dest_reg
)] == 1
3761 /* or all sets must be consecutive and make a giv. */
3762 || (benefit
= consec_sets_giv (benefit
, p
,
3764 &add_val
, &mult_val
))))
3768 = (struct induction
*) alloca (sizeof (struct induction
));
3771 /* If this is a library call, increase benefit. */
3772 if (find_reg_note (p
, REG_RETVAL
, NULL_RTX
))
3773 benefit
+= libcall_benefit (p
);
3775 /* Skip the consecutive insns, if there are any. */
3776 for (count
= n_times_set
[REGNO (dest_reg
)] - 1;
3779 /* If first insn of libcall sequence, skip to end.
3780 Do this at start of loop, since INSN is guaranteed to
3782 if (GET_CODE (p
) != NOTE
3783 && (temp
= find_reg_note (p
, REG_LIBCALL
, NULL_RTX
)))
3786 do p
= NEXT_INSN (p
);
3787 while (GET_CODE (p
) == NOTE
);
3790 record_giv (v
, p
, src_reg
, dest_reg
, mult_val
, add_val
, benefit
,
3791 DEST_REG
, not_every_iteration
, NULL_PTR
, loop_start
,
3797 #ifndef DONT_REDUCE_ADDR
3798 /* Look for givs which are memory addresses. */
3799 /* This resulted in worse code on a VAX 8600. I wonder if it
3801 if (GET_CODE (p
) == INSN
)
3802 find_mem_givs (PATTERN (p
), p
, not_every_iteration
, loop_start
,
3806 /* Update the status of whether giv can derive other givs. This can
3807 change when we pass a label or an insn that updates a biv. */
3808 if (GET_CODE (p
) == INSN
|| GET_CODE (p
) == JUMP_INSN
3809 || GET_CODE (p
) == CODE_LABEL
)
3810 update_giv_derive (p
);
3812 /* Past a jump, we get to insns for which we can't count
3813 on whether they will be executed during each iteration. */
3814 /* This code appears twice in strength_reduce. There is also similar
3815 code in scan_loop. */
3816 if (GET_CODE (p
) == JUMP_INSN
3817 /* If we enter the loop in the middle, and scan around to the
3818 beginning, don't set not_every_iteration for that.
3819 This can be any kind of jump, since we want to know if insns
3820 will be executed if the loop is executed. */
3821 && ! (JUMP_LABEL (p
) == loop_top
3822 && ((NEXT_INSN (NEXT_INSN (p
)) == loop_end
&& simplejump_p (p
))
3823 || (NEXT_INSN (p
) == loop_end
&& condjump_p (p
)))))
3827 /* If this is a jump outside the loop, then it also doesn't
3828 matter. Check to see if the target of this branch is on the
3829 loop_number_exits_labels list. */
3831 for (label
= loop_number_exit_labels
[uid_loop_num
[INSN_UID (loop_start
)]];
3833 label
= LABEL_NEXTREF (label
))
3834 if (XEXP (label
, 0) == JUMP_LABEL (p
))
3838 not_every_iteration
= 1;
3841 else if (GET_CODE (p
) == NOTE
)
3843 /* At the virtual top of a converted loop, insns are again known to
3844 be executed each iteration: logically, the loop begins here
3845 even though the exit code has been duplicated. */
3846 if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_VTOP
&& loop_depth
== 0)
3847 not_every_iteration
= 0;
3848 else if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_BEG
)
3850 else if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_END
)
3854 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3855 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3856 or not an insn is known to be executed each iteration of the
3857 loop, whether or not any iterations are known to occur.
3859 Therefore, if we have just passed a label and have no more labels
3860 between here and the test insn of the loop, we know these insns
3861 will be executed each iteration. */
3863 if (not_every_iteration
&& GET_CODE (p
) == CODE_LABEL
3864 && no_labels_between_p (p
, loop_end
))
3865 not_every_iteration
= 0;
  /* Try to calculate and save the number of loop iterations.  This is
     set to zero if the actual number can not be calculated.  This must
     be called after all giv's have been identified, since otherwise it may
     fail if the iteration variable is a giv.  */

  loop_n_iterations = loop_iterations (loop_start, loop_end);

  /* Now for each giv for which we still don't know whether or not it is
     replaceable, check to see if it is replaceable because its final value
     can be calculated.  This must be done after loop_iterations is called,
     so that final_giv_value will work correctly.  */
3880 for (bl
= loop_iv_list
; bl
; bl
= bl
->next
)
3882 struct induction
*v
;
3884 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
3885 if (! v
->replaceable
&& ! v
->not_replaceable
)
3886 check_final_value (v
, loop_start
, loop_end
);
  /* Try to prove that the loop counter variable (if any) is always
     nonnegative; if so, record that fact with a REG_NONNEG note
     so that "decrement and branch until zero" insn can be used.  */
  check_dbra_loop (loop_end, insn_count, loop_start);
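  /* For illustration (hypothetical transformation): a counting loop such as

	for (i = 0; i < 100; i++)
	  ...			/* i unused except in the exit test */

     can be reversed so that i runs from 100 down to 0 and the exit test
     becomes a compare against zero; on machines with a
     decrement-and-branch-until-zero insn the decrement and the test then
     fold into a single instruction.  The REG_NONNEG note records that the
     counter never goes negative, which is what makes that insn usable.  */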
  /* Record loop variables relevant for BCT optimization before unrolling
     the loop.  Unrolling may update part of this information, and the
     correct data will be used for generating the BCT.  */
#ifdef HAVE_decrement_and_branch_on_count
  if (HAVE_decrement_and_branch_on_count)
    analyze_loop_iterations (loop_start, loop_end);
#endif
  /* Create reg_map to hold substitutions for replaceable giv regs.  */
  reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
  bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
  /* Examine each iv class for feasibility of strength reduction/induction
     variable elimination.  */

  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      struct induction *v;
      rtx final_value = 0;
3918 /* Test whether it will be possible to eliminate this biv
3919 provided all givs are reduced. This is possible if either
3920 the reg is not used outside the loop, or we can compute
3921 what its final value will be.
3923 For architectures with a decrement_and_branch_until_zero insn,
3924 don't do this if we put a REG_NONNEG note on the endtest for
3927 /* Compare against bl->init_insn rather than loop_start.
3928 We aren't concerned with any uses of the biv between
3929 init_insn and loop_start since these won't be affected
3930 by the value of the biv elsewhere in the function, so
3931 long as init_insn doesn't use the biv itself.
3932 March 14, 1989 -- self@bayes.arc.nasa.gov */
3934 if ((uid_luid
[REGNO_LAST_UID (bl
->regno
)] < INSN_LUID (loop_end
)
3936 && INSN_UID (bl
->init_insn
) < max_uid_for_loop
3937 && uid_luid
[REGNO_FIRST_UID (bl
->regno
)] >= INSN_LUID (bl
->init_insn
)
3938 #ifdef HAVE_decrement_and_branch_until_zero
3941 && ! reg_mentioned_p (bl
->biv
->dest_reg
, SET_SRC (bl
->init_set
)))
3942 || ((final_value
= final_biv_value (bl
, loop_start
, loop_end
))
3943 #ifdef HAVE_decrement_and_branch_until_zero
3947 bl
->eliminable
= maybe_eliminate_biv (bl
, loop_start
, end
, 0,
3948 threshold
, insn_count
);
3951 if (loop_dump_stream
)
3953 fprintf (loop_dump_stream
,
3954 "Cannot eliminate biv %d.\n",
3956 fprintf (loop_dump_stream
,
3957 "First use: insn %d, last use: insn %d.\n",
3958 REGNO_FIRST_UID (bl
->regno
),
3959 REGNO_LAST_UID (bl
->regno
));
3963 /* Combine all giv's for this iv_class. */
3966 /* This will be true at the end, if all givs which depend on this
3967 biv have been strength reduced.
3968 We can't (currently) eliminate the biv unless this is so. */
3971 /* Check each giv in this class to see if we will benefit by reducing
3972 it. Skip giv's combined with others. */
3973 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
3975 struct induction
*tv
;
3977 if (v
->ignore
|| v
->same
)
3980 benefit
= v
->benefit
;
3982 /* Reduce benefit if not replaceable, since we will insert
3983 a move-insn to replace the insn that calculates this giv.
3984 Don't do this unless the giv is a user variable, since it
3985 will often be marked non-replaceable because of the duplication
3986 of the exit code outside the loop. In such a case, the copies
3987 we insert are dead and will be deleted. So they don't have
3988 a cost. Similar situations exist. */
3989 /* ??? The new final_[bg]iv_value code does a much better job
3990 of finding replaceable giv's, and hence this code may no longer
3992 if (! v
->replaceable
&& ! bl
->eliminable
3993 && REG_USERVAR_P (v
->dest_reg
))
3994 benefit
-= copy_cost
;
3996 /* Decrease the benefit to count the add-insns that we will
3997 insert to increment the reduced reg for the giv. */
3998 benefit
-= add_cost
* bl
->biv_count
;
4000 /* Decide whether to strength-reduce this giv or to leave the code
4001 unchanged (recompute it from the biv each time it is used).
4002 This decision can be made independently for each giv. */
4005 /* Attempt to guess whether autoincrement will handle some of the
4006 new add insns; if so, increase BENEFIT (undo the subtraction of
4007 add_cost that was done above). */
4008 if (v
->giv_type
== DEST_ADDR
4009 && GET_CODE (v
->mult_val
) == CONST_INT
)
4011 #if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
4012 if (INTVAL (v
->mult_val
) == GET_MODE_SIZE (v
->mem_mode
))
4013 benefit
+= add_cost
* bl
->biv_count
;
4015 #if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
4016 if (-INTVAL (v
->mult_val
) == GET_MODE_SIZE (v
->mem_mode
))
4017 benefit
+= add_cost
* bl
->biv_count
;
	  /* If an insn is not to be strength reduced, then set its ignore
	     flag, and clear all_reduced.  */

	  /* A giv that depends on a reversed biv must be reduced if it is
	     used after the loop exit, otherwise, it would have the wrong
	     value after the loop exit.  To make it simple, just reduce all
	     such giv's whether or not we know they are used after the loop
	     exit.  */

	  if (! flag_reduce_all_givs
	      && v->lifetime * threshold * benefit < insn_count
	      && ! bl->reversed)
	    {
	      if (loop_dump_stream)
		fprintf (loop_dump_stream,
			 "giv of insn %d not worth while, %d vs %d.\n",
			 INSN_UID (v->insn),
			 v->lifetime * threshold * benefit, insn_count);
4044 /* Check that we can increment the reduced giv without a
4045 multiply insn. If not, reject it. */
4047 for (tv
= bl
->biv
; tv
; tv
= tv
->next_iv
)
4048 if (tv
->mult_val
== const1_rtx
4049 && ! product_cheap_p (tv
->add_val
, v
->mult_val
))
4051 if (loop_dump_stream
)
4052 fprintf (loop_dump_stream
,
4053 "giv of insn %d: would need a multiply.\n",
4054 INSN_UID (v
->insn
));
4062 /* Reduce each giv that we decided to reduce. */
4064 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
4066 struct induction
*tv
;
4067 if (! v
->ignore
&& v
->same
== 0)
4069 int auto_inc_opt
= 0;
4071 v
->new_reg
= gen_reg_rtx (v
->mode
);
4074 /* If the target has auto-increment addressing modes, and
4075 this is an address giv, then try to put the increment
4076 immediately after its use, so that flow can create an
4077 auto-increment addressing mode. */
4078 if (v
->giv_type
== DEST_ADDR
&& bl
->biv_count
== 1
4079 && bl
->biv
->always_executed
&& ! bl
->biv
->maybe_multiple
4080 /* We don't handle reversed biv's because bl->biv->insn
4081 does not have a valid INSN_LUID. */
4083 && v
->always_executed
&& ! v
->maybe_multiple
)
4085 /* If other giv's have been combined with this one, then
4086 this will work only if all uses of the other giv's occur
4087 before this giv's insn. This is difficult to check.
4089 We simplify this by looking for the common case where
4090 there is one DEST_REG giv, and this giv's insn is the
4091 last use of the dest_reg of that DEST_REG giv. If the
4092 the increment occurs after the address giv, then we can
4093 perform the optimization. (Otherwise, the increment
4094 would have to go before other_giv, and we would not be
4095 able to combine it with the address giv to get an
4096 auto-inc address.) */
4097 if (v
->combined_with
)
4099 struct induction
*other_giv
= 0;
4101 for (tv
= bl
->giv
; tv
; tv
= tv
->next_iv
)
4109 if (! tv
&& other_giv
4110 && REGNO (other_giv
->dest_reg
) < max_reg_before_loop
4111 && (REGNO_LAST_UID (REGNO (other_giv
->dest_reg
))
4112 == INSN_UID (v
->insn
))
4113 && INSN_LUID (v
->insn
) < INSN_LUID (bl
->biv
->insn
))
                  /* Check for case where increment is before the address
                     giv.  */
                  else if (INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn))

              /* We can't put an insn immediately after one setting
                 cc0, or immediately before one using cc0.  */
              if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
                  || (auto_inc_opt == -1
                      && (prev = prev_nonnote_insn (v->insn)) != 0
                      && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
                      && sets_cc0_p (PATTERN (prev))))

                v->auto_inc_opt = 1;

              /* For each place where the biv is incremented, add an insn
                 to increment the new, reduced reg for the giv.  */
              for (tv = bl->biv; tv; tv = tv->next_iv)
                    insert_before = tv->insn;
                  else if (auto_inc_opt == 1)
                    insert_before = NEXT_INSN (v->insn);
                    insert_before = v->insn;

                  if (tv->mult_val == const1_rtx)
                    emit_iv_add_mult (tv->add_val, v->mult_val,
                                      v->new_reg, v->new_reg, insert_before);
                  else /* tv->mult_val == const0_rtx */
                    /* A multiply is acceptable here
                       since this is presumed to be seldom executed.  */
                    emit_iv_add_mult (tv->add_val, v->mult_val,
                                      v->add_val, v->new_reg, insert_before);

              /* Add code at loop start to initialize giv's reduced reg.  */

              emit_iv_add_mult (bl->initial_value, v->mult_val,
                                v->add_val, v->new_reg, loop_start);
      /* Rescan all givs.  If a giv is the same as a giv not reduced, mark it
         as not reduced.

         For each giv register that can be reduced now: if replaceable,
         substitute reduced reg wherever the old giv occurs;
         else add new move insn "giv_reg = reduced_reg".

         Also check for givs whose first use is their definition and whose
         last use is the definition of another giv.  If so, it is likely
         dead and should not be used to eliminate a biv.  */
      for (v = bl->giv; v; v = v->next_iv)
          if (v->same && v->same->ignore)

          if (v->giv_type == DEST_REG
              && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
              struct induction *v1;

              for (v1 = bl->giv; v1; v1 = v1->next_iv)
                if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))

          /* Update expression if this was combined, in case other giv was
             replaced.  */
          v->new_reg = replace_rtx (v->new_reg,
                                    v->same->dest_reg, v->same->new_reg);

          if (v->giv_type == DEST_ADDR)
            /* Store reduced reg as the address in the memref where we found
               this giv.  */
            validate_change (v->insn, v->location, v->new_reg, 0);
          else if (v->replaceable)
              reg_map[REGNO (v->dest_reg)] = v->new_reg;

              /* I can no longer duplicate the original problem.  Perhaps
                 this is unnecessary now?  */

              /* Replaceable; it isn't strictly necessary to delete the old
                 insn and emit a new one, because v->dest_reg is now dead.

                 However, especially when unrolling loops, the special
                 handling for (set REG0 REG1) in the second cse pass may
                 make v->dest_reg live again.  To avoid this problem, emit
                 an insn to set the original giv reg from the reduced giv.
                 We can not delete the original insn, since it may be part
                 of a LIBCALL, and the code in flow that eliminates dead
                 libcalls will fail if it is deleted.  */
              emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),

              /* Not replaceable; emit an insn to set the original giv reg from
                 the reduced giv, same as above.  */
              emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),

          /* When a loop is reversed, givs which depend on the reversed
             biv, and which are live outside the loop, must be set to their
             correct final value.  This insn is only needed if the giv is
             not replaceable.  The correct final value is the same as the
             value that the giv starts the reversed loop with.  */
          if (bl->reversed && ! v->replaceable)
            emit_iv_add_mult (bl->initial_value, v->mult_val,
                              v->add_val, v->dest_reg, end_insert_before);
          else if (v->final_value)
              /* If the loop has multiple exits, emit the insn before the
                 loop to ensure that it will always be executed no matter
                 how the loop exits.  Otherwise, emit the insn after the loop,
                 since this is slightly more efficient.  */
              if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
                insert_before = loop_start;
                insert_before = end_insert_before;
              emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),

              /* If the insn to set the final value of the giv was emitted
                 before the loop, then we must delete the insn inside the loop
                 that sets it.  If this is a LIBCALL, then we must delete
                 every insn in the libcall.  Note, however, that
                 final_giv_value will only succeed when there are multiple
                 exits if the giv is dead at each exit, hence it does not
                 matter that the original insn remains because it is dead
                 anyways.  */

              /* Delete the insn inside the loop that sets the giv since
                 the giv is now set before (or after) the loop.  */
              delete_insn (v->insn);

          if (loop_dump_stream)
              fprintf (loop_dump_stream, "giv at %d reduced to ",
                       INSN_UID (v->insn));
              print_rtl (loop_dump_stream, v->new_reg);
              fprintf (loop_dump_stream, "\n");

      /* All the givs based on the biv bl have been reduced if they
         merit it.  */

      /* For each giv not marked as maybe dead that has been combined with a
         second giv, clear any "maybe dead" mark on that second giv.
         v->new_reg will either be or refer to the register of the giv it
         combined with.

         Doing this clearing avoids problems in biv elimination where a
         giv's new_reg is a complex value that can't be put in the insn but
         the giv combined with (with a reg as new_reg) is marked maybe_dead.
         Since the register will be used in either case, we'd prefer it be
         used from the simpler giv.  */

      for (v = bl->giv; v; v = v->next_iv)
        if (! v->maybe_dead && v->same)
          v->same->maybe_dead = 0;

      /* Try to eliminate the biv, if it is a candidate.
         This won't work if ! all_reduced,
         since the givs we planned to use might not have been reduced.

         We have to be careful that we didn't initially think we could eliminate
         this biv because of a giv that we now think may be dead and shouldn't
         be used as a biv replacement.

         Also, there is the possibility that we may have a giv that looks
         like it can be used to eliminate a biv, but the resulting insn
         isn't valid.  This can happen, for example, on the 88k, where a
         JUMP_INSN can compare a register only with zero.  Attempts to
         replace it with a compare with a constant will fail.

         Note that in cases where this call fails, we may have replaced some
         of the occurrences of the biv with a giv, but no harm was done in
         doing so in the rare cases where it can occur.  */

      if (all_reduced == 1 && bl->eliminable
          && maybe_eliminate_biv (bl, loop_start, end, 1,
                                  threshold, insn_count))
          /* ?? If we created a new test to bypass the loop entirely,
             or otherwise drop straight in, based on this test, then
             we might want to rewrite it also.  This way some later
             pass has more hope of removing the initialization of this
             biv entirely.  */

          /* If final_value != 0, then the biv may be used after loop end
             and we must emit an insn to set it just in case.

             Reversed bivs already have an insn after the loop setting their
             value, so we don't need another one.  We can't calculate the
             proper final value for such a biv here anyways.  */
          if (final_value != 0 && ! bl->reversed)
              /* If the loop has multiple exits, emit the insn before the
                 loop to ensure that it will always be executed no matter
                 how the loop exits.  Otherwise, emit the insn after the
                 loop, since this is slightly more efficient.  */
              if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
                insert_before = loop_start;
                insert_before = end_insert_before;

              emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
          /* Delete all of the instructions inside the loop which set
             the biv, as they are all dead.  It is safe to delete them,
             because an insn setting a biv will never be part of a libcall.  */
          /* However, deleting them will invalidate the regno_last_uid info,
             so keeping them around is more convenient.  Final_biv_value
             will only succeed when there are multiple exits if the biv
             is dead at each exit, hence it does not matter that the original
             insn remains, because it is dead anyways.  */
          for (v = bl->biv; v; v = v->next_iv)
            delete_insn (v->insn);

          if (loop_dump_stream)
            fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",

  /* Go through all the instructions in the loop, making all the
     register substitutions scheduled in REG_MAP.  */

  for (p = loop_start; p != end; p = NEXT_INSN (p))
    if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
        || GET_CODE (p) == CALL_INSN)
        replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
        replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);

  /* Unroll loops from within strength reduction so that we can use the
     induction variable information that strength_reduce has already
     collected.  */

    unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);

  /* Instrument the loop with a BCT insn.  */
#ifdef HAVE_decrement_and_branch_on_count
  if (HAVE_decrement_and_branch_on_count)
    insert_bct (loop_start, loop_end);
#endif

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\n");
/* Return 1 if X is a valid source for an initial value (or as value being
   compared against in an initial test).

   X must be either a register or constant and must not be clobbered between
   the current insn and the start of the loop.

   INSN is the insn containing X.  */

valid_initial_value_p (x, insn, call_seen, loop_start)

  /* Only consider pseudos we know about initialized in insns whose luids
     we know.  */
  if (GET_CODE (x) != REG
      || REGNO (x) >= max_reg_before_loop)

  /* Don't use call-clobbered registers across a call which clobbers it.  On
     some machines, don't use any hard registers at all.  */
  if (REGNO (x) < FIRST_PSEUDO_REGISTER
      && (SMALL_REGISTER_CLASSES
          || (call_used_regs[REGNO (x)] && call_seen)))

  /* Don't use registers that have been clobbered before the start of the
     loop.  */
  if (reg_set_between_p (x, insn, loop_start))
/* Scan X for memory refs and check each memory address
   as a possible giv.  INSN is the insn whose pattern X comes from.
   NOT_EVERY_ITERATION is 1 if the insn might not be executed during
   every loop iteration.  */
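/* Illustrative example (editor's addition, not from the original sources):
   inside a loop over an array, a memory reference may have an address of the
   form (plus (reg biv) (const_int offset)); relative to that biv it is a
   DEST_ADDR giv with mult_val const1_rtx and the constant as add_val, and
   record_giv is called below with addr_placeholder as the destination and
   &XEXP (x, 0) as the location to patch if the giv is later reduced.  */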
find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
     int not_every_iteration;
     rtx loop_start, loop_end;

  register enum rtx_code code;

  code = GET_CODE (x);

      benefit = general_induction_var (XEXP (x, 0),
                                       &src_reg, &add_val, &mult_val);

      /* Don't make a DEST_ADDR giv with mult_val == 1 && add_val == 0.
         Such a giv isn't useful.  */
      if (benefit > 0 && (mult_val != const1_rtx || add_val != const0_rtx))
          /* Found one; record it.  */
          v = (struct induction *) oballoc (sizeof (struct induction));

          record_giv (v, insn, src_reg, addr_placeholder, mult_val,
                      add_val, benefit, DEST_ADDR, not_every_iteration,
                      &XEXP (x, 0), loop_start, loop_end);

          v->mem_mode = GET_MODE (x);

  /* Recursively scan the subexpressions for other mem refs.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
                     loop_end);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
                       loop_start, loop_end);
/* Fill in the data about one biv update.
   V is the `struct induction' in which we record the biv.  (It is
   allocated by the caller, with alloca.)
   INSN is the insn that sets it.
   DEST_REG is the biv's reg.

   MULT_VAL is const1_rtx if the biv is being incremented here, in which case
   INC_VAL is the increment.  Otherwise, MULT_VAL is const0_rtx and the biv is
   being set to INC_VAL.

   NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
   executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
   can be executed more than once per iteration.  If MAYBE_MULTIPLE
   and NOT_EVERY_ITERATION are both zero, we know that the biv update is
   executed exactly once per iteration.  */
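/* Illustrative examples (editor's addition, not from the original sources):
   an update such as

        (set (reg i) (plus (reg i) (const_int 4)))

   is recorded with MULT_VAL == const1_rtx and INC_VAL == (const_int 4),
   while an assignment of a loop invariant, e.g.

        (set (reg i) (reg invariant))

   is recorded with MULT_VAL == const0_rtx and INC_VAL the invariant.  */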
record_biv (v, insn, dest_reg, inc_val, mult_val,
            not_every_iteration, maybe_multiple)
     struct induction *v;
     int not_every_iteration;

  struct iv_class *bl;

  v->src_reg = dest_reg;
  v->dest_reg = dest_reg;
  v->mult_val = mult_val;
  v->add_val = inc_val;
  v->mode = GET_MODE (dest_reg);
  v->always_computable = ! not_every_iteration;
  v->always_executed = ! not_every_iteration;
  v->maybe_multiple = maybe_multiple;

  /* Add this to the reg's iv_class, creating a class
     if this is the first incrementation of the reg.  */

  bl = reg_biv_class[REGNO (dest_reg)];

      /* Create and initialize new iv_class.  */

      bl = (struct iv_class *) oballoc (sizeof (struct iv_class));

      bl->regno = REGNO (dest_reg);
      /* Set initial value to the reg itself.  */
      bl->initial_value = dest_reg;
      /* We haven't seen the initializing insn yet.  */
      bl->initial_test = 0;
      bl->incremented = 0;
      bl->total_benefit = 0;

      /* Add this class to loop_iv_list.  */
      bl->next = loop_iv_list;

      /* Put it in the array of biv register classes.  */
      reg_biv_class[REGNO (dest_reg)] = bl;

  /* Update IV_CLASS entry for this biv.  */
  v->next_iv = bl->biv;

  if (mult_val == const1_rtx)
    bl->incremented = 1;

  if (loop_dump_stream)
      fprintf (loop_dump_stream,
               "Insn %d: possible biv, reg %d,",
               INSN_UID (insn), REGNO (dest_reg));
      if (GET_CODE (inc_val) == CONST_INT)
        fprintf (loop_dump_stream, " const = %d\n",
          fprintf (loop_dump_stream, " const = ");
          print_rtl (loop_dump_stream, inc_val);
          fprintf (loop_dump_stream, "\n");
/* Fill in the data about one giv.
   V is the `struct induction' in which we record the giv.  (It is
   allocated by the caller, with alloca.)
   INSN is the insn that sets it.
   BENEFIT estimates the savings from deleting this insn.
   TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
   into a register or is used as a memory address.

   SRC_REG is the biv reg which the giv is computed from.
   DEST_REG is the giv's reg (if the giv is stored in a reg).
   MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
   LOCATION points to the place where this giv's value appears in INSN.  */
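/* Illustrative example (editor's addition, not from the original sources):
   for source code like

        int *p = base;
        for (i = 0; i < n; i++)
          p[i] = 0;

   the biv is i, and (assuming 4-byte ints) the store address base + i*4 is
   a giv with MULT_VAL == (const_int 4) and ADD_VAL the invariant value of
   base, recorded here with TYPE == DEST_ADDR.  */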
record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
            type, not_every_iteration, location, loop_start, loop_end)
     struct induction *v;
     rtx mult_val, add_val;
     int not_every_iteration;
     rtx loop_start, loop_end;

  struct induction *b;
  struct iv_class *bl;
  rtx set = single_set (insn);

  v->src_reg = src_reg;
  v->dest_reg = dest_reg;
  v->mult_val = mult_val;
  v->add_val = add_val;
  v->benefit = benefit;
  v->location = location;
  v->combined_with = 0;
  v->maybe_multiple = 0;
  v->derive_adjustment = 0;
  v->auto_inc_opt = 0;

  /* The v->always_computable field is used in update_giv_derive, to
     determine whether a giv can be used to derive another giv.  For a
     DEST_REG giv, INSN computes a new value for the giv, so its value
     isn't computable if INSN isn't executed every iteration.
     However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
     it does not compute a new value.  Hence the value is always computable
     regardless of whether INSN is executed each iteration.  */

  if (type == DEST_ADDR)
    v->always_computable = 1;
    v->always_computable = ! not_every_iteration;

  v->always_executed = ! not_every_iteration;

  if (type == DEST_ADDR)
      v->mode = GET_MODE (*location);
  else /* type == DEST_REG */
      v->mode = GET_MODE (SET_DEST (set));

      v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
                     - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);

      v->times_used = n_times_used[REGNO (dest_reg)];

      /* If the lifetime is zero, it means that this register is
         really a dead store.  So mark this as a giv that can be
         ignored.  This will not prevent the biv from being eliminated.  */
      if (v->lifetime == 0)

      reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
      reg_iv_info[REGNO (dest_reg)] = v;

  /* Add the giv to the class of givs computed from one biv.  */

  bl = reg_biv_class[REGNO (src_reg)];
      v->next_iv = bl->giv;

      /* Don't count DEST_ADDR.  This is supposed to count the number of
         insns that calculate givs.  */
      if (type == DEST_REG)
      bl->total_benefit += benefit;
    /* Fatal error, biv missing for this giv?  */

  if (type == DEST_ADDR)

      /* The giv can be replaced outright by the reduced register only if all
         of the following conditions are true:
         - the insn that sets the giv is always executed on any iteration
           on which the giv is used at all
           (there are two ways to deduce this:
            either the insn is executed on every iteration,
            or all uses follow that insn in the same basic block),
         - the giv is not used outside the loop
         - no assignments to the biv occur during the giv's lifetime.  */

      if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
          /* Previous line always fails if INSN was moved by loop opt.  */
          && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
          && (! not_every_iteration
              || last_use_this_basic_block (dest_reg, insn)))
          /* Now check that there are no assignments to the biv within the
             giv's lifetime.  This requires two separate checks.  */

          /* Check each biv update, and fail if any are between the first
             and last use of the giv.

             If this loop contains an inner loop that was unrolled, then
             the insn modifying the biv may have been emitted by the loop
             unrolling code, and hence does not have a valid luid.  Just
             mark the biv as not replaceable in this case.  It is not very
             useful as a biv, because it is used in two different loops.
             It is very unlikely that we would be able to optimize the giv
             using this biv anyways.  */

          for (b = bl->biv; b; b = b->next_iv)
              if (INSN_UID (b->insn) >= max_uid_for_loop
                  || ((uid_luid[INSN_UID (b->insn)]
                       >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
                      && (uid_luid[INSN_UID (b->insn)]
                          <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
                  v->not_replaceable = 1;

          /* If there are any backwards branches that go from after the
             biv update to before it, then this giv is not replaceable.  */
            for (b = bl->biv; b; b = b->next_iv)
              if (back_branch_in_range_p (b->insn, loop_start, loop_end))
                  v->not_replaceable = 1;

          /* May still be replaceable, we don't have enough info here to
             decide.  */
          v->not_replaceable = 0;

  if (loop_dump_stream)
      if (type == DEST_REG)
        fprintf (loop_dump_stream, "Insn %d: giv reg %d",
                 INSN_UID (insn), REGNO (dest_reg));
        fprintf (loop_dump_stream, "Insn %d: dest address",

      fprintf (loop_dump_stream, " src reg %d benefit %d",
               REGNO (src_reg), v->benefit);
      fprintf (loop_dump_stream, " used %d lifetime %d",
               v->times_used, v->lifetime);

        fprintf (loop_dump_stream, " replaceable");

      if (GET_CODE (mult_val) == CONST_INT)
        fprintf (loop_dump_stream, " mult %d",
          fprintf (loop_dump_stream, " mult ");
          print_rtl (loop_dump_stream, mult_val);

      if (GET_CODE (add_val) == CONST_INT)
        fprintf (loop_dump_stream, " add %d",
          fprintf (loop_dump_stream, " add ");
          print_rtl (loop_dump_stream, add_val);

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\n");
/* All this does is determine whether a giv can be made replaceable because
   its final value can be calculated.  This code can not be part of record_giv
   above, because final_giv_value requires that the number of loop iterations
   be known, and that can not be accurately calculated until after all givs
   have been identified.  */

check_final_value (v, loop_start, loop_end)
     struct induction *v;
     rtx loop_start, loop_end;

  struct iv_class *bl;
  rtx final_value = 0;

  bl = reg_biv_class[REGNO (v->src_reg)];

  /* DEST_ADDR givs will never reach here, because they are always marked
     replaceable above in record_giv.  */

  /* The giv can be replaced outright by the reduced register only if all
     of the following conditions are true:
     - the insn that sets the giv is always executed on any iteration
       on which the giv is used at all
       (there are two ways to deduce this:
        either the insn is executed on every iteration,
        or all uses follow that insn in the same basic block),
     - its final value can be calculated (this condition is different
       than the one above in record_giv)
     - no assignments to the biv occur during the giv's lifetime.  */

  /* This is only called now when replaceable is known to be false.  */
  /* Clear replaceable, so that it won't confuse final_giv_value.  */

  if ((final_value = final_giv_value (v, loop_start, loop_end))
      && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
      int biv_increment_seen = 0;

      /* When trying to determine whether or not a biv increment occurs
         during the lifetime of the giv, we can ignore uses of the variable
         outside the loop because final_value is true.  Hence we can not
         use regno_last_uid and regno_first_uid as above in record_giv.  */

      /* Search the loop to determine whether any assignments to the
         biv occur during the giv's lifetime.  Start with the insn
         that sets the giv, and search around the loop until we come
         back to that insn again.

         Also fail if there is a jump within the giv's lifetime that jumps
         to somewhere outside the lifetime but still within the loop.  This
         catches spaghetti code where the execution order is not linear, and
         hence the above test fails.  Here we assume that the giv lifetime
         does not extend from one iteration of the loop to the next, so as
         to make the test easier.  Since the lifetime isn't known yet,
         this requires two loops.  See also record_giv above.  */

      last_giv_use = v->insn;

            p = NEXT_INSN (loop_start);

          if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
              || GET_CODE (p) == CALL_INSN)
              if (biv_increment_seen)
                  if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
                      v->not_replaceable = 1;
              else if (reg_set_p (v->src_reg, PATTERN (p)))
                biv_increment_seen = 1;
              else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))

      /* Now that the lifetime of the giv is known, check for branches
         from within the lifetime to outside the lifetime if it is still
         replaceable.  */

            p = NEXT_INSN (loop_start);
          if (p == last_giv_use)

          if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
              && LABEL_NAME (JUMP_LABEL (p))
              && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
                  || (INSN_UID (v->insn) >= max_uid_for_loop)
                  || (INSN_UID (last_giv_use) >= max_uid_for_loop)
                  || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
                      && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
                  || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
                      && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
              v->not_replaceable = 1;

              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         "Found branch outside giv lifetime.\n");

  /* If it is replaceable, then save the final value.  */
    v->final_value = final_value;

  if (loop_dump_stream && v->replaceable)
    fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
             INSN_UID (v->insn), REGNO (v->dest_reg));
/* Update the status of whether a giv can derive other givs.

   We need to do something special if there is or may be an update to the biv
   between the time the giv is defined and the time it is used to derive
   another giv.

   In addition, a giv that is only conditionally set is not allowed to
   derive another giv once a label has been passed.

   The cases we look at are when a label or an update to a biv is passed.  */

update_giv_derive (p)

  struct iv_class *bl;
  struct induction *biv, *giv;

  /* Search all IV classes, then all bivs, and finally all givs.

     There are three cases we are concerned with.  First we have the situation
     of a giv that is only updated conditionally.  In that case, it may not
     derive any givs after a label is passed.

     The second case is when a biv update occurs, or may occur, after the
     definition of a giv.  For certain biv updates (see below) that are
     known to occur between the giv definition and use, we can adjust the
     giv definition.  For others, or when the biv update is conditional,
     we must prevent the giv from deriving any other givs.  There are two
     sub-cases within this case.

     If this is a label, we are concerned with any biv update that is done
     conditionally, since it may be done after the giv is defined followed by
     a branch here (actually, we need to pass both a jump and a label, but
     this extra tracking doesn't seem worth it).

     If this is a jump, we are concerned about any biv update that may be
     executed multiple times.  We are actually only concerned about
     backward jumps, but it is probably not worth performing the test
     on the jump again here.

     If this is a biv update, we must adjust the giv status to show that a
     subsequent biv update was performed.  If this adjustment cannot be done,
     the giv cannot derive further givs.  */

  for (bl = loop_iv_list; bl; bl = bl->next)
    for (biv = bl->biv; biv; biv = biv->next_iv)
      if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN

        for (giv = bl->giv; giv; giv = giv->next_iv)
            /* If cant_derive is already true, there is no point in
               checking all of these conditions again.  */
            if (giv->cant_derive)

            /* If this giv is conditionally set and we have passed a label,
               it cannot derive anything.  */
            if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
              giv->cant_derive = 1;

            /* Skip givs that have mult_val == 0, since
               they are really invariants.  Also skip those that are
               replaceable, since we know their lifetime doesn't contain
               this biv update.  */
            else if (giv->mult_val == const0_rtx || giv->replaceable)

            /* The only way we can allow this giv to derive another
               is if this is a biv increment and we can form the product
               of biv->add_val and giv->mult_val.  In this case, we will
               be able to compute a compensation.  */
            else if (biv->insn == p)
                if (biv->mult_val == const1_rtx)
                  tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,

                if (tem && giv->derive_adjustment)
                  tem = simplify_giv_expr (gen_rtx_PLUS (giv->mode, tem,
                                                         giv->derive_adjustment),
                  giv->derive_adjustment = tem;
                  giv->cant_derive = 1;
            else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
                     || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
              giv->cant_derive = 1;
/* Check whether an insn is an increment legitimate for a basic induction var.
   X is the source of insn P, or a part of it.
   MODE is the mode in which X should be interpreted.

   DEST_REG is the putative biv, also the destination of the insn.
   We accept patterns of these forms:
     REG = REG + INVARIANT (includes REG = REG - CONSTANT)
     REG = INVARIANT + REG

   If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
   and store the additive term into *INC_VAL.

   If X is an assignment of an invariant into DEST_REG, we set
   *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.

   We also want to detect a BIV when it corresponds to a variable
   whose mode was promoted via PROMOTED_MODE.  In that case, an increment
   of the variable may be a PLUS that adds a SUBREG of that variable to
   an invariant and then sign- or zero-extends the result of the PLUS
   into the variable.

   Most GIVs in such cases will be in the promoted mode, since that is
   probably the natural computation mode (and almost certainly the mode
   used for addresses) on the machine.  So we view the pseudo-reg containing
   the variable as the BIV, as if it were simply incremented.

   Note that treating the entire pseudo as a BIV will result in making
   simple increments to any GIVs based on it.  However, if the variable
   overflows in its declared mode but not its promoted mode, the result will
   be incorrect.  This is acceptable if the variable is signed, since
   overflows in such cases are undefined, but not if it is unsigned, since
   those overflows are defined.  So we only check for SIGN_EXTEND and
   not ZERO_EXTEND.

   If we cannot find a biv, we return 0.  */
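/* Illustrative example (editor's addition, not from the original sources):
   given the insn

        (set (reg 65) (plus:SI (reg 65) (const_int 1)))

   this function is called with X = (plus:SI (reg 65) (const_int 1)) and
   DEST_REG = (reg 65); it returns 1 with *MULT_VAL = const1_rtx and
   *INC_VAL = (const_int 1), so reg 65 is recorded as a biv.  The register
   number 65 is hypothetical.  */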
basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
     enum machine_mode mode;

  register enum rtx_code code;

  code = GET_CODE (x);

      if (XEXP (x, 0) == dest_reg
          || (GET_CODE (XEXP (x, 0)) == SUBREG
              && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
              && SUBREG_REG (XEXP (x, 0)) == dest_reg))
      else if (XEXP (x, 1) == dest_reg
               || (GET_CODE (XEXP (x, 1)) == SUBREG
                   && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
                   && SUBREG_REG (XEXP (x, 1)) == dest_reg))

      if (invariant_p (arg) != 1)

      *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
      *mult_val = const1_rtx;

      /* If this is a SUBREG for a promoted variable, check the inner
         value.  */
      if (SUBREG_PROMOTED_VAR_P (x))
        return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
                                    dest_reg, p, inc_val, mult_val);

      /* If this register is assigned in the previous insn, look at its
         source, but don't go outside the loop or past a label.  */

      for (insn = PREV_INSN (p);
           (insn && GET_CODE (insn) == NOTE
            && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
           insn = PREV_INSN (insn))

          set = single_set (insn);
          && (SET_DEST (set) == x
              || (GET_CODE (SET_DEST (set)) == SUBREG
                  && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
                  && SUBREG_REG (SET_DEST (set)) == x)))
        return basic_induction_var (SET_SRC (set),
                                    (GET_MODE (SET_SRC (set)) == VOIDmode
                                     : GET_MODE (SET_SRC (set))),

      /* ... fall through ...  */

      /* Can accept constant setting of biv only when inside innermost loop.
         Otherwise, a biv of an inner loop may be incorrectly recognized
         as a biv of the outer loop,
         causing code to be moved INTO the inner loop.  */
      if (invariant_p (x) != 1)

      if (loops_enclosed == 1)
          /* Possible bug here?  Perhaps we don't know the mode of X.  */
          *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
          *mult_val = const0_rtx;

      return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
                                  dest_reg, p, inc_val, mult_val);

      /* Similar, since this can be a sign extension.  */
      for (insn = PREV_INSN (p);
           (insn && GET_CODE (insn) == NOTE
            && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
           insn = PREV_INSN (insn))

        set = single_set (insn);

      if (set && SET_DEST (set) == XEXP (x, 0)
          && GET_CODE (XEXP (x, 1)) == CONST_INT
          && INTVAL (XEXP (x, 1)) >= 0
          && GET_CODE (SET_SRC (set)) == ASHIFT
          && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
        return basic_induction_var (XEXP (SET_SRC (set), 0),
                                    GET_MODE (XEXP (x, 0)),
                                    dest_reg, insn, inc_val, mult_val);
/* A general induction variable (giv) is any quantity that is a linear
   function of a basic induction variable,
   i.e. giv = biv * mult_val + add_val.
   The coefficients can be any loop invariant quantity.
   A giv need not be computed directly from the biv;
   it can be computed by way of other givs.  */
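/* Worked example (editor's addition, not from the original sources):
   if the biv is a counter i incremented by 1 each iteration, then the
   address expression base + i*4 is a giv with mult_val == 4 and
   add_val == base; after strength reduction it can be kept in a register
   of its own and advanced by 4 (mult_val times the biv increment) each
   iteration instead of being recomputed from i.  */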
/* Determine whether X computes a giv.
   If it does, return a nonzero value
     which is the benefit from eliminating the computation of X;
   set *SRC_REG to the register of the biv that it is computed from;
   set *ADD_VAL and *MULT_VAL to the coefficients,
     such that the value of X is biv * mult + add;  */

general_induction_var (x, src_reg, add_val, mult_val)

  /* If this is an invariant, forget it, it isn't a giv.  */
  if (invariant_p (x) == 1)

  /* See if the expression could be a giv and get its form.
     Mark our place on the obstack in case we don't find a giv.  */
  storage = (char *) oballoc (0);
  x = simplify_giv_expr (x, &benefit);

  switch (GET_CODE (x))
      /* Since this is now an invariant and wasn't before, it must be a giv
         with MULT_VAL == 0.  It doesn't matter which BIV we associate this
         with.  */
      *src_reg = loop_iv_list->biv->dest_reg;
      *mult_val = const0_rtx;

      /* This is equivalent to a BIV.  */
      *mult_val = const1_rtx;
      *add_val = const0_rtx;

      /* Either (plus (biv) (invar)) or
         (plus (mult (biv) (invar_1)) (invar_2)).  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
          *src_reg = XEXP (XEXP (x, 0), 0);
          *mult_val = XEXP (XEXP (x, 0), 1);
          *src_reg = XEXP (x, 0);
          *mult_val = const1_rtx;
      *add_val = XEXP (x, 1);

      /* ADD_VAL is zero.  */
      *src_reg = XEXP (x, 0);
      *mult_val = XEXP (x, 1);
      *add_val = const0_rtx;

  /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
     unless they are CONST_INT).  */
  if (GET_CODE (*add_val) == USE)
    *add_val = XEXP (*add_val, 0);
  if (GET_CODE (*mult_val) == USE)
    *mult_val = XEXP (*mult_val, 0);

  benefit += rtx_cost (orig_x, SET);

  /* Always return some benefit if this is a giv so it will be detected
     as such.  This allows elimination of bivs that might otherwise
     not be eliminated.  */
  return benefit == 0 ? 1 : benefit;
/* Given an expression, X, try to form it as a linear function of a biv.
   We will canonicalize it to be of the form
        (plus (mult (BIV) (invar_1))
              (invar_2))
   with possible degeneracies.

   The invariant expressions must each be of a form that can be used as a
   machine operand.  We surround them with a USE rtx (a hack, but localized
   and certainly unambiguous!) if not a CONST_INT for simplicity in this
   routine; it is the caller's responsibility to strip them.

   If no such canonicalization is possible (i.e., two biv's are used or an
   expression that is neither invariant nor a biv or giv), this routine
   returns 0.

   For a non-zero return, the result will have a code of CONST_INT, USE,
   REG (for a BIV), PLUS, or MULT.  No other codes will occur.

   *BENEFIT will be incremented by the benefit of any sub-giv encountered.  */
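/* Illustrative example (editor's addition, not from the original sources):
   an expression such as (i + 3) * 4, with i a biv, is canonicalized to

        (plus (mult (reg i) (const_int 4))
              (const_int 12))

   by the MULT case below, which distributes over the inner PLUS and folds
   the constant product 3 * 4 into 12.  */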
simplify_giv_expr (x, benefit)

  enum machine_mode mode = GET_MODE (x);

  /* If this is not an integer mode, or if we cannot do arithmetic in this
     mode, this can't be a giv.  */
  if (mode != VOIDmode
      && (GET_MODE_CLASS (mode) != MODE_INT
          || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))

  switch (GET_CODE (x))
      arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
      arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
      if (arg0 == 0 || arg1 == 0)

      /* Put constant last, CONST_INT last if both constant.  */
      if ((GET_CODE (arg0) == USE
           || GET_CODE (arg0) == CONST_INT)
          && GET_CODE (arg1) != CONST_INT)
        tem = arg0, arg0 = arg1, arg1 = tem;

      /* Handle addition of zero, then addition of an invariant.  */
      if (arg1 == const0_rtx)
      else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
        switch (GET_CODE (arg0))
            /* Both invariant.  Only valid if sum is machine operand.
               First strip off possible USE on the operands.  */
            if (GET_CODE (arg0) == USE)
              arg0 = XEXP (arg0, 0);
            if (GET_CODE (arg1) == USE)
              arg1 = XEXP (arg1, 0);

            if (CONSTANT_P (arg0) && GET_CODE (arg1) == CONST_INT)
                tem = plus_constant (arg0, INTVAL (arg1));
                if (GET_CODE (tem) != CONST_INT)
                  tem = gen_rtx_USE (mode, tem);
                /* Adding two invariants must result in an invariant,
                   so enclose addition operation inside a USE and
                   return it.  */
                tem = gen_rtx_USE (mode, gen_rtx_PLUS (mode, arg0, arg1));

            /* biv + invar or mult + invar.  Return sum.  */
            return gen_rtx_PLUS (mode, arg0, arg1);

            /* (a + invar_1) + invar_2.  Associate.  */
            return simplify_giv_expr (gen_rtx_PLUS (mode,
                                                    XEXP (arg0, 1), arg1)),

      /* Each argument must be either REG, PLUS, or MULT.  Convert REG to
         MULT to reduce cases.  */
      if (GET_CODE (arg0) == REG)
        arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
      if (GET_CODE (arg1) == REG)
        arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);

      /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
         Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
         Recurse to associate the second PLUS.  */
      if (GET_CODE (arg1) == MULT)
        tem = arg0, arg0 = arg1, arg1 = tem;

      if (GET_CODE (arg1) == PLUS)
        return simplify_giv_expr (gen_rtx_PLUS (mode,
                                                gen_rtx_PLUS (mode, arg0,

      /* Now must have MULT + MULT.  Distribute if same biv, else not giv.  */
      if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)

      if (XEXP (arg0, 0) != XEXP (arg1, 0))

      return simplify_giv_expr (gen_rtx_MULT (mode,

      /* Handle "a - b" as "a + b * (-1)".  */
      return simplify_giv_expr (gen_rtx_PLUS (mode,
                                              gen_rtx_MULT (mode, XEXP (x, 1),

      arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
      arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
      if (arg0 == 0 || arg1 == 0)

      /* Put constant last, CONST_INT last if both constant.  */
      if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
          && GET_CODE (arg1) != CONST_INT)
        tem = arg0, arg0 = arg1, arg1 = tem;

      /* If second argument is not now constant, not giv.  */
      if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)

      /* Handle multiply by 0 or 1.  */
      if (arg1 == const0_rtx)
      else if (arg1 == const1_rtx)

      switch (GET_CODE (arg0))
          /* biv * invar.  Done.  */
          return gen_rtx_MULT (mode, arg0, arg1);

          /* Product of two constants.  */
          return GEN_INT (INTVAL (arg0) * INTVAL (arg1));

          /* invar * invar.  Not giv.  */

          /* (a * invar_1) * invar_2.  Associate.  */
          return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (arg0, 0),

          /* (a + invar_1) * invar_2.  Distribute.  */
          return simplify_giv_expr (gen_rtx_PLUS (mode,

      /* Shift by constant is multiply by power of two.  */
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)

      return simplify_giv_expr (gen_rtx_MULT (mode,
                                              GEN_INT ((HOST_WIDE_INT) 1
                                                       << INTVAL (XEXP (x, 1)))),

      /* "-a" is "a * (-1)" */
      return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),

      /* "~a" is "-a - 1".  Silly, but easy.  */
      return simplify_giv_expr (gen_rtx_MINUS (mode,
                                               gen_rtx_NEG (mode, XEXP (x, 0)),

      /* Already in proper form for invariant.  */

      /* If this is a new register, we can't deal with it.  */
      if (REGNO (x) >= max_reg_before_loop)

      /* Check for biv or giv.  */
      switch (reg_iv_type[REGNO (x)])

        case GENERAL_INDUCT:
            struct induction *v = reg_iv_info[REGNO (x)];

            /* Form expression from giv and add benefit.  Ensure this giv
               can derive another and subtract any needed adjustment if so.  */
            *benefit += v->benefit;

            tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, v->src_reg,

            if (v->derive_adjustment)
              tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
            return simplify_giv_expr (tem, benefit);

      /* Fall through to general case.  */

      /* If invariant, return as USE (unless CONST_INT).
         Otherwise, not giv.  */
      if (GET_CODE (x) == USE)

      if (invariant_p (x) == 1)
          if (GET_CODE (x) == CONST_INT)
          return gen_rtx_USE (mode, x);
/* Help detect a giv that is calculated by several consecutive insns;
   ...

   The caller has already identified the first insn P as having a giv as dest;
   we check that all other insns that set the same register follow
   immediately after P, that they alter nothing else,
   and that the result of the last is still a giv.

   The value is 0 if the reg set in P is not really a giv.
   Otherwise, the value is the amount gained by eliminating
   all the consecutive insns that compute the value.

   FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
   SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.

   The coefficients of the ultimate giv value are stored in
   *MULT_VAL and *ADD_VAL.  */

consec_sets_giv (first_benefit, p, src_reg, dest_reg,

  /* Indicate that this is a giv so that we can update the value produced in
     each insn of the multi-insn sequence.

     This induction structure will be used only by the call to
     general_induction_var below, so we can allocate it on our stack.
     If this is a giv, our caller will replace the induct var entry with
     a new induction structure.  */
  v = (struct induction *) alloca (sizeof (struct induction));
  v->src_reg = src_reg;
  v->mult_val = *mult_val;
  v->add_val = *add_val;
  v->benefit = first_benefit;
  v->derive_adjustment = 0;

  reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
  reg_iv_info[REGNO (dest_reg)] = v;

  count = n_times_set[REGNO (dest_reg)] - 1;

      code = GET_CODE (p);

      /* If libcall, skip to end of call sequence.  */
      if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))

          && (set = single_set (p))
          && GET_CODE (SET_DEST (set)) == REG
          && SET_DEST (set) == dest_reg
          && ((benefit = general_induction_var (SET_SRC (set), &src_reg,
              /* Giv created by equivalent expression.  */
              || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
                  && (benefit = general_induction_var (XEXP (temp, 0), &src_reg,
                                                       add_val, mult_val))))
          && src_reg == v->src_reg)
          if (find_reg_note (p, REG_RETVAL, NULL_RTX))
            benefit += libcall_benefit (p);

          v->mult_val = *mult_val;
          v->add_val = *add_val;
          v->benefit = benefit;
      else if (code != NOTE)
          /* Allow insns that set something other than this giv to a
             constant.  Such insns are needed on machines which cannot
             include long constants and should not disqualify a giv.  */
              && (set = single_set (p))
              && SET_DEST (set) != dest_reg
              && CONSTANT_P (SET_SRC (set)))

          reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
/* Return an rtx, if any, that expresses giv G2 as a function of the register
   represented by G1.  If no such expression can be found, or it is clear that
   it cannot possibly be a valid address, 0 is returned.

   To perform the computation, we note that

        G1 = a * v + b          and     G2 = c * v + d

   where `v' is the biv.

   So G2 = (c/a) * G1 + (d - b*c/a)  */
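/* Worked example (editor's addition, not from the original sources):
   with G1 = 4*v + 8 and G2 = 8*v + 20 we get c/a = 2 and
   d - b*c/a = 20 - 8*2 = 4, so G2 = 2*G1 + 4; express_from would then
   return (plus (mult (reg G1) (const_int 2)) (const_int 4)).  */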
express_from (g1, g2)
     struct induction *g1, *g2;

  /* The value that G1 will be multiplied by must be a constant integer.  Also,
     the only chance we have of getting a valid address is if b*c/a (see above
     for notation) is also an integer.  */
  if (GET_CODE (g1->mult_val) != CONST_INT
      || GET_CODE (g2->mult_val) != CONST_INT
      || GET_CODE (g1->add_val) != CONST_INT
      || g1->mult_val == const0_rtx
      || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)

  mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
  add = plus_constant (g2->add_val, - INTVAL (g1->add_val) * INTVAL (mult));

  /* Form simplified final result.  */
  if (mult == const0_rtx)
  else if (mult == const1_rtx)
    mult = g1->dest_reg;
    mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);

  if (add == const0_rtx)
    return gen_rtx_PLUS (g2->mode, mult, add);
/* Return 1 if giv G2 can be combined with G1.  This means that G2 can use
   (either directly or via an address expression) a register used to represent
   G1.  Set g2->new_reg to a representation of G1 (normally just
   g1->dest_reg).  */

combine_givs_p (g1, g2)
     struct induction *g1, *g2;

  /* If these givs are identical, they can be combined.  */
  if (rtx_equal_p (g1->mult_val, g2->mult_val)
      && rtx_equal_p (g1->add_val, g2->add_val))
      g2->new_reg = g1->dest_reg;

  /* If G2 can be expressed as a function of G1 and that function is valid
     as an address and no more expensive than using a register for G2,
     the expression of G2 in terms of G1 can be used.  */
  if (g2->giv_type == DEST_ADDR
      && (tem = express_from (g1, g2)) != 0
      && memory_address_p (g2->mem_mode, tem)
      && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location))
#ifdef GIV_SORT_CRITERION
/* Compare two givs and sort the most desirable one for combinations first.
   This is used only in one qsort call below.  */

     struct induction **x, **y;

  GIV_SORT_CRITERION (*x, *y);
#endif

/* Check all pairs of givs for iv_class BL and see if any can be combined with
   any other.  If so, point SAME to the giv combined with and set NEW_REG to
   be an expression (in terms of the other giv's DEST_REG) equivalent to the
   giv.  Also, update BENEFIT and related fields for cost/benefit analysis.  */

     struct iv_class *bl;

  struct induction *g1, *g2, **giv_array, *temp_iv;
  int i, j, giv_count, pass;

  /* Count givs, because bl->giv_count is incorrect here.  */
  for (g1 = bl->giv; g1; g1 = g1->next_iv)

  giv_array
    = (struct induction **) alloca (giv_count * sizeof (struct induction *));
  for (g1 = bl->giv; g1; g1 = g1->next_iv)
    giv_array[i++] = g1;

#ifdef GIV_SORT_CRITERION
  /* Sort the givs if GIV_SORT_CRITERION is defined.
     This is usually defined for processors which lack
     negative register offsets so more givs may be combined.  */

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "%d givs counted, sorting...\n", giv_count);

  qsort (giv_array, giv_count, sizeof (struct induction *), giv_sort);
#endif

  for (i = 0; i < giv_count; i++)
      for (pass = 0; pass <= 1; pass++)
        for (j = 0; j < giv_count; j++)
              /* First try to combine with replaceable givs, then all givs.  */
              && (g1->replaceable || pass == 1)
              /* If either has already been combined or is to be ignored, can't
                 combine.  */
              && ! g1->ignore && ! g2->ignore && ! g1->same && ! g2->same
              /* If something has been based on G2, G2 cannot itself be based
                 on something else.  */
              && ! g2->combined_with
              && combine_givs_p (g1, g2))
              /* g2->new_reg set by `combine_givs_p'  */
              g1->combined_with = 1;

              /* If one of these givs is a DEST_REG that was only used
                 once, by the other giv, this is actually a single use.
                 The DEST_REG has the correct cost, while the other giv
                 counts the REG use too often.  */
              if (g2->giv_type == DEST_REG
                  && n_times_used[REGNO (g2->dest_reg)] == 1
                  && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
                g1->benefit = g2->benefit;
              else if (g1->giv_type != DEST_REG
                       || n_times_used[REGNO (g1->dest_reg)] != 1
                       || ! reg_mentioned_p (g1->dest_reg,
                                             PATTERN (g2->insn)))
                  g1->benefit += g2->benefit;
                  g1->times_used += g2->times_used;

              /* ??? The new final_[bg]iv_value code does a much better job
                 of finding replaceable giv's, and hence this code may no
                 longer be necessary.  */
              if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
                g1->benefit -= copy_cost;
              g1->lifetime += g2->lifetime;

              if (loop_dump_stream)
                fprintf (loop_dump_stream, "giv at %d combined with giv at %d\n",
                         INSN_UID (g2->insn), INSN_UID (g1->insn));
/* EMIT code before INSERT_BEFORE to set REG = B * M + A.  */

emit_iv_add_mult (b, m, a, reg, insert_before)
     rtx b;             /* initial value of basic induction variable */
     rtx m;             /* multiplicative constant */
     rtx a;             /* additive constant */
     rtx reg;           /* destination register */

  /* Prevent unexpected sharing of these rtx.  */

  /* Increase the lifetime of any invariants moved further in code.  */
  update_reg_last_use (a, insert_before);
  update_reg_last_use (b, insert_before);
  update_reg_last_use (m, insert_before);

  result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);

    emit_move_insn (reg, result);
  seq = gen_sequence ();

  emit_insn_before (seq, insert_before);

  record_base_value (REGNO (reg), b);
/* Test whether A * B can be computed without
   an actual multiply insn.  Value is 1 if so.  */
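/* Editor's note (illustrative, not from the original sources): when one
   operand is a constant, expand_mult may open-code the product as a short
   shift/add sequence -- e.g. x * 5 as (x << 2) + x -- in which case no
   multiply insn is needed and this function reports success; a long
   sequence, or one containing an actual MULT pattern, makes it fail.  */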
product_cheap_p (a, b)

  struct obstack *old_rtl_obstack = rtl_obstack;
  char *storage = (char *) obstack_alloc (&temp_obstack, 0);

  /* If only one is constant, make it B.  */
  if (GET_CODE (a) == CONST_INT)
    tmp = a, a = b, b = tmp;

  /* If first constant, both constant, so don't need multiply.  */
  if (GET_CODE (a) == CONST_INT)

  /* If second not constant, neither is constant, so would need multiply.  */
  if (GET_CODE (b) != CONST_INT)

  /* One operand is constant, so might not need multiply insn.  Generate the
     code for the multiply and see if a call or multiply, or long sequence
     of insns is generated.  */

  rtl_obstack = &temp_obstack;
  expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
  tmp = gen_sequence ();

  if (GET_CODE (tmp) == SEQUENCE)
      if (XVEC (tmp, 0) == 0)
      else if (XVECLEN (tmp, 0) > 3)
        for (i = 0; i < XVECLEN (tmp, 0); i++)
            rtx insn = XVECEXP (tmp, 0, i);

            if (GET_CODE (insn) != INSN
                || (GET_CODE (PATTERN (insn)) == SET
                    && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
                || (GET_CODE (PATTERN (insn)) == PARALLEL
                    && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
                    && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
  else if (GET_CODE (tmp) == SET
           && GET_CODE (SET_SRC (tmp)) == MULT)
  else if (GET_CODE (tmp) == PARALLEL
           && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
           && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)

  /* Free any storage we obtained in generating this multiply and restore rtl
     allocation to its normal obstack.  */
  obstack_free (&temp_obstack, storage);
  rtl_obstack = old_rtl_obstack;
/* Check to see if loop can be terminated by a "decrement and branch until
   zero" instruction.  If so, add a REG_NONNEG note to the branch insn.
   Also try reversing an increment loop to a decrement loop
   to see if the optimization can be performed.
   Value is nonzero if optimization was performed.  */

/* This is useful even if the architecture doesn't have such an insn,
   because it might change a loop which increments from 0 to n to a loop
   which decrements from n to 0.  A loop that decrements to zero is usually
   faster than one that increments from zero.  */
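/* Illustrative example (editor's addition, not from the original sources):
   a counting loop such as

        for (i = 0; i < n; i++)
          body_not_using_i ();

   can be rewritten to run as

        for (i = n; i > 0; i--)
          body_not_using_i ();

   when i is used only to count iterations, so that the exit test compares
   against zero and a decrement-and-branch instruction (or a branch carrying
   a REG_NONNEG note) can terminate the loop.  */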
6031 /* ??? This could be rewritten to use some of the loop unrolling procedures,
6032 such as approx_final_value, biv_total_increment, loop_iterations, and
6033 final_[bg]iv_value. */
6036 check_dbra_loop (loop_end
, insn_count
, loop_start
)
6041 struct iv_class
*bl
;
6048 rtx before_comparison
;
6051 /* If last insn is a conditional branch, and the insn before tests a
6052 register value, try to optimize it. Otherwise, we can't do anything. */
6054 comparison
= get_condition_for_loop (PREV_INSN (loop_end
));
6055 if (comparison
== 0)
6058 /* Check all of the bivs to see if the compare uses one of them.
6059 Skip biv's set more than once because we can't guarantee that
6060 it will be zero on the last iteration. Also skip if the biv is
6061 used between its update and the test insn. */
6063 for (bl
= loop_iv_list
; bl
; bl
= bl
->next
)
6065 if (bl
->biv_count
== 1
6066 && bl
->biv
->dest_reg
== XEXP (comparison
, 0)
6067 && ! reg_used_between_p (regno_reg_rtx
[bl
->regno
], bl
->biv
->insn
,
6068 PREV_INSN (PREV_INSN (loop_end
))))
6075 /* Look for the case where the basic induction variable is always
6076 nonnegative, and equals zero on the last iteration.
6077 In this case, add a reg_note REG_NONNEG, which allows the
6078 m68k DBRA instruction to be used. */
6080 if (((GET_CODE (comparison
) == GT
6081 && GET_CODE (XEXP (comparison
, 1)) == CONST_INT
6082 && INTVAL (XEXP (comparison
, 1)) == -1)
6083 || (GET_CODE (comparison
) == NE
&& XEXP (comparison
, 1) == const0_rtx
))
6084 && GET_CODE (bl
->biv
->add_val
) == CONST_INT
6085 && INTVAL (bl
->biv
->add_val
) < 0)
6087 /* Initial value must be greater than 0,
6088 init_val % -dec_value == 0 to ensure that it equals zero on
6089 the last iteration */
6091 if (GET_CODE (bl
->initial_value
) == CONST_INT
6092 && INTVAL (bl
->initial_value
) > 0
6093 && (INTVAL (bl
->initial_value
)
6094 % (-INTVAL (bl
->biv
->add_val
))) == 0)
6096 /* register always nonnegative, add REG_NOTE to branch */
6097 REG_NOTES (PREV_INSN (loop_end
))
6098 = gen_rtx_EXPR_LIST (REG_NONNEG
, NULL_RTX
,
6099 REG_NOTES (PREV_INSN (loop_end
)));
6105 /* If the decrement is 1 and the value was tested as >= 0 before
6106 the loop, then we can safely optimize. */
6107 for (p
= loop_start
; p
; p
= PREV_INSN (p
))
6109 if (GET_CODE (p
) == CODE_LABEL
)
6111 if (GET_CODE (p
) != JUMP_INSN
)
6114 before_comparison
= get_condition_for_loop (p
);
6115 if (before_comparison
6116 && XEXP (before_comparison
, 0) == bl
->biv
->dest_reg
6117 && GET_CODE (before_comparison
) == LT
6118 && XEXP (before_comparison
, 1) == const0_rtx
6119 && ! reg_set_between_p (bl
->biv
->dest_reg
, p
, loop_start
)
6120 && INTVAL (bl
->biv
->add_val
) == -1)
6122 REG_NOTES (PREV_INSN (loop_end
))
6123 = gen_rtx_EXPR_LIST (REG_NONNEG
, NULL_RTX
,
6124 REG_NOTES (PREV_INSN (loop_end
)));
  else if (num_mem_sets <= 1)
    {
      /* Try to change inc to dec, so can apply above optimization.  */
      /* Can do this if:
         all registers modified are induction variables or invariant,
         all memory references have non-overlapping addresses
         (obviously true if only one write)
         allow 2 insns for the compare/jump at the end of the loop.  */
      /* Also, we must avoid any instructions which use both the reversed
         biv and another biv.  Such instructions will fail if the loop is
         reversed.  We meet this condition by requiring that either
         no_use_except_counting is true, or else that there is only
         one biv.  */
      int num_nonfixed_reads = 0;
      /* 1 if the iteration var is used only to count iterations.  */
      int no_use_except_counting = 0;
      /* 1 if the loop has no memory store, or it has a single memory store
         which is reversible.  */
      int reversible_mem_store = 1;

      for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
        if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
          num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));

      if (bl->giv_count == 0
          && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
        {
          rtx bivreg = regno_reg_rtx[bl->regno];

          /* If there are no givs for this biv, and the only exit is the
             fall through at the end of the loop, then
             see if perhaps there are no uses except to count.  */
          no_use_except_counting = 1;
          for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
            if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
              {
                rtx set = single_set (p);

                if (set && GET_CODE (SET_DEST (set)) == REG
                    && REGNO (SET_DEST (set)) == bl->regno)
                  /* An insn that sets the biv is okay.  */
                  ;
                else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
                         || p == prev_nonnote_insn (loop_end))
                  /* Don't bother about the end test.  */
                  ;
                else if (reg_mentioned_p (bivreg, PATTERN (p)))
                  {
                    /* Any other use of the biv is no good.  */
                    no_use_except_counting = 0;
                    break;
                  }
              }
        }

      /* If the loop has a single store, and the destination address is
         invariant, then we can't reverse the loop, because this address
         might then have the wrong value at loop exit.
         This would work if the source was invariant also, however, in that
         case, the insn should have been moved out of the loop.  */

      if (num_mem_sets == 1)
        reversible_mem_store
          = (! unknown_address_altered
             && ! invariant_p (XEXP (loop_store_mems[0], 0)));

      /* This code only acts for innermost loops.  Also it simplifies
         the memory address check by only reversing loops with
         zero or one memory access.
         Two memory accesses could involve parts of the same array,
         and that can't be reversed.  */

      if (num_nonfixed_reads <= 1
          && !loop_has_volatile
          && reversible_mem_store
          && (no_use_except_counting
              || ((bl->giv_count + bl->biv_count + num_mem_sets
                   + num_movables + 2 == insn_count)
                  && (bl == loop_iv_list && bl->next == 0))))
        {
          rtx tem;

          /* Loop can be reversed.  */
          if (loop_dump_stream)
            fprintf (loop_dump_stream, "Can reverse loop\n");
          /* Now check other conditions:

             The increment must be a constant, as must the initial value,
             and the comparison code must be LT.

             This test can probably be improved since +/- 1 in the constant
             can be obtained by changing LT to LE and vice versa; this is
             confusing.  */

          if (comparison
              && GET_CODE (XEXP (comparison, 1)) == CONST_INT
              /* LE gets turned into LT */
              && GET_CODE (comparison) == LT
              && GET_CODE (bl->initial_value) == CONST_INT)
            {
              HOST_WIDE_INT add_val, comparison_val;
              rtx initial_value;

              add_val = INTVAL (bl->biv->add_val);
              comparison_val = INTVAL (XEXP (comparison, 1));
              initial_value = bl->initial_value;

              /* Normalize the initial value if it is an integer and
                 has no other use except as a counter.  This will allow
                 a few more loops to be reversed.  */
              if (no_use_except_counting
                  && GET_CODE (initial_value) == CONST_INT)
                {
                  comparison_val = comparison_val - INTVAL (bl->initial_value);
                  initial_value = const0_rtx;
                }

              /* If the initial value is not zero, or if the comparison
                 value is not an exact multiple of the increment, then we
                 can not reverse this loop.  */
              if (initial_value != const0_rtx
                  || (comparison_val % add_val) != 0)
                return;
              /* Reset these in case we normalized the initial value
                 and comparison value above.  */
              bl->initial_value = initial_value;
              XEXP (comparison, 1) = GEN_INT (comparison_val);

              /* Register will always be nonnegative, with value
                 0 on last iteration if loop reversed */

              /* Save some info needed to produce the new insns.  */
              reg = bl->biv->dest_reg;
              jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
              if (jump_label == pc_rtx)
                jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
              new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));

              final_value = XEXP (comparison, 1);
              start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
                                     - INTVAL (bl->biv->add_val));

              /* Initialize biv to start_value before loop start.
                 The old initializing insn will be deleted as a
                 dead store by flow.c.  */
              emit_insn_before (gen_move_insn (reg, start_value), loop_start);

              /* Add insn to decrement register, and delete insn
                 that incremented the register.  */
              p = emit_insn_before (gen_add2_insn (reg, new_add_val),
                                    bl->biv->insn);
              delete_insn (bl->biv->insn);

              /* Update biv info to reflect its new status.  */
              bl->biv->insn = p;
              bl->initial_value = start_value;
              bl->biv->add_val = new_add_val;

              /* Inc LABEL_NUSES so that delete_insn will
                 not delete the label.  */
              LABEL_NUSES (XEXP (jump_label, 0)) ++;
              /* Emit an insn after the end of the loop to set the biv's
                 proper exit value if it is used anywhere outside the loop.  */
              if ((REGNO_LAST_UID (bl->regno)
                   != INSN_UID (PREV_INSN (PREV_INSN (loop_end))))
                  || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
                emit_insn_after (gen_move_insn (reg, final_value),
                                 loop_end);

              /* Delete compare/branch at end of loop.  */
              delete_insn (PREV_INSN (loop_end));
              delete_insn (PREV_INSN (loop_end));

              /* Add new compare/branch insn at end of loop.  */
              start_sequence ();
              emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
                             GET_MODE (reg), 0, 0);
              emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
              tem = gen_sequence ();
              end_sequence ();
              emit_jump_insn_before (tem, loop_end);

              for (tem = PREV_INSN (loop_end);
                   tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
                ;

              JUMP_LABEL (tem) = XEXP (jump_label, 0);

              /* Increment of LABEL_NUSES done above.  */
              /* Register is now always nonnegative,
                 so add REG_NONNEG note to the branch.  */
              REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
                                                   REG_NOTES (tem));

              /* Mark that this biv has been reversed.  Each giv which depends
                 on this biv, and which is also live past the end of the loop
                 will have to be fixed up.  */

              bl->reversed = 1;

              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         "Reversed loop and added reg_nonneg\n");

              return;
            }
        }
    }
}
/* Verify whether the biv BL appears to be eliminable,
   based on the insns in the loop that refer to it.
   LOOP_START is the first insn of the loop, and END is the end insn.

   If ELIMINATE_P is non-zero, actually do the elimination.

   THRESHOLD and INSN_COUNT are from loop_optimize and are used to
   determine whether invariant insns should be placed inside or at the
   start of the loop.  */
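/* Illustrative sketch, not part of the original sources: a biv I is a
   candidate for elimination when every remaining use of it inside the loop
   can be rewritten in terms of a strength-reduced giv.  For instance, if the
   only surviving use is the exit test `i < n' and a giv runs as
   p = 4*i + base, the test can be replaced by a compare of P against a value
   computed once from N, after which the increment of I is dead.  */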
static int
maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
     struct iv_class *bl;
     rtx loop_start;
     rtx end;
     int eliminate_p;
     int threshold, insn_count;
{
  rtx reg = bl->biv->dest_reg;
  rtx p;
  /* Scan all insns in the loop, stopping if we find one that uses the
     biv in a way that we cannot eliminate.  */

  for (p = loop_start; p != end; p = NEXT_INSN (p))
    {
      enum rtx_code code = GET_CODE (p);
      rtx where = threshold >= insn_count ? loop_start : p;

      if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
          && reg_mentioned_p (reg, PATTERN (p))
          && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
        {
          if (loop_dump_stream)
            fprintf (loop_dump_stream,
                     "Cannot eliminate biv %d: biv used in insn %d.\n",
                     bl->regno, INSN_UID (p));
          break;
        }
    }

  if (p == end)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
                 bl->regno, eliminate_p ? "was" : "can be");
      return 1;
    }

  return 0;
}
/* If BL appears in X (part of the pattern of INSN), see if we can
   eliminate its use.  If so, return 1.  If not, return 0.

   If BIV does not appear in X, return 1.

   If ELIMINATE_P is non-zero, actually do the elimination.  WHERE indicates
   where extra insns should be added.  Depending on how many items have been
   moved out of the loop, it will either be before INSN or at the start of
   the loop.  */

static int
maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
     rtx x, insn;
     struct iv_class *bl;
     int eliminate_p;
     rtx where;
{
  enum rtx_code code = GET_CODE (x);
  rtx reg = bl->biv->dest_reg;
  enum machine_mode mode = GET_MODE (reg);
  struct induction *v;
  rtx arg, new, tem;
  int arg_operand;
  char *fmt;
  int i, j;
  switch (code)
    {
    case REG:
      /* If we haven't already been able to do something with this BIV,
         we can't eliminate it.  */
      return 0;

    case SET:
      /* If this sets the BIV, it is not a problem.  */
      if (SET_DEST (x) == reg)
        return 1;

      /* If this is an insn that defines a giv, it is also ok because
         it will go away when the giv is reduced.  */
      for (v = bl->giv; v; v = v->next_iv)
        if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
          return 1;

      if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
        {
          /* Can replace with any giv that was reduced and
             that has (MULT_VAL != 0) and (ADD_VAL == 0).
             Require a constant for MULT_VAL, so we know it's nonzero.
             ??? We disable this optimization to avoid potential
             overflow problems.  */

          for (v = bl->giv; v; v = v->next_iv)
            if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
                && v->add_val == const0_rtx
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode
                && 0)
              {
                /* If the giv V had the auto-inc address optimization applied
                   to it, and INSN occurs between the giv insn and the biv
                   insn, then we must adjust the value used here.
                   This is rare, so we don't bother to do so.  */
                if (v->auto_inc_opt
                    && ((INSN_LUID (v->insn) < INSN_LUID (insn)
                         && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
                        || (INSN_LUID (v->insn) > INSN_LUID (insn)
                            && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
                  continue;

                if (! eliminate_p)
                  return 1;

                /* If the giv has the opposite direction of change,
                   then reverse the comparison.  */
                if (INTVAL (v->mult_val) < 0)
                  new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
                                         const0_rtx, v->new_reg);
                else
                  new = v->new_reg;

                /* We can probably test that giv's reduced reg.  */
                if (validate_change (insn, &SET_SRC (x), new, 0))
                  return 1;
              }
          /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
             replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
             Require a constant for MULT_VAL, so we know it's nonzero.
             ??? Do this only if ADD_VAL is a pointer to avoid a potential
             overflow problem.  */
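          /* Illustrative sketch, not part of the original sources: if a
             reduced giv runs as G = 3*I + BASE, with BASE a loop-invariant
             pointer, then a test of the biv I against zero can be rewritten
             as a compare of G with BASE, since G equals BASE exactly when I
             is zero; the positive multiplier keeps the comparison's sense,
             and a negative multiplier just reverses the operand order.  */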
          for (v = bl->giv; v; v = v->next_iv)
            if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode
                && (GET_CODE (v->add_val) == SYMBOL_REF
                    || GET_CODE (v->add_val) == LABEL_REF
                    || GET_CODE (v->add_val) == CONST
                    || (GET_CODE (v->add_val) == REG
                        && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
              {
                /* If the giv V had the auto-inc address optimization applied
                   to it, and INSN occurs between the giv insn and the biv
                   insn, then we must adjust the value used here.
                   This is rare, so we don't bother to do so.  */
                if (v->auto_inc_opt
                    && ((INSN_LUID (v->insn) < INSN_LUID (insn)
                         && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
                        || (INSN_LUID (v->insn) > INSN_LUID (insn)
                            && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
                  continue;

                if (! eliminate_p)
                  return 1;

                /* If the giv has the opposite direction of change,
                   then reverse the comparison.  */
                if (INTVAL (v->mult_val) < 0)
                  new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
                                         v->new_reg);
                else
                  new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
                                         copy_rtx (v->add_val));

                /* Replace biv with the giv's reduced register.  */
                update_reg_last_use (v->add_val, insn);
                if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
                  return 1;

                /* Insn doesn't support that constant or invariant.  Copy it
                   into a register (it will be a loop invariant.)  */
                tem = gen_reg_rtx (GET_MODE (v->new_reg));

                emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
                                  where);

                /* Substitute the new register for its invariant value in
                   the compare expression.  */
                XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
                if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
                  return 1;
              }
        }
      break;
    case GT:  case GE:  case GTU:  case GEU:
    case LT:  case LE:  case LTU:  case LEU:
      /* See if either argument is the biv.  */
      if (XEXP (x, 0) == reg)
        arg = XEXP (x, 1), arg_operand = 1;
      else if (XEXP (x, 1) == reg)
        arg = XEXP (x, 0), arg_operand = 0;
      else
        break;

      if (CONSTANT_P (arg))
        {
          /* First try to replace with any giv that has constant positive
             mult_val and constant add_val.  We might be able to support
             negative mult_val, but it seems complex to do it in general.  */
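          /* Illustrative sketch, not part of the original sources: with a
             reduced giv G = 4*I + BASE, BASE being a loop-invariant pointer,
             a loop test `I < 10' can be rewritten by substituting G for the
             biv operand and loading 4*10 + BASE into a new pseudo (emitted
             at WHERE, i.e. before the insn or at the loop start); the test
             then compares G against that pseudo and I is no longer needed.  */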
          for (v = bl->giv; v; v = v->next_iv)
            if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
                && (GET_CODE (v->add_val) == SYMBOL_REF
                    || GET_CODE (v->add_val) == LABEL_REF
                    || GET_CODE (v->add_val) == CONST
                    || (GET_CODE (v->add_val) == REG
                        && REGNO_POINTER_FLAG (REGNO (v->add_val))))
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode)
              {
                /* If the giv V had the auto-inc address optimization applied
                   to it, and INSN occurs between the giv insn and the biv
                   insn, then we must adjust the value used here.
                   This is rare, so we don't bother to do so.  */
                if (v->auto_inc_opt
                    && ((INSN_LUID (v->insn) < INSN_LUID (insn)
                         && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
                        || (INSN_LUID (v->insn) > INSN_LUID (insn)
                            && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
                  continue;

                if (! eliminate_p)
                  return 1;

                /* Replace biv with the giv's reduced reg.  */
                XEXP (x, 1-arg_operand) = v->new_reg;

                /* If all constants are actually constant integers and
                   the derived constant can be directly placed in the COMPARE,
                   do so.  */
                if (GET_CODE (arg) == CONST_INT
                    && GET_CODE (v->mult_val) == CONST_INT
                    && GET_CODE (v->add_val) == CONST_INT
                    && validate_change (insn, &XEXP (x, arg_operand),
                                        GEN_INT (INTVAL (arg)
                                                 * INTVAL (v->mult_val)
                                                 + INTVAL (v->add_val)), 0))
                  return 1;

                /* Otherwise, load it into a register.  */
                tem = gen_reg_rtx (mode);
                emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
                if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
                  return 1;

                /* If that failed, put back the change we made above.  */
                XEXP (x, 1-arg_operand) = reg;
              }
          /* Look for giv with positive constant mult_val and nonconst add_val.
             Insert insns to calculate new compare value.
             ??? Turn this off due to possible overflow.  */

          for (v = bl->giv; v; v = v->next_iv)
            if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode
                && 0)
              {
                /* If the giv V had the auto-inc address optimization applied
                   to it, and INSN occurs between the giv insn and the biv
                   insn, then we must adjust the value used here.
                   This is rare, so we don't bother to do so.  */
                if (v->auto_inc_opt
                    && ((INSN_LUID (v->insn) < INSN_LUID (insn)
                         && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
                        || (INSN_LUID (v->insn) > INSN_LUID (insn)
                            && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
                  continue;

                if (! eliminate_p)
                  return 1;

                tem = gen_reg_rtx (mode);

                /* Replace biv with giv's reduced register.  */
                validate_change (insn, &XEXP (x, 1 - arg_operand),
                                 v->new_reg, 1);

                /* Compute value to compare against.  */
                emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
                /* Use it in this insn.  */
                validate_change (insn, &XEXP (x, arg_operand), tem, 1);
                if (apply_change_group ())
                  return 1;
              }
        }
      else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
        {
          if (invariant_p (arg) == 1)
            {
              /* Look for giv with constant positive mult_val and nonconst
                 add_val.  Insert insns to compute new compare value.
                 ??? Turn this off due to possible overflow.  */

              for (v = bl->giv; v; v = v->next_iv)
                if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
                    && ! v->ignore && ! v->maybe_dead && v->always_computable
                    && v->mode == mode
                    && 0)
                  {
                    /* If the giv V had the auto-inc address optimization
                       applied to it, and INSN occurs between the giv insn
                       and the biv insn, then we must adjust the value used
                       here.  This is rare, so we don't bother to do so.  */
                    if (v->auto_inc_opt
                        && ((INSN_LUID (v->insn) < INSN_LUID (insn)
                             && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
                            || (INSN_LUID (v->insn) > INSN_LUID (insn)
                                && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
                      continue;

                    if (! eliminate_p)
                      return 1;

                    tem = gen_reg_rtx (mode);

                    /* Replace biv with giv's reduced register.  */
                    validate_change (insn, &XEXP (x, 1 - arg_operand),
                                     v->new_reg, 1);

                    /* Compute value to compare against.  */
                    emit_iv_add_mult (arg, v->mult_val, v->add_val,
                                      tem, where);
                    validate_change (insn, &XEXP (x, arg_operand), tem, 1);
                    if (apply_change_group ())
                      return 1;
                  }
            }
          /* This code has problems.  Basically, you can't know when
             seeing if we will eliminate BL, whether a particular giv
             of ARG will be reduced.  If it isn't going to be reduced,
             we can't eliminate BL.  We can try forcing it to be reduced,
             but that can generate poor code.

             The problem is that the benefit of reducing TV, below should
             be increased if BL can actually be eliminated, but this means
             we might have to do a topological sort of the order in which
             we try to process biv.  It doesn't seem worthwhile to do
             this sort of thing now.  */

          /* Otherwise the reg compared with had better be a biv.  */
          if (GET_CODE (arg) != REG
              || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
            return 0;

          /* Look for a pair of givs, one for each biv,
             with identical coefficients.  */
          for (v = bl->giv; v; v = v->next_iv)
            {
              struct induction *tv;

              if (v->ignore || v->maybe_dead || v->mode != mode)
                continue;

              for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
                if (! tv->ignore && ! tv->maybe_dead
                    && rtx_equal_p (tv->mult_val, v->mult_val)
                    && rtx_equal_p (tv->add_val, v->add_val)
                    && tv->mode == mode)
                  {
                    /* If the giv V had the auto-inc address optimization
                       applied to it, and INSN occurs between the giv insn
                       and the biv insn, then we must adjust the value used
                       here.  This is rare, so we don't bother to do so.  */
                    if (v->auto_inc_opt
                        && ((INSN_LUID (v->insn) < INSN_LUID (insn)
                             && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
                            || (INSN_LUID (v->insn) > INSN_LUID (insn)
                                && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
                      continue;

                    if (! eliminate_p)
                      return 1;

                    /* Replace biv with its giv's reduced reg.  */
                    XEXP (x, 1-arg_operand) = v->new_reg;
                    /* Replace other operand with the other giv's
                       reduced reg.  */
                    XEXP (x, arg_operand) = tv->new_reg;
                    return 1;
                  }
            }
        }
      /* If we get here, the biv can't be eliminated.  */
      return 0;

    case MEM:
      /* If this address is a DEST_ADDR giv, it doesn't matter if the
         biv is used in it, since it will be replaced.  */
      for (v = bl->giv; v; v = v->next_iv)
        if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
          return 1;
      break;

    default:
      break;
    }

  /* See if any subexpression fails elimination.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
                                       eliminate_p, where))
            return 0;
        }
      else if (fmt[i] == 'E')
        {
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
                                         eliminate_p, where))
              return 0;
        }
    }

  return 1;
}
/* Return nonzero if the last use of REG
   is in an insn following INSN in the same basic block.  */

static int
last_use_this_basic_block (reg, insn)
     rtx reg;
     rtx insn;
{
  rtx n;
  for (n = insn;
       n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
       n = NEXT_INSN (n))
    {
      if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
        return 1;
    }
  return 0;
}

/* Called via `note_stores' to record the initial value of a biv.  Here we
   just record the location of the set and process it later.  */

static void
record_initial (dest, set)
     rtx dest;
     rtx set;
{
  struct iv_class *bl;

  if (GET_CODE (dest) != REG
      || REGNO (dest) >= max_reg_before_loop
      || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
    return;

  bl = reg_biv_class[REGNO (dest)];

  /* If this is the first set found, record it.  */
  if (bl->init_insn == 0)
    {
      bl->init_insn = note_insn;
      bl->init_set = set;
    }
}

/* If any of the registers in X are "old" and currently have a last use earlier
   than INSN, update them to have a last use of INSN.  Their actual last use
   will be the previous insn but it will not have a valid uid_luid so we can't
   use it.  */

static void
update_reg_last_use (x, insn)
     rtx x;
     rtx insn;
{
  /* Check for the case where INSN does not have a valid luid.  In this case,
     there is no need to modify the regno_last_uid, as this can only happen
     when code is inserted after the loop_end to set a pseudo's final value,
     and hence this insn will never be the last use of x.  */
  if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
      && INSN_UID (insn) < max_uid_for_loop
      && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
    REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
  else
    {
      register int i, j;
      register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
        {
          if (fmt[i] == 'e')
            update_reg_last_use (XEXP (x, i), insn);
          else if (fmt[i] == 'E')
            for (j = XVECLEN (x, i) - 1; j >= 0; j--)
              update_reg_last_use (XVECEXP (x, i, j), insn);
        }
    }
}
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is non-zero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   The condition will be returned in a canonical form to simplify testing by
   callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.  */
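/* For example, under rule (4) a condition written as (LE x 4) comes back as
   (LT x 5), and (GEU x 7) as (GTU x 6), so callers only ever have to handle
   the strict forms of the ordered comparisons.  */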
rtx
get_condition (jump, earliest)
     rtx jump;
     rtx *earliest;
{
  enum rtx_code code;
  rtx prev = jump;
  rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  int did_reverse_condition = 0;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (GET_CODE (jump) != JUMP_INSN
      || ! condjump_p (jump) || simplejump_p (jump))
    return 0;

  code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
  op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
  op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);

  if (earliest)
    *earliest = jump;

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
    code = reverse_condition (code), did_reverse_condition ^= 1;
  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
    {
      /* Set non-zero when we find something of interest.  */
      rtx x = 0;

      /* If comparison with cc0, import actual comparison from compare
         insn.  */
      if (op0 == cc0_rtx)
        {
          if ((prev = prev_nonnote_insn (prev)) == 0
              || GET_CODE (prev) != INSN
              || (set = single_set (prev)) == 0
              || SET_DEST (set) != cc0_rtx)
            return 0;

          op0 = SET_SRC (set);
          op1 = CONST0_RTX (GET_MODE (op0));
          if (earliest)
            *earliest = prev;
        }

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
        {
          op1 = XEXP (op0, 1);
          op0 = XEXP (op0, 0);
          continue;
        }
      else if (GET_CODE (op0) != REG)
        break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
         stop if it isn't a single set or if it has a REG_INC note because
         we don't want to bother dealing with it.  */

      if ((prev = prev_nonnote_insn (prev)) == 0
          || GET_CODE (prev) != INSN
          || FIND_REG_INC_NOTE (prev, 0)
          || (set = single_set (prev)) == 0)
        break;

      /* If this is setting OP0, get what it sets it to if it looks
         like a comparison.  */
      if (rtx_equal_p (SET_DEST (set), op0))
        {
          enum machine_mode inner_mode = GET_MODE (SET_SRC (set));

          if ((GET_CODE (SET_SRC (set)) == COMPARE
               || (((code == NE
                     || (code == LT
                         && GET_MODE_CLASS (inner_mode) == MODE_INT
                         && (GET_MODE_BITSIZE (inner_mode)
                             <= HOST_BITS_PER_WIDE_INT)
                         && (STORE_FLAG_VALUE
                             & ((HOST_WIDE_INT) 1
                                << (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
                     || (code == LT
                         && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
                         && FLOAT_STORE_FLAG_VALUE < 0)
#endif
                     ))
                   && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')))
            x = SET_SRC (set);
          else if (((code == EQ
                     || (code == GE
                         && (GET_MODE_BITSIZE (inner_mode)
                             <= HOST_BITS_PER_WIDE_INT)
                         && GET_MODE_CLASS (inner_mode) == MODE_INT
                         && (STORE_FLAG_VALUE
                             & ((HOST_WIDE_INT) 1
                                << (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
                     || (code == GE
                         && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
                         && FLOAT_STORE_FLAG_VALUE < 0)
#endif
                     ))
                   && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')
            {
              /* We might have reversed a LT to get a GE here.  But this wasn't
                 actually the comparison of data, so we don't flag that we
                 have had to reverse the condition.  */
              did_reverse_condition ^= 1;
              reverse_code = 1;
              x = SET_SRC (set);
            }
          else
            break;
        }

      else if (reg_set_p (op0, prev))
        /* If this sets OP0, but not directly, we have to give up.  */
        break;

      if (x)
        {
          if (GET_RTX_CLASS (GET_CODE (x)) == '<')
            code = GET_CODE (x);
          if (reverse_code)
            {
              code = reverse_condition (code);
              did_reverse_condition ^= 1;
              reverse_code = 0;
            }

          op0 = XEXP (x, 0), op1 = XEXP (x, 1);
          if (earliest)
            *earliest = prev;
        }
    }
  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
        = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
        {
        case LE:
          if (const_val != max_val >> 1)
            code = LT, op1 = GEN_INT (const_val + 1);
          break;

        /* When cross-compiling, const_val might be sign-extended from
           BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
        case GE:
          if ((const_val & max_val)
              != (((HOST_WIDE_INT) 1
                   << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
            code = GT, op1 = GEN_INT (const_val - 1);
          break;

        case LEU:
          if (uconst_val < max_val)
            code = LTU, op1 = GEN_INT (uconst_val + 1);
          break;

        case GEU:
          if (uconst_val != 0)
            code = GTU, op1 = GEN_INT (uconst_val - 1);
          break;

        default:
          break;
        }
    }

  /* If this was floating-point and we reversed anything other than an
     EQ or NE, return zero.  */
  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
      && did_reverse_condition && code != NE && code != EQ
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    return 0;

  /* Never return CC0; return zero instead.  */
  if (op0 == cc0_rtx)
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
/* Similar to above routine, except that we also put an invariant last
   unless both operands are invariants.  */

static rtx
get_condition_for_loop (x)
     rtx x;
{
  rtx comparison = get_condition (x, NULL_PTR);

  if (comparison == 0
      || ! invariant_p (XEXP (comparison, 0))
      || invariant_p (XEXP (comparison, 1)))
    return comparison;

  return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
                         XEXP (comparison, 1), XEXP (comparison, 0));
}
/* Analyze a loop in order to instrument it with the use of count register.
   loop_start and loop_end are the first and last insns of the loop.
   This function works in cooperation with insert_bct ().
   loop_can_insert_bct[loop_num] is set according to whether the optimization
   is applicable to the loop.  When it is applicable, the following variables
   are also set:
    loop_start_value[loop_num]
    loop_comparison_value[loop_num]
    loop_increment[loop_num]
    loop_comparison_code[loop_num]  */

void analyze_loop_iterations (loop_start, loop_end)
     rtx loop_start, loop_end;
{
  rtx comparison, comparison_value;
  rtx iteration_var, initial_value, increment;
  enum rtx_code comparison_code;

  rtx last_loop_insn;
  rtx insn;
  int i;

  /* loop_variable mode */
  enum machine_mode original_mode;

  /* find the number of the loop */
  int loop_num = uid_loop_num[INSN_UID (loop_start)];

  /* we change our mind only when we are sure that loop will be instrumented */
  loop_can_insert_bct[loop_num] = 0;

  /* is the optimization suppressed.  */
  if ( !flag_branch_on_count_reg )
    return;
  /* make sure that count-reg is not in use */
  if (loop_used_count_register[loop_num]){
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
              "analyze_loop_iterations %d: BCT instrumentation failed: count register already in use\n",
              loop_num);
    return;
  }

  /* make sure that the function has no indirect jumps.  */
  if (indirect_jump_in_function){
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
              "analyze_loop_iterations %d: BCT instrumentation failed: indirect jump in function\n",
              loop_num);
    return;
  }

  /* make sure that the last loop insn is a conditional jump */
  last_loop_insn = PREV_INSN (loop_end);
  if (GET_CODE (last_loop_insn) != JUMP_INSN || !condjump_p (last_loop_insn)) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
              "analyze_loop_iterations %d: BCT instrumentation failed: invalid jump at loop end\n",
              loop_num);
    return;
  }

  /* First find the iteration variable.  If the last insn is a conditional
     branch, and the insn preceding it tests a register value, make that
     register the iteration variable.  */

  /* We used to use prev_nonnote_insn here, but that fails because it might
     accidentally get the branch for a contained loop if the branch for this
     loop was deleted.  We can only trust branches immediately before the
     loop_end.  */

  comparison = get_condition_for_loop (last_loop_insn);
  /* ??? Get_condition may switch position of induction variable and
     invariant register when it canonicalizes the comparison.  */

  if (comparison == 0) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
              "analyze_loop_iterations %d: BCT instrumentation failed: comparison not found\n",
              loop_num);
    return;
  }

  comparison_code = GET_CODE (comparison);
  iteration_var = XEXP (comparison, 0);
  comparison_value = XEXP (comparison, 1);

  original_mode = GET_MODE (iteration_var);
  if (GET_MODE_CLASS (original_mode) != MODE_INT
      || GET_MODE_SIZE (original_mode) != UNITS_PER_WORD) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
              "analyze_loop_iterations %d: BCT Instrumentation failed: loop variable not integer\n",
              loop_num);
    return;
  }

  /* get info about loop bounds and increment */
  iteration_info (iteration_var, &initial_value, &increment,
                  loop_start, loop_end);

  /* make sure that all required loop data were found */
  if (!(initial_value && increment && comparison_value
        && invariant_p (comparison_value) && invariant_p (increment)
        && ! indirect_jump_in_function))
    {
      if (loop_dump_stream) {
        fprintf (loop_dump_stream,
                 "analyze_loop_iterations %d: BCT instrumentation failed because of wrong loop: ", loop_num);
        if (!(initial_value && increment && comparison_value)) {
          fprintf (loop_dump_stream, "\tbounds not available: ");
          if ( ! initial_value )
            fprintf (loop_dump_stream, "initial ");
          if ( ! increment )
            fprintf (loop_dump_stream, "increment ");
          if ( ! comparison_value )
            fprintf (loop_dump_stream, "comparison ");
          fprintf (loop_dump_stream, "\n");
        }
        if (!invariant_p (comparison_value) || !invariant_p (increment))
          fprintf (loop_dump_stream, "\tloop bounds not invariant\n");
      }
      return;
    }

  /* make sure that the increment is constant */
  if (GET_CODE (increment) != CONST_INT) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
              "analyze_loop_iterations %d: instrumentation failed: not arithmetic loop\n",
              loop_num);
    return;
  }

  /* make sure that the loop contains neither function call, nor jump on table.
     (the count register might be altered by the called function, and might
     be used for a branch on table).  */
  for (insn = loop_start; insn && insn != loop_end; insn = NEXT_INSN (insn)) {
    if (GET_CODE (insn) == CALL_INSN){
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                "analyze_loop_iterations %d: BCT instrumentation failed: function call in the loop\n",
                loop_num);
      return;
    }

    if (GET_CODE (insn) == JUMP_INSN
        && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
            || GET_CODE (PATTERN (insn)) == ADDR_VEC)){
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                "analyze_loop_iterations %d: BCT instrumentation failed: computed branch in the loop\n",
                loop_num);
      return;
    }
  }

  /* At this point, we are sure that the loop can be instrumented with BCT.
     Some of the loops, however, will not be instrumented - the final decision
     is taken by insert_bct ()  */
  if (loop_dump_stream)
    fprintf (loop_dump_stream,
            "analyze_loop_iterations: loop (luid =%d) can be BCT instrumented.\n",
            loop_num);

  /* mark all enclosing loops that they cannot use count register */
  /* ???: In fact, since insert_bct may decide not to instrument this loop,
     marking here may prevent instrumenting an enclosing loop that could
     actually be instrumented.  But since this is rare, it is safer to mark
     here in case the order of calling (analyze/insert)_bct would be changed.  */
  for (i=loop_num; i != -1; i = loop_outer_loop[i])
    loop_used_count_register[i] = 1;

  /* Set data structures which will be used by the instrumentation phase */
  loop_start_value[loop_num] = initial_value;
  loop_comparison_value[loop_num] = comparison_value;
  loop_increment[loop_num] = increment;
  loop_comparison_code[loop_num] = comparison_code;
  loop_can_insert_bct[loop_num] = 1;
}
/* instrument loop for insertion of bct instruction.  We distinguish between
   loops with compile-time bounds and those with run-time bounds.  The loop
   behaviour is analyzed according to the following characteristics/variables:

   ; comparison-value: the value to which the iteration counter is compared.
   ; initial-value: iteration-counter initial value.
   ; increment: iteration-counter increment.
   ; Computed variables:
   ; increment-direction: the sign of the increment.
   ; compare-direction: '1' for GT, GTE, '-1' for LT, LTE, '0' for NE.
   ; range-direction: sign (comparison-value - initial-value)
   We give up on the following cases:
   ; loop variable overflow.
   ; run-time loop bounds with comparison code NE.  */

static void
insert_bct (loop_start, loop_end)
     rtx loop_start, loop_end;
{
  rtx initial_value, comparison_value, increment;
  enum rtx_code comparison_code;

  int increment_direction, compare_direction;

  /* if the loop condition is <= or >=, the number of iteration
     is 1 more than the range of the bounds of the loop */
  int add_iteration = 0;

  /* the only machine mode we work with - is the integer of the size that the
     machine has */
  enum machine_mode loop_var_mode = SImode;

  int loop_num = uid_loop_num[INSN_UID (loop_start)];
  /* get loop-variables.  No need to check that these are valid - already
     checked in analyze_loop_iterations ().  */
  comparison_code = loop_comparison_code[loop_num];
  initial_value = loop_start_value[loop_num];
  comparison_value = loop_comparison_value[loop_num];
  increment = loop_increment[loop_num];

  /* check analyze_loop_iterations decision for this loop.  */
  if (! loop_can_insert_bct[loop_num]){
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
              "insert_bct: [%d] - was decided not to instrument by analyze_loop_iterations ()\n",
              loop_num);
    return;
  }

  /* It's impossible to instrument a completely unrolled loop.  */
  if (loop_unroll_factor[loop_num] == -1)
    return;

  /* make sure that the last loop insn is a conditional jump.
     This check is repeated from analyze_loop_iterations (),
     because unrolling might have changed that.  */
  if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
      || !condjump_p (PREV_INSN (loop_end))) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
              "insert_bct: not instrumenting BCT because of invalid branch\n");
    return;
  }

  /* fix increment in case loop was unrolled.  */
  if (loop_unroll_factor[loop_num] > 1)
    increment = GEN_INT ( INTVAL (increment) * loop_unroll_factor[loop_num] );

  /* determine properties and directions of the loop */
  increment_direction = (INTVAL (increment) > 0) ? 1:-1;
  switch ( comparison_code ) {
  case LEU:
  case LE:
    add_iteration = 1;
    compare_direction = 1;
    break;

  case GEU:
  case GE:
    add_iteration = 1;
    compare_direction = -1;
    break;

  case EQ:
    /* in this case we cannot know the number of iterations */
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
              "insert_bct: %d: loop cannot be instrumented: == in condition\n",
              loop_num);
    return;

  case LTU:
  case LT:
    compare_direction = 1;
    break;

  case GTU:
  case GT:
    compare_direction = -1;
    break;

  case NE:
    compare_direction = 0;
    break;

  default:
    abort ();
  }

  /* make sure that the loop does not end by an overflow */
  if (compare_direction != increment_direction) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
              "insert_bct: %d: loop cannot be instrumented: terminated by overflow\n",
              loop_num);
    return;
  }
  /* try to instrument the loop.  */

  /* Handle the simpler case, where the bounds are known at compile time.  */
  if (GET_CODE (initial_value) == CONST_INT
      && GET_CODE (comparison_value) == CONST_INT)
    {
      int n_iterations;
      int increment_value_abs = INTVAL (increment) * increment_direction;

      /* check the relation between compare-val and initial-val */
      int difference = INTVAL (comparison_value) - INTVAL (initial_value);
      int range_direction = (difference > 0) ? 1 : -1;

      /* make sure the loop executes enough iterations to gain from BCT */
      if (difference > -3 && difference < 3) {
        if (loop_dump_stream)
          fprintf (loop_dump_stream,
                  "insert_bct: loop %d not BCT instrumented: too small iteration count.\n",
                  loop_num);
        return;
      }

      /* make sure that the loop executes at least once */
      if ((range_direction == 1 && compare_direction == -1)
          || (range_direction == -1 && compare_direction == 1))
        {
          if (loop_dump_stream)
            fprintf (loop_dump_stream,
                    "insert_bct: loop %d: does not iterate even once.  Not instrumenting.\n",
                    loop_num);
          return;
        }

      /* make sure that the loop does not end by an overflow (in compile time
         bounds we must have an additional check for overflow, because here
         we also support the compare code of 'NE').  */
      if (comparison_code == NE
          && increment_direction != range_direction) {
        if (loop_dump_stream)
          fprintf (loop_dump_stream,
                  "insert_bct (compile time bounds): %d: loop not instrumented: terminated by overflow\n",
                  loop_num);
        return;
      }

      /* Determine the number of iterations by:

         ;                   compare-val - initial-val + (increment -1) + additional-iteration
         ;  num_iterations = -----------------------------------------------------------------
         ;                                            increment
       */
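      /* Worked example (illustrative, not from the original sources): for a
         loop `for (i = 2; i <= 10; i += 4)' we have compare-val = 10,
         initial-val = 2, increment = 4 and, because the comparison is `<=',
         additional-iteration = 1, giving (10 - 2 + 3 + 1) / 4 = 3
         iterations (i = 2, 6, 10).  */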
      difference = (range_direction > 0) ? difference : -difference;

      fprintf (stderr, "difference is: %d\n", difference); /* @*/
      fprintf (stderr, "increment_value_abs is: %d\n", increment_value_abs); /* @*/
      fprintf (stderr, "add_iteration is: %d\n", add_iteration); /* @*/
      fprintf (stderr, "INTVAL (comparison_value) is: %d\n", INTVAL (comparison_value)); /* @*/
      fprintf (stderr, "INTVAL (initial_value) is: %d\n", INTVAL (initial_value)); /* @*/

      if (increment_value_abs == 0) {
        fprintf (stderr, "insert_bct: error: increment == 0 !!!\n");
        abort ();
      }

      n_iterations = (difference + increment_value_abs - 1 + add_iteration)
        / increment_value_abs;

      fprintf (stderr, "number of iterations is: %d\n", n_iterations); /* @*/

      instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));

      /* Done with this loop.  */
      return;
    }
  /* Handle the more complex case, that the bounds are NOT known at compile
     time.  In this case we generate a run-time calculation of the number of
     iterations.  */

  /* With runtime bounds, if the compare is of the form '!=' we give up */
  if (comparison_code == NE) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
              "insert_bct: fail for loop %d: runtime bounds with != comparison\n",
              loop_num);
    return;
  }

  /* We rely on the existence of run-time guard to ensure that the
     loop executes at least once.  */

  {
    rtx temp_reg, sequence;
    rtx iterations_num_reg;

    int increment_value_abs = INTVAL (increment) * increment_direction;

    /* make sure that the increment is a power of two, otherwise (an
       expensive) divide is needed.  */
    if (exact_log2 (increment_value_abs) == -1)
      {
        if (loop_dump_stream)
          fprintf (loop_dump_stream,
                  "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
        return;
      }

    /* compute the number of iterations */
    start_sequence ();

    /* CYGNUS LOCAL: HAIFA bug fix */

    /* Again, the number of iterations is calculated by:

       ;                   compare-val - initial-val + (increment -1) + additional-iteration
       ;  num_iterations = -----------------------------------------------------------------
       ;                                            increment
     */
    /* ??? Do we have to call copy_rtx here before passing rtx to
       expand_binop?  */
    if (compare_direction > 0) {
      /* <, <= :the loop variable is increasing */
      temp_reg = expand_binop (loop_var_mode, sub_optab, comparison_value,
                               initial_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
    }
    else {
      temp_reg = expand_binop (loop_var_mode, sub_optab, initial_value,
                               comparison_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
    }

    if (increment_value_abs - 1 + add_iteration != 0)
      temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
                               GEN_INT (increment_value_abs - 1 + add_iteration),
                               NULL_RTX, 0, OPTAB_LIB_WIDEN);

    if (increment_value_abs != 1)
      {
        /* ??? This will generate an expensive divide instruction for
           most targets.  The original authors apparently expected this
           to be a shift, since they test for power-of-2 divisors above,
           but just naively generating a divide instruction will not give
           a shift.  It happens to work for the PowerPC target because
           the rs6000.md file has a divide pattern that emits shifts.
           It will probably not work for any other target.  */
        iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
                                           temp_reg,
                                           GEN_INT (increment_value_abs),
                                           NULL_RTX, 0, OPTAB_LIB_WIDEN);
      }
    else
      iterations_num_reg = temp_reg;
    /* END CYGNUS LOCAL: HAIFA bug fix */

    sequence = gen_sequence ();
    end_sequence ();
    emit_insn_before (sequence, loop_start);
    instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
  }
}
/* instrument loop by inserting a bct in it.  This is done in the following way:
   1. A new register is created and assigned the hard register number of the
      count register.
   2. In the head of the loop the new variable is initialized by the value
      passed in the loop_num_iterations parameter.
   3. At the end of the loop, comparison of the register with 0 is generated.
      The created comparison follows the pattern defined for the
      decrement_and_branch_on_count insn, so this insn will be generated in
      assembly generation phase.
   4. The compare&branch on the old variable is deleted.  So, if the
      loop-variable was not used elsewhere, it will be eliminated by
      data-flow analysis.  */
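/* Conceptually (an illustrative sketch, not from the original sources), a
   loop that used to end with

       i = i + 1;  if (i < n) goto top;

   is rewritten so that a pseudo holding the iteration count is copied into
   the count register before the loop, and the loop ends with a single
   decrement_and_branch_on_count insn, which the back end can emit as its
   hardware count-and-branch instruction.  */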
static void
instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
     rtx loop_start, loop_end;
     rtx loop_num_iterations;
{
  rtx temp_reg1, temp_reg2;
  rtx start_label;
  rtx sequence;

  enum machine_mode loop_var_mode = SImode;

#ifdef HAVE_decrement_and_branch_on_count
  if (HAVE_decrement_and_branch_on_count)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "Loop: Inserting BCT\n");

      /* eliminate the check on the old variable */
      delete_insn (PREV_INSN (loop_end));
      delete_insn (PREV_INSN (loop_end));

      /* insert the label which will delimit the start of the loop */
      start_label = gen_label_rtx ();
      emit_label_after (start_label, loop_start);

      /* insert initialization of the count register into the loop header */
      start_sequence ();
      temp_reg1 = gen_reg_rtx (loop_var_mode);
      emit_insn (gen_move_insn (temp_reg1, loop_num_iterations));

      /* this will be count register */
      temp_reg2 = gen_rtx_REG (loop_var_mode, COUNT_REGISTER_REGNUM);
      /* we have to move the value to the count register from a GPR
         because rtx pointed to by loop_num_iterations could contain
         expression which cannot be moved into count register */
      emit_insn (gen_move_insn (temp_reg2, temp_reg1));

      sequence = gen_sequence ();
      end_sequence ();
      emit_insn_after (sequence, loop_start);

      /* insert new comparison on the count register instead of the
         old one, generating the needed BCT pattern (that will be
         later recognized by assembly generation phase).  */
      emit_jump_insn_before (gen_decrement_and_branch_on_count (temp_reg2,
                                                                start_label),
                             loop_end);
      LABEL_NUSES (start_label)++;
    }
#endif /* HAVE_decrement_and_branch_on_count */
}
/* Scan the function and determine whether it has indirect (computed) jumps.

   This is taken mostly from flow.c; similar code exists elsewhere
   in the compiler.  It may be useful to put this into rtlanal.c.  */

static int
indirect_jump_in_function_p (start)
     rtx start;
{
  rtx insn;
  int is_indirect_jump = 0;

  for (insn = start; insn; insn = NEXT_INSN (insn))
    if (computed_jump_p (insn))