/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 88, 89, 91-97, 1998 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the general
   induction variables, and induction variable elimination is applied to
   the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value and changes these to zero the entire
   register once before the loop and merely copy the low part within
   the loop.

   Most of the complexity is in heuristics to decide when it is worthwhile
   to do these things.  */
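/* For illustration only (a hypothetical C loop, not code from this pass):
   in a loop such as

	for (i = 0; i < n; i++)
	  sum += a[i];

   the address computation &a[0] + i*4 (assuming 4-byte elements) is a
   general induction variable.  Strength reduction keeps that address in
   its own register and adds 4 to it each iteration instead of recomputing
   it from `i'; if `i' then has no other use, induction variable
   elimination can rewrite the exit test in terms of the pointer and
   delete `i' entirely.  */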
#include "insn-config.h"
#include "insn-flags.h"
#include "hard-reg-set.h"
/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

/* 1 + largest uid of any insn.  */

/* 1 + luid of last insn.  */

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;
/* For each loop, gives the containing loop number, -1 if none.  */

/* The main output of analyze_loop_iterations is placed here.  */

int *loop_can_insert_bct;

/* For each loop, determines whether some of its inner loops have used
   the count register.  */

int *loop_used_count_register;
/* Loop parameters for arithmetic loops.  These loops have a loop variable
   which is initialized to loop_start_value, incremented in each iteration
   by "loop_increment".  At the end of the iteration the loop variable is
   compared to the loop_comparison_value (using loop_comparison_code).  */

rtx *loop_increment;
rtx *loop_comparison_value;
rtx *loop_start_value;
enum rtx_code *loop_comparison_code;
/* For each loop, keep track of its unrolling factor.
   -1: completely unrolled
   >0: holds the exact unroll factor.  */
int *loop_unroll_factor;

/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;

/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include
   return instructions on this list.  */

rtx *loop_number_exit_labels;
/* Indexed by loop number, counts the number of LABEL_REFs on
   loop_number_exit_labels for this loop and all loops nested inside it.  */

int *loop_number_exit_count;

/* Holds the number of loop iterations.  It is zero if the number could not
   be calculated.  Must be unsigned since the number of iterations can be as
   high as 2^wordsize-1.  For loops with a wider iterator, this number will
   be zero if the number of loop iterations is too large for an unsigned
   integer to hold.  */

unsigned HOST_WIDE_INT loop_n_iterations;

/* Nonzero if there is a subroutine call in the current loop.  */

static int loop_has_call;

/* Nonzero if there is a volatile memory reference in the current
   loop.  */

static int loop_has_volatile;

/* Added loop_continue which is the NOTE_INSN_LOOP_CONT of the
   current loop.  A continue statement will generate a branch to
   NEXT_INSN (loop_continue).  */

static rtx loop_continue;
/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static int *n_times_set;
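/* For example (illustrative, hypothetical register numbers): if r5 is set
   once in the loop from a constant and is then chosen as a motion
   candidate, n_times_set[5] becomes -2; if its equivalent value is not a
   constant it becomes -1.  After move_movables succeeds for it,
   n_times_set[5] is 0; if the move is rejected, the count is restored
   from n_times_used[5].  */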
/* Original value of n_times_set; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static int *n_times_used;

/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */

static char *may_not_optimize;

/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;
/* Array of MEMs that are stored in this loop.  If there are too many to fit
   here, we just turn on unknown_address_altered.  */

#define NUM_STORES 30
static rtx loop_store_mems[NUM_STORES];

/* Index of first available slot in above array.  */
static int loop_store_mems_idx;

/* Nonzero if we don't know what MEMs were changed in the current loop.
   This happens if the loop contains a call (in which case `loop_has_call'
   will also be set) or if we store into more than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Number of loops contained within the current one, including itself.  */
static int loops_enclosed;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;

/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

extern char *oballoc ();
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is a libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  int regno;			/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */
  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first : 1; /* Same as above, if this is necessary for
				       the first insn of a consecutive sets
				       group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;	/* Nonzero means it is a mode for a low part
				   that we should avoid changing when clearing
				   the rest of the reg.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};

FILE *loop_dump_stream;
/* Forward declarations.  */

static void find_and_verify_loops PROTO((rtx));
static void mark_loop_jump PROTO((rtx, int));
static void prescan_loop PROTO((rtx, rtx));
static int reg_in_basic_block_p PROTO((rtx, rtx));
static int consec_sets_invariant_p PROTO((rtx, int, rtx));
static rtx libcall_other_reg PROTO((rtx, rtx));
static int labels_in_range_p PROTO((rtx, int));
static void count_loop_regs_set PROTO((rtx, rtx, char *, rtx *, int *, int));
static void note_addr_stored PROTO((rtx, rtx));
static int loop_reg_used_before_p PROTO((rtx, rtx, rtx, rtx, rtx));
static void scan_loop PROTO((rtx, rtx, int, int));
static void replace_call_address PROTO(());
static rtx skip_consec_insns PROTO((rtx, int));
static int libcall_benefit PROTO((rtx));
static void ignore_some_movables PROTO((struct movable *));
static void force_movables PROTO((struct movable *));
static void combine_movables PROTO((struct movable *, int));
static int regs_match_p PROTO((rtx, rtx, struct movable *));
static int rtx_equal_for_loop_p PROTO((rtx, rtx, struct movable *));
static void add_label_notes PROTO((rtx, rtx));
static void move_movables PROTO((struct movable *, int, int, rtx, rtx, int));
static int count_nonfixed_reads PROTO((rtx));
static void strength_reduce PROTO((rtx, rtx, rtx, int, rtx, rtx, int));
static void find_single_use_in_loop PROTO((rtx, rtx, rtx *));
static int valid_initial_value_p PROTO((rtx, rtx, int, rtx));
static void find_mem_givs PROTO((rtx, rtx, int, rtx, rtx));
static void record_biv PROTO((struct induction *, rtx, rtx, rtx, rtx,
			      int, int));
static void check_final_value PROTO((struct induction *, rtx, rtx));
static void record_giv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx,
			      int, enum g_types, int, rtx *, rtx, rtx));
static void update_giv_derive PROTO((rtx));
static int basic_induction_var PROTO((rtx, enum machine_mode, rtx, rtx,
				      rtx *, rtx *));
static rtx simplify_giv_expr PROTO((rtx, int *));
static int general_induction_var PROTO((rtx, rtx *, rtx *, rtx *));
static int consec_sets_giv PROTO((int, rtx, rtx, rtx, rtx *, rtx *));
static int check_dbra_loop PROTO((rtx, int, rtx));
static rtx express_from PROTO((struct induction *, struct induction *));
static int combine_givs_p PROTO((struct induction *, struct induction *));
#ifdef GIV_SORT_CRITERION
static int giv_sort PROTO((struct induction **, struct induction **));
#endif
static void combine_givs PROTO((struct iv_class *));
static int product_cheap_p PROTO((rtx, rtx));
static int maybe_eliminate_biv PROTO((struct iv_class *, rtx, rtx, int,
				      int, int));
static int maybe_eliminate_biv_1 PROTO((rtx, rtx, struct iv_class *, int,
					rtx));
static int last_use_this_basic_block PROTO((rtx, rtx));
static void record_initial PROTO((rtx, rtx));
static void update_reg_last_use PROTO((rtx, rtx));
/* This is extern from unroll.c */
extern void iteration_info PROTO((rtx, rtx *, rtx *, rtx, rtx));

/* Two main functions for implementing bct:
   the first is to be called before loop unrolling, and the second after.  */
#ifdef HAVE_decrement_and_branch_on_count
static void analyze_loop_iterations PROTO((rtx, rtx));
static void insert_bct PROTO((rtx, rtx));

/* Auxiliary function that inserts the bct pattern into the loop.  */
static void instrument_loop_bct PROTO((rtx, rtx, rtx));
#endif /* HAVE_decrement_and_branch_on_count */

/* Indirect_jump_in_function is computed once per function.  */
int indirect_jump_in_function = 0;
static int indirect_jump_in_function_p PROTO((rtx));
/* Relative gain of eliminating various kinds of operations.  */

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */

  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  /* Free the objects we just allocated.  */

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

loop_optimize (f, dumpfile, unroll_p)
     /* f is the first instruction of a chain of insns for one function */

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();
  init_alias_analysis ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);

  /* Count the number of loops.  */

  for (insn = f; insn; insn = NEXT_INSN (insn))
    if (GET_CODE (insn) == NOTE
	&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
      max_loop_num++;

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
  bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));

  /* Allocate tables for recording each loop.  We set each entry, so they
     need not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));

  /* This is initialized by the unrolling code, so we go ahead
     and clear them just in case we are not performing loop
     unrolling.  */
  loop_unroll_factor = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_unroll_factor, max_loop_num * sizeof (int));

  /* Allocate for BCT optimization.  */
  loop_can_insert_bct = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_can_insert_bct, max_loop_num * sizeof (int));

  loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));

  loop_increment = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_comparison_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_start_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  bzero ((char *) loop_increment, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_comparison_value, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_start_value, max_loop_num * sizeof (rtx));
  loop_comparison_code
    = (enum rtx_code *) alloca (max_loop_num * sizeof (enum rtx_code));
  bzero ((char *) loop_comparison_code, max_loop_num * sizeof (enum rtx_code));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* See if we went too far.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();

  /* Compute the mapping from uids to luids.
     LUIDs are numbers assigned to insns, like uids,
     except that luids increase monotonically through the code.
     Don't assign luids to line-number NOTEs, so that the distance in luids
     between two insns is not affected by -g.  */

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Determine if the function has an indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are
     done before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
		 max_reg_num (), unroll_p);

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original one
     to one mapping will remain.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    unroll_block_trees ();
/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */
scan_loop (loop_start, end, nregs, unroll_p)

  /* 1 if we are scanning insns that could be executed zero times.  */
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  /* Number of insns in the loop.  */
  /* The SET from an insn, if it is the only SET in the insn.  */
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  /* If we have calls, contains the insn in which a register was used
     if it was used exactly once; contains const0_rtx if it was used more
     than once.  */
  rtx *reg_single_usage = 0;
  /* Nonzero if we are scanning instructions in a sub-loop.  */

  n_times_set = (int *) alloca (nregs * sizeof (int));
  n_times_used = (int *) alloca (nregs * sizeof (int));
  may_not_optimize = (char *) alloca (nregs);
  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
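  /* For illustration (a hedged sketch with hypothetical source, not code
     from this function): a loop such as

	 while (n > 0) { ...; n--; }

     is often emitted as a jump from just after NOTE_INSN_LOOP_BEG down to
     the compare-and-branch at the bottom, with a CODE_LABEL at the top of
     the body.  In that rotated form the first real insn after the loop
     note is a JUMP_INSN; the code below then records its target as
     scan_start and the label at the top as loop_top.  */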
  for (p = NEXT_INSN (loop_start);
       p != end
	 && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
	 && (GET_CODE (p) != NOTE
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;
  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end);
  threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      /* Loop entry must be unconditional jump (and not a RETURN)  */
      if (simplejump_p (p)
	  && JUMP_LABEL (p) != 0
	  /* Check to see whether the jump actually
	     jumps out of the loop (meaning it's no loop).
	     This case can happen for things like
	     do {..} while (0).  If this label was generated previously
	     by loop, we can't tell anything about it and have to reject
	     the loop.  */
	  && INSN_UID (JUMP_LABEL (p)) < max_uid_for_loop
	  && INSN_LUID (JUMP_LABEL (p)) >= INSN_LUID (loop_start)
	  && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (end))
	{
	  loop_top = next_label (scan_start);
	  scan_start = JUMP_LABEL (p);
	}
    }
  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (end));
      return;
    }
  /* Count number of times each reg is set during this loop.
     Set may_not_optimize[I] if it is not safe to move out
     the setting of register I.  If this loop has calls, set
     reg_single_usage[I].  */

  bzero ((char *) n_times_set, nregs * sizeof (int));
  bzero (may_not_optimize, nregs);

  reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
  bzero ((char *) reg_single_usage, nregs * sizeof (rtx));

  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
		       may_not_optimize, reg_single_usage, &insn_count, nregs);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    may_not_optimize[i] = 1, n_times_set[i] = 1;
  bcopy ((char *) n_times_set, (char *) n_times_used, nregs * sizeof (int));

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	       INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_continue)
	fprintf (loop_dump_stream, "Continue at insn %d.\n",
		 INSN_UID (loop_continue));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set n_times_set negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

      /* At end of a straight-in loop, we are done.
	 At end of a loop entered at the bottom, scan the top.  */
733 if (GET_RTX_CLASS (GET_CODE (p
)) == 'i'
734 && find_reg_note (p
, REG_LIBCALL
, NULL_RTX
))
736 else if (GET_RTX_CLASS (GET_CODE (p
)) == 'i'
737 && find_reg_note (p
, REG_RETVAL
, NULL_RTX
))
740 if (GET_CODE (p
) == INSN
741 && (set
= single_set (p
))
742 && GET_CODE (SET_DEST (set
)) == REG
743 && ! may_not_optimize
[REGNO (SET_DEST (set
))])
748 rtx src
= SET_SRC (set
);
749 rtx dependencies
= 0;
751 /* Figure out what to use as a source of this insn. If a REG_EQUIV
752 note is given or if a REG_EQUAL note with a constant operand is
753 specified, use it as the source and mark that we should move
754 this insn by calling emit_move_insn rather that duplicating the
757 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
759 temp
= find_reg_note (p
, REG_EQUIV
, NULL_RTX
);
761 src
= XEXP (temp
, 0), move_insn
= 1;
764 temp
= find_reg_note (p
, REG_EQUAL
, NULL_RTX
);
765 if (temp
&& CONSTANT_P (XEXP (temp
, 0)))
766 src
= XEXP (temp
, 0), move_insn
= 1;
767 if (temp
&& find_reg_note (p
, REG_RETVAL
, NULL_RTX
))
769 src
= XEXP (temp
, 0);
770 /* A libcall block can use regs that don't appear in
771 the equivalent expression. To move the libcall,
772 we must move those regs too. */
773 dependencies
= libcall_other_reg (p
, src
);
777 /* Don't try to optimize a register that was made
778 by loop-optimization for an inner loop.
779 We don't know its life-span, so we can't compute the benefit. */
780 if (REGNO (SET_DEST (set
)) >= max_reg_before_loop
)
782 /* In order to move a register, we need to have one of three cases:
783 (1) it is used only in the same basic block as the set
784 (2) it is not a user variable and it is not used in the
785 exit test (this can cause the variable to be used
786 before it is set just like a user-variable).
787 (3) the set is guaranteed to be executed once the loop starts,
788 and the reg is not used until after that. */
789 else if (! ((! maybe_never
790 && ! loop_reg_used_before_p (set
, p
, loop_start
,
792 || (! REG_USERVAR_P (SET_DEST (set
))
793 && ! REG_LOOP_TEST_P (SET_DEST (set
)))
794 || reg_in_basic_block_p (p
, SET_DEST (set
))))
796 else if ((tem
= invariant_p (src
))
797 && (dependencies
== 0
798 || (tem2
= invariant_p (dependencies
)) != 0)
799 && (n_times_set
[REGNO (SET_DEST (set
))] == 1
801 = consec_sets_invariant_p (SET_DEST (set
),
802 n_times_set
[REGNO (SET_DEST (set
))],
804 /* If the insn can cause a trap (such as divide by zero),
805 can't move it unless it's guaranteed to be executed
806 once loop is entered. Even a function call might
807 prevent the trap insn from being reached
808 (since it might exit!) */
809 && ! ((maybe_never
|| call_passed
)
810 && may_trap_p (src
)))
812 register struct movable
*m
;
813 register int regno
= REGNO (SET_DEST (set
));
815 /* A potential lossage is where we have a case where two insns
816 can be combined as long as they are both in the loop, but
817 we move one of them outside the loop. For large loops,
818 this can lose. The most common case of this is the address
819 of a function being called.
821 Therefore, if this register is marked as being used exactly
822 once if we are in a loop with calls (a "large loop"), see if
823 we can replace the usage of this register with the source
824 of this SET. If we can, delete this insn.
826 Don't do this if P has a REG_RETVAL note or if we have
827 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
829 if (reg_single_usage
&& reg_single_usage
[regno
] != 0
830 && reg_single_usage
[regno
] != const0_rtx
831 && REGNO_FIRST_UID (regno
) == INSN_UID (p
)
832 && (REGNO_LAST_UID (regno
)
833 == INSN_UID (reg_single_usage
[regno
]))
834 && n_times_set
[REGNO (SET_DEST (set
))] == 1
835 && ! side_effects_p (SET_SRC (set
))
836 && ! find_reg_note (p
, REG_RETVAL
, NULL_RTX
)
837 && (! SMALL_REGISTER_CLASSES
838 || (! (GET_CODE (SET_SRC (set
)) == REG
839 && REGNO (SET_SRC (set
)) < FIRST_PSEUDO_REGISTER
)))
840 /* This test is not redundant; SET_SRC (set) might be
841 a call-clobbered register and the life of REGNO
842 might span a call. */
843 && ! modified_between_p (SET_SRC (set
), p
,
844 reg_single_usage
[regno
])
845 && no_labels_between_p (p
, reg_single_usage
[regno
])
846 && validate_replace_rtx (SET_DEST (set
), SET_SRC (set
),
847 reg_single_usage
[regno
]))
849 /* Replace any usage in a REG_EQUAL note. Must copy the
850 new source, so that we don't get rtx sharing between the
851 SET_SOURCE and REG_NOTES of insn p. */
852 REG_NOTES (reg_single_usage
[regno
])
853 = replace_rtx (REG_NOTES (reg_single_usage
[regno
]),
854 SET_DEST (set
), copy_rtx (SET_SRC (set
)));
857 NOTE_LINE_NUMBER (p
) = NOTE_INSN_DELETED
;
858 NOTE_SOURCE_FILE (p
) = 0;
859 n_times_set
[regno
] = 0;
863 m
= (struct movable
*) alloca (sizeof (struct movable
));
867 m
->dependencies
= dependencies
;
868 m
->set_dest
= SET_DEST (set
);
870 m
->consec
= n_times_set
[REGNO (SET_DEST (set
))] - 1;
874 m
->move_insn
= move_insn
;
875 m
->move_insn_first
= 0;
876 m
->is_equiv
= (find_reg_note (p
, REG_EQUIV
, NULL_RTX
) != 0);
877 m
->savemode
= VOIDmode
;
879 /* Set M->cond if either invariant_p or consec_sets_invariant_p
880 returned 2 (only conditionally invariant). */
881 m
->cond
= ((tem
| tem1
| tem2
) > 1);
882 m
->global
= (uid_luid
[REGNO_LAST_UID (regno
)] > INSN_LUID (end
)
883 || uid_luid
[REGNO_FIRST_UID (regno
)] < INSN_LUID (loop_start
));
885 m
->lifetime
= (uid_luid
[REGNO_LAST_UID (regno
)]
886 - uid_luid
[REGNO_FIRST_UID (regno
)]);
887 m
->savings
= n_times_used
[regno
];
888 if (find_reg_note (p
, REG_RETVAL
, NULL_RTX
))
889 m
->savings
+= libcall_benefit (p
);
890 n_times_set
[regno
] = move_insn
? -2 : -1;
891 /* Add M to the end of the chain MOVABLES. */
895 last_movable
->next
= m
;
900 /* It is possible for the first instruction to have a
901 REG_EQUAL note but a non-invariant SET_SRC, so we must
902 remember the status of the first instruction in case
903 the last instruction doesn't have a REG_EQUAL note. */
904 m
->move_insn_first
= m
->move_insn
;
906 /* Skip this insn, not checking REG_LIBCALL notes. */
907 p
= next_nonnote_insn (p
);
908 /* Skip the consecutive insns, if there are any. */
909 p
= skip_consec_insns (p
, m
->consec
);
910 /* Back up to the last insn of the consecutive group. */
911 p
= prev_nonnote_insn (p
);
913 /* We must now reset m->move_insn, m->is_equiv, and possibly
914 m->set_src to correspond to the effects of all the
916 temp
= find_reg_note (p
, REG_EQUIV
, NULL_RTX
);
918 m
->set_src
= XEXP (temp
, 0), m
->move_insn
= 1;
921 temp
= find_reg_note (p
, REG_EQUAL
, NULL_RTX
);
922 if (temp
&& CONSTANT_P (XEXP (temp
, 0)))
923 m
->set_src
= XEXP (temp
, 0), m
->move_insn
= 1;
928 m
->is_equiv
= (find_reg_note (p
, REG_EQUIV
, NULL_RTX
) != 0);
931 /* If this register is always set within a STRICT_LOW_PART
932 or set to zero, then its high bytes are constant.
933 So clear them outside the loop and within the loop
934 just load the low bytes.
935 We must check that the machine has an instruction to do so.
936 Also, if the value loaded into the register
937 depends on the same register, this cannot be done. */
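	  /* For illustration (a hedged sketch, not literal rtl taken from
	     this file): the pattern looked for here is a pair of insns
	     such as

		 (set (reg:SI 65) (const_int 0))
		 (set (strict_low_part (subreg:HI (reg:SI 65) 0)) (mem:HI ...))

	     which together zero-extend a narrower value into reg 65.  The
	     clearing insn can then be hoisted out of the loop, leaving only
	     the low-part copy inside it.  */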
938 else if (SET_SRC (set
) == const0_rtx
939 && GET_CODE (NEXT_INSN (p
)) == INSN
940 && (set1
= single_set (NEXT_INSN (p
)))
941 && GET_CODE (set1
) == SET
942 && (GET_CODE (SET_DEST (set1
)) == STRICT_LOW_PART
)
943 && (GET_CODE (XEXP (SET_DEST (set1
), 0)) == SUBREG
)
944 && (SUBREG_REG (XEXP (SET_DEST (set1
), 0))
946 && !reg_mentioned_p (SET_DEST (set
), SET_SRC (set1
)))
948 register int regno
= REGNO (SET_DEST (set
));
949 if (n_times_set
[regno
] == 2)
951 register struct movable
*m
;
952 m
= (struct movable
*) alloca (sizeof (struct movable
));
955 m
->set_dest
= SET_DEST (set
);
962 m
->move_insn_first
= 0;
964 /* If the insn may not be executed on some cycles,
965 we can't clear the whole reg; clear just high part.
966 Not even if the reg is used only within this loop.
973 Clearing x before the inner loop could clobber a value
974 being saved from the last time around the outer loop.
975 However, if the reg is not used outside this loop
976 and all uses of the register are in the same
977 basic block as the store, there is no problem.
979 If this insn was made by loop, we don't know its
980 INSN_LUID and hence must make a conservative
982 m
->global
= (INSN_UID (p
) >= max_uid_for_loop
983 || (uid_luid
[REGNO_LAST_UID (regno
)]
985 || (uid_luid
[REGNO_FIRST_UID (regno
)]
987 || (labels_in_range_p
988 (p
, uid_luid
[REGNO_FIRST_UID (regno
)])));
989 if (maybe_never
&& m
->global
)
990 m
->savemode
= GET_MODE (SET_SRC (set1
));
992 m
->savemode
= VOIDmode
;
996 m
->lifetime
= (uid_luid
[REGNO_LAST_UID (regno
)]
997 - uid_luid
[REGNO_FIRST_UID (regno
)]);
999 n_times_set
[regno
] = -1;
1000 /* Add M to the end of the chain MOVABLES. */
1004 last_movable
->next
= m
;
1009 /* Past a call insn, we get to insns which might not be executed
1010 because the call might exit. This matters for insns that trap.
1011 Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
1012 so they don't count. */
1013 else if (GET_CODE (p
) == CALL_INSN
&& ! in_libcall
)
1015 /* Past a label or a jump, we get to insns for which we
1016 can't count on whether or how many times they will be
1017 executed during each iteration. Therefore, we can
1018 only move out sets of trivial variables
1019 (those not used after the loop). */
1020 /* Similar code appears twice in strength_reduce. */
1021 else if ((GET_CODE (p
) == CODE_LABEL
|| GET_CODE (p
) == JUMP_INSN
)
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier and then the loop end.  */
1027 && ! (GET_CODE (p
) == JUMP_INSN
&& JUMP_LABEL (p
) == loop_top
1028 && NEXT_INSN (NEXT_INSN (p
)) == end
1029 && simplejump_p (p
)))
1031 else if (GET_CODE (p
) == NOTE
)
1033 /* At the virtual top of a converted loop, insns are again known to
1034 be executed: logically, the loop begins here even though the exit
1035 code has been duplicated. */
1036 if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_VTOP
&& loop_depth
== 0)
1037 maybe_never
= call_passed
= 0;
1038 else if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_BEG
)
1040 else if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_END
)
1045 /* If one movable subsumes another, ignore that other. */
1047 ignore_some_movables (movables
);
1049 /* For each movable insn, see if the reg that it loads
1050 leads when it dies right into another conditionally movable insn.
1051 If so, record that the second insn "forces" the first one,
1052 since the second can be moved only if the first is. */
1054 force_movables (movables
);
1056 /* See if there are multiple movable insns that load the same value.
1057 If there are, make all but the first point at the first one
1058 through the `match' field, and add the priorities of them
1059 all together as the priority of the first. */
1061 combine_movables (movables
, nregs
);
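  /* For illustration (hypothetical register numbers): if the loop contains
     both "r10 = 100" and "r12 = 100", combine_movables points the second
     movable's `match' field at the first and adds its savings to the
     first's; move_movables can then hoist a single "r10 = 100" and replace
     r12 with r10 throughout the loop.  */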
1063 /* Now consider each movable insn to decide whether it is worth moving.
1064 Store 0 in n_times_set for each reg that is moved. */
1066 move_movables (movables
, threshold
,
1067 insn_count
, loop_start
, end
, nregs
);
1069 /* Now candidates that still are negative are those not moved.
1070 Change n_times_set to indicate that those are not actually invariant. */
1071 for (i
= 0; i
< nregs
; i
++)
1072 if (n_times_set
[i
] < 0)
1073 n_times_set
[i
] = n_times_used
[i
];
1075 if (flag_strength_reduce
)
1076 strength_reduce (scan_start
, end
, loop_top
,
1077 insn_count
, loop_start
, end
, unroll_p
);
1080 /* Add elements to *OUTPUT to record all the pseudo-regs
1081 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1084 record_excess_regs (in_this
, not_in_this
, output
)
1085 rtx in_this
, not_in_this
;
1092 code
= GET_CODE (in_this
);
1106 if (REGNO (in_this
) >= FIRST_PSEUDO_REGISTER
1107 && ! reg_mentioned_p (in_this
, not_in_this
))
1108 *output
= gen_rtx_EXPR_LIST (VOIDmode
, in_this
, *output
);
1115 fmt
= GET_RTX_FORMAT (code
);
1116 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1123 for (j
= 0; j
< XVECLEN (in_this
, i
); j
++)
1124 record_excess_regs (XVECEXP (in_this
, i
, j
), not_in_this
, output
);
1128 record_excess_regs (XEXP (in_this
, i
), not_in_this
, output
);
1134 /* Check what regs are referred to in the libcall block ending with INSN,
1135 aside from those mentioned in the equivalent value.
1136 If there are none, return 0.
1137 If there are one or more, return an EXPR_LIST containing all of them. */
1140 libcall_other_reg (insn
, equiv
)
1143 rtx note
= find_reg_note (insn
, REG_RETVAL
, NULL_RTX
);
1144 rtx p
= XEXP (note
, 0);
1147 /* First, find all the regs used in the libcall block
1148 that are not mentioned as inputs to the result. */
1152 if (GET_CODE (p
) == INSN
|| GET_CODE (p
) == JUMP_INSN
1153 || GET_CODE (p
) == CALL_INSN
)
1154 record_excess_regs (PATTERN (p
), equiv
, &output
);
1161 /* Return 1 if all uses of REG
1162 are between INSN and the end of the basic block. */
1165 reg_in_basic_block_p (insn
, reg
)
1168 int regno
= REGNO (reg
);
1171 if (REGNO_FIRST_UID (regno
) != INSN_UID (insn
))
1174 /* Search this basic block for the already recorded last use of the reg. */
1175 for (p
= insn
; p
; p
= NEXT_INSN (p
))
1177 switch (GET_CODE (p
))
1184 /* Ordinary insn: if this is the last use, we win. */
1185 if (REGNO_LAST_UID (regno
) == INSN_UID (p
))
1190 /* Jump insn: if this is the last use, we win. */
1191 if (REGNO_LAST_UID (regno
) == INSN_UID (p
))
1193 /* Otherwise, it's the end of the basic block, so we lose. */
1198 /* It's the end of the basic block, so we lose. */
1206 /* The "last use" doesn't follow the "first use"?? */
1210 /* Compute the benefit of eliminating the insns in the block whose
1211 last insn is LAST. This may be a group of insns used to compute a
1212 value directly or can contain a library call. */
1215 libcall_benefit (last
)
1221 for (insn
= XEXP (find_reg_note (last
, REG_RETVAL
, NULL_RTX
), 0);
1222 insn
!= last
; insn
= NEXT_INSN (insn
))
1224 if (GET_CODE (insn
) == CALL_INSN
)
1225 benefit
+= 10; /* Assume at least this many insns in a library
1227 else if (GET_CODE (insn
) == INSN
1228 && GET_CODE (PATTERN (insn
)) != USE
1229 && GET_CODE (PATTERN (insn
)) != CLOBBER
)
1236 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1239 skip_consec_insns (insn
, count
)
1243 for (; count
> 0; count
--)
1247 /* If first insn of libcall sequence, skip to end. */
1248 /* Do this at start of loop, since INSN is guaranteed to
1250 if (GET_CODE (insn
) != NOTE
1251 && (temp
= find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
)))
1252 insn
= XEXP (temp
, 0);
1254 do insn
= NEXT_INSN (insn
);
1255 while (GET_CODE (insn
) == NOTE
);
1261 /* Ignore any movable whose insn falls within a libcall
1262 which is part of another movable.
1263 We make use of the fact that the movable for the libcall value
1264 was made later and so appears later on the chain. */
1267 ignore_some_movables (movables
)
1268 struct movable
*movables
;
1270 register struct movable
*m
, *m1
;
1272 for (m
= movables
; m
; m
= m
->next
)
1274 /* Is this a movable for the value of a libcall? */
1275 rtx note
= find_reg_note (m
->insn
, REG_RETVAL
, NULL_RTX
);
1279 /* Check for earlier movables inside that range,
1280 and mark them invalid. We cannot use LUIDs here because
1281 insns created by loop.c for prior loops don't have LUIDs.
1282 Rather than reject all such insns from movables, we just
1283 explicitly check each insn in the libcall (since invariant
1284 libcalls aren't that common). */
1285 for (insn
= XEXP (note
, 0); insn
!= m
->insn
; insn
= NEXT_INSN (insn
))
1286 for (m1
= movables
; m1
!= m
; m1
= m1
->next
)
1287 if (m1
->insn
== insn
)
1293 /* For each movable insn, see if the reg that it loads
1294 leads when it dies right into another conditionally movable insn.
1295 If so, record that the second insn "forces" the first one,
1296 since the second can be moved only if the first is. */
1299 force_movables (movables
)
1300 struct movable
*movables
;
1302 register struct movable
*m
, *m1
;
1303 for (m1
= movables
; m1
; m1
= m1
->next
)
1304 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1305 if (!m1
->partial
&& !m1
->done
)
1307 int regno
= m1
->regno
;
1308 for (m
= m1
->next
; m
; m
= m
->next
)
1309 /* ??? Could this be a bug? What if CSE caused the
1310 register of M1 to be used after this insn?
1311 Since CSE does not update regno_last_uid,
1312 this insn M->insn might not be where it dies.
1313 But very likely this doesn't matter; what matters is
1314 that M's reg is computed from M1's reg. */
1315 if (INSN_UID (m
->insn
) == REGNO_LAST_UID (regno
)
1318 if (m
!= 0 && m
->set_src
== m1
->set_dest
1319 /* If m->consec, m->set_src isn't valid. */
1323 /* Increase the priority of the moving the first insn
1324 since it permits the second to be moved as well. */
1328 m1
->lifetime
+= m
->lifetime
;
1329 m1
->savings
+= m
->savings
;
/* Find invariant expressions that are equal and can be combined into
   one register.  */
1338 combine_movables (movables
, nregs
)
1339 struct movable
*movables
;
1342 register struct movable
*m
;
1343 char *matched_regs
= (char *) alloca (nregs
);
1344 enum machine_mode mode
;
1346 /* Regs that are set more than once are not allowed to match
1347 or be matched. I'm no longer sure why not. */
1348 /* Perhaps testing m->consec_sets would be more appropriate here? */
1350 for (m
= movables
; m
; m
= m
->next
)
1351 if (m
->match
== 0 && n_times_used
[m
->regno
] == 1 && !m
->partial
)
1353 register struct movable
*m1
;
1354 int regno
= m
->regno
;
1356 bzero (matched_regs
, nregs
);
1357 matched_regs
[regno
] = 1;
1359 /* We want later insns to match the first one. Don't make the first
1360 one match any later ones. So start this loop at m->next. */
1361 for (m1
= m
->next
; m1
; m1
= m1
->next
)
1362 if (m
!= m1
&& m1
->match
== 0 && n_times_used
[m1
->regno
] == 1
1363 /* A reg used outside the loop mustn't be eliminated. */
1365 /* A reg used for zero-extending mustn't be eliminated. */
1367 && (matched_regs
[m1
->regno
]
1370 /* Can combine regs with different modes loaded from the
1371 same constant only if the modes are the same or
1372 if both are integer modes with M wider or the same
1373 width as M1. The check for integer is redundant, but
1374 safe, since the only case of differing destination
1375 modes with equal sources is when both sources are
1376 VOIDmode, i.e., CONST_INT. */
1377 (GET_MODE (m
->set_dest
) == GET_MODE (m1
->set_dest
)
1378 || (GET_MODE_CLASS (GET_MODE (m
->set_dest
)) == MODE_INT
1379 && GET_MODE_CLASS (GET_MODE (m1
->set_dest
)) == MODE_INT
1380 && (GET_MODE_BITSIZE (GET_MODE (m
->set_dest
))
1381 >= GET_MODE_BITSIZE (GET_MODE (m1
->set_dest
)))))
1382 /* See if the source of M1 says it matches M. */
1383 && ((GET_CODE (m1
->set_src
) == REG
1384 && matched_regs
[REGNO (m1
->set_src
)])
1385 || rtx_equal_for_loop_p (m
->set_src
, m1
->set_src
,
1387 && ((m
->dependencies
== m1
->dependencies
)
1388 || rtx_equal_p (m
->dependencies
, m1
->dependencies
)))
1390 m
->lifetime
+= m1
->lifetime
;
1391 m
->savings
+= m1
->savings
;
1394 matched_regs
[m1
->regno
] = 1;
1398 /* Now combine the regs used for zero-extension.
1399 This can be done for those not marked `global'
1400 provided their lives don't overlap. */
1402 for (mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
); mode
!= VOIDmode
;
1403 mode
= GET_MODE_WIDER_MODE (mode
))
1405 register struct movable
*m0
= 0;
1407 /* Combine all the registers for extension from mode MODE.
1408 Don't combine any that are used outside this loop. */
1409 for (m
= movables
; m
; m
= m
->next
)
1410 if (m
->partial
&& ! m
->global
1411 && mode
== GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m
->insn
)))))
1413 register struct movable
*m1
;
1414 int first
= uid_luid
[REGNO_FIRST_UID (m
->regno
)];
1415 int last
= uid_luid
[REGNO_LAST_UID (m
->regno
)];
1419 /* First one: don't check for overlap, just record it. */
1424 /* Make sure they extend to the same mode.
1425 (Almost always true.) */
1426 if (GET_MODE (m
->set_dest
) != GET_MODE (m0
->set_dest
))
1429 /* We already have one: check for overlap with those
1430 already combined together. */
1431 for (m1
= movables
; m1
!= m
; m1
= m1
->next
)
1432 if (m1
== m0
|| (m1
->partial
&& m1
->match
== m0
))
1433 if (! (uid_luid
[REGNO_FIRST_UID (m1
->regno
)] > last
1434 || uid_luid
[REGNO_LAST_UID (m1
->regno
)] < first
))
1437 /* No overlap: we can combine this with the others. */
1438 m0
->lifetime
+= m
->lifetime
;
1439 m0
->savings
+= m
->savings
;
1448 /* Return 1 if regs X and Y will become the same if moved. */
1451 regs_match_p (x
, y
, movables
)
1453 struct movable
*movables
;
1457 struct movable
*mx
, *my
;
1459 for (mx
= movables
; mx
; mx
= mx
->next
)
1460 if (mx
->regno
== xn
)
1463 for (my
= movables
; my
; my
= my
->next
)
1464 if (my
->regno
== yn
)
1468 && ((mx
->match
== my
->match
&& mx
->match
!= 0)
1470 || mx
== my
->match
));
1473 /* Return 1 if X and Y are identical-looking rtx's.
1474 This is the Lisp function EQUAL for rtx arguments.
1476 If two registers are matching movables or a movable register and an
1477 equivalent constant, consider them equal. */
1480 rtx_equal_for_loop_p (x
, y
, movables
)
1482 struct movable
*movables
;
1486 register struct movable
*m
;
1487 register enum rtx_code code
;
1492 if (x
== 0 || y
== 0)
1495 code
= GET_CODE (x
);
  /* If we have a register and a constant, they may sometimes be
     equal.  */
1499 if (GET_CODE (x
) == REG
&& n_times_set
[REGNO (x
)] == -2
1502 for (m
= movables
; m
; m
= m
->next
)
1503 if (m
->move_insn
&& m
->regno
== REGNO (x
)
1504 && rtx_equal_p (m
->set_src
, y
))
1507 else if (GET_CODE (y
) == REG
&& n_times_set
[REGNO (y
)] == -2
1510 for (m
= movables
; m
; m
= m
->next
)
1511 if (m
->move_insn
&& m
->regno
== REGNO (y
)
1512 && rtx_equal_p (m
->set_src
, x
))
1516 /* Otherwise, rtx's of different codes cannot be equal. */
1517 if (code
!= GET_CODE (y
))
1520 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1521 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1523 if (GET_MODE (x
) != GET_MODE (y
))
1526 /* These three types of rtx's can be compared nonrecursively. */
1528 return (REGNO (x
) == REGNO (y
) || regs_match_p (x
, y
, movables
));
1530 if (code
== LABEL_REF
)
1531 return XEXP (x
, 0) == XEXP (y
, 0);
1532 if (code
== SYMBOL_REF
)
1533 return XSTR (x
, 0) == XSTR (y
, 0);
  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */
1538 fmt
= GET_RTX_FORMAT (code
);
1539 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1544 if (XWINT (x
, i
) != XWINT (y
, i
))
1549 if (XINT (x
, i
) != XINT (y
, i
))
1554 /* Two vectors must have the same length. */
1555 if (XVECLEN (x
, i
) != XVECLEN (y
, i
))
1558 /* And the corresponding elements must match. */
1559 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
1560 if (rtx_equal_for_loop_p (XVECEXP (x
, i
, j
), XVECEXP (y
, i
, j
), movables
) == 0)
1565 if (rtx_equal_for_loop_p (XEXP (x
, i
), XEXP (y
, i
), movables
) == 0)
1570 if (strcmp (XSTR (x
, i
), XSTR (y
, i
)))
1575 /* These are just backpointers, so they don't matter. */
1581 /* It is believed that rtx's at this level will never
1582 contain anything but integers and other rtx's,
1583 except for within LABEL_REFs and SYMBOL_REFs. */
/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use that reference.  */
1595 add_label_notes (x
, insns
)
1599 enum rtx_code code
= GET_CODE (x
);
1604 if (code
== LABEL_REF
&& !LABEL_REF_NONLOCAL_P (x
))
1606 rtx next
= next_real_insn (XEXP (x
, 0));
1608 /* Don't record labels that refer to dispatch tables.
1609 This is not necessary, since the tablejump references the same label.
1610 And if we did record them, flow.c would make worse code. */
1612 || ! (GET_CODE (next
) == JUMP_INSN
1613 && (GET_CODE (PATTERN (next
)) == ADDR_VEC
1614 || GET_CODE (PATTERN (next
)) == ADDR_DIFF_VEC
)))
1616 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
1617 if (reg_mentioned_p (XEXP (x
, 0), insn
))
1618 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_LABEL
, XEXP (x
, 0),
1624 fmt
= GET_RTX_FORMAT (code
);
1625 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1628 add_label_notes (XEXP (x
, i
), insns
);
1629 else if (fmt
[i
] == 'E')
1630 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
1631 add_label_notes (XVECEXP (x
, i
, j
), insns
);
1635 /* Scan MOVABLES, and move the insns that deserve to be moved.
1636 If two matching movables are combined, replace one reg with the
1637 other throughout. */
1640 move_movables (movables
, threshold
, insn_count
, loop_start
, end
, nregs
)
1641 struct movable
*movables
;
1649 register struct movable
*m
;
1651 /* Map of pseudo-register replacements to handle combining
1652 when we move several insns that load the same value
1653 into different pseudo-registers. */
1654 rtx
*reg_map
= (rtx
*) alloca (nregs
* sizeof (rtx
));
1655 char *already_moved
= (char *) alloca (nregs
);
1657 bzero (already_moved
, nregs
);
1658 bzero ((char *) reg_map
, nregs
* sizeof (rtx
));
1662 for (m
= movables
; m
; m
= m
->next
)
1664 /* Describe this movable insn. */
1666 if (loop_dump_stream
)
1668 fprintf (loop_dump_stream
, "Insn %d: regno %d (life %d), ",
1669 INSN_UID (m
->insn
), m
->regno
, m
->lifetime
);
1671 fprintf (loop_dump_stream
, "consec %d, ", m
->consec
);
1673 fprintf (loop_dump_stream
, "cond ");
1675 fprintf (loop_dump_stream
, "force ");
1677 fprintf (loop_dump_stream
, "global ");
1679 fprintf (loop_dump_stream
, "done ");
1681 fprintf (loop_dump_stream
, "move-insn ");
1683 fprintf (loop_dump_stream
, "matches %d ",
1684 INSN_UID (m
->match
->insn
));
1686 fprintf (loop_dump_stream
, "forces %d ",
1687 INSN_UID (m
->forces
->insn
));
1690 /* Count movables. Value used in heuristics in strength_reduce. */
1693 /* Ignore the insn if it's already done (it matched something else).
1694 Otherwise, see if it is now safe to move. */
1698 || (1 == invariant_p (m
->set_src
)
1699 && (m
->dependencies
== 0
1700 || 1 == invariant_p (m
->dependencies
))
1702 || 1 == consec_sets_invariant_p (m
->set_dest
,
1705 && (! m
->forces
|| m
->forces
->done
))
1709 int savings
= m
->savings
;
1711 /* We have an insn that is safe to move.
1712 Compute its desirability. */
1717 if (loop_dump_stream
)
1718 fprintf (loop_dump_stream
, "savings %d ", savings
);
1720 if (moved_once
[regno
])
1724 if (loop_dump_stream
)
1725 fprintf (loop_dump_stream
, "halved since already moved ");
1728 /* An insn MUST be moved if we already moved something else
1729 which is safe only if this one is moved too: that is,
1730 if already_moved[REGNO] is nonzero. */
1732 /* An insn is desirable to move if the new lifetime of the
1733 register is no more than THRESHOLD times the old lifetime.
1734 If it's not desirable, it means the loop is so big
1735 that moving won't speed things up much,
1736 and it is liable to make register usage worse. */
1738 /* It is also desirable to move if it can be moved at no
1739 extra cost because something else was already moved. */
1741 if (already_moved
[regno
]
1742 || flag_move_all_movables
1743 || (threshold
* savings
* m
->lifetime
) >= insn_count
1744 || (m
->forces
&& m
->forces
->done
1745 && n_times_used
[m
->forces
->regno
] == 1))
1748 register struct movable
*m1
;
1751 /* Now move the insns that set the reg. */
1753 if (m
->partial
&& m
->match
)
1757 /* Find the end of this chain of matching regs.
1758 Thus, we load each reg in the chain from that one reg.
1759 And that reg is loaded with 0 directly,
1760 since it has ->match == 0. */
1761 for (m1
= m
; m1
->match
; m1
= m1
->match
);
1762 newpat
= gen_move_insn (SET_DEST (PATTERN (m
->insn
)),
1763 SET_DEST (PATTERN (m1
->insn
)));
1764 i1
= emit_insn_before (newpat
, loop_start
);
1766 /* Mark the moved, invariant reg as being allowed to
1767 share a hard reg with the other matching invariant. */
1768 REG_NOTES (i1
) = REG_NOTES (m
->insn
);
1769 r1
= SET_DEST (PATTERN (m
->insn
));
1770 r2
= SET_DEST (PATTERN (m1
->insn
));
1772 = gen_rtx_EXPR_LIST (VOIDmode
, r1
,
1773 gen_rtx_EXPR_LIST (VOIDmode
, r2
,
1775 delete_insn (m
->insn
);
1780 if (loop_dump_stream
)
1781 fprintf (loop_dump_stream
, " moved to %d", INSN_UID (i1
));
1783 /* If we are to re-generate the item being moved with a
1784 new move insn, first delete what we have and then emit
1785 the move insn before the loop. */
1786 else if (m
->move_insn
)
1790 for (count
= m
->consec
; count
>= 0; count
--)
1792 /* If this is the first insn of a library call sequence,
1794 if (GET_CODE (p
) != NOTE
1795 && (temp
= find_reg_note (p
, REG_LIBCALL
, NULL_RTX
)))
1798 /* If this is the last insn of a libcall sequence, then
1799 delete every insn in the sequence except the last.
1800 The last insn is handled in the normal manner. */
1801 if (GET_CODE (p
) != NOTE
1802 && (temp
= find_reg_note (p
, REG_RETVAL
, NULL_RTX
)))
1804 temp
= XEXP (temp
, 0);
1806 temp
= delete_insn (temp
);
1809 p
= delete_insn (p
);
1810 while (p
&& GET_CODE (p
) == NOTE
)
1815 emit_move_insn (m
->set_dest
, m
->set_src
);
1816 temp
= get_insns ();
1819 add_label_notes (m
->set_src
, temp
);
1821 i1
= emit_insns_before (temp
, loop_start
);
1822 if (! find_reg_note (i1
, REG_EQUAL
, NULL_RTX
))
1824 = gen_rtx_EXPR_LIST (m
->is_equiv
? REG_EQUIV
: REG_EQUAL
,
1825 m
->set_src
, REG_NOTES (i1
));
1827 if (loop_dump_stream
)
1828 fprintf (loop_dump_stream
, " moved to %d", INSN_UID (i1
));
		  /* The more regs we move, the less we like moving them.  */
		  threshold -= 3;
		}
	      else
		{
		  for (count = m->consec; count >= 0; count--)
		    {
		      rtx temp;

		      /* If first insn of libcall sequence, skip to end.  */
		      /* Do this at start of loop, since p is guaranteed to
			 be an insn here.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
			p = XEXP (temp, 0);

		      /* If last insn of libcall sequence, move all
			 insns except the last before the loop.  The last
			 insn is handled in the normal manner.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
			{
			  rtx fn_address = 0;
			  rtx fn_reg = 0;
			  rtx fn_address_insn = 0;

			  for (temp = XEXP (temp, 0); temp != p;
			       temp = NEXT_INSN (temp))
			    {
			      rtx body;
			      rtx n;
			      rtx next;

			      if (GET_CODE (temp) == NOTE)
				continue;

			      body = PATTERN (temp);

			      /* Find the next insn after TEMP,
				 not counting USE or NOTE insns.  */
			      for (next = NEXT_INSN (temp); next != p;
				   next = NEXT_INSN (next))
				if (! (GET_CODE (next) == INSN
				       && GET_CODE (PATTERN (next)) == USE)
				    && GET_CODE (next) != NOTE)
				  break;

			      /* If that is the call, this may be the insn
				 that loads the function address.

				 Extract the function address from the insn
				 that loads it into a register.
				 If this insn was cse'd, we get incorrect code.

				 So emit a new move insn that copies the
				 function address into the register that the
				 call insn will use.  flow.c will delete any
				 redundant stores that we have created.  */
			      if (GET_CODE (next) == CALL_INSN
				  && GET_CODE (body) == SET
				  && GET_CODE (SET_DEST (body)) == REG
				  && (n = find_reg_note (temp, REG_EQUAL,
							 NULL_RTX)))
				{
				  fn_reg = SET_SRC (body);
				  if (GET_CODE (fn_reg) != REG)
				    fn_reg = SET_DEST (body);
				  fn_address = XEXP (n, 0);
				  fn_address_insn = temp;
				}

			      /* We have the call insn.
				 If it uses the register we suspect it might,
				 load it with the correct address directly.  */
			      if (GET_CODE (temp) == CALL_INSN
				  && fn_address != 0
				  && reg_referenced_p (fn_reg, body))
				emit_insn_after (gen_move_insn (fn_reg,
								fn_address),
						 fn_address_insn);

			      if (GET_CODE (temp) == CALL_INSN)
				{
				  i1 = emit_call_insn_before (body, loop_start);
				  /* Because the USAGE information potentially
				     contains objects other than hard registers
				     we need to copy it.  */
				  if (CALL_INSN_FUNCTION_USAGE (temp))
				    CALL_INSN_FUNCTION_USAGE (i1)
				      = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
				}
			      else
				i1 = emit_insn_before (body, loop_start);

			      if (temp == fn_address_insn)
				fn_address_insn = i1;
			      REG_NOTES (i1) = REG_NOTES (temp);
			    }
			}

		      if (m->savemode != VOIDmode)
			{
			  /* P sets REG to zero; but we should clear only
			     the bits that are not covered by the mode
			     m->savemode.  */
			  rtx reg = m->set_dest;
			  rtx sequence;
			  rtx tem;

			  start_sequence ();
			  tem = expand_binop
			    (GET_MODE (reg), and_optab, reg,
			     GEN_INT ((((HOST_WIDE_INT) 1
					<< GET_MODE_BITSIZE (m->savemode)))
				      - 1),
			     reg, 1, OPTAB_LIB_WIDEN);
			  if (tem != reg)
			    emit_move_insn (reg, tem);
			  sequence = gen_sequence ();
			  end_sequence ();
			  i1 = emit_insn_before (sequence, loop_start);
			}
		      else if (GET_CODE (p) == CALL_INSN)
			{
			  i1 = emit_call_insn_before (PATTERN (p), loop_start);
			  /* Because the USAGE information potentially
			     contains objects other than hard registers
			     we need to copy it.  */
			  if (CALL_INSN_FUNCTION_USAGE (p))
			    CALL_INSN_FUNCTION_USAGE (i1)
			      = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
			}
		      else if (count == m->consec && m->move_insn_first)
			{
			  /* The SET_SRC might not be invariant, so we must
			     use the REG_EQUAL note.  */
			  start_sequence ();
			  emit_move_insn (m->set_dest, m->set_src);
			  temp = get_insns ();
			  end_sequence ();

			  add_label_notes (m->set_src, temp);

			  i1 = emit_insns_before (temp, loop_start);
			  if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
			    REG_NOTES (i1)
			      = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
						    : REG_EQUAL),
						   m->set_src, REG_NOTES (i1));
			}
		      else
			i1 = emit_insn_before (PATTERN (p), loop_start);

		      if (REG_NOTES (i1) == 0)
			{
			  REG_NOTES (i1) = REG_NOTES (p);

			  /* If there is a REG_EQUAL note present whose value
			     is not loop invariant, then delete it, since it
			     may cause problems with later optimization passes.
			     It is possible for cse to create such notes
			     like this as a result of record_jump_cond.  */

			  if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
			      && ! invariant_p (XEXP (temp, 0)))
			    remove_note (i1, temp);
			}

		      if (loop_dump_stream)
			fprintf (loop_dump_stream, " moved to %d",
				 INSN_UID (i1));

		      /* If library call, now fix the REG_NOTES that contain
			 insn pointers, namely REG_LIBCALL on FIRST
			 and REG_RETVAL on I1.  */
		      if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
			{
			  XEXP (temp, 0) = first;
			  temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
			  XEXP (temp, 0) = i1;
			}

		      do p = NEXT_INSN (p);
		      while (p && GET_CODE (p) == NOTE);
		    }
	      /* The more regs we move, the less we like moving them.  */
	      threshold -= 3;

	      /* Any other movable that loads the same register
		 MUST be moved.  */
	      already_moved[regno] = 1;

	      /* This reg has been moved out of one loop.  */
	      moved_once[regno] = 1;

	      /* The reg set here is now invariant.  */
	      if (! m->partial)
		n_times_set[regno] = 0;

	      /* Change the length-of-life info for the register
		 to say it lives at least the full length of this loop.
		 This will help guide optimizations in outer loops.  */

	      if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
		/* This is the old insn before all the moved insns.
		   We can't use the moved insn because it is out of range
		   in uid_luid.  Only the old insns have luids.  */
		REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
	      if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
		REGNO_LAST_UID (regno) = INSN_UID (end);

	      /* Combine with this moved insn any other matching movables.  */

	      if (! m->partial)
		for (m1 = movables; m1; m1 = m1->next)
		  if (m1->match == m)
		    {
		      /* Schedule the reg loaded by M1
			 for replacement so that it shares the reg of M.
			 If the modes differ (only possible in restricted
			 circumstances), make a SUBREG.  */
		      if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
			reg_map[m1->regno] = m->set_dest;
		      else
			reg_map[m1->regno]
			  = gen_lowpart_common (GET_MODE (m1->set_dest),
						m->set_dest);

		      /* Get rid of the matching insn
			 and prevent further processing of it.  */
		      m1->done = 1;

		      /* if library call, delete all insn except last, which
			 is deleted below.  */
		      if ((temp = find_reg_note (m1->insn, REG_RETVAL,
						 NULL_RTX)))
			{
			  for (temp = XEXP (temp, 0); temp != m1->insn;
			       temp = NEXT_INSN (temp))
			    delete_insn (temp);
			}
		      delete_insn (m1->insn);

		      /* Any other movable that loads the same register
			 MUST be moved.  */
		      already_moved[m1->regno] = 1;

		      /* The reg merged here is now invariant,
			 if the reg it matches is invariant.  */
		      if (! m->partial)
			n_times_set[m1->regno] = 0;
		    }
	    }
	  else if (loop_dump_stream)
	    fprintf (loop_dump_stream, "not desirable");
	}
      else if (loop_dump_stream && !m->match)
	fprintf (loop_dump_stream, "not safe");

      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\n");
    }

  if (new_start == 0)
    new_start = loop_start;

  /* Go through all the instructions in the loop, making
     all the register substitutions scheduled in REG_MAP.  */
  for (p = new_start; p != end; p = NEXT_INSN (p))
    if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	|| GET_CODE (p) == CALL_INSN)
      {
	replace_regs (PATTERN (p), reg_map, nregs, 0);
	replace_regs (REG_NOTES (p), reg_map, nregs, 0);
      }
}
/* Scan X and replace the address of any MEM in it with ADDR.
   REG is the address that MEM should have before the replacement.  */

static void
replace_call_address (x, reg, addr)
     rtx x, reg, addr;
{
  register enum rtx_code code;
  register int i;
  register char *fmt;

  if (x == 0)
    return;
  code = GET_CODE (x);
  switch (code)
    {
    case SET:
      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 1), reg, addr);
      return;

    case CALL:
      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 0), reg, addr);
      return;

    case MEM:
      /* If this MEM uses a reg other than the one we expected,
	 something is wrong.  */
      if (XEXP (x, 0) != reg)
	abort ();
      XEXP (x, 0) = addr;
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	replace_call_address (XEXP (x, i), reg, addr);
      if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    replace_call_address (XVECEXP (x, i, j), reg, addr);
	}
    }
}
/* Return the number of memory refs to addresses that vary.  */

static int
count_nonfixed_reads (x)
     rtx x;
{
  register enum rtx_code code;
  register int i;
  register char *fmt;
  int value = 0;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return ((invariant_p (XEXP (x, 0)) != 1)
	      + count_nonfixed_reads (XEXP (x, 0)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	value += count_nonfixed_reads (XEXP (x, i));
      if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    value += count_nonfixed_reads (XVECEXP (x, i, j));
	}
    }
  return value;
}
/* P is an instruction that sets a register to the result of a ZERO_EXTEND.
   Replace it with an instruction to load just the low bytes
   if the machine supports such an instruction,
   and insert above LOOP_START an instruction to clear the register.  */
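/* Illustration of the transformation described above; the fragments are
   hypothetical source-level analogues, not what the pass manipulates.
   A loop that zero-extends a narrow value into a wide register on every
   iteration, roughly

	while (...)
	  r = (unsigned char) *p++;

   is rewritten so that R is cleared once before the loop and only its low
   part is stored inside the loop, which is what the STRICT_LOW_PART
   pattern below and the clearing insn emitted above LOOP_START express
   at the RTL level.  */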
static void
constant_high_bytes (p, loop_start)
     rtx p, loop_start;
{
  register rtx new;
  register int insn_code_number;

  /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
     to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...).  */

  new = gen_rtx_SET (VOIDmode,
		     gen_rtx_STRICT_LOW_PART (VOIDmode,
			 gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
					 SET_DEST (PATTERN (p)),
					 0)),
		     XEXP (SET_SRC (PATTERN (p)), 0));
  insn_code_number = recog (new, p);

  if (insn_code_number)
    {
      /* Clear destination register before the loop.  */
      emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
				     const0_rtx),
			loop_start);

      /* Inside the loop, just load the low part.  */
      PATTERN (p) = new;
    }
}
/* Scan a loop setting the variables `unknown_address_altered',
   `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
   and `loop_has_volatile'.
   Also, fill in the array `loop_store_mems'.  */

static void
prescan_loop (start, end)
     rtx start, end;
{
  register int level = 1;
  register rtx insn;

  unknown_address_altered = 0;
  loop_has_call = 0;
  loop_has_volatile = 0;
  loop_store_mems_idx = 0;

  for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
       insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE)
	{
	  if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	    {
	      ++level;
	      /* Count number of loops contained in this one.  */
	      loops_enclosed++;
	    }
	  else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
	    --level;
	  else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
	    {
	      if (level == 1)
		loop_continue = insn;
	    }
	}
      else if (GET_CODE (insn) == CALL_INSN)
	{
	  if (! CONST_CALL_P (insn))
	    unknown_address_altered = 1;
	  loop_has_call = 1;
	}
      else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
	{
	  if (volatile_refs_p (PATTERN (insn)))
	    loop_has_volatile = 1;

	  note_stores (PATTERN (insn), note_addr_stored);
	}
    }
}
/* Scan the function looking for loops.  Record the start and end of each loop.
   Also mark as invalid loops any loops that contain a setjmp or are branched
   to from outside the loop.  */
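/* As an illustration (hypothetical source, not produced by this pass), the
   kind of loop that must be marked invalid here is one that can be entered
   other than through its head, e.g.

	goto inside;
	for (i = 0; i < n; i++)
	  {
	  inside:
	    body ();
	  }

   Since control can reach the body without executing any code placed
   before the loop, none of the code motion done above would be safe, so
   such loops are simply ignored.  */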
static void
find_and_verify_loops (f)
     rtx f;
{
  rtx insn, label;
  int current_loop = -1;
  int next_loop = -1;
  int loop;

  /* If there are jumps to undefined labels,
     treat them as jumps out of any/all loops.
     This also avoids writing past end of tables when there are no loops.  */
  uid_loop_num[0] = -1;

  /* Find boundaries of loops, mark which loops are contained within
     loops, and invalidate loops that have setjmp.  */

  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE)
	switch (NOTE_LINE_NUMBER (insn))
	  {
	  case NOTE_INSN_LOOP_BEG:
	    loop_number_loop_starts[++next_loop] = insn;
	    loop_number_loop_ends[next_loop] = 0;
	    loop_outer_loop[next_loop] = current_loop;
	    loop_invalid[next_loop] = 0;
	    loop_number_exit_labels[next_loop] = 0;
	    loop_number_exit_count[next_loop] = 0;
	    current_loop = next_loop;
	    break;

	  case NOTE_INSN_SETJMP:
	    /* In this case, we must invalidate our current loop and any
	       enclosing loops.  */
	    for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
	      {
		loop_invalid[loop] = 1;
		if (loop_dump_stream)
		  fprintf (loop_dump_stream,
			   "\nLoop at %d ignored due to setjmp.\n",
			   INSN_UID (loop_number_loop_starts[loop]));
	      }
	    break;

	  case NOTE_INSN_LOOP_END:
	    if (current_loop == -1)
	      abort ();

	    loop_number_loop_ends[current_loop] = insn;
	    current_loop = loop_outer_loop[current_loop];
	    break;

	  default:
	    break;
	  }

      /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
	 enclosing loop, but this doesn't matter.  */
      uid_loop_num[INSN_UID (insn)] = current_loop;
    }

  /* Any loop containing a label used in an initializer must be invalidated,
     because it can be jumped into from anywhere.  */

  for (label = forced_labels; label; label = XEXP (label, 1))
    {
      int loop_num;

      for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
	   loop_num != -1;
	   loop_num = loop_outer_loop[loop_num])
	loop_invalid[loop_num] = 1;
    }

  /* Any loop containing a label used for an exception handler must be
     invalidated, because it can be jumped into from anywhere.  */

  for (label = exception_handler_labels; label; label = XEXP (label, 1))
    {
      int loop_num;

      for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
	   loop_num != -1;
	   loop_num = loop_outer_loop[loop_num])
	loop_invalid[loop_num] = 1;
    }

  /* Now scan all insn's in the function.  If any JUMP_INSN branches into a
     loop that it is not contained within, that loop is marked invalid.
     If any INSN or CALL_INSN uses a label's address, then the loop containing
     that label is marked invalid, because it could be jumped into from
     anywhere.

     Also look for blocks of code ending in an unconditional branch that
     exits the loop.  If such a block is surrounded by a conditional
     branch around the block, move the block elsewhere (see below) and
     invert the jump to point to the code block.  This may eliminate a
     label in our loop and will simplify processing by both us and a
     possible second cse pass.  */

  for (insn = f; insn; insn = NEXT_INSN (insn))
    if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
      {
	int this_loop_num = uid_loop_num[INSN_UID (insn)];

	if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
	  {
	    rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
	    if (note)
	      {
		int loop_num;

		for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
		     loop_num != -1;
		     loop_num = loop_outer_loop[loop_num])
		  loop_invalid[loop_num] = 1;
	      }
	  }

	if (GET_CODE (insn) != JUMP_INSN)
	  continue;

	mark_loop_jump (PATTERN (insn), this_loop_num);

	/* See if this is an unconditional branch outside the loop.  */
	if (this_loop_num != -1
	    && (GET_CODE (PATTERN (insn)) == RETURN
		|| (simplejump_p (insn)
		    && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
			!= this_loop_num)))
	    && get_max_uid () < max_uid_for_loop)
	  {
	    rtx p;
	    rtx our_next = next_real_insn (insn);
	    int dest_loop;
	    int outer_loop = -1;

	    /* Go backwards until we reach the start of the loop, a label,
	       or a JUMP_INSN.  */
	    for (p = PREV_INSN (insn);
		 GET_CODE (p) != CODE_LABEL
		 && ! (GET_CODE (p) == NOTE
		       && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
		 && GET_CODE (p) != JUMP_INSN;
		 p = PREV_INSN (p))
	      ;

	    /* Check for the case where we have a jump to an inner nested
	       loop, and do not perform the optimization in that case.  */

	    if (JUMP_LABEL (insn))
	      {
		dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
		if (dest_loop != -1)
		  {
		    for (outer_loop = dest_loop; outer_loop != -1;
			 outer_loop = loop_outer_loop[outer_loop])
		      if (outer_loop == this_loop_num)
			break;
		  }
	      }

	    /* Make sure that the target of P is within the current loop.  */

	    if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
		&& uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
	      outer_loop = this_loop_num;

	    /* If we stopped on a JUMP_INSN to the next insn after INSN,
	       we have a block of code to try to move.

	       We look backward and then forward from the target of INSN
	       to find a BARRIER at the same loop depth as the target.
	       If we find such a BARRIER, we make a new label for the start
	       of the block, invert the jump in P and point it to that label,
	       and move the block of code to the spot we found.  */

	    if (outer_loop == -1
		&& GET_CODE (p) == JUMP_INSN
		&& JUMP_LABEL (p) != 0
		/* Just ignore jumps to labels that were never emitted.
		   These always indicate compilation errors.  */
		&& INSN_UID (JUMP_LABEL (p)) != 0
		&& ! simplejump_p (p)
		&& next_real_insn (JUMP_LABEL (p)) == our_next)
	      {
		rtx target
		  = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
		int target_loop_num = uid_loop_num[INSN_UID (target)];
		rtx loc;

		for (loc = target; loc; loc = PREV_INSN (loc))
		  if (GET_CODE (loc) == BARRIER
		      && uid_loop_num[INSN_UID (loc)] == target_loop_num)
		    break;

		if (loc == 0)
		  for (loc = target; loc; loc = NEXT_INSN (loc))
		    if (GET_CODE (loc) == BARRIER
			&& uid_loop_num[INSN_UID (loc)] == target_loop_num)
		      break;

		if (loc)
		  {
		    rtx cond_label = JUMP_LABEL (p);
		    rtx new_label = get_label_after (p);

		    /* Ensure our label doesn't go away.  */
		    LABEL_NUSES (cond_label)++;

		    /* Verify that uid_loop_num is large enough and that
		       we can invert P.  */
		    if (invert_jump (p, new_label))
		      {
			rtx q, r;

			/* If no suitable BARRIER was found, create a suitable
			   one before TARGET.  Since TARGET is a fall through
			   path, we'll need to insert a jump around our block
			   and add a BARRIER before TARGET.

			   This creates an extra unconditional jump outside
			   the loop.  However, the benefits of removing rarely
			   executed instructions from inside the loop usually
			   outweigh the cost of the extra unconditional jump
			   outside the loop.  */
			if (loc == 0)
			  {
			    rtx temp;

			    temp = gen_jump (JUMP_LABEL (insn));
			    temp = emit_jump_insn_before (temp, target);
			    JUMP_LABEL (temp) = JUMP_LABEL (insn);
			    LABEL_NUSES (JUMP_LABEL (insn))++;
			    loc = emit_barrier_before (target);
			  }

			/* Include the BARRIER after INSN and copy the
			   block after LOC.  */
			new_label = squeeze_notes (new_label, NEXT_INSN (insn));
			reorder_insns (new_label, NEXT_INSN (insn), loc);

			/* All those insns are now in TARGET_LOOP_NUM.  */
			for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
			     q = NEXT_INSN (q))
			  uid_loop_num[INSN_UID (q)] = target_loop_num;

			/* The label jumped to by INSN is no longer a loop exit.
			   Unless INSN does not have a label (e.g., it is a
			   RETURN insn), search loop_number_exit_labels to find
			   its label_ref, and remove it.  Also turn off
			   LABEL_OUTSIDE_LOOP_P bit.  */
			if (JUMP_LABEL (insn))
			  {
			    int loop_num;

			    for (q = 0,
				 r = loop_number_exit_labels[this_loop_num];
				 r; q = r, r = LABEL_NEXTREF (r))
			      if (XEXP (r, 0) == JUMP_LABEL (insn))
				{
				  LABEL_OUTSIDE_LOOP_P (r) = 0;
				  if (q)
				    LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
				  else
				    loop_number_exit_labels[this_loop_num]
				      = LABEL_NEXTREF (r);
				  break;
				}

			    for (loop_num = this_loop_num;
				 loop_num != -1 && loop_num != target_loop_num;
				 loop_num = loop_outer_loop[loop_num])
			      loop_number_exit_count[loop_num]--;

			    /* If we didn't find it, then something is wrong.  */
			    if (! r)
			      abort ();
			  }

			/* P is now a jump outside the loop, so it must be put
			   in loop_number_exit_labels, and marked as such.
			   The easiest way to do this is to just call
			   mark_loop_jump again for P.  */
			mark_loop_jump (PATTERN (p), this_loop_num);

			/* If INSN now jumps to the insn after it,
			   delete INSN.  */
			if (JUMP_LABEL (insn) != 0
			    && (next_real_insn (JUMP_LABEL (insn))
				== next_real_insn (insn)))
			  delete_insn (insn);
		      }

		    /* Continue the loop after where the conditional
		       branch used to jump, since the only branch insn
		       in the block (if it still remains) is an inter-loop
		       branch and hence needs no processing.  */
		    insn = NEXT_INSN (cond_label);

		    if (--LABEL_NUSES (cond_label) == 0)
		      delete_insn (cond_label);

		    /* This loop will be continued with NEXT_INSN (insn).  */
		    insn = PREV_INSN (insn);
		  }
	      }
	  }
      }
}
/* If any label in X jumps to a loop different from LOOP_NUM and any of the
   loops it is contained in, mark the target loop invalid.

   For speed, we assume that X is part of a pattern of a JUMP_INSN.  */

static void
mark_loop_jump (x, loop_num)
     rtx x;
     int loop_num;
{
  int dest_loop;
  int outer_loop;
  int i;

  switch (GET_CODE (x))
    {
    case CONST:
      /* There could be a label reference in here.  */
      mark_loop_jump (XEXP (x, 0), loop_num);
      return;

    case PLUS:
    case MINUS:
    case MULT:
      mark_loop_jump (XEXP (x, 0), loop_num);
      mark_loop_jump (XEXP (x, 1), loop_num);
      return;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      mark_loop_jump (XEXP (x, 0), loop_num);
      return;

    case LABEL_REF:
      dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];

      /* Link together all labels that branch outside the loop.  This
	 is used by final_[bg]iv_value and the loop unrolling code.  Also
	 mark this LABEL_REF so we know that this branch should predict
	 false.  */

      /* A check to make sure the label is not in an inner nested loop,
	 since this does not count as a loop exit.  */
      if (dest_loop != -1)
	{
	  for (outer_loop = dest_loop; outer_loop != -1;
	       outer_loop = loop_outer_loop[outer_loop])
	    if (outer_loop == loop_num)
	      break;
	}
      else
	outer_loop = -1;

      if (loop_num != -1 && outer_loop == -1)
	{
	  LABEL_OUTSIDE_LOOP_P (x) = 1;
	  LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
	  loop_number_exit_labels[loop_num] = x;

	  for (outer_loop = loop_num;
	       outer_loop != -1 && outer_loop != dest_loop;
	       outer_loop = loop_outer_loop[outer_loop])
	    loop_number_exit_count[outer_loop]++;
	}

      /* If this is inside a loop, but not in the current loop or one enclosed
	 by it, it invalidates at least one loop.  */

      if (dest_loop == -1)
	return;

      /* We must invalidate every nested loop containing the target of this
	 label, except those that also contain the jump insn.  */

      for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
	{
	  /* Stop when we reach a loop that also contains the jump insn.  */
	  for (outer_loop = loop_num; outer_loop != -1;
	       outer_loop = loop_outer_loop[outer_loop])
	    if (dest_loop == outer_loop)
	      return;

	  /* If we get here, we know we need to invalidate a loop.  */
	  if (loop_dump_stream && ! loop_invalid[dest_loop])
	    fprintf (loop_dump_stream,
		     "\nLoop at %d ignored due to multiple entry points.\n",
		     INSN_UID (loop_number_loop_starts[dest_loop]));

	  loop_invalid[dest_loop] = 1;
	}
      return;

    case SET:
      /* If this is not setting pc, ignore.  */
      if (SET_DEST (x) == pc_rtx)
	mark_loop_jump (SET_SRC (x), loop_num);
      return;

    case IF_THEN_ELSE:
      mark_loop_jump (XEXP (x, 1), loop_num);
      mark_loop_jump (XEXP (x, 2), loop_num);
      return;

    case PARALLEL:
    case ADDR_VEC:
      for (i = 0; i < XVECLEN (x, 0); i++)
	mark_loop_jump (XVECEXP (x, 0, i), loop_num);
      return;

    case ADDR_DIFF_VEC:
      for (i = 0; i < XVECLEN (x, 1); i++)
	mark_loop_jump (XVECEXP (x, 1, i), loop_num);
      return;

    default:
      /* Treat anything else (such as a symbol_ref)
	 as a branch out of this loop, but not into any loop.  */

      if (loop_num != -1)
	{
	  LABEL_OUTSIDE_LOOP_P (x) = 1;
	  LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
	  loop_number_exit_labels[loop_num] = x;

	  for (outer_loop = loop_num; outer_loop != -1;
	       outer_loop = loop_outer_loop[outer_loop])
	    loop_number_exit_count[outer_loop]++;
	}
      return;
    }
}
/* Return nonzero if there is a label in the range from
   insn INSN to and including the insn whose luid is END.
   INSN must have an assigned luid (i.e., it must not have
   been previously created by loop.c).  */

static int
labels_in_range_p (insn, end)
     rtx insn;
     int end;
{
  while (insn && INSN_LUID (insn) <= end)
    {
      if (GET_CODE (insn) == CODE_LABEL)
	return 1;
      insn = NEXT_INSN (insn);
    }

  return 0;
}
/* Record that a memory reference X is being set.  */

static void
note_addr_stored (x, y)
     rtx x;
     rtx y ATTRIBUTE_UNUSED;
{
  register int i;

  if (x == 0 || GET_CODE (x) != MEM)
    return;

  /* Count number of memory writes.
     This affects heuristics in strength_reduce.  */
  num_mem_sets++;

  /* BLKmode MEM means all memory is clobbered.  */
  if (GET_MODE (x) == BLKmode)
    unknown_address_altered = 1;

  if (unknown_address_altered)
    return;

  for (i = 0; i < loop_store_mems_idx; i++)
    if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
	&& MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
      {
	/* We are storing at the same address as previously noted.  Save the
	   wider reference.  */
	if (GET_MODE_SIZE (GET_MODE (x))
	    > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
	  loop_store_mems[i] = x;
	break;
      }

  if (i == NUM_STORES)
    unknown_address_altered = 1;

  else if (i == loop_store_mems_idx)
    loop_store_mems[loop_store_mems_idx++] = x;
}
/* Return nonzero if the rtx X is invariant over the current loop.

   The value is 2 if we refer to something only conditionally invariant.

   If `unknown_address_altered' is nonzero, no memory ref is invariant.
   Otherwise, a memory ref is invariant if it does not conflict with
   anything stored in `loop_store_mems'.  */
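/* Illustration only (hypothetical source, not part of the pass): in

	for (i = 0; i < n; i++)
	  a[i] = x * y;

   the rtx computing x * y is invariant, assuming neither X nor Y is set
   inside the loop, so scan_loop may hoist the multiplication.  A load from
   memory inside the same loop is invariant only if it cannot conflict with
   any store recorded in loop_store_mems; here a read of a[j] would not be
   considered invariant because of the stores to a[i].  */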
int
invariant_p (x)
     register rtx x;
{
  register enum rtx_code code;
  register int i;
  register char *fmt;
  int conditional = 0;

  if (x == 0)
    return 1;
  code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
      /* A LABEL_REF is normally invariant, however, if we are unrolling
	 loops, and this label is inside the loop, then it isn't invariant.
	 This is because each unrolled copy of the loop body will have
	 a copy of this label.  If this was invariant, then an insn loading
	 the address of this label into a register might get moved outside
	 the loop, and then each loop body would end up using the same label.

	 We don't know the loop bounds here though, so just fail for all
	 labels.  */
      if (flag_unroll_loops)
	return 0;
      else
	return 1;

    case UNSPEC_VOLATILE:
      return 0;

    case REG:
      /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
	 since the reg might be set by initialization within the loop.  */

      if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	   || x == arg_pointer_rtx)
	  && ! current_function_has_nonlocal_goto)
	return 1;

      if (loop_has_call
	  && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
	return 0;

      if (n_times_set[REGNO (x)] < 0)
	return 2;

      return n_times_set[REGNO (x)] == 0;

    case MEM:
      /* Volatile memory references must be rejected.  Do this before
	 checking for read-only items, so that volatile read-only items
	 will be rejected also.  */
      if (MEM_VOLATILE_P (x))
	return 0;

      /* Read-only items (such as constants in a constant pool) are
	 invariant if their address is.  */
      if (RTX_UNCHANGING_P (x))
	break;

      /* If we filled the table (or had a subroutine call), any location
	 in memory could have been clobbered.  */
      if (unknown_address_altered)
	return 0;

      /* See if there is any dependence between a store and this load.  */
      for (i = loop_store_mems_idx - 1; i >= 0; i--)
	if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
	  return 0;

      /* It's not invalidated by a store in memory
	 but we must still verify the address is invariant.  */
      break;

    case ASM_OPERANDS:
      /* Don't mess with insns declared volatile.  */
      if (MEM_VOLATILE_P (x))
	return 0;
      break;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  int tem = invariant_p (XEXP (x, i));
	  if (tem == 0)
	    return 0;
	  if (tem == 2)
	    conditional = 1;
	}
      else if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    {
	      int tem = invariant_p (XVECEXP (x, i, j));
	      if (tem == 0)
		return 0;
	      if (tem == 2)
		conditional = 1;
	    }
	}
    }

  return 1 + conditional;
}
/* Return nonzero if all the insns in the loop that set REG
   are INSN and the immediately following insns,
   and if each of those insns sets REG in an invariant way
   (not counting uses of REG in them).

   The value is 2 if some of these insns are only conditionally invariant.

   We assume that INSN itself is the first set of REG
   and that its source is invariant.  */

static int
consec_sets_invariant_p (reg, n_sets, insn)
     int n_sets;
     rtx reg, insn;
{
  register rtx p = insn;
  register int regno = REGNO (reg);
  rtx temp;
  /* Number of sets we have to insist on finding after INSN.  */
  int count = n_sets - 1;
  int old = n_times_set[regno];
  int value = 0;
  int this;

  /* If N_SETS hit the limit, we can't rely on its value.  */
  if (n_sets == 127)
    return 0;

  n_times_set[regno] = 0;

  while (count > 0)
    {
      register enum rtx_code code;
      rtx set;

      p = NEXT_INSN (p);
      code = GET_CODE (p);

      /* If library call, skip to end of it.  */
      if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
	p = XEXP (temp, 0);

      this = 0;
      if (code == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && REGNO (SET_DEST (set)) == regno)
	{
	  this = invariant_p (SET_SRC (set));
	  if (this != 0)
	    value |= this;
	  else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
	    {
	      /* If this is a libcall, then any invariant REG_EQUAL note is OK.
		 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
		 notes are OK.  */
	      this = (CONSTANT_P (XEXP (temp, 0))
		      || (find_reg_note (p, REG_RETVAL, NULL_RTX)
			  && invariant_p (XEXP (temp, 0))));
	      if (this != 0)
		value |= this;
	    }
	}
      if (this != 0)
	count--;
      else if (code != NOTE)
	{
	  n_times_set[regno] = old;
	  return 0;
	}
    }

  n_times_set[regno] = old;
  /* If invariant_p ever returned 2, we return 2.  */
  return 1 + (value & 2);
}
/* I don't think this condition is sufficient to allow INSN
   to be moved, so we no longer test it.  */

/* Return 1 if all insns in the basic block of INSN and following INSN
   that set REG are invariant according to TABLE.  */

static int
all_sets_invariant_p (reg, insn, table)
     rtx reg, insn;
     short *table;
{
  register rtx p = insn;
  register int regno = REGNO (reg);

  while (1)
    {
      register enum rtx_code code;
      p = NEXT_INSN (p);
      code = GET_CODE (p);
      if (code == CODE_LABEL || code == JUMP_INSN)
	return 1;
      if (code == INSN && GET_CODE (PATTERN (p)) == SET
	  && GET_CODE (SET_DEST (PATTERN (p))) == REG
	  && REGNO (SET_DEST (PATTERN (p))) == regno)
	{
	  if (!invariant_p (SET_SRC (PATTERN (p)), table))
	    return 0;
	}
    }
}
/* Look at all uses (not sets) of registers in X.  For each, if it is
   the single use, set USAGE[REGNO] to INSN; if there was a previous use in
   a different insn, set USAGE[REGNO] to const0_rtx.  */

static void
find_single_use_in_loop (insn, x, usage)
     rtx insn;
     rtx x;
     rtx *usage;
{
  enum rtx_code code = GET_CODE (x);
  char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    usage[REGNO (x)]
      = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
	? const0_rtx : insn;

  else if (code == SET)
    {
      /* Don't count SET_DEST if it is a REG; otherwise count things
	 in SET_DEST because if a register is partially modified, it won't
	 show up as a potential movable so we don't care how USAGE is set
	 for it.  */
      if (GET_CODE (SET_DEST (x)) != REG)
	find_single_use_in_loop (insn, SET_DEST (x), usage);
      find_single_use_in_loop (insn, SET_SRC (x), usage);
    }
  else
    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e' && XEXP (x, i) != 0)
	  find_single_use_in_loop (insn, XEXP (x, i), usage);
	else if (fmt[i] == 'E')
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
      }
}
/* Increment N_TIMES_SET at the index of each register
   that is modified by an insn between FROM and TO.
   If the value of an element of N_TIMES_SET becomes 127 or more,
   stop incrementing it, to avoid overflow.

   Store in SINGLE_USAGE[I] the single insn in which register I is
   used, if it is only used once.  Otherwise, it is set to 0 (for no
   uses) or const0_rtx for more than one use.  This parameter may be zero,
   in which case this processing is not done.

   Store in *COUNT_PTR the number of actual instructions
   in the loop.  We use this to decide what is worth moving out.  */

/* last_set[n] is nonzero iff reg n has been set in the current basic block.
   In that case, it is the insn that last set reg n.  */
static void
count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
     register rtx from, to;
     char *may_not_move;
     rtx *single_usage;
     int *count_ptr;
     int nregs;
{
  register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
  register rtx insn;
  register int count = 0;
  register rtx dest;

  bzero ((char *) last_set, nregs * sizeof (rtx));
  for (insn = from; insn != to; insn = NEXT_INSN (insn))
    {
      if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
	{
	  ++count;

	  /* If requested, record registers that have exactly one use.  */
	  if (single_usage)
	    {
	      find_single_use_in_loop (insn, PATTERN (insn), single_usage);

	      /* Include uses in REG_EQUAL notes.  */
	      if (REG_NOTES (insn))
		find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
	    }

	  if (GET_CODE (PATTERN (insn)) == CLOBBER
	      && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
	    /* Don't move a reg that has an explicit clobber.
	       We might do so sometimes, but it's not worth the pain.  */
	    may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;

	  if (GET_CODE (PATTERN (insn)) == SET
	      || GET_CODE (PATTERN (insn)) == CLOBBER)
	    {
	      dest = SET_DEST (PATTERN (insn));
	      while (GET_CODE (dest) == SUBREG
		     || GET_CODE (dest) == ZERO_EXTRACT
		     || GET_CODE (dest) == SIGN_EXTRACT
		     || GET_CODE (dest) == STRICT_LOW_PART)
		dest = XEXP (dest, 0);
	      if (GET_CODE (dest) == REG)
		{
		  register int regno = REGNO (dest);
		  /* If this is the first setting of this reg
		     in current basic block, and it was set before,
		     it must be set in two basic blocks, so it cannot
		     be moved out of the loop.  */
		  if (n_times_set[regno] > 0 && last_set[regno] == 0)
		    may_not_move[regno] = 1;
		  /* If this is not first setting in current basic block,
		     see if reg was used in between previous one and this.
		     If so, neither one can be moved.  */
		  if (last_set[regno] != 0
		      && reg_used_between_p (dest, last_set[regno], insn))
		    may_not_move[regno] = 1;
		  if (n_times_set[regno] < 127)
		    ++n_times_set[regno];
		  last_set[regno] = insn;
		}
	    }
	  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
	    {
	      register int i;
	      for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
		{
		  register rtx x = XVECEXP (PATTERN (insn), 0, i);
		  if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
		    /* Don't move a reg that has an explicit clobber.
		       It's not worth the pain to try to do it correctly.  */
		    may_not_move[REGNO (XEXP (x, 0))] = 1;

		  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
		    {
		      dest = SET_DEST (x);
		      while (GET_CODE (dest) == SUBREG
			     || GET_CODE (dest) == ZERO_EXTRACT
			     || GET_CODE (dest) == SIGN_EXTRACT
			     || GET_CODE (dest) == STRICT_LOW_PART)
			dest = XEXP (dest, 0);
		      if (GET_CODE (dest) == REG)
			{
			  register int regno = REGNO (dest);
			  if (n_times_set[regno] > 0 && last_set[regno] == 0)
			    may_not_move[regno] = 1;
			  if (last_set[regno] != 0
			      && reg_used_between_p (dest, last_set[regno], insn))
			    may_not_move[regno] = 1;
			  if (n_times_set[regno] < 127)
			    ++n_times_set[regno];
			  last_set[regno] = insn;
			}
		    }
		}
	    }
	}

      if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
	bzero ((char *) last_set, nregs * sizeof (rtx));
    }
  *count_ptr = count;
}
/* Given a loop that is bounded by LOOP_START and LOOP_END
   and that is entered at SCAN_START,
   return 1 if the register set in SET contained in insn INSN is used by
   any insn that precedes INSN in cyclic order starting
   from the loop entry point.

   We don't want to use INSN_LUID here because if we restrict INSN to those
   that have a valid INSN_LUID, it means we cannot move an invariant out
   from an inner loop past two loops.  */

static int
loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
     rtx set, insn, loop_start, scan_start, loop_end;
{
  rtx reg = SET_DEST (set);
  rtx p;

  /* Scan forward checking for register usage.  If we hit INSN, we
     are done.  Otherwise, if we hit LOOP_END, wrap around to LOOP_START.  */
  for (p = scan_start; p != insn; p = NEXT_INSN (p))
    {
      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
	  && reg_overlap_mentioned_p (reg, PATTERN (p)))
	return 1;

      if (p == loop_end)
	p = loop_start;
    }

  return 0;
}
/* A "basic induction variable" or biv is a pseudo reg that is set
   (within this loop) only by incrementing or decrementing it.  */
/* A "general induction variable" or giv is a pseudo reg whose
   value is a linear function of a biv.  */

/* Bivs are recognized by `basic_induction_var';
   Givs by `general_induction_var'.  */
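/* Illustration only (hypothetical source, not part of the pass): in

	for (i = 0; i < n; i++)
	  a[i] = 0;

   the counter `i' is a biv, since within the loop it is changed only by
   the constant increment, while the address computed for a[i], roughly
   base + i * 4 when `a' is an array of 4-byte elements, is a giv: a
   linear function mult_val * biv + add_val with mult_val == 4 and
   add_val == base.  */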
/* Indexed by register number, indicates whether or not register is an
   induction variable, and if so what type.  */

enum iv_mode *reg_iv_type;

/* Indexed by register number, contains pointer to `struct induction'
   if register is an induction variable.  This holds general info for
   all induction variables.  */

struct induction **reg_iv_info;

/* Indexed by register number, contains pointer to `struct iv_class'
   if register is a basic induction variable.  This holds info describing
   the class (a related group) of induction variables that the biv belongs
   to.  */

struct iv_class **reg_biv_class;

/* The head of a list which links together (via the next field)
   every iv class for the current loop.  */

struct iv_class *loop_iv_list;

/* Communication with routines called via `note_stores'.  */

static rtx note_insn;

/* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs.  */

static rtx addr_placeholder;

/* ??? Unfinished optimizations, and possible future optimizations,
   for the strength reduction code.  */
/* ??? There is one more optimization you might be interested in doing: to
   allocate pseudo registers for frequently-accessed memory locations.
   If the same memory location is referenced each time around, it might
   be possible to copy it into a register before and out after.
   This is especially useful when the memory location is a variable which
   is in a stack slot because somewhere its address is taken.  If the
   loop doesn't contain a function call and the variable isn't volatile,
   it is safe to keep the value in a register for the duration of the
   loop.  One tricky thing is that the copying of the value back from the
   register has to be done on all exits from the loop.  You need to check that
   all the exits from the loop go to the same place.  */
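/* A hypothetical source-level sketch of that suggestion (not implemented
   here): for a local whose address has been taken, e.g.

	int s = 0;
	...				s lives in a stack slot because &s
					is used somewhere else
	for (i = 0; i < n; i++)
	  s += a[i];

   every iteration loads and stores the stack slot.  With no calls and no
   volatile references in the loop it would be safe to copy `s' into a
   register before the loop, use the register inside, and copy it back on
   every exit; that is why all exits would have to reach the same
   store-back code.  */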
/* ??? The interaction of biv elimination, and recognition of 'constant'
   bivs, may cause problems.  */

/* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
   performance problems.

   Perhaps don't eliminate things that can be combined with an addressing
   mode.  Find all givs that have the same biv, mult_val, and add_val;
   then for each giv, check to see if its only use dies in a following
   memory address.  If so, generate a new memory address and check to see
   if it is valid.  If it is valid, then store the modified memory address,
   otherwise, mark the giv as not done so that it will get its own iv.  */

/* ??? Could try to optimize branches when it is known that a biv is always
   positive.  */

/* ??? When replacing a biv in a compare insn, we should replace with closest
   giv so that an optimized branch can still be recognized by the combiner,
   e.g. the VAX acb insn.  */

/* ??? Many of the checks involving uid_luid could be simplified if regscan
   was rerun in loop_optimize whenever a register was added or moved.
   Also, some of the optimizations could be a little less conservative.  */
/* Perform strength reduction and induction variable elimination.  */

/* Pseudo registers created during this function will be beyond the last
   valid index in several tables including n_times_set and regno_last_uid.
   This does not cause a problem here, because the added registers cannot be
   givs outside of their loop, and hence will never be reconsidered.
   But scan_loop must check regnos to make sure they are in bounds.  */
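/* A source-level illustration of the two transformations (hypothetical
   code, shown only to motivate the RTL work below): for

	for (i = 0; i < n; i++)
	  a[i] = 0;

   the address of a[i] is a giv of the form base + i * 4.  Strength
   reduction gives that giv its own register and replaces the implied
   multiplication with an addition performed once per iteration:

	p = base;
	for (i = 0; i < n; i++, p += 4)
	  *p = 0;

   and if `i' has no remaining use, induction variable elimination then
   rewrites the exit test in terms of `p' so that `i' disappears.  */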
static void
strength_reduce (scan_start, end, loop_top, insn_count,
		 loop_start, loop_end, unroll_p)
     rtx scan_start;
     rtx end;
     rtx loop_top;
     int insn_count;
     rtx loop_start;
     rtx loop_end;
     int unroll_p;
{
  rtx p;
  rtx set;
  rtx inc_val;
  rtx mult_val;
  rtx dest_reg;
  /* This is 1 if current insn is not executed at least once for every loop
     iteration.  */
  int not_every_iteration = 0;
  /* This is 1 if current insn may be executed more than once for every
     loop iteration.  */
  int maybe_multiple = 0;
  /* Temporary list pointers for traversing loop_iv_list.  */
  struct iv_class *bl, **backbl;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  /* ??? could set this to last value of threshold in move_movables */
  int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
  /* Map of pseudo-register replacements.  */
  rtx *reg_map;
  int call_seen;
  int loop_depth = 0;
  rtx end_insert_before;

  reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
					 * sizeof (enum iv_mode *));
  bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode *));
  reg_iv_info = (struct induction **)
    alloca (max_reg_before_loop * sizeof (struct induction *));
  bzero ((char *) reg_iv_info, (max_reg_before_loop
				* sizeof (struct induction *)));
  reg_biv_class = (struct iv_class **)
    alloca (max_reg_before_loop * sizeof (struct iv_class *));
  bzero ((char *) reg_biv_class, (max_reg_before_loop
				  * sizeof (struct iv_class *)));

  addr_placeholder = gen_reg_rtx (Pmode);

  /* Save insn immediately after the loop_end.  Insns inserted after loop_end
     must be put before this insn, so that they will appear in the right
     order (i.e. loop order).

     If loop_end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set end_insert_before to the
     note.  */
  if (NEXT_INSN (loop_end) != 0)
    end_insert_before = NEXT_INSN (loop_end);
  else
    end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);

  /* Scan through loop to find all possible bivs.  */

  p = scan_start;
  while (1)
    {
      p = NEXT_INSN (p);
      /* At end of a straight-in loop, we are done.
	 At end of a loop entered at the bottom, scan the top.  */
      if (p == scan_start)
	break;
      if (p == end)
	{
	  if (loop_top != 0)
	    p = loop_top;
	  else
	    break;
	  if (p == scan_start)
	    break;
	}

      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG)
	{
	  dest_reg = SET_DEST (set);
	  if (REGNO (dest_reg) < max_reg_before_loop
	      && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
	      && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
	    {
	      if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
				       dest_reg, p, &inc_val, &mult_val))
		{
		  /* It is a possible basic induction variable.
		     Create and initialize an induction structure for it.  */

		  struct induction *v
		    = (struct induction *) alloca (sizeof (struct induction));

		  record_biv (v, p, dest_reg, inc_val, mult_val,
			      not_every_iteration, maybe_multiple);
		  reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
		}
	      else if (REGNO (dest_reg) < max_reg_before_loop)
		reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
	    }
	}

      /* Past CODE_LABEL, we get to insns that may be executed multiple
	 times.  The only way we can be sure that they can't is if every
	 jump insn between here and the end of the loop either
	 returns, exits the loop, is a forward jump, or is a jump
	 to the loop start.  */

      if (GET_CODE (p) == CODE_LABEL)
	{
	  rtx insn = p;

	  maybe_multiple = 0;

	  while (1)
	    {
	      insn = NEXT_INSN (insn);
	      if (insn == scan_start)
		break;
	      if (insn == end)
		{
		  if (loop_top != 0)
		    insn = loop_top;
		  else
		    break;
		  if (insn == scan_start)
		    break;
		}

	      if (GET_CODE (insn) == JUMP_INSN
		  && GET_CODE (PATTERN (insn)) != RETURN
		  && (! condjump_p (insn)
		      || (JUMP_LABEL (insn) != 0
			  && JUMP_LABEL (insn) != scan_start
			  && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
			      || INSN_UID (insn) >= max_uid_for_loop
			      || (INSN_LUID (JUMP_LABEL (insn))
				  < INSN_LUID (insn))))))
		{
		  maybe_multiple = 1;
		  break;
		}
	    }
	}

      /* Past a jump, we get to insns for which we can't count
	 on whether they will be executed during each iteration.  */
      /* This code appears twice in strength_reduce.  There is also similar
	 code in scan_loop.  */
      if (GET_CODE (p) == JUMP_INSN
	  /* If we enter the loop in the middle, and scan around to the
	     beginning, don't set not_every_iteration for that.
	     This can be any kind of jump, since we want to know if insns
	     will be executed if the loop is executed.  */
	  && ! (JUMP_LABEL (p) == loop_top
		&& ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
		    || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
	{
	  rtx label = 0;

	  /* If this is a jump outside the loop, then it also doesn't
	     matter.  Check to see if the target of this branch is on the
	     loop_number_exit_labels list.  */

	  for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
	       label;
	       label = LABEL_NEXTREF (label))
	    if (XEXP (label, 0) == JUMP_LABEL (p))
	      break;

	  if (! label)
	    not_every_iteration = 1;
	}

      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed each iteration: logically, the loop begins here
	     even though the exit code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    not_every_iteration = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}

      /* Unlike in the code motion pass where MAYBE_NEVER indicates that
	 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
	 or not an insn is known to be executed each iteration of the
	 loop, whether or not any iterations are known to occur.

	 Therefore, if we have just passed a label and have no more labels
	 between here and the test insn of the loop, we know these insns
	 will be executed each iteration.  */

      if (not_every_iteration && GET_CODE (p) == CODE_LABEL
	  && no_labels_between_p (p, loop_end))
	not_every_iteration = 0;
    }
  /* Scan loop_iv_list to remove all regs that proved not to be bivs.
     Make a sanity check against n_times_set.  */
  for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
    {
      if (reg_iv_type[bl->regno] != BASIC_INDUCT
	  /* Above happens if register modified by subreg, etc.  */
	  /* Make sure it is not recognized as a basic induction var: */
	  || n_times_set[bl->regno] != bl->biv_count
	  /* If never incremented, it is invariant that we decided not to
	     move.  So leave it alone.  */
	  || ! bl->incremented)
	{
	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
		     bl->regno,
		     (reg_iv_type[bl->regno] != BASIC_INDUCT
		      ? "not induction variable"
		      : (! bl->incremented ? "never incremented"
			 : "count error")));

	  reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
	  *backbl = bl->next;
	}
      else
	{
	  backbl = &bl->next;

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
	}
    }

  /* Exit if there are no bivs.  */
  if (! loop_iv_list)
    {
      /* Can still unroll the loop anyways, but indicate that there is no
	 strength reduction info available.  */
      if (unroll_p)
	unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);

      return;
    }

  /* Find initial value for each biv by searching backwards from loop_start,
     halting at first label.  Also record any test condition.  */

  call_seen = 0;
  for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
    {
      rtx test;

      if (GET_CODE (p) == CALL_INSN)
	call_seen = 1;

      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	note_stores (PATTERN (p), record_initial);

      /* Record any test of a biv that branches around the loop if no store
	 between it and the start of loop.  We only care about tests with
	 constants and registers and only certain of those.  */
      if (GET_CODE (p) == JUMP_INSN
	  && JUMP_LABEL (p) != 0
	  && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
	  && (test = get_condition_for_loop (p)) != 0
	  && GET_CODE (XEXP (test, 0)) == REG
	  && REGNO (XEXP (test, 0)) < max_reg_before_loop
	  && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
	  && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
	  && bl->init_insn == 0)
	{
	  /* If an NE test, we have an initial value!  */
	  if (GET_CODE (test) == NE)
	    {
	      bl->init_insn = p;
	      bl->init_set = gen_rtx_SET (VOIDmode,
					  XEXP (test, 0), XEXP (test, 1));
	    }
	  else
	    bl->initial_test = test;
	}
    }

  /* Look at each biv and see if we can say anything better about its
     initial value from any initializing insns set up above.  (This is done
     in two passes to avoid missing SETs in a PARALLEL.)  */
  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      rtx src;
      rtx note;

      if (! bl->init_insn)
	continue;

      /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
	 is a constant, use the value of that.  */
      if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
	   && CONSTANT_P (XEXP (note, 0)))
	  || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
	      && CONSTANT_P (XEXP (note, 0))))
	src = XEXP (note, 0);
      else
	src = SET_SRC (bl->init_set);

      if (loop_dump_stream)
	fprintf (loop_dump_stream,
		 "Biv %d initialized at insn %d: initial value ",
		 bl->regno, INSN_UID (bl->init_insn));

      if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
	   || GET_MODE (src) == VOIDmode)
	  && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
	{
	  bl->initial_value = src;

	  if (loop_dump_stream)
	    {
	      if (GET_CODE (src) == CONST_INT)
		{
		  fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
		  fputc ('\n', loop_dump_stream);
		}
	      else
		{
		  print_rtl (loop_dump_stream, src);
		  fprintf (loop_dump_stream, "\n");
		}
	    }
	}
      else
	{
	  /* Biv initial value is not simple move,
	     so let it keep initial value of "itself".  */

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "is complex\n");
	}
    }
  /* Search the loop for general induction variables.  */

  /* A register is a giv if: it is only set once, it is a function of a
     biv and a constant (or invariant), and it is not a biv.  */

  not_every_iteration = 0;
  loop_depth = 0;
  p = scan_start;
  while (1)
    {
      p = NEXT_INSN (p);
      /* At end of a straight-in loop, we are done.
	 At end of a loop entered at the bottom, scan the top.  */
      if (p == scan_start)
	break;
      if (p == end)
	{
	  if (loop_top != 0)
	    p = loop_top;
	  else
	    break;
	  if (p == scan_start)
	    break;
	}

      /* Look for a general induction variable in a register.  */
      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && ! may_not_optimize[REGNO (SET_DEST (set))])
	{
	  rtx src_reg;
	  rtx add_val;
	  rtx mult_val;
	  int benefit;
	  rtx regnote = 0;

	  dest_reg = SET_DEST (set);
	  if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
	    continue;

	  if (/* SET_SRC is a giv.  */
	      ((benefit = general_induction_var (SET_SRC (set),
						 &src_reg, &add_val, &mult_val))
	       /* Equivalent expression is a giv.  */
	       || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
		   && (benefit = general_induction_var (XEXP (regnote, 0),
							&src_reg,
							&add_val, &mult_val))))
	      /* Don't try to handle any regs made by loop optimization.
		 We have nothing on them in regno_first_uid, etc.  */
	      && REGNO (dest_reg) < max_reg_before_loop
	      /* Don't recognize a BASIC_INDUCT_VAR here.  */
	      && dest_reg != src_reg
	      /* This must be the only place where the register is set.  */
	      && (n_times_set[REGNO (dest_reg)] == 1
		  /* or all sets must be consecutive and make a giv.  */
		  || (benefit = consec_sets_giv (benefit, p,
						 src_reg, dest_reg,
						 &add_val, &mult_val))))
	    {
	      int count;
	      struct induction *v
		= (struct induction *) alloca (sizeof (struct induction));
	      rtx temp;

	      /* If this is a library call, increase benefit.  */
	      if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		benefit += libcall_benefit (p);

	      /* Skip the consecutive insns, if there are any.  */
	      for (count = n_times_set[REGNO (dest_reg)] - 1;
		   count > 0; count--)
		{
		  /* If first insn of libcall sequence, skip to end.
		     Do this at start of loop, since INSN is guaranteed to
		     be an insn here.  */
		  if (GET_CODE (p) != NOTE
		      && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
		    p = XEXP (temp, 0);

		  do p = NEXT_INSN (p);
		  while (GET_CODE (p) == NOTE);
		}

	      record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
			  DEST_REG, not_every_iteration, NULL_PTR, loop_start,
			  loop_end);
	    }
	}

#ifndef DONT_REDUCE_ADDR
      /* Look for givs which are memory addresses.  */
      /* This resulted in worse code on a VAX 8600.  I wonder if it
	 still does.  */
      if (GET_CODE (p) == INSN)
	find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
		       loop_end);
#endif

      /* Update the status of whether giv can derive other givs.  This can
	 change when we pass a label or an insn that updates a biv.  */
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CODE_LABEL)
	update_giv_derive (p);

      /* Past a jump, we get to insns for which we can't count
	 on whether they will be executed during each iteration.  */
      /* This code appears twice in strength_reduce.  There is also similar
	 code in scan_loop.  */
      if (GET_CODE (p) == JUMP_INSN
	  /* If we enter the loop in the middle, and scan around to the
	     beginning, don't set not_every_iteration for that.
	     This can be any kind of jump, since we want to know if insns
	     will be executed if the loop is executed.  */
	  && ! (JUMP_LABEL (p) == loop_top
		&& ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
		    || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
	{
	  rtx label = 0;

	  /* If this is a jump outside the loop, then it also doesn't
	     matter.  Check to see if the target of this branch is on the
	     loop_number_exit_labels list.  */

	  for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
	       label;
	       label = LABEL_NEXTREF (label))
	    if (XEXP (label, 0) == JUMP_LABEL (p))
	      break;

	  if (! label)
	    not_every_iteration = 1;
	}

      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed each iteration: logically, the loop begins here
	     even though the exit code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    not_every_iteration = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}

      /* Unlike in the code motion pass where MAYBE_NEVER indicates that
	 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
	 or not an insn is known to be executed each iteration of the
	 loop, whether or not any iterations are known to occur.

	 Therefore, if we have just passed a label and have no more labels
	 between here and the test insn of the loop, we know these insns
	 will be executed each iteration.  */

      if (not_every_iteration && GET_CODE (p) == CODE_LABEL
	  && no_labels_between_p (p, loop_end))
	not_every_iteration = 0;
    }
3903 /* Try to calculate and save the number of loop iterations. This is
3904 set to zero if the actual number can not be calculated. This must
3905 be called after all giv's have been identified, since otherwise it may
3906 fail if the iteration variable is a giv. */
3908 loop_n_iterations
= loop_iterations (loop_start
, loop_end
);
3910 /* Now for each giv for which we still don't know whether or not it is
3911 replaceable, check to see if it is replaceable because its final value
3912 can be calculated. This must be done after loop_iterations is called,
3913 so that final_giv_value will work correctly. */
3915 for (bl
= loop_iv_list
; bl
; bl
= bl
->next
)
3917 struct induction
*v
;
3919 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
3920 if (! v
->replaceable
&& ! v
->not_replaceable
)
3921 check_final_value (v
, loop_start
, loop_end
);
3924 /* Try to prove that the loop counter variable (if any) is always
3925 nonnegative; if so, record that fact with a REG_NONNEG note
3926 so that "decrement and branch until zero" insn can be used. */
3927 check_dbra_loop (loop_end
, insn_count
, loop_start
);
3930 /* record loop-variables relevant for BCT optimization before unrolling
3931 the loop. Unrolling may update part of this information, and the
3932 correct data will be used for generating the BCT. */
3933 #ifdef HAVE_decrement_and_branch_on_count
3934 if (HAVE_decrement_and_branch_on_count
)
3935 analyze_loop_iterations (loop_start
, loop_end
);
3939 /* Create reg_map to hold substitutions for replaceable giv regs. */
3940 reg_map
= (rtx
*) alloca (max_reg_before_loop
* sizeof (rtx
));
3941 bzero ((char *) reg_map
, max_reg_before_loop
* sizeof (rtx
));
3943 /* Examine each iv class for feasibility of strength reduction/induction
3944 variable elimination. */
3946 for (bl
= loop_iv_list
; bl
; bl
= bl
->next
)
3948 struct induction
*v
;
3951 rtx final_value
= 0;
3953 /* Test whether it will be possible to eliminate this biv
3954 provided all givs are reduced. This is possible if either
3955 the reg is not used outside the loop, or we can compute
3956 what its final value will be.
3958 For architectures with a decrement_and_branch_until_zero insn,
3959 don't do this if we put a REG_NONNEG note on the endtest for
3962 /* Compare against bl->init_insn rather than loop_start.
3963 We aren't concerned with any uses of the biv between
3964 init_insn and loop_start since these won't be affected
3965 by the value of the biv elsewhere in the function, so
3966 long as init_insn doesn't use the biv itself.
3967 March 14, 1989 -- self@bayes.arc.nasa.gov */
3969 if ((uid_luid
[REGNO_LAST_UID (bl
->regno
)] < INSN_LUID (loop_end
)
3971 && INSN_UID (bl
->init_insn
) < max_uid_for_loop
3972 && uid_luid
[REGNO_FIRST_UID (bl
->regno
)] >= INSN_LUID (bl
->init_insn
)
3973 #ifdef HAVE_decrement_and_branch_until_zero
3976 && ! reg_mentioned_p (bl
->biv
->dest_reg
, SET_SRC (bl
->init_set
)))
3977 || ((final_value
= final_biv_value (bl
, loop_start
, loop_end
))
3978 #ifdef HAVE_decrement_and_branch_until_zero
3982 bl
->eliminable
= maybe_eliminate_biv (bl
, loop_start
, end
, 0,
3983 threshold
, insn_count
);
3986 if (loop_dump_stream
)
3988 fprintf (loop_dump_stream
,
3989 "Cannot eliminate biv %d.\n",
3991 fprintf (loop_dump_stream
,
3992 "First use: insn %d, last use: insn %d.\n",
3993 REGNO_FIRST_UID (bl
->regno
),
3994 REGNO_LAST_UID (bl
->regno
));
3998 /* Combine all giv's for this iv_class. */
4001 /* This will be true at the end, if all givs which depend on this
4002 biv have been strength reduced.
4003 We can't (currently) eliminate the biv unless this is so. */
4006 /* Check each giv in this class to see if we will benefit by reducing
4007 it. Skip giv's combined with others. */
4008 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
4010 struct induction
*tv
;
4012 if (v
->ignore
|| v
->same
)
4015 benefit
= v
->benefit
;
4017 /* Reduce benefit if not replaceable, since we will insert
4018 a move-insn to replace the insn that calculates this giv.
4019 Don't do this unless the giv is a user variable, since it
4020 will often be marked non-replaceable because of the duplication
4021 of the exit code outside the loop. In such a case, the copies
4022 we insert are dead and will be deleted. So they don't have
4023 a cost. Similar situations exist. */
4024 /* ??? The new final_[bg]iv_value code does a much better job
4025 of finding replaceable giv's, and hence this code may no longer
4027 if (! v
->replaceable
&& ! bl
->eliminable
4028 && REG_USERVAR_P (v
->dest_reg
))
4029 benefit
-= copy_cost
;
	  /* Decrease the benefit to count the add-insns that we will
	     insert to increment the reduced reg for the giv.  */
	  benefit -= add_cost * bl->biv_count;
4035 /* Decide whether to strength-reduce this giv or to leave the code
4036 unchanged (recompute it from the biv each time it is used).
4037 This decision can be made independently for each giv. */
	  /* Attempt to guess whether autoincrement will handle some of the
	     new add insns; if so, increase BENEFIT (undo the subtraction of
	     add_cost that was done above).  */
	  if (v->giv_type == DEST_ADDR
	      && GET_CODE (v->mult_val) == CONST_INT)
	    {
#if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
	      if (INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
		benefit += add_cost * bl->biv_count;
#endif
#if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
	      if (-INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
		benefit += add_cost * bl->biv_count;
#endif
	    }
4057 /* If an insn is not to be strength reduced, then set its ignore
4058 flag, and clear all_reduced. */
4060 /* A giv that depends on a reversed biv must be reduced if it is
4061 used after the loop exit, otherwise, it would have the wrong
4062 value after the loop exit. To make it simple, just reduce all
4063 of such giv's whether or not we know they are used after the loop
4066 if ( ! flag_reduce_all_givs
&& v
->lifetime
* threshold
* benefit
< insn_count
4069 if (loop_dump_stream
)
4070 fprintf (loop_dump_stream
,
4071 "giv of insn %d not worth while, %d vs %d.\n",
4073 v
->lifetime
* threshold
* benefit
, insn_count
);
4079 /* Check that we can increment the reduced giv without a
4080 multiply insn. If not, reject it. */
	  for (tv = bl->biv; tv; tv = tv->next_iv)
	    if (tv->mult_val == const1_rtx
		&& ! product_cheap_p (tv->add_val, v->mult_val))
		if (loop_dump_stream)
		  fprintf (loop_dump_stream,
			   "giv of insn %d: would need a multiply.\n",
			   INSN_UID (v->insn));
      /* Reduce each giv that we decided to reduce.  */

      for (v = bl->giv; v; v = v->next_iv)
	{
	  struct induction *tv;
	  if (! v->ignore && v->same == 0)
	    {
	      int auto_inc_opt = 0;

	      v->new_reg = gen_reg_rtx (v->mode);
	      /* If the target has auto-increment addressing modes, and
		 this is an address giv, then try to put the increment
		 immediately after its use, so that flow can create an
		 auto-increment addressing mode.  */
	      if (v->giv_type == DEST_ADDR && bl->biv_count == 1
		  && bl->biv->always_executed && ! bl->biv->maybe_multiple
		  /* We don't handle reversed biv's because bl->biv->insn
		     does not have a valid INSN_LUID.  */
		  && v->always_executed && ! v->maybe_multiple)
		  /* If other giv's have been combined with this one, then
		     this will work only if all uses of the other giv's occur
		     before this giv's insn.  This is difficult to check.

		     We simplify this by looking for the common case where
		     there is one DEST_REG giv, and this giv's insn is the
		     last use of the dest_reg of that DEST_REG giv.  If the
		     increment occurs after the address giv, then we can
		     perform the optimization.  (Otherwise, the increment
		     would have to go before other_giv, and we would not be
		     able to combine it with the address giv to get an
		     auto-inc address.)  */
4132 if (v
->combined_with
)
4134 struct induction
*other_giv
= 0;
4136 for (tv
= bl
->giv
; tv
; tv
= tv
->next_iv
)
4144 if (! tv
&& other_giv
4145 && REGNO (other_giv
->dest_reg
) < max_reg_before_loop
4146 && (REGNO_LAST_UID (REGNO (other_giv
->dest_reg
))
4147 == INSN_UID (v
->insn
))
4148 && INSN_LUID (v
->insn
) < INSN_LUID (bl
->biv
->insn
))
		  /* Check for the case where the increment is before the
		     address giv.  Do this test in "loop order".  */
4153 else if ((INSN_LUID (v
->insn
) > INSN_LUID (bl
->biv
->insn
)
4154 && (INSN_LUID (v
->insn
) < INSN_LUID (scan_start
)
4155 || (INSN_LUID (bl
->biv
->insn
)
4156 > INSN_LUID (scan_start
))))
4157 || (INSN_LUID (v
->insn
) < INSN_LUID (scan_start
)
4158 && (INSN_LUID (scan_start
)
4159 < INSN_LUID (bl
->biv
->insn
))))
4168 /* We can't put an insn immediately after one setting
4169 cc0, or immediately before one using cc0. */
4170 if ((auto_inc_opt
== 1 && sets_cc0_p (PATTERN (v
->insn
)))
4171 || (auto_inc_opt
== -1
4172 && (prev
= prev_nonnote_insn (v
->insn
)) != 0
4173 && GET_RTX_CLASS (GET_CODE (prev
)) == 'i'
4174 && sets_cc0_p (PATTERN (prev
))))
4180 v
->auto_inc_opt
= 1;
	      /* For each place where the biv is incremented, add an insn
		 to increment the new, reduced reg for the giv.  */
	      for (tv = bl->biv; tv; tv = tv->next_iv)
		    insert_before = tv->insn;
		  else if (auto_inc_opt == 1)
		    insert_before = NEXT_INSN (v->insn);
		  else
		    insert_before = v->insn;
		  if (tv->mult_val == const1_rtx)
		    emit_iv_add_mult (tv->add_val, v->mult_val,
				      v->new_reg, v->new_reg, insert_before);
		  else /* tv->mult_val == const0_rtx */
		    /* A multiply is acceptable here
		       since this is presumed to be seldom executed.  */
		    emit_iv_add_mult (tv->add_val, v->mult_val,
				      v->add_val, v->new_reg, insert_before);
	      /* Add code at loop start to initialize giv's reduced reg.  */

	      emit_iv_add_mult (bl->initial_value, v->mult_val,
				v->add_val, v->new_reg, loop_start);
4214 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4217 For each giv register that can be reduced now: if replaceable,
4218 substitute reduced reg wherever the old giv occurs;
4219 else add new move insn "giv_reg = reduced_reg".
4221 Also check for givs whose first use is their definition and whose
4222 last use is the definition of another giv. If so, it is likely
4223 dead and should not be used to eliminate a biv. */
4224 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
4226 if (v
->same
&& v
->same
->ignore
)
4232 if (v
->giv_type
== DEST_REG
4233 && REGNO_FIRST_UID (REGNO (v
->dest_reg
)) == INSN_UID (v
->insn
))
4235 struct induction
*v1
;
4237 for (v1
= bl
->giv
; v1
; v1
= v1
->next_iv
)
4238 if (REGNO_LAST_UID (REGNO (v
->dest_reg
)) == INSN_UID (v1
->insn
))
4242 /* Update expression if this was combined, in case other giv was
4245 v
->new_reg
= replace_rtx (v
->new_reg
,
4246 v
->same
->dest_reg
, v
->same
->new_reg
);
4248 if (v
->giv_type
== DEST_ADDR
)
4249 /* Store reduced reg as the address in the memref where we found
4251 validate_change (v
->insn
, v
->location
, v
->new_reg
, 0);
4252 else if (v
->replaceable
)
4254 reg_map
[REGNO (v
->dest_reg
)] = v
->new_reg
;
4257 /* I can no longer duplicate the original problem. Perhaps
4258 this is unnecessary now? */
4260 /* Replaceable; it isn't strictly necessary to delete the old
4261 insn and emit a new one, because v->dest_reg is now dead.
4263 However, especially when unrolling loops, the special
4264 handling for (set REG0 REG1) in the second cse pass may
4265 make v->dest_reg live again. To avoid this problem, emit
4266 an insn to set the original giv reg from the reduced giv.
4267 We can not delete the original insn, since it may be part
4268 of a LIBCALL, and the code in flow that eliminates dead
4269 libcalls will fail if it is deleted. */
4270 emit_insn_after (gen_move_insn (v
->dest_reg
, v
->new_reg
),
4276 /* Not replaceable; emit an insn to set the original giv reg from
4277 the reduced giv, same as above. */
4278 emit_insn_after (gen_move_insn (v
->dest_reg
, v
->new_reg
),
4282 /* When a loop is reversed, givs which depend on the reversed
4283 biv, and which are live outside the loop, must be set to their
4284 correct final value. This insn is only needed if the giv is
4285 not replaceable. The correct final value is the same as the
4286 value that the giv starts the reversed loop with. */
4287 if (bl
->reversed
&& ! v
->replaceable
)
4288 emit_iv_add_mult (bl
->initial_value
, v
->mult_val
,
4289 v
->add_val
, v
->dest_reg
, end_insert_before
);
4290 else if (v
->final_value
)
4294 /* If the loop has multiple exits, emit the insn before the
4295 loop to ensure that it will always be executed no matter
4296 how the loop exits. Otherwise, emit the insn after the loop,
4297 since this is slightly more efficient. */
4298 if (loop_number_exit_count
[uid_loop_num
[INSN_UID (loop_start
)]])
4299 insert_before
= loop_start
;
4301 insert_before
= end_insert_before
;
4302 emit_insn_before (gen_move_insn (v
->dest_reg
, v
->final_value
),
4306 /* If the insn to set the final value of the giv was emitted
4307 before the loop, then we must delete the insn inside the loop
4308 that sets it. If this is a LIBCALL, then we must delete
4309 every insn in the libcall. Note, however, that
4310 final_giv_value will only succeed when there are multiple
4311 exits if the giv is dead at each exit, hence it does not
4312 matter that the original insn remains because it is dead
4314 /* Delete the insn inside the loop that sets the giv since
4315 the giv is now set before (or after) the loop. */
4316 delete_insn (v
->insn
);
4320 if (loop_dump_stream
)
4322 fprintf (loop_dump_stream
, "giv at %d reduced to ",
4323 INSN_UID (v
->insn
));
4324 print_rtl (loop_dump_stream
, v
->new_reg
);
4325 fprintf (loop_dump_stream
, "\n");
4329 /* All the givs based on the biv bl have been reduced if they
4332 /* For each giv not marked as maybe dead that has been combined with a
4333 second giv, clear any "maybe dead" mark on that second giv.
4334 v->new_reg will either be or refer to the register of the giv it
4337 Doing this clearing avoids problems in biv elimination where a
4338 giv's new_reg is a complex value that can't be put in the insn but
4339 the giv combined with (with a reg as new_reg) is marked maybe_dead.
4340 Since the register will be used in either case, we'd prefer it be
4341 used from the simpler giv. */
4343 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
4344 if (! v
->maybe_dead
&& v
->same
)
4345 v
->same
->maybe_dead
= 0;
4347 /* Try to eliminate the biv, if it is a candidate.
4348 This won't work if ! all_reduced,
4349 since the givs we planned to use might not have been reduced.
4351 We have to be careful that we didn't initially think we could eliminate
4352 this biv because of a giv that we now think may be dead and shouldn't
4353 be used as a biv replacement.
4355 Also, there is the possibility that we may have a giv that looks
4356 like it can be used to eliminate a biv, but the resulting insn
4357 isn't valid. This can happen, for example, on the 88k, where a
4358 JUMP_INSN can compare a register only with zero. Attempts to
4359 replace it with a compare with a constant will fail.
4361 Note that in cases where this call fails, we may have replaced some
4362 of the occurrences of the biv with a giv, but no harm was done in
4363 doing so in the rare cases where it can occur. */
4365 if (all_reduced
== 1 && bl
->eliminable
4366 && maybe_eliminate_biv (bl
, loop_start
, end
, 1,
4367 threshold
, insn_count
))
4370 /* ?? If we created a new test to bypass the loop entirely,
4371 or otherwise drop straight in, based on this test, then
4372 we might want to rewrite it also. This way some later
4373 pass has more hope of removing the initialization of this
4376 /* If final_value != 0, then the biv may be used after loop end
4377 and we must emit an insn to set it just in case.
4379 Reversed bivs already have an insn after the loop setting their
4380 value, so we don't need another one. We can't calculate the
4381 proper final value for such a biv here anyways. */
4382 if (final_value
!= 0 && ! bl
->reversed
)
4386 /* If the loop has multiple exits, emit the insn before the
4387 loop to ensure that it will always be executed no matter
4388 how the loop exits. Otherwise, emit the insn after the
4389 loop, since this is slightly more efficient. */
4390 if (loop_number_exit_count
[uid_loop_num
[INSN_UID (loop_start
)]])
4391 insert_before
= loop_start
;
4393 insert_before
= end_insert_before
;
4395 emit_insn_before (gen_move_insn (bl
->biv
->dest_reg
, final_value
),
	  /* Delete all of the instructions inside the loop which set
	     the biv, as they are all dead.  It is safe to delete them,
	     because an insn setting a biv will never be part of a libcall.  */
	  /* However, deleting them will invalidate the regno_last_uid info,
	     so keeping them around is more convenient.  Final_biv_value
	     will only succeed when there are multiple exits if the biv
	     is dead at each exit, hence it does not matter that the original
	     insn remains, because it is dead anyway.  */
4408 for (v
= bl
->biv
; v
; v
= v
->next_iv
)
4409 delete_insn (v
->insn
);
4412 if (loop_dump_stream
)
4413 fprintf (loop_dump_stream
, "Reg %d: biv eliminated\n",
  /* Go through all the instructions in the loop, making all the
     register substitutions scheduled in REG_MAP.  */

  for (p = loop_start; p != end; p = NEXT_INSN (p))
    if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	|| GET_CODE (p) == CALL_INSN)
      {
	replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
	replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
      }
  /* Unroll loops from within strength reduction so that we can use the
     induction variable information that strength_reduce has already
     collected.  */

  unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
  /* Instrument the loop with a BCT insn.  */
#ifdef HAVE_decrement_and_branch_on_count
  if (HAVE_decrement_and_branch_on_count)
    insert_bct (loop_start, loop_end);
#endif  /* HAVE_decrement_and_branch_on_count */
  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\n");
4449 /* Return 1 if X is a valid source for an initial value (or as value being
4450 compared against in an initial test).
4452 X must be either a register or constant and must not be clobbered between
4453 the current insn and the start of the loop.
4455 INSN is the insn containing X. */
4458 valid_initial_value_p (x
, insn
, call_seen
, loop_start
)
4467 /* Only consider pseudos we know about initialized in insns whose luids
4469 if (GET_CODE (x
) != REG
4470 || REGNO (x
) >= max_reg_before_loop
)
4473 /* Don't use call-clobbered registers across a call which clobbers it. On
4474 some machines, don't use any hard registers at all. */
4475 if (REGNO (x
) < FIRST_PSEUDO_REGISTER
4476 && (SMALL_REGISTER_CLASSES
4477 || (call_used_regs
[REGNO (x
)] && call_seen
)))
4480 /* Don't use registers that have been clobbered before the start of the
4482 if (reg_set_between_p (x
, insn
, loop_start
))
4488 /* Scan X for memory refs and check each memory address
4489 as a possible giv. INSN is the insn whose pattern X comes from.
4490 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4491 every loop iteration. */
4494 find_mem_givs (x
, insn
, not_every_iteration
, loop_start
, loop_end
)
4497 int not_every_iteration
;
4498 rtx loop_start
, loop_end
;
4501 register enum rtx_code code
;
4507 code
= GET_CODE (x
);
4531 benefit
= general_induction_var (XEXP (x
, 0),
4532 &src_reg
, &add_val
, &mult_val
);
4534 /* Don't make a DEST_ADDR giv with mult_val == 1 && add_val == 0.
4535 Such a giv isn't useful. */
4536 if (benefit
> 0 && (mult_val
!= const1_rtx
|| add_val
!= const0_rtx
))
4538 /* Found one; record it. */
4540 = (struct induction
*) oballoc (sizeof (struct induction
));
4542 record_giv (v
, insn
, src_reg
, addr_placeholder
, mult_val
,
4543 add_val
, benefit
, DEST_ADDR
, not_every_iteration
,
4544 &XEXP (x
, 0), loop_start
, loop_end
);
4546 v
->mem_mode
= GET_MODE (x
);
4555 /* Recursively scan the subexpressions for other mem refs. */
4557 fmt
= GET_RTX_FORMAT (code
);
4558 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
4560 find_mem_givs (XEXP (x
, i
), insn
, not_every_iteration
, loop_start
,
4562 else if (fmt
[i
] == 'E')
4563 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
4564 find_mem_givs (XVECEXP (x
, i
, j
), insn
, not_every_iteration
,
4565 loop_start
, loop_end
);
4568 /* Fill in the data about one biv update.
4569 V is the `struct induction' in which we record the biv. (It is
4570 allocated by the caller, with alloca.)
4571 INSN is the insn that sets it.
4572 DEST_REG is the biv's reg.
4574 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4575 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4576 being set to INC_VAL.
   NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4579 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4580 can be executed more than once per iteration. If MAYBE_MULTIPLE
4581 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4582 executed exactly once per iteration. */
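/* Illustrative example, added for clarity (not part of the original code):
   for a biv update such as "i = i + 4", record_biv is called with
   MULT_VAL == const1_rtx and INC_VAL == (const_int 4); for an outright
   assignment such as "i = n" with n loop-invariant, MULT_VAL is const0_rtx
   and INC_VAL is the rtx for n.  */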
4585 record_biv (v
, insn
, dest_reg
, inc_val
, mult_val
,
4586 not_every_iteration
, maybe_multiple
)
4587 struct induction
*v
;
4592 int not_every_iteration
;
4595 struct iv_class
*bl
;
  v->src_reg = dest_reg;
  v->dest_reg = dest_reg;
  v->mult_val = mult_val;
  v->add_val = inc_val;
  v->mode = GET_MODE (dest_reg);
  v->always_computable = ! not_every_iteration;
  v->always_executed = ! not_every_iteration;
  v->maybe_multiple = maybe_multiple;
4607 /* Add this to the reg's iv_class, creating a class
4608 if this is the first incrementation of the reg. */
4610 bl
= reg_biv_class
[REGNO (dest_reg
)];
4613 /* Create and initialize new iv_class. */
4615 bl
= (struct iv_class
*) oballoc (sizeof (struct iv_class
));
4617 bl
->regno
= REGNO (dest_reg
);
4623 /* Set initial value to the reg itself. */
4624 bl
->initial_value
= dest_reg
;
4625 /* We haven't seen the initializing insn yet */
4628 bl
->initial_test
= 0;
4629 bl
->incremented
= 0;
4633 bl
->total_benefit
= 0;
4635 /* Add this class to loop_iv_list. */
4636 bl
->next
= loop_iv_list
;
4639 /* Put it in the array of biv register classes. */
4640 reg_biv_class
[REGNO (dest_reg
)] = bl
;
4643 /* Update IV_CLASS entry for this biv. */
4644 v
->next_iv
= bl
->biv
;
4647 if (mult_val
== const1_rtx
)
4648 bl
->incremented
= 1;
4650 if (loop_dump_stream
)
4652 fprintf (loop_dump_stream
,
4653 "Insn %d: possible biv, reg %d,",
4654 INSN_UID (insn
), REGNO (dest_reg
));
4655 if (GET_CODE (inc_val
) == CONST_INT
)
4657 fprintf (loop_dump_stream
, " const =");
4658 fprintf (loop_dump_stream
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (inc_val
));
4659 fputc ('\n', loop_dump_stream
);
4663 fprintf (loop_dump_stream
, " const = ");
4664 print_rtl (loop_dump_stream
, inc_val
);
4665 fprintf (loop_dump_stream
, "\n");
4670 /* Fill in the data about one giv.
4671 V is the `struct induction' in which we record the giv. (It is
4672 allocated by the caller, with alloca.)
4673 INSN is the insn that sets it.
4674 BENEFIT estimates the savings from deleting this insn.
4675 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4676 into a register or is used as a memory address.
4678 SRC_REG is the biv reg which the giv is computed from.
4679 DEST_REG is the giv's reg (if the giv is stored in a reg).
4680 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4681 LOCATION points to the place where this giv's value appears in INSN. */
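/* Illustrative example, added for clarity (not part of the original code):
   for an insn computing "r = i * 4 + 8" where i is a biv, record_giv is
   called with TYPE == DEST_REG, MULT_VAL == (const_int 4) and
   ADD_VAL == (const_int 8); for a memory reference whose address is
   "i * 4 + base", it is called with TYPE == DEST_ADDR and LOCATION
   pointing at the address inside the MEM.  */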
4684 record_giv (v
, insn
, src_reg
, dest_reg
, mult_val
, add_val
, benefit
,
4685 type
, not_every_iteration
, location
, loop_start
, loop_end
)
4686 struct induction
*v
;
4690 rtx mult_val
, add_val
;
4693 int not_every_iteration
;
4695 rtx loop_start
, loop_end
;
4697 struct induction
*b
;
4698 struct iv_class
*bl
;
  rtx set = single_set (insn);

  v->src_reg = src_reg;
  v->dest_reg = dest_reg;
  v->mult_val = mult_val;
  v->add_val = add_val;
  v->benefit = benefit;
  v->location = location;
  v->combined_with = 0;
  v->maybe_multiple = 0;
  v->derive_adjustment = 0;
  v->auto_inc_opt = 0;
4723 /* The v->always_computable field is used in update_giv_derive, to
4724 determine whether a giv can be used to derive another giv. For a
4725 DEST_REG giv, INSN computes a new value for the giv, so its value
     isn't computable if INSN isn't executed every iteration.
4727 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4728 it does not compute a new value. Hence the value is always computable
4729 regardless of whether INSN is executed each iteration. */
  if (type == DEST_ADDR)
    v->always_computable = 1;
  else
    v->always_computable = ! not_every_iteration;

  v->always_executed = ! not_every_iteration;
4738 if (type
== DEST_ADDR
)
4740 v
->mode
= GET_MODE (*location
);
4744 else /* type == DEST_REG */
4746 v
->mode
= GET_MODE (SET_DEST (set
));
4748 v
->lifetime
= (uid_luid
[REGNO_LAST_UID (REGNO (dest_reg
))]
4749 - uid_luid
[REGNO_FIRST_UID (REGNO (dest_reg
))]);
4751 v
->times_used
= n_times_used
[REGNO (dest_reg
)];
4753 /* If the lifetime is zero, it means that this register is
4754 really a dead store. So mark this as a giv that can be
4755 ignored. This will not prevent the biv from being eliminated. */
4756 if (v
->lifetime
== 0)
4759 reg_iv_type
[REGNO (dest_reg
)] = GENERAL_INDUCT
;
4760 reg_iv_info
[REGNO (dest_reg
)] = v
;
4763 /* Add the giv to the class of givs computed from one biv. */
4765 bl
= reg_biv_class
[REGNO (src_reg
)];
4768 v
->next_iv
= bl
->giv
;
4770 /* Don't count DEST_ADDR. This is supposed to count the number of
4771 insns that calculate givs. */
4772 if (type
== DEST_REG
)
4774 bl
->total_benefit
+= benefit
;
4777 /* Fatal error, biv missing for this giv? */
4780 if (type
== DEST_ADDR
)
4784 /* The giv can be replaced outright by the reduced register only if all
4785 of the following conditions are true:
4786 - the insn that sets the giv is always executed on any iteration
4787 on which the giv is used at all
4788 (there are two ways to deduce this:
4789 either the insn is executed on every iteration,
4790 or all uses follow that insn in the same basic block),
4791 - the giv is not used outside the loop
4792 - no assignments to the biv occur during the giv's lifetime. */
4794 if (REGNO_FIRST_UID (REGNO (dest_reg
)) == INSN_UID (insn
)
4795 /* Previous line always fails if INSN was moved by loop opt. */
4796 && uid_luid
[REGNO_LAST_UID (REGNO (dest_reg
))] < INSN_LUID (loop_end
)
4797 && (! not_every_iteration
4798 || last_use_this_basic_block (dest_reg
, insn
)))
4800 /* Now check that there are no assignments to the biv within the
4801 giv's lifetime. This requires two separate checks. */
4803 /* Check each biv update, and fail if any are between the first
4804 and last use of the giv.
4806 If this loop contains an inner loop that was unrolled, then
4807 the insn modifying the biv may have been emitted by the loop
4808 unrolling code, and hence does not have a valid luid. Just
4809 mark the biv as not replaceable in this case. It is not very
4810 useful as a biv, because it is used in two different loops.
4811 It is very unlikely that we would be able to optimize the giv
	 using this biv anyway.  */
4815 for (b
= bl
->biv
; b
; b
= b
->next_iv
)
4817 if (INSN_UID (b
->insn
) >= max_uid_for_loop
4818 || ((uid_luid
[INSN_UID (b
->insn
)]
4819 >= uid_luid
[REGNO_FIRST_UID (REGNO (dest_reg
))])
4820 && (uid_luid
[INSN_UID (b
->insn
)]
4821 <= uid_luid
[REGNO_LAST_UID (REGNO (dest_reg
))])))
4824 v
->not_replaceable
= 1;
4829 /* If there are any backwards branches that go from after the
4830 biv update to before it, then this giv is not replaceable. */
4832 for (b
= bl
->biv
; b
; b
= b
->next_iv
)
4833 if (back_branch_in_range_p (b
->insn
, loop_start
, loop_end
))
4836 v
->not_replaceable
= 1;
4842 /* May still be replaceable, we don't have enough info here to
4845 v
->not_replaceable
= 0;
4849 if (loop_dump_stream
)
4851 if (type
== DEST_REG
)
4852 fprintf (loop_dump_stream
, "Insn %d: giv reg %d",
4853 INSN_UID (insn
), REGNO (dest_reg
));
4855 fprintf (loop_dump_stream
, "Insn %d: dest address",
4858 fprintf (loop_dump_stream
, " src reg %d benefit %d",
4859 REGNO (src_reg
), v
->benefit
);
4860 fprintf (loop_dump_stream
, " used %d lifetime %d",
4861 v
->times_used
, v
->lifetime
);
4864 fprintf (loop_dump_stream
, " replaceable");
4866 if (GET_CODE (mult_val
) == CONST_INT
)
4868 fprintf (loop_dump_stream
, " mult ");
4869 fprintf (loop_dump_stream
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (mult_val
));
4873 fprintf (loop_dump_stream
, " mult ");
4874 print_rtl (loop_dump_stream
, mult_val
);
4877 if (GET_CODE (add_val
) == CONST_INT
)
4879 fprintf (loop_dump_stream
, " add ");
4880 fprintf (loop_dump_stream
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (add_val
));
4884 fprintf (loop_dump_stream
, " add ");
4885 print_rtl (loop_dump_stream
, add_val
);
4889 if (loop_dump_stream
)
4890 fprintf (loop_dump_stream
, "\n");
4895 /* All this does is determine whether a giv can be made replaceable because
4896 its final value can be calculated. This code can not be part of record_giv
4897 above, because final_giv_value requires that the number of loop iterations
4898 be known, and that can not be accurately calculated until after all givs
4899 have been identified. */
4902 check_final_value (v
, loop_start
, loop_end
)
4903 struct induction
*v
;
4904 rtx loop_start
, loop_end
;
4906 struct iv_class
*bl
;
4907 rtx final_value
= 0;
4909 bl
= reg_biv_class
[REGNO (v
->src_reg
)];
4911 /* DEST_ADDR givs will never reach here, because they are always marked
4912 replaceable above in record_giv. */
4914 /* The giv can be replaced outright by the reduced register only if all
4915 of the following conditions are true:
4916 - the insn that sets the giv is always executed on any iteration
4917 on which the giv is used at all
4918 (there are two ways to deduce this:
4919 either the insn is executed on every iteration,
4920 or all uses follow that insn in the same basic block),
4921 - its final value can be calculated (this condition is different
4922 than the one above in record_giv)
4923 - no assignments to the biv occur during the giv's lifetime. */
4926 /* This is only called now when replaceable is known to be false. */
4927 /* Clear replaceable, so that it won't confuse final_giv_value. */
4931 if ((final_value
= final_giv_value (v
, loop_start
, loop_end
))
4932 && (v
->always_computable
|| last_use_this_basic_block (v
->dest_reg
, v
->insn
)))
4934 int biv_increment_seen
= 0;
4940 /* When trying to determine whether or not a biv increment occurs
4941 during the lifetime of the giv, we can ignore uses of the variable
4942 outside the loop because final_value is true. Hence we can not
4943 use regno_last_uid and regno_first_uid as above in record_giv. */
4945 /* Search the loop to determine whether any assignments to the
4946 biv occur during the giv's lifetime. Start with the insn
4947 that sets the giv, and search around the loop until we come
4948 back to that insn again.
4950 Also fail if there is a jump within the giv's lifetime that jumps
4951 to somewhere outside the lifetime but still within the loop. This
4952 catches spaghetti code where the execution order is not linear, and
4953 hence the above test fails. Here we assume that the giv lifetime
4954 does not extend from one iteration of the loop to the next, so as
4955 to make the test easier. Since the lifetime isn't known yet,
4956 this requires two loops. See also record_giv above. */
4958 last_giv_use
= v
->insn
;
4964 p
= NEXT_INSN (loop_start
);
4968 if (GET_CODE (p
) == INSN
|| GET_CODE (p
) == JUMP_INSN
4969 || GET_CODE (p
) == CALL_INSN
)
4971 if (biv_increment_seen
)
4973 if (reg_mentioned_p (v
->dest_reg
, PATTERN (p
)))
4976 v
->not_replaceable
= 1;
4980 else if (reg_set_p (v
->src_reg
, PATTERN (p
)))
4981 biv_increment_seen
= 1;
4982 else if (reg_mentioned_p (v
->dest_reg
, PATTERN (p
)))
4987 /* Now that the lifetime of the giv is known, check for branches
4988 from within the lifetime to outside the lifetime if it is still
4998 p
= NEXT_INSN (loop_start
);
4999 if (p
== last_giv_use
)
5002 if (GET_CODE (p
) == JUMP_INSN
&& JUMP_LABEL (p
)
5003 && LABEL_NAME (JUMP_LABEL (p
))
5004 && ((INSN_UID (JUMP_LABEL (p
)) >= max_uid_for_loop
)
5005 || (INSN_UID (v
->insn
) >= max_uid_for_loop
)
5006 || (INSN_UID (last_giv_use
) >= max_uid_for_loop
)
5007 || (INSN_LUID (JUMP_LABEL (p
)) < INSN_LUID (v
->insn
)
5008 && INSN_LUID (JUMP_LABEL (p
)) > INSN_LUID (loop_start
))
5009 || (INSN_LUID (JUMP_LABEL (p
)) > INSN_LUID (last_giv_use
)
5010 && INSN_LUID (JUMP_LABEL (p
)) < INSN_LUID (loop_end
))))
5013 v
->not_replaceable
= 1;
5015 if (loop_dump_stream
)
5016 fprintf (loop_dump_stream
,
5017 "Found branch outside giv lifetime.\n");
5024 /* If it is replaceable, then save the final value. */
5026 v
->final_value
= final_value
;
5029 if (loop_dump_stream
&& v
->replaceable
)
5030 fprintf (loop_dump_stream
, "Insn %d: giv reg %d final_value replaceable\n",
5031 INSN_UID (v
->insn
), REGNO (v
->dest_reg
));
5034 /* Update the status of whether a giv can derive other givs.
5036 We need to do something special if there is or may be an update to the biv
5037 between the time the giv is defined and the time it is used to derive
5040 In addition, a giv that is only conditionally set is not allowed to
5041 derive another giv once a label has been passed.
5043 The cases we look at are when a label or an update to a biv is passed. */
5046 update_giv_derive (p
)
5049 struct iv_class
*bl
;
5050 struct induction
*biv
, *giv
;
5054 /* Search all IV classes, then all bivs, and finally all givs.
5056 There are three cases we are concerned with. First we have the situation
5057 of a giv that is only updated conditionally. In that case, it may not
5058 derive any givs after a label is passed.
5060 The second case is when a biv update occurs, or may occur, after the
5061 definition of a giv. For certain biv updates (see below) that are
5062 known to occur between the giv definition and use, we can adjust the
5063 giv definition. For others, or when the biv update is conditional,
5064 we must prevent the giv from deriving any other givs. There are two
5065 sub-cases within this case.
5067 If this is a label, we are concerned with any biv update that is done
5068 conditionally, since it may be done after the giv is defined followed by
5069 a branch here (actually, we need to pass both a jump and a label, but
5070 this extra tracking doesn't seem worth it).
5072 If this is a jump, we are concerned about any biv update that may be
5073 executed multiple times. We are actually only concerned about
5074 backward jumps, but it is probably not worth performing the test
5075 on the jump again here.
5077 If this is a biv update, we must adjust the giv status to show that a
5078 subsequent biv update was performed. If this adjustment cannot be done,
5079 the giv cannot derive further givs. */
5081 for (bl
= loop_iv_list
; bl
; bl
= bl
->next
)
5082 for (biv
= bl
->biv
; biv
; biv
= biv
->next_iv
)
5083 if (GET_CODE (p
) == CODE_LABEL
|| GET_CODE (p
) == JUMP_INSN
5086 for (giv
= bl
->giv
; giv
; giv
= giv
->next_iv
)
5088 /* If cant_derive is already true, there is no point in
5089 checking all of these conditions again. */
5090 if (giv
->cant_derive
)
5093 /* If this giv is conditionally set and we have passed a label,
5094 it cannot derive anything. */
5095 if (GET_CODE (p
) == CODE_LABEL
&& ! giv
->always_computable
)
5096 giv
->cant_derive
= 1;
5098 /* Skip givs that have mult_val == 0, since
5099 they are really invariants. Also skip those that are
5100 replaceable, since we know their lifetime doesn't contain
5102 else if (giv
->mult_val
== const0_rtx
|| giv
->replaceable
)
5105 /* The only way we can allow this giv to derive another
5106 is if this is a biv increment and we can form the product
5107 of biv->add_val and giv->mult_val. In this case, we will
5108 be able to compute a compensation. */
5109 else if (biv
->insn
== p
)
5113 if (biv
->mult_val
== const1_rtx
)
5114 tem
= simplify_giv_expr (gen_rtx_MULT (giv
->mode
,
5119 if (tem
&& giv
->derive_adjustment
)
5120 tem
= simplify_giv_expr (gen_rtx_PLUS (giv
->mode
, tem
,
5121 giv
->derive_adjustment
),
5124 giv
->derive_adjustment
= tem
;
5126 giv
->cant_derive
= 1;
5128 else if ((GET_CODE (p
) == CODE_LABEL
&& ! biv
->always_computable
)
5129 || (GET_CODE (p
) == JUMP_INSN
&& biv
->maybe_multiple
))
5130 giv
->cant_derive
= 1;
5135 /* Check whether an insn is an increment legitimate for a basic induction var.
5136 X is the source of insn P, or a part of it.
5137 MODE is the mode in which X should be interpreted.
5139 DEST_REG is the putative biv, also the destination of the insn.
5140 We accept patterns of these forms:
5141 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5142 REG = INVARIANT + REG
5144 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5145 and store the additive term into *INC_VAL.
5147 If X is an assignment of an invariant into DEST_REG, we set
5148 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5150 We also want to detect a BIV when it corresponds to a variable
5151 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5152 of the variable may be a PLUS that adds a SUBREG of that variable to
5153 an invariant and then sign- or zero-extends the result of the PLUS
   Most GIVs in such cases will be in the promoted mode, since that is
   probably the natural computation mode (and almost certainly the mode
5158 used for addresses) on the machine. So we view the pseudo-reg containing
5159 the variable as the BIV, as if it were simply incremented.
5161 Note that treating the entire pseudo as a BIV will result in making
5162 simple increments to any GIVs based on it. However, if the variable
5163 overflows in its declared mode but not its promoted mode, the result will
5164 be incorrect. This is acceptable if the variable is signed, since
5165 overflows in such cases are undefined, but not if it is unsigned, since
5166 those overflows are defined. So we only check for SIGN_EXTEND and
5169 If we cannot find a biv, we return 0. */
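/* Illustrative example, added for clarity (not part of the original code):
   for an insn whose set is (set (reg i) (plus (reg i) (const_int 4))),
   this returns 1 with *MULT_VAL == const1_rtx and *INC_VAL == (const_int 4);
   for an assignment of an invariant, e.g. (set (reg i) (const_int 10)) in
   the innermost loop, it returns 1 with *MULT_VAL == const0_rtx and
   *INC_VAL == (const_int 10).  */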
5172 basic_induction_var (x
, mode
, dest_reg
, p
, inc_val
, mult_val
)
5174 enum machine_mode mode
;
5180 register enum rtx_code code
;
5184 code
= GET_CODE (x
);
5188 if (XEXP (x
, 0) == dest_reg
5189 || (GET_CODE (XEXP (x
, 0)) == SUBREG
5190 && SUBREG_PROMOTED_VAR_P (XEXP (x
, 0))
5191 && SUBREG_REG (XEXP (x
, 0)) == dest_reg
))
5193 else if (XEXP (x
, 1) == dest_reg
5194 || (GET_CODE (XEXP (x
, 1)) == SUBREG
5195 && SUBREG_PROMOTED_VAR_P (XEXP (x
, 1))
5196 && SUBREG_REG (XEXP (x
, 1)) == dest_reg
))
5201 if (invariant_p (arg
) != 1)
5204 *inc_val
= convert_modes (GET_MODE (dest_reg
), GET_MODE (x
), arg
, 0);
5205 *mult_val
= const1_rtx
;
5209 /* If this is a SUBREG for a promoted variable, check the inner
5211 if (SUBREG_PROMOTED_VAR_P (x
))
5212 return basic_induction_var (SUBREG_REG (x
), GET_MODE (SUBREG_REG (x
)),
5213 dest_reg
, p
, inc_val
, mult_val
);
5217 /* If this register is assigned in the previous insn, look at its
5218 source, but don't go outside the loop or past a label. */
5220 for (insn
= PREV_INSN (p
);
5221 (insn
&& GET_CODE (insn
) == NOTE
5222 && NOTE_LINE_NUMBER (insn
) != NOTE_INSN_LOOP_BEG
);
5223 insn
= PREV_INSN (insn
))
5227 set
= single_set (insn
);
5230 && (SET_DEST (set
) == x
5231 || (GET_CODE (SET_DEST (set
)) == SUBREG
5232 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set
)))
5234 && SUBREG_REG (SET_DEST (set
)) == x
)))
5235 return basic_induction_var (SET_SRC (set
),
5236 (GET_MODE (SET_SRC (set
)) == VOIDmode
5238 : GET_MODE (SET_SRC (set
))),
5241 /* ... fall through ... */
5243 /* Can accept constant setting of biv only when inside inner most loop.
5244 Otherwise, a biv of an inner loop may be incorrectly recognized
5245 as a biv of the outer loop,
5246 causing code to be moved INTO the inner loop. */
5248 if (invariant_p (x
) != 1)
5253 if (loops_enclosed
== 1)
5255 /* Possible bug here? Perhaps we don't know the mode of X. */
5256 *inc_val
= convert_modes (GET_MODE (dest_reg
), mode
, x
, 0);
5257 *mult_val
= const0_rtx
;
5264 return basic_induction_var (XEXP (x
, 0), GET_MODE (XEXP (x
, 0)),
5265 dest_reg
, p
, inc_val
, mult_val
);
5267 /* Similar, since this can be a sign extension. */
5268 for (insn
= PREV_INSN (p
);
5269 (insn
&& GET_CODE (insn
) == NOTE
5270 && NOTE_LINE_NUMBER (insn
) != NOTE_INSN_LOOP_BEG
);
5271 insn
= PREV_INSN (insn
))
5275 set
= single_set (insn
);
5277 if (set
&& SET_DEST (set
) == XEXP (x
, 0)
5278 && GET_CODE (XEXP (x
, 1)) == CONST_INT
5279 && INTVAL (XEXP (x
, 1)) >= 0
5280 && GET_CODE (SET_SRC (set
)) == ASHIFT
5281 && XEXP (x
, 1) == XEXP (SET_SRC (set
), 1))
5282 return basic_induction_var (XEXP (SET_SRC (set
), 0),
5283 GET_MODE (XEXP (x
, 0)),
5284 dest_reg
, insn
, inc_val
, mult_val
);
5292 /* A general induction variable (giv) is any quantity that is a linear
5293 function of a basic induction variable,
5294 i.e. giv = biv * mult_val + add_val.
5295 The coefficients can be any loop invariant quantity.
5296 A giv need not be computed directly from the biv;
5297 it can be computed by way of other givs. */
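/* Illustrative example, added for clarity (not part of the original code):
   if the biv is a loop counter i and the loop references a[i] where each
   element is 4 bytes, the address computation "base + i * 4" is a giv with
   mult_val == (const_int 4) and add_val equal to the invariant base
   address.  */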
5299 /* Determine whether X computes a giv.
5300 If it does, return a nonzero value
5301 which is the benefit from eliminating the computation of X;
5302 set *SRC_REG to the register of the biv that it is computed from;
5303 set *ADD_VAL and *MULT_VAL to the coefficients,
5304 such that the value of X is biv * mult + add; */
5307 general_induction_var (x
, src_reg
, add_val
, mult_val
)
5317 /* If this is an invariant, forget it, it isn't a giv. */
5318 if (invariant_p (x
) == 1)
5321 /* See if the expression could be a giv and get its form.
5322 Mark our place on the obstack in case we don't find a giv. */
5323 storage
= (char *) oballoc (0);
5324 x
= simplify_giv_expr (x
, &benefit
);
5331 switch (GET_CODE (x
))
5335 /* Since this is now an invariant and wasn't before, it must be a giv
5336 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5338 *src_reg
= loop_iv_list
->biv
->dest_reg
;
5339 *mult_val
= const0_rtx
;
5344 /* This is equivalent to a BIV. */
5346 *mult_val
= const1_rtx
;
5347 *add_val
= const0_rtx
;
5351 /* Either (plus (biv) (invar)) or
5352 (plus (mult (biv) (invar_1)) (invar_2)). */
5353 if (GET_CODE (XEXP (x
, 0)) == MULT
)
5355 *src_reg
= XEXP (XEXP (x
, 0), 0);
5356 *mult_val
= XEXP (XEXP (x
, 0), 1);
5360 *src_reg
= XEXP (x
, 0);
5361 *mult_val
= const1_rtx
;
5363 *add_val
= XEXP (x
, 1);
5367 /* ADD_VAL is zero. */
5368 *src_reg
= XEXP (x
, 0);
5369 *mult_val
= XEXP (x
, 1);
5370 *add_val
= const0_rtx
;
5377 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
5378 unless they are CONST_INT). */
5379 if (GET_CODE (*add_val
) == USE
)
5380 *add_val
= XEXP (*add_val
, 0);
5381 if (GET_CODE (*mult_val
) == USE
)
5382 *mult_val
= XEXP (*mult_val
, 0);
5384 benefit
+= rtx_cost (orig_x
, SET
);
5386 /* Always return some benefit if this is a giv so it will be detected
5387 as such. This allows elimination of bivs that might otherwise
5388 not be eliminated. */
5389 return benefit
== 0 ? 1 : benefit
;
5392 /* Given an expression, X, try to form it as a linear function of a biv.
5393 We will canonicalize it to be of the form
5394 (plus (mult (BIV) (invar_1))
5396 with possible degeneracies.
5398 The invariant expressions must each be of a form that can be used as a
   machine operand.  We surround them with a USE rtx (a hack, but localized
5400 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5401 routine; it is the caller's responsibility to strip them.
5403 If no such canonicalization is possible (i.e., two biv's are used or an
5404 expression that is neither invariant nor a biv or giv), this routine
5407 For a non-zero return, the result will have a code of CONST_INT, USE,
5408 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5410 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
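/* Illustrative example, added for clarity (not part of the original code):
   with a biv register B, (plus (mult (reg B) (const_int 4)) (const_int 8))
   is already in the canonical form above; an input like
   (ashift (reg B) (const_int 2)) is first rewritten as
   (mult (reg B) (const_int 4)) and then canonicalized the same way.  */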
5413 simplify_giv_expr (x
, benefit
)
5417 enum machine_mode mode
= GET_MODE (x
);
5421 /* If this is not an integer mode, or if we cannot do arithmetic in this
5422 mode, this can't be a giv. */
5423 if (mode
!= VOIDmode
5424 && (GET_MODE_CLASS (mode
) != MODE_INT
5425 || GET_MODE_BITSIZE (mode
) > HOST_BITS_PER_WIDE_INT
))
5428 switch (GET_CODE (x
))
5431 arg0
= simplify_giv_expr (XEXP (x
, 0), benefit
);
5432 arg1
= simplify_giv_expr (XEXP (x
, 1), benefit
);
5433 if (arg0
== 0 || arg1
== 0)
5436 /* Put constant last, CONST_INT last if both constant. */
5437 if ((GET_CODE (arg0
) == USE
5438 || GET_CODE (arg0
) == CONST_INT
)
5439 && GET_CODE (arg1
) != CONST_INT
)
5440 tem
= arg0
, arg0
= arg1
, arg1
= tem
;
5442 /* Handle addition of zero, then addition of an invariant. */
5443 if (arg1
== const0_rtx
)
5445 else if (GET_CODE (arg1
) == CONST_INT
|| GET_CODE (arg1
) == USE
)
5446 switch (GET_CODE (arg0
))
5450 /* Both invariant. Only valid if sum is machine operand.
5451 First strip off possible USE on the operands. */
5452 if (GET_CODE (arg0
) == USE
)
5453 arg0
= XEXP (arg0
, 0);
5455 if (GET_CODE (arg1
) == USE
)
5456 arg1
= XEXP (arg1
, 0);
5459 if (CONSTANT_P (arg0
) && GET_CODE (arg1
) == CONST_INT
)
5461 tem
= plus_constant (arg0
, INTVAL (arg1
));
5462 if (GET_CODE (tem
) != CONST_INT
)
5463 tem
= gen_rtx_USE (mode
, tem
);
5467 /* Adding two invariants must result in an invariant,
5468 so enclose addition operation inside a USE and
5470 tem
= gen_rtx_USE (mode
, gen_rtx_PLUS (mode
, arg0
, arg1
));
5477 /* biv + invar or mult + invar. Return sum. */
5478 return gen_rtx_PLUS (mode
, arg0
, arg1
);
5481 /* (a + invar_1) + invar_2. Associate. */
5482 return simplify_giv_expr (gen_rtx_PLUS (mode
,
5485 XEXP (arg0
, 1), arg1
)),
5492 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5493 MULT to reduce cases. */
5494 if (GET_CODE (arg0
) == REG
)
5495 arg0
= gen_rtx_MULT (mode
, arg0
, const1_rtx
);
5496 if (GET_CODE (arg1
) == REG
)
5497 arg1
= gen_rtx_MULT (mode
, arg1
, const1_rtx
);
5499 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5500 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5501 Recurse to associate the second PLUS. */
5502 if (GET_CODE (arg1
) == MULT
)
5503 tem
= arg0
, arg0
= arg1
, arg1
= tem
;
5505 if (GET_CODE (arg1
) == PLUS
)
5506 return simplify_giv_expr (gen_rtx_PLUS (mode
,
5507 gen_rtx_PLUS (mode
, arg0
,
5512 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5513 if (GET_CODE (arg0
) != MULT
|| GET_CODE (arg1
) != MULT
)
5516 if (XEXP (arg0
, 0) != XEXP (arg1
, 0))
5519 return simplify_giv_expr (gen_rtx_MULT (mode
,
5527 /* Handle "a - b" as "a + b * (-1)". */
5528 return simplify_giv_expr (gen_rtx_PLUS (mode
,
5530 gen_rtx_MULT (mode
, XEXP (x
, 1),
5535 arg0
= simplify_giv_expr (XEXP (x
, 0), benefit
);
5536 arg1
= simplify_giv_expr (XEXP (x
, 1), benefit
);
5537 if (arg0
== 0 || arg1
== 0)
5540 /* Put constant last, CONST_INT last if both constant. */
5541 if ((GET_CODE (arg0
) == USE
|| GET_CODE (arg0
) == CONST_INT
)
5542 && GET_CODE (arg1
) != CONST_INT
)
5543 tem
= arg0
, arg0
= arg1
, arg1
= tem
;
5545 /* If second argument is not now constant, not giv. */
5546 if (GET_CODE (arg1
) != USE
&& GET_CODE (arg1
) != CONST_INT
)
5549 /* Handle multiply by 0 or 1. */
5550 if (arg1
== const0_rtx
)
5553 else if (arg1
== const1_rtx
)
5556 switch (GET_CODE (arg0
))
5559 /* biv * invar. Done. */
5560 return gen_rtx_MULT (mode
, arg0
, arg1
);
5563 /* Product of two constants. */
5564 return GEN_INT (INTVAL (arg0
) * INTVAL (arg1
));
5567 /* invar * invar. Not giv. */
5571 /* (a * invar_1) * invar_2. Associate. */
5572 return simplify_giv_expr (gen_rtx_MULT (mode
, XEXP (arg0
, 0),
5579 /* (a + invar_1) * invar_2. Distribute. */
5580 return simplify_giv_expr (gen_rtx_PLUS (mode
,
5594 /* Shift by constant is multiply by power of two. */
5595 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
)
5598 return simplify_giv_expr (gen_rtx_MULT (mode
,
5600 GEN_INT ((HOST_WIDE_INT
) 1
5601 << INTVAL (XEXP (x
, 1)))),
5605 /* "-a" is "a * (-1)" */
5606 return simplify_giv_expr (gen_rtx_MULT (mode
, XEXP (x
, 0), constm1_rtx
),
5610 /* "~a" is "-a - 1". Silly, but easy. */
5611 return simplify_giv_expr (gen_rtx_MINUS (mode
,
5612 gen_rtx_NEG (mode
, XEXP (x
, 0)),
5617 /* Already in proper form for invariant. */
5621 /* If this is a new register, we can't deal with it. */
5622 if (REGNO (x
) >= max_reg_before_loop
)
5625 /* Check for biv or giv. */
5626 switch (reg_iv_type
[REGNO (x
)])
5630 case GENERAL_INDUCT
:
5632 struct induction
*v
= reg_iv_info
[REGNO (x
)];
5634 /* Form expression from giv and add benefit. Ensure this giv
5635 can derive another and subtract any needed adjustment if so. */
5636 *benefit
+= v
->benefit
;
5640 tem
= gen_rtx_PLUS (mode
, gen_rtx_MULT (mode
, v
->src_reg
,
5643 if (v
->derive_adjustment
)
5644 tem
= gen_rtx_MINUS (mode
, tem
, v
->derive_adjustment
);
5645 return simplify_giv_expr (tem
, benefit
);
5652 /* Fall through to general case. */
5654 /* If invariant, return as USE (unless CONST_INT).
5655 Otherwise, not giv. */
5656 if (GET_CODE (x
) == USE
)
5659 if (invariant_p (x
) == 1)
5661 if (GET_CODE (x
) == CONST_INT
)
5664 return gen_rtx_USE (mode
, x
);
5671 /* Help detect a giv that is calculated by several consecutive insns;
5675 The caller has already identified the first insn P as having a giv as dest;
5676 we check that all other insns that set the same register follow
5677 immediately after P, that they alter nothing else,
5678 and that the result of the last is still a giv.
5680 The value is 0 if the reg set in P is not really a giv.
5681 Otherwise, the value is the amount gained by eliminating
5682 all the consecutive insns that compute the value.
5684 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
5685 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
5687 The coefficients of the ultimate giv value are stored in
5688 *MULT_VAL and *ADD_VAL. */
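/* Illustrative example, added for clarity (not part of the original code):
   a giv computed by two consecutive insns, e.g. "r = i * 4" followed by
   "r = r + 8", is recognized here as the single giv r = i * 4 + 8, and the
   returned value is the total benefit of eliminating both insns.  */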
5691 consec_sets_giv (first_benefit
, p
, src_reg
, dest_reg
,
5706 /* Indicate that this is a giv so that we can update the value produced in
5707 each insn of the multi-insn sequence.
5709 This induction structure will be used only by the call to
5710 general_induction_var below, so we can allocate it on our stack.
5711 If this is a giv, our caller will replace the induct var entry with
5712 a new induction structure. */
5714 = (struct induction
*) alloca (sizeof (struct induction
));
5715 v
->src_reg
= src_reg
;
5716 v
->mult_val
= *mult_val
;
5717 v
->add_val
= *add_val
;
5718 v
->benefit
= first_benefit
;
5720 v
->derive_adjustment
= 0;
5722 reg_iv_type
[REGNO (dest_reg
)] = GENERAL_INDUCT
;
5723 reg_iv_info
[REGNO (dest_reg
)] = v
;
5725 count
= n_times_set
[REGNO (dest_reg
)] - 1;
5730 code
= GET_CODE (p
);
5732 /* If libcall, skip to end of call sequence. */
5733 if (code
== INSN
&& (temp
= find_reg_note (p
, REG_LIBCALL
, NULL_RTX
)))
5737 && (set
= single_set (p
))
5738 && GET_CODE (SET_DEST (set
)) == REG
5739 && SET_DEST (set
) == dest_reg
5740 && ((benefit
= general_induction_var (SET_SRC (set
), &src_reg
,
5742 /* Giv created by equivalent expression. */
5743 || ((temp
= find_reg_note (p
, REG_EQUAL
, NULL_RTX
))
5744 && (benefit
= general_induction_var (XEXP (temp
, 0), &src_reg
,
5745 add_val
, mult_val
))))
5746 && src_reg
== v
->src_reg
)
5748 if (find_reg_note (p
, REG_RETVAL
, NULL_RTX
))
5749 benefit
+= libcall_benefit (p
);
5752 v
->mult_val
= *mult_val
;
5753 v
->add_val
= *add_val
;
5754 v
->benefit
= benefit
;
5756 else if (code
!= NOTE
)
5758 /* Allow insns that set something other than this giv to a
5759 constant. Such insns are needed on machines which cannot
5760 include long constants and should not disqualify a giv. */
5762 && (set
= single_set (p
))
5763 && SET_DEST (set
) != dest_reg
5764 && CONSTANT_P (SET_SRC (set
)))
5767 reg_iv_type
[REGNO (dest_reg
)] = UNKNOWN_INDUCT
;
5775 /* Return an rtx, if any, that expresses giv G2 as a function of the register
5776 represented by G1. If no such expression can be found, or it is clear that
5777 it cannot possibly be a valid address, 0 is returned.
5779 To perform the computation, we note that
5782 where `v' is the biv.
5784 So G2 = (c/a) * G1 + (d - b*c/a) */
5788 express_from (g1
, g2
)
5789 struct induction
*g1
, *g2
;
5793 /* The value that G1 will be multiplied by must be a constant integer. Also,
5794 the only chance we have of getting a valid address is if b*c/a (see above
5795 for notation) is also an integer. */
5796 if (GET_CODE (g1
->mult_val
) != CONST_INT
5797 || GET_CODE (g2
->mult_val
) != CONST_INT
5798 || GET_CODE (g1
->add_val
) != CONST_INT
5799 || g1
->mult_val
== const0_rtx
5800 || INTVAL (g2
->mult_val
) % INTVAL (g1
->mult_val
) != 0)
5803 mult
= GEN_INT (INTVAL (g2
->mult_val
) / INTVAL (g1
->mult_val
));
5804 add
= plus_constant (g2
->add_val
, - INTVAL (g1
->add_val
) * INTVAL (mult
));
5806 /* Form simplified final result. */
5807 if (mult
== const0_rtx
)
5809 else if (mult
== const1_rtx
)
5810 mult
= g1
->dest_reg
;
5812 mult
= gen_rtx_MULT (g2
->mode
, g1
->dest_reg
, mult
);
5814 if (add
== const0_rtx
)
5817 return gen_rtx_PLUS (g2
->mode
, mult
, add
);
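/* Illustrative example for express_from, added for clarity (not part of the
   original code): if G1 = 2*v + 3 and G2 = 4*v + 10, then mult = 4/2 = 2 and
   add = 10 - 3*2 = 4, so G2 is expressed as 2*G1 + 4; substituting
   G1 = 2*v + 3 back in gives 4*v + 10 as required.  */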
5821 /* Return 1 if giv G2 can be combined with G1. This means that G2 can use
5822 (either directly or via an address expression) a register used to represent
   G1.  Set g2->new_reg to a representation of G1 (normally just
5827 combine_givs_p (g1
, g2
)
5828 struct induction
*g1
, *g2
;
5832 /* If these givs are identical, they can be combined. */
5833 if (rtx_equal_p (g1
->mult_val
, g2
->mult_val
)
5834 && rtx_equal_p (g1
->add_val
, g2
->add_val
))
5836 g2
->new_reg
= g1
->dest_reg
;
5841 /* If G2 can be expressed as a function of G1 and that function is valid
5842 as an address and no more expensive than using a register for G2,
5843 the expression of G2 in terms of G1 can be used. */
5844 if (g2
->giv_type
== DEST_ADDR
5845 && (tem
= express_from (g1
, g2
)) != 0
5846 && memory_address_p (g2
->mem_mode
, tem
)
5847 && ADDRESS_COST (tem
) <= ADDRESS_COST (*g2
->location
))
5857 #ifdef GIV_SORT_CRITERION
5858 /* Compare two givs and sort the most desirable one for combinations first.
5859 This is used only in one qsort call below. */
5863 struct induction
**x
, **y
;
5865 GIV_SORT_CRITERION (*x
, *y
);
5871 /* Check all pairs of givs for iv_class BL and see if any can be combined with
5872 any other. If so, point SAME to the giv combined with and set NEW_REG to
5873 be an expression (in terms of the other giv's DEST_REG) equivalent to the
5874 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
5878 struct iv_class
*bl
;
5880 struct induction
*g1
, *g2
, **giv_array
;
5881 int i
, j
, giv_count
, pass
;
5883 /* Count givs, because bl->giv_count is incorrect here. */
5885 for (g1
= bl
->giv
; g1
; g1
= g1
->next_iv
)
5889 = (struct induction
**) alloca (giv_count
* sizeof (struct induction
*));
5891 for (g1
= bl
->giv
; g1
; g1
= g1
->next_iv
)
5892 giv_array
[i
++] = g1
;
5894 #ifdef GIV_SORT_CRITERION
5895 /* Sort the givs if GIV_SORT_CRITERION is defined.
5896 This is usually defined for processors which lack
5897 negative register offsets so more givs may be combined. */
5899 if (loop_dump_stream
)
5900 fprintf (loop_dump_stream
, "%d givs counted, sorting...\n", giv_count
);
5902 qsort (giv_array
, giv_count
, sizeof (struct induction
*), giv_sort
);
5905 for (i
= 0; i
< giv_count
; i
++)
5908 for (pass
= 0; pass
<= 1; pass
++)
5909 for (j
= 0; j
< giv_count
; j
++)
5913 /* First try to combine with replaceable givs, then all givs. */
5914 && (g1
->replaceable
|| pass
== 1)
5915 /* If either has already been combined or is to be ignored, can't
5917 && ! g1
->ignore
&& ! g2
->ignore
&& ! g1
->same
&& ! g2
->same
5918 /* If something has been based on G2, G2 cannot itself be based
5919 on something else. */
5920 && ! g2
->combined_with
5921 && combine_givs_p (g1
, g2
))
5923 /* g2->new_reg set by `combine_givs_p' */
5925 g1
->combined_with
= 1;
5927 /* If one of these givs is a DEST_REG that was only used
5928 once, by the other giv, this is actually a single use.
5929 The DEST_REG has the correct cost, while the other giv
5930 counts the REG use too often. */
5931 if (g2
->giv_type
== DEST_REG
5932 && n_times_used
[REGNO (g2
->dest_reg
)] == 1
5933 && reg_mentioned_p (g2
->dest_reg
, PATTERN (g1
->insn
)))
5934 g1
->benefit
= g2
->benefit
;
5935 else if (g1
->giv_type
!= DEST_REG
5936 || n_times_used
[REGNO (g1
->dest_reg
)] != 1
5937 || ! reg_mentioned_p (g1
->dest_reg
,
5938 PATTERN (g2
->insn
)))
5940 g1
->benefit
+= g2
->benefit
;
5941 g1
->times_used
+= g2
->times_used
;
5943 /* ??? The new final_[bg]iv_value code does a much better job
5944 of finding replaceable giv's, and hence this code may no
5945 longer be necessary. */
5946 if (! g2
->replaceable
&& REG_USERVAR_P (g2
->dest_reg
))
5947 g1
->benefit
-= copy_cost
;
5948 g1
->lifetime
+= g2
->lifetime
;
5950 if (loop_dump_stream
)
5951 fprintf (loop_dump_stream
, "giv at %d combined with giv at %d\n",
5952 INSN_UID (g2
->insn
), INSN_UID (g1
->insn
));
/* EMIT code before INSERT_BEFORE to set REG = B * M + A.  */

emit_iv_add_mult (b, m, a, reg, insert_before)
     rtx b;		/* initial value of basic induction variable */
     rtx m;		/* multiplicative constant */
     rtx a;		/* additive constant */
     rtx reg;		/* destination register */
  /* Prevent unexpected sharing of these rtx.  */
  /* Increase the lifetime of any invariants moved further in code.  */
  update_reg_last_use (a, insert_before);
  update_reg_last_use (b, insert_before);
  update_reg_last_use (m, insert_before);

  result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);

  emit_move_insn (reg, result);
  seq = gen_sequence ();

  emit_insn_before (seq, insert_before);

  record_base_value (REGNO (reg), b, 0);
5992 /* Test whether A * B can be computed without
5993 an actual multiply insn. Value is 1 if so. */
5996 product_cheap_p (a
, b
)
6002 struct obstack
*old_rtl_obstack
= rtl_obstack
;
6003 char *storage
= (char *) obstack_alloc (&temp_obstack
, 0);
6006 /* If only one is constant, make it B. */
6007 if (GET_CODE (a
) == CONST_INT
)
6008 tmp
= a
, a
= b
, b
= tmp
;
6010 /* If first constant, both constant, so don't need multiply. */
6011 if (GET_CODE (a
) == CONST_INT
)
6014 /* If second not constant, neither is constant, so would need multiply. */
6015 if (GET_CODE (b
) != CONST_INT
)
6018 /* One operand is constant, so might not need multiply insn. Generate the
6019 code for the multiply and see if a call or multiply, or long sequence
6020 of insns is generated. */
6022 rtl_obstack
= &temp_obstack
;
6024 expand_mult (GET_MODE (a
), a
, b
, NULL_RTX
, 0);
6025 tmp
= gen_sequence ();
6028 if (GET_CODE (tmp
) == SEQUENCE
)
6030 if (XVEC (tmp
, 0) == 0)
6032 else if (XVECLEN (tmp
, 0) > 3)
6035 for (i
= 0; i
< XVECLEN (tmp
, 0); i
++)
6037 rtx insn
= XVECEXP (tmp
, 0, i
);
6039 if (GET_CODE (insn
) != INSN
6040 || (GET_CODE (PATTERN (insn
)) == SET
6041 && GET_CODE (SET_SRC (PATTERN (insn
))) == MULT
)
6042 || (GET_CODE (PATTERN (insn
)) == PARALLEL
6043 && GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
6044 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn
), 0, 0))) == MULT
))
6051 else if (GET_CODE (tmp
) == SET
6052 && GET_CODE (SET_SRC (tmp
)) == MULT
)
6054 else if (GET_CODE (tmp
) == PARALLEL
6055 && GET_CODE (XVECEXP (tmp
, 0, 0)) == SET
6056 && GET_CODE (SET_SRC (XVECEXP (tmp
, 0, 0))) == MULT
)
6059 /* Free any storage we obtained in generating this multiply and restore rtl
6060 allocation to its normal obstack. */
6061 obstack_free (&temp_obstack
, storage
);
6062 rtl_obstack
= old_rtl_obstack
;
6067 /* Check to see if loop can be terminated by a "decrement and branch until
   zero" instruction.  If so, add a REG_NONNEG note to the branch insn.
6069 Also try reversing an increment loop to a decrement loop
6070 to see if the optimization can be performed.
6071 Value is nonzero if optimization was performed. */
6073 /* This is useful even if the architecture doesn't have such an insn,
   because it might change a loop which increments from 0 to n to a loop
6075 which decrements from n to 0. A loop that decrements to zero is usually
6076 faster than one that increments from zero. */
6078 /* ??? This could be rewritten to use some of the loop unrolling procedures,
6079 such as approx_final_value, biv_total_increment, loop_iterations, and
6080 final_[bg]iv_value. */
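/* Illustrative example, added for clarity (not part of the original code):
   a loop "for (i = n; i > 0; i--)" whose counter is used only for counting
   can end in a single decrement-and-branch-until-zero insn; a loop written
   as "for (i = 0; i < n; i++)" may first be reversed into the decrementing
   form and then handled the same way.  */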
6083 check_dbra_loop (loop_end
, insn_count
, loop_start
)
6088 struct iv_class
*bl
;
6095 rtx before_comparison
;
6099 int compare_and_branch
;
  /* If last insn is a conditional branch, and the insn before tests a
     register value, try to optimize it.  Otherwise, we can't do anything.  */

  jump = PREV_INSN (loop_end);
  comparison = get_condition_for_loop (jump);
  if (comparison == 0)
    return 0;

  /* Try to compute whether the compare/branch at the loop end is one or
     two instructions.  */
  get_condition (jump, &first_compare);
  if (first_compare == jump)
    compare_and_branch = 1;
  else if (first_compare == prev_nonnote_insn (jump))
    compare_and_branch = 2;
  /* Check all of the bivs to see if the compare uses one of them.
     Skip biv's set more than once because we can't guarantee that
     it will be zero on the last iteration.  Also skip if the biv is
     used between its update and the test insn.  */

  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      if (bl->biv_count == 1
	  && bl->biv->dest_reg == XEXP (comparison, 0)
	  && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
				   first_compare))
	break;
    }

  if (! bl)
    return 0;

  /* Look for the case where the basic induction variable is always
     nonnegative, and equals zero on the last iteration.
     In this case, add a reg_note REG_NONNEG, which allows the
     m68k DBRA instruction to be used.  */

  if (((GET_CODE (comparison) == GT
	&& GET_CODE (XEXP (comparison, 1)) == CONST_INT
	&& INTVAL (XEXP (comparison, 1)) == -1)
       || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
      && GET_CODE (bl->biv->add_val) == CONST_INT
      && INTVAL (bl->biv->add_val) < 0)
    {
      /* Initial value must be greater than 0,
	 init_val % -dec_value == 0 to ensure that it equals zero on
	 the last iteration.  */
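      /* Worked example (added): with add_val == -4 an initial value of 12
	 qualifies, since 12 % 4 == 0 and the biv steps through 12, 8, 4, 0,
	 reaching exactly zero on the last iteration; an initial value of 10
	 would step 10, 6, 2, -2, never equal zero, and so is rejected.  */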
      if (GET_CODE (bl->initial_value) == CONST_INT
	  && INTVAL (bl->initial_value) > 0
	  && (INTVAL (bl->initial_value)
	      % (-INTVAL (bl->biv->add_val))) == 0)
	{
	  /* register always nonnegative, add REG_NONNEG note to branch */
	  REG_NOTES (PREV_INSN (loop_end))
	    = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
				 REG_NOTES (PREV_INSN (loop_end)));
	  bl->nonneg = 1;

	  return 1;
	}

      /* If the decrement is 1 and the value was tested as >= 0 before
	 the loop, then we can safely optimize.  */
      for (p = loop_start; p; p = PREV_INSN (p))
	{
	  if (GET_CODE (p) == CODE_LABEL)
	    break;
	  if (GET_CODE (p) != JUMP_INSN)
	    continue;

	  before_comparison = get_condition_for_loop (p);
	  if (before_comparison
	      && XEXP (before_comparison, 0) == bl->biv->dest_reg
	      && GET_CODE (before_comparison) == LT
	      && XEXP (before_comparison, 1) == const0_rtx
	      && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
	      && INTVAL (bl->biv->add_val) == -1)
	    {
	      REG_NOTES (PREV_INSN (loop_end))
		= gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
				     REG_NOTES (PREV_INSN (loop_end)));
	      bl->nonneg = 1;

	      return 1;
	    }
	}
    }
  else if (num_mem_sets <= 1)
    {
      /* Try to change inc to dec, so can apply above optimization.  */

      /* Can do this if:
	 all registers modified are induction variables or invariant,
	 all memory references have non-overlapping addresses
	 (obviously true if only one write)
	 allow 2 insns for the compare/jump at the end of the loop.  */

      /* Also, we must avoid any instructions which use both the reversed
	 biv and another biv.  Such instructions will fail if the loop is
	 reversed.  We meet this condition by requiring that either
	 no_use_except_counting is true, or else that there is only
	 one biv in the loop.  */

      int num_nonfixed_reads = 0;
      /* 1 if the iteration var is used only to count iterations.  */
      int no_use_except_counting = 0;
      /* 1 if the loop has no memory store, or it has a single memory store
	 which is reversible.  */
      int reversible_mem_store = 1;

      for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
	if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
	  num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));

      if (bl->giv_count == 0
	  && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
	{
	  rtx bivreg = regno_reg_rtx[bl->regno];

	  /* If there are no givs for this biv, and the only exit is the
	     fall through at the end of the loop, then
	     see if perhaps there are no uses except to count.  */
	  no_use_except_counting = 1;
	  for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
	    if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
	      {
		rtx set = single_set (p);

		if (set && GET_CODE (SET_DEST (set)) == REG
		    && REGNO (SET_DEST (set)) == bl->regno)
		  /* An insn that sets the biv is okay.  */
		  ;
		else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
			 || p == prev_nonnote_insn (loop_end))
		  /* Don't bother about the end test.  */
		  ;
		else if (reg_mentioned_p (bivreg, PATTERN (p)))
		  {
		    /* Any other use of the biv is no good.  */
		    no_use_except_counting = 0;
		    break;
		  }
	      }
	}
      /* If the loop has a single store, and the destination address is
	 invariant, then we can't reverse the loop, because this address
	 might then have the wrong value at loop exit.
	 This would work if the source was invariant also, however, in that
	 case, the insn should have been moved out of the loop.  */

      if (num_mem_sets == 1)
	reversible_mem_store
	  = (! unknown_address_altered
	     && ! invariant_p (XEXP (loop_store_mems[0], 0)));

      /* This code only acts for innermost loops.  Also it simplifies
	 the memory address check by only reversing loops with
	 zero or one memory access.
	 Two memory accesses could involve parts of the same array,
	 and that can't be reversed.  */

      if (num_nonfixed_reads <= 1
	  && !loop_has_volatile
	  && reversible_mem_store
	  && (no_use_except_counting
	      || ((bl->giv_count + bl->biv_count + num_mem_sets
		   + num_movables + compare_and_branch == insn_count)
		  && (bl == loop_iv_list && bl->next == 0))))
	{
	  /* Loop can be reversed.  */
	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "Can reverse loop\n");
	  /* Now check other conditions:

	     The increment must be a constant, as must the initial value,
	     and the comparison code must be LT.

	     This test can probably be improved since +/- 1 in the constant
	     can be obtained by changing LT to LE and vice versa; this is
	     confusing.  */

	  if (GET_CODE (XEXP (comparison, 1)) == CONST_INT
	      /* LE gets turned into LT */
	      && GET_CODE (comparison) == LT
	      && GET_CODE (bl->initial_value) == CONST_INT)
	    {
	      HOST_WIDE_INT add_val, comparison_val;
	      rtx initial_value;

	      add_val = INTVAL (bl->biv->add_val);
	      comparison_val = INTVAL (XEXP (comparison, 1));
	      initial_value = bl->initial_value;

	      /* Normalize the initial value if it is an integer and
		 has no other use except as a counter.  This will allow
		 a few more loops to be reversed.  */
	      if (no_use_except_counting
		  && GET_CODE (initial_value) == CONST_INT)
		{
		  comparison_val = comparison_val - INTVAL (bl->initial_value);
		  /* Check for overflow.  If comparison_val ends up as a
		     negative value, then we can't reverse the loop.  */
		  if (comparison_val >= 0)
		    initial_value = const0_rtx;
		}

	      /* If the initial value is not zero, or if the comparison
		 value is not an exact multiple of the increment, then we
		 cannot reverse this loop.  */
	      if (initial_value != const0_rtx
		  || (comparison_val % add_val) != 0)
		return 0;

	      /* Reset these in case we normalized the initial value
		 and comparison value above.  */
	      bl->initial_value = initial_value;
	      XEXP (comparison, 1) = GEN_INT (comparison_val);
	      /* Register will always be nonnegative, with value
		 0 on last iteration if loop reversed */

	      /* Save some info needed to produce the new insns.  */
	      reg = bl->biv->dest_reg;
	      jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
	      if (jump_label == pc_rtx)
		jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
	      new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));

	      final_value = XEXP (comparison, 1);
	      start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
				     - INTVAL (bl->biv->add_val));

	      /* Initialize biv to start_value before loop start.
		 The old initializing insn will be deleted as a
		 dead store by flow.c.  */
	      emit_insn_before (gen_move_insn (reg, start_value), loop_start);

	      /* Add insn to decrement register, and delete insn
		 that incremented the register.  */
	      p = emit_insn_before (gen_add2_insn (reg, new_add_val),
				    bl->biv->insn);
	      delete_insn (bl->biv->insn);

	      /* Update biv info to reflect its new status.  */
	      bl->biv->insn = p;
	      bl->initial_value = start_value;
	      bl->biv->add_val = new_add_val;

	      /* Inc LABEL_NUSES so that delete_insn will
		 not delete the label.  */
	      LABEL_NUSES (XEXP (jump_label, 0))++;

	      /* Emit an insn after the end of the loop to set the biv's
		 proper exit value if it is used anywhere outside the loop.  */
	      if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
		  || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
		emit_insn_after (gen_move_insn (reg, final_value),
				 loop_end);

	      /* Delete compare/branch at end of loop.  */
	      delete_insn (PREV_INSN (loop_end));
	      if (compare_and_branch == 2)
		delete_insn (first_compare);
	      /* Add new compare/branch insn at end of loop.  */
	      start_sequence ();
	      emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
			     GET_MODE (reg), 0, 0);
	      emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
	      tem = gen_sequence ();
	      end_sequence ();
	      emit_jump_insn_before (tem, loop_end);

	      for (tem = PREV_INSN (loop_end);
		   tem && GET_CODE (tem) != JUMP_INSN;
		   tem = PREV_INSN (tem))
		;
	      if (tem)
		{
		  JUMP_LABEL (tem) = XEXP (jump_label, 0);

		  /* Increment of LABEL_NUSES done above.  */
		  /* Register is now always nonnegative,
		     so add REG_NONNEG note to the branch.  */
		  REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
						       REG_NOTES (tem));
		}

	      /* Mark that this biv has been reversed.  Each giv which depends
		 on this biv, and which is also live past the end of the loop
		 will have to be fixed up.  */

	      bl->reversed = 1;

	      if (loop_dump_stream)
		fprintf (loop_dump_stream,
			 "Reversed loop and added reg_nonneg\n");

	      return 1;
	    }
	}
    }

  return 0;
}
/* Verify whether the biv BL appears to be eliminable,
   based on the insns in the loop that refer to it.
   LOOP_START is the first insn of the loop, and END is the end insn.

   If ELIMINATE_P is non-zero, actually do the elimination.

   THRESHOLD and INSN_COUNT are from loop_optimize and are used to
   determine whether invariant insns should be placed inside or at the
   start of the loop.  */
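/* Example (added for exposition; the source fragment is hypothetical): if
   the only remaining use of the biv `i' in

       for (i = 0; i < n; i++)  *p++ = 0;

   is the exit test, the comparison can be rewritten in terms of the
   strength-reduced giv `p' (roughly `p < p_limit'), after which `i' is dead
   and the biv can be eliminated entirely.  maybe_eliminate_biv_1 below
   performs that rewriting on the operands of the comparison.  */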
static int
maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
     struct iv_class *bl;
     rtx loop_start;
     rtx end;
     int eliminate_p;
     int threshold, insn_count;
{
  rtx reg = bl->biv->dest_reg;
  rtx p;

  /* Scan all insns in the loop, stopping if we find one that uses the
     biv in a way that we cannot eliminate.  */

  for (p = loop_start; p != end; p = NEXT_INSN (p))
    {
      enum rtx_code code = GET_CODE (p);
      rtx where = threshold >= insn_count ? loop_start : p;

      if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
	  && reg_mentioned_p (reg, PATTERN (p))
	  && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
	{
	  if (loop_dump_stream)
	    fprintf (loop_dump_stream,
		     "Cannot eliminate biv %d: biv used in insn %d.\n",
		     bl->regno, INSN_UID (p));
	  break;
	}
    }

  if (p == end)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
		 bl->regno, eliminate_p ? "was" : "can be");
      return 1;
    }

  return 0;
}
/* If BL appears in X (part of the pattern of INSN), see if we can
   eliminate its use.  If so, return 1.  If not, return 0.

   If BIV does not appear in X, return 1.

   If ELIMINATE_P is non-zero, actually do the elimination.  WHERE indicates
   where extra insns should be added.  Depending on how many items have been
   moved out of the loop, it will either be before INSN or at the start of
   the loop.  */

static int
maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
     rtx x, insn;
     struct iv_class *bl;
     int eliminate_p;
     rtx where;
{
  enum rtx_code code = GET_CODE (x);
  rtx reg = bl->biv->dest_reg;
  enum machine_mode mode = GET_MODE (reg);
  struct induction *v;
  rtx arg, new, tem;
  int arg_operand;
  char *fmt;
  int i, j;
  switch (code)
    {
    case REG:
      /* If we haven't already been able to do something with this BIV,
	 we can't eliminate it.  */
      if (x == reg)
	return 0;
      return 1;

    case SET:
      /* If this sets the BIV, it is not a problem.  */
      if (SET_DEST (x) == reg)
	return 1;

      /* If this is an insn that defines a giv, it is also ok because
	 it will go away when the giv is reduced.  */
      for (v = bl->giv; v; v = v->next_iv)
	if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
	  return 1;

      if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
	{
	  /* Can replace with any giv that was reduced and
	     that has (MULT_VAL != 0) and (ADD_VAL == 0).
	     Require a constant for MULT_VAL, so we know it's nonzero.
	     ??? We disable this optimization to avoid potential
	     overflow problems.  */

	  for (v = bl->giv; v; v = v->next_iv)
	    if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
		&& v->add_val == const0_rtx
		&& ! v->ignore && ! v->maybe_dead && v->always_computable
		&& v->mode == mode
		&& 0)
	      {
		/* If the giv V had the auto-inc address optimization applied
		   to it, and INSN occurs between the giv insn and the biv
		   insn, then we must adjust the value used here.
		   This is rare, so we don't bother to do so.  */
		if (v->auto_inc_opt
		    && ((INSN_LUID (v->insn) < INSN_LUID (insn)
			 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
			|| (INSN_LUID (v->insn) > INSN_LUID (insn)
			    && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
		  continue;

		if (! eliminate_p)
		  return 1;

		/* If the giv has the opposite direction of change,
		   then reverse the comparison.  */
		if (INTVAL (v->mult_val) < 0)
		  new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
					 const0_rtx, v->new_reg);
		else
		  new = v->new_reg;

		/* We can probably test that giv's reduced reg.  */
		if (validate_change (insn, &SET_SRC (x), new, 0))
		  return 1;
	      }
	  /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
	     replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
	     Require a constant for MULT_VAL, so we know it's nonzero.
	     ??? Do this only if ADD_VAL is a pointer to avoid a potential
	     overflow problem.  */

	  for (v = bl->giv; v; v = v->next_iv)
	    if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
		&& ! v->ignore && ! v->maybe_dead && v->always_computable
		&& v->mode == mode
		&& (GET_CODE (v->add_val) == SYMBOL_REF
		    || GET_CODE (v->add_val) == LABEL_REF
		    || GET_CODE (v->add_val) == CONST
		    || (GET_CODE (v->add_val) == REG
			&& REGNO_POINTER_FLAG (REGNO (v->add_val)))))
	      {
		/* If the giv V had the auto-inc address optimization applied
		   to it, and INSN occurs between the giv insn and the biv
		   insn, then we must adjust the value used here.
		   This is rare, so we don't bother to do so.  */
		if (v->auto_inc_opt
		    && ((INSN_LUID (v->insn) < INSN_LUID (insn)
			 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
			|| (INSN_LUID (v->insn) > INSN_LUID (insn)
			    && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
		  continue;

		if (! eliminate_p)
		  return 1;

		/* If the giv has the opposite direction of change,
		   then reverse the comparison.  */
		if (INTVAL (v->mult_val) < 0)
		  new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
					 v->new_reg);
		else
		  new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
					 copy_rtx (v->add_val));

		/* Replace biv with the giv's reduced register.  */
		update_reg_last_use (v->add_val, insn);
		if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
		  return 1;

		/* Insn doesn't support that constant or invariant.  Copy it
		   into a register (it will be a loop invariant.)  */
		tem = gen_reg_rtx (GET_MODE (v->new_reg));

		emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
				  where);

		/* Substitute the new register for its invariant value in
		   the compare expression.  */
		XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
		if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
		  return 1;
	      }
	}
      break;
    case GT:  case GE:  case GTU:  case GEU:
    case LT:  case LE:  case LTU:  case LEU:
      /* See if either argument is the biv.  */
      if (XEXP (x, 0) == reg)
	arg = XEXP (x, 1), arg_operand = 1;
      else if (XEXP (x, 1) == reg)
	arg = XEXP (x, 0), arg_operand = 0;
      else
	break;

      if (CONSTANT_P (arg))
	{
	  /* First try to replace with any giv that has constant positive
	     mult_val and constant add_val.  We might be able to support
	     negative mult_val, but it seems complex to do it in general.  */

	  for (v = bl->giv; v; v = v->next_iv)
	    if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
		&& (GET_CODE (v->add_val) == SYMBOL_REF
		    || GET_CODE (v->add_val) == LABEL_REF
		    || GET_CODE (v->add_val) == CONST
		    || (GET_CODE (v->add_val) == REG
			&& REGNO_POINTER_FLAG (REGNO (v->add_val))))
		&& ! v->ignore && ! v->maybe_dead && v->always_computable
		&& v->mode == mode)
	      {
		/* If the giv V had the auto-inc address optimization applied
		   to it, and INSN occurs between the giv insn and the biv
		   insn, then we must adjust the value used here.
		   This is rare, so we don't bother to do so.  */
		if (v->auto_inc_opt
		    && ((INSN_LUID (v->insn) < INSN_LUID (insn)
			 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
			|| (INSN_LUID (v->insn) > INSN_LUID (insn)
			    && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
		  continue;

		if (! eliminate_p)
		  return 1;

		/* Replace biv with the giv's reduced reg.  */
		XEXP (x, 1 - arg_operand) = v->new_reg;

		/* If all constants are actually constant integers and
		   the derived constant can be directly placed in the COMPARE,
		   do so.  */
		if (GET_CODE (arg) == CONST_INT
		    && GET_CODE (v->mult_val) == CONST_INT
		    && GET_CODE (v->add_val) == CONST_INT
		    && validate_change (insn, &XEXP (x, arg_operand),
					GEN_INT (INTVAL (arg)
						 * INTVAL (v->mult_val)
						 + INTVAL (v->add_val)), 0))
		  return 1;

		/* Otherwise, load it into a register.  */
		tem = gen_reg_rtx (mode);
		emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
		if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
		  return 1;

		/* If that failed, put back the change we made above.  */
		XEXP (x, 1 - arg_operand) = reg;
	      }
	  /* Look for giv with positive constant mult_val and nonconst add_val.
	     Insert insns to calculate new compare value.
	     ??? Turn this off due to possible overflow.  */

	  for (v = bl->giv; v; v = v->next_iv)
	    if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
		&& ! v->ignore && ! v->maybe_dead && v->always_computable
		&& v->mode == mode
		&& 0)
	      {
		/* If the giv V had the auto-inc address optimization applied
		   to it, and INSN occurs between the giv insn and the biv
		   insn, then we must adjust the value used here.
		   This is rare, so we don't bother to do so.  */
		if (v->auto_inc_opt
		    && ((INSN_LUID (v->insn) < INSN_LUID (insn)
			 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
			|| (INSN_LUID (v->insn) > INSN_LUID (insn)
			    && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
		  continue;

		if (! eliminate_p)
		  return 1;

		tem = gen_reg_rtx (mode);

		/* Replace biv with giv's reduced register.  */
		validate_change (insn, &XEXP (x, 1 - arg_operand),
				 v->new_reg, 1);

		/* Compute value to compare against.  */
		emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
		/* Use it in this insn.  */
		validate_change (insn, &XEXP (x, arg_operand), tem, 1);
		if (apply_change_group ())
		  return 1;
	      }
	}
      else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
	{
	  if (invariant_p (arg) == 1)
	    {
	      /* Look for giv with constant positive mult_val and nonconst
		 add_val.  Insert insns to compute new compare value.
		 ??? Turn this off due to possible overflow.  */

	      for (v = bl->giv; v; v = v->next_iv)
		if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
		    && ! v->ignore && ! v->maybe_dead && v->always_computable
		    && v->mode == mode
		    && 0)
		  {
		    /* If the giv V had the auto-inc address optimization applied
		       to it, and INSN occurs between the giv insn and the biv
		       insn, then we must adjust the value used here.
		       This is rare, so we don't bother to do so.  */
		    if (v->auto_inc_opt
			&& ((INSN_LUID (v->insn) < INSN_LUID (insn)
			     && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
			    || (INSN_LUID (v->insn) > INSN_LUID (insn)
				&& INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
		      continue;

		    if (! eliminate_p)
		      return 1;

		    tem = gen_reg_rtx (mode);

		    /* Replace biv with giv's reduced register.  */
		    validate_change (insn, &XEXP (x, 1 - arg_operand),
				     v->new_reg, 1);

		    /* Compute value to compare against.  */
		    emit_iv_add_mult (arg, v->mult_val, v->add_val,
				      tem, where);
		    validate_change (insn, &XEXP (x, arg_operand), tem, 1);
		    if (apply_change_group ())
		      return 1;
		  }
	    }

	  /* This code has problems.  Basically, you can't know, when
	     deciding whether we will eliminate BL, whether a particular giv
	     of ARG will be reduced.  If it isn't going to be reduced,
	     we can't eliminate BL.  We can try forcing it to be reduced,
	     but that can generate poor code.

	     The problem is that the benefit of reducing TV, below, should
	     be increased if BL can actually be eliminated, but this means
	     we might have to do a topological sort of the order in which
	     we try to process bivs.  It doesn't seem worthwhile to do
	     this sort of thing now.  */
	}
      /* Otherwise the reg compared with had better be a biv.  */
      if (GET_CODE (arg) != REG
	  || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
	break;

      /* Look for a pair of givs, one for each biv,
	 with identical coefficients.  */
      for (v = bl->giv; v; v = v->next_iv)
	{
	  struct induction *tv;

	  if (v->ignore || v->maybe_dead || v->mode != mode)
	    continue;

	  for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
	    if (! tv->ignore && ! tv->maybe_dead
		&& rtx_equal_p (tv->mult_val, v->mult_val)
		&& rtx_equal_p (tv->add_val, v->add_val)
		&& tv->mode == mode)
	      {
		/* If the giv V had the auto-inc address optimization applied
		   to it, and INSN occurs between the giv insn and the biv
		   insn, then we must adjust the value used here.
		   This is rare, so we don't bother to do so.  */
		if (v->auto_inc_opt
		    && ((INSN_LUID (v->insn) < INSN_LUID (insn)
			 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
			|| (INSN_LUID (v->insn) > INSN_LUID (insn)
			    && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
		  continue;

		if (! eliminate_p)
		  return 1;

		/* Replace biv with its giv's reduced reg.  */
		XEXP (x, 1 - arg_operand) = v->new_reg;
		/* Replace other operand with the other giv's
		   reduced reg.  */
		XEXP (x, arg_operand) = tv->new_reg;
		return 1;
	      }
	}
      /* If we get here, the biv can't be eliminated.  */
      return 0;

    case MEM:
      /* If this address is a DEST_ADDR giv, it doesn't matter if the
	 biv is used in it, since it will be replaced.  */
      for (v = bl->giv; v; v = v->next_iv)
	if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
	  return 1;
      break;

    default:
      break;
    }

  /* See if any subexpression fails elimination.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'e':
	  if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
				       eliminate_p, where))
	    return 0;
	  break;

	case 'E':
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
					 eliminate_p, where))
	      return 0;
	  break;
	}
    }

  return 1;
}
/* Return nonzero if the last use of REG
   is in an insn following INSN in the same basic block.  */

static int
last_use_this_basic_block (reg, insn)
     rtx reg;
     rtx insn;
{
  rtx n;
  for (n = insn;
       n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
       n = NEXT_INSN (n))
    {
      if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
	return 1;
    }
  return 0;
}
/* Called via `note_stores' to record the initial value of a biv.  Here we
   just record the location of the set and process it later.  */

static void
record_initial (dest, set)
     rtx dest;
     rtx set;
{
  struct iv_class *bl;

  if (GET_CODE (dest) != REG
      || REGNO (dest) >= max_reg_before_loop
      || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
    return;

  bl = reg_biv_class[REGNO (dest)];

  /* If this is the first set found, record it.  */
  if (bl->init_insn == 0)
    {
      bl->init_insn = note_insn;
      bl->init_set = set;
    }
}
/* If any of the registers in X are "old" and currently have a last use earlier
   than INSN, update them to have a last use of INSN.  Their actual last use
   will be the previous insn but it will not have a valid uid_luid so we can't
   use it.  */

static void
update_reg_last_use (x, insn)
     rtx x;
     rtx insn;
{
  /* Check for the case where INSN does not have a valid luid.  In this case,
     there is no need to modify the regno_last_uid, as this can only happen
     when code is inserted after the loop_end to set a pseudo's final value,
     and hence this insn will never be the last use of x.  */
  if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
      && INSN_UID (insn) < max_uid_for_loop
      && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
    REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
  else
    {
      register int i, j;
      register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    update_reg_last_use (XEXP (x, i), insn);
	  else if (fmt[i] == 'E')
	    for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	      update_reg_last_use (XVECEXP (x, i, j), insn);
	}
    }
}
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is non-zero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   The condition will be returned in a canonical form to simplify testing by
   callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.  */
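/* For example (added; the register number is hypothetical): a branch whose
   condition is (LE (reg 65) (const_int 4)) is returned as
   (LT (reg 65) (const_int 5)), and a condition written as
   (GT (const_int 0) (reg 65)) comes back with the constant second, as
   (LT (reg 65) (const_int 0)).  */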
rtx
get_condition (jump, earliest)
     rtx jump;
     rtx *earliest;
{
  enum rtx_code code;
  rtx prev = jump;
  rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  int did_reverse_condition = 0;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (GET_CODE (jump) != JUMP_INSN
      || ! condjump_p (jump) || simplejump_p (jump))
    return 0;

  code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
  op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
  op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);

  if (earliest)
    *earliest = jump;

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
    code = reverse_condition (code), did_reverse_condition ^= 1;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
    {
      /* Set non-zero when we find something of interest.  */
      rtx x = 0;

      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || GET_CODE (prev) != INSN
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (GET_CODE (op0) != REG)
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      if ((prev = prev_nonnote_insn (prev)) == 0
	  || GET_CODE (prev) != INSN
	  || FIND_REG_INC_NOTE (prev, 0)
	  || (set = single_set (prev)) == 0)
	break;
      /* If this is setting OP0, get what it sets it to if it looks like a
	 valid instruction.  */
      if (rtx_equal_p (SET_DEST (set), op0))
	{
	  enum machine_mode inner_mode = GET_MODE (SET_SRC (set));

	  if ((GET_CODE (SET_SRC (set)) == COMPARE
	       || (((code == NE
		     || (code == LT
			 && GET_MODE_CLASS (inner_mode) == MODE_INT
			 && (GET_MODE_BITSIZE (inner_mode)
			     <= HOST_BITS_PER_WIDE_INT)
			 && (STORE_FLAG_VALUE
			     & ((HOST_WIDE_INT) 1
				<< (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == LT
			 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
			 && FLOAT_STORE_FLAG_VALUE < 0)
#endif
		     ))
		   && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && (GET_MODE_BITSIZE (inner_mode)
			     <= HOST_BITS_PER_WIDE_INT)
			 && GET_MODE_CLASS (inner_mode) == MODE_INT
			 && (STORE_FLAG_VALUE
			     & ((HOST_WIDE_INT) 1
				<< (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
			 && FLOAT_STORE_FLAG_VALUE < 0)
#endif
		     ))
		   && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')
	    {
	      /* We might have reversed a LT to get a GE here.  But this wasn't
		 actually the comparison of data, so we don't flag that we
		 have had to reverse the condition.  */
	      did_reverse_condition ^= 1;
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;
      if (x)
	{
	  if (GET_RTX_CLASS (GET_CODE (x)) == '<')
	    code = GET_CODE (x);

	  if (reverse_code)
	    {
	      code = reverse_condition (code);
	      did_reverse_condition ^= 1;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;
  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
	{
	case LE:
	  if (const_val != max_val >> 1)
	    code = LT, op1 = GEN_INT (const_val + 1);
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((const_val & max_val)
	      != (((HOST_WIDE_INT) 1
		   << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
	    code = GT, op1 = GEN_INT (const_val - 1);
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = GEN_INT (uconst_val + 1);
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = GEN_INT (uconst_val - 1);
	  break;

	default:
	  break;
	}
    }
  /* If this was floating-point and we reversed anything other than an
     EQ or NE, return zero.  */
  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
      && did_reverse_condition && code != NE && code != EQ
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    return 0;

  /* Never return CC0; return zero instead.  */
  if (op0 == cc0_rtx)
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
/* Similar to above routine, except that we also put an invariant last
   unless both operands are invariants.  */

static rtx
get_condition_for_loop (x)
     rtx x;
{
  rtx comparison = get_condition (x, NULL_PTR);

  if (comparison == 0
      || ! invariant_p (XEXP (comparison, 0))
      || invariant_p (XEXP (comparison, 1)))
    return comparison;

  return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
			 XEXP (comparison, 1), XEXP (comparison, 0));
}
/* Analyze a loop in order to instrument it with the use of a count register.
   loop_start and loop_end are the first and last insns of the loop.
   This function works in cooperation with insert_bct ().
   loop_can_insert_bct[loop_num] is set according to whether the optimization
   is applicable to the loop.  When it is applicable, the following variables
   are also set:
      loop_start_value[loop_num]
      loop_comparison_value[loop_num]
      loop_increment[loop_num]
      loop_comparison_code[loop_num]  */
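/* Illustrative example (added): a loop of the shape

       for (i = 0; i < n; i += 2)
	 ...no calls, no jump tables...

   ends in a single conditional jump whose condition compares the word-mode
   counter `i' against an invariant bound, so the routine below records its
   start value, bound, increment and comparison code for insert_bct ().
   A loop whose body contains a call, or whose counter is narrower than a
   word, is rejected.  */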
#ifdef HAVE_decrement_and_branch_on_count

void analyze_loop_iterations (loop_start, loop_end)
     rtx loop_start, loop_end;
{
  rtx comparison, comparison_value;
  rtx iteration_var, initial_value, increment;
  enum rtx_code comparison_code;

  rtx last_loop_insn;
  rtx insn;
  int i;

  /* loop_variable mode */
  enum machine_mode original_mode;

  /* find the number of the loop */
  int loop_num = uid_loop_num[INSN_UID (loop_start)];

  /* we change our mind only when we are sure that the loop will be
     instrumented */
  loop_can_insert_bct[loop_num] = 0;

  /* is the optimization suppressed?  */
  if (! flag_branch_on_count_reg)
    return;

  /* make sure that the count register is not in use */
  if (loop_used_count_register[loop_num]) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
	       "analyze_loop_iterations %d: BCT instrumentation failed: count register already in use\n",
	       loop_num);
    return;
  }
  /* make sure that the function has no indirect jumps.  */
  if (indirect_jump_in_function) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
	       "analyze_loop_iterations %d: BCT instrumentation failed: indirect jump in function\n",
	       loop_num);
    return;
  }

  /* make sure that the last loop insn is a conditional jump */
  last_loop_insn = PREV_INSN (loop_end);
  if (GET_CODE (last_loop_insn) != JUMP_INSN || !condjump_p (last_loop_insn)) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
	       "analyze_loop_iterations %d: BCT instrumentation failed: invalid jump at loop end\n",
	       loop_num);
    return;
  }
  /* First find the iteration variable.  If the last insn is a conditional
     branch, and the insn preceding it tests a register value, make that
     register the iteration variable.  */

  /* We used to use prev_nonnote_insn here, but that fails because it might
     accidentally get the branch for a contained loop if the branch for this
     loop was deleted.  We can only trust branches immediately before the
     loop_end.  */

  comparison = get_condition_for_loop (last_loop_insn);
  /* ??? Get_condition may switch position of induction variable and
     invariant register when it canonicalizes the comparison.  */

  if (comparison == 0) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
	       "analyze_loop_iterations %d: BCT instrumentation failed: comparison not found\n",
	       loop_num);
    return;
  }

  comparison_code = GET_CODE (comparison);
  iteration_var = XEXP (comparison, 0);
  comparison_value = XEXP (comparison, 1);

  original_mode = GET_MODE (iteration_var);
  if (GET_MODE_CLASS (original_mode) != MODE_INT
      || GET_MODE_SIZE (original_mode) != UNITS_PER_WORD) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
	       "analyze_loop_iterations %d: BCT Instrumentation failed: loop variable not integer\n",
	       loop_num);
    return;
  }

  /* get info about loop bounds and increment */
  iteration_info (iteration_var, &initial_value, &increment,
		  loop_start, loop_end);
  /* make sure that all required loop data were found */
  if (!(initial_value && increment && comparison_value
	&& invariant_p (comparison_value) && invariant_p (increment)
	&& ! indirect_jump_in_function))
    {
      if (loop_dump_stream) {
	fprintf (loop_dump_stream,
		 "analyze_loop_iterations %d: BCT instrumentation failed because of wrong loop: ", loop_num);
	if (!(initial_value && increment && comparison_value)) {
	  fprintf (loop_dump_stream, "\tbounds not available: ");
	  if (! initial_value)
	    fprintf (loop_dump_stream, "initial ");
	  if (! increment)
	    fprintf (loop_dump_stream, "increment ");
	  if (! comparison_value)
	    fprintf (loop_dump_stream, "comparison ");
	  fprintf (loop_dump_stream, "\n");
	}
	if (!invariant_p (comparison_value) || !invariant_p (increment))
	  fprintf (loop_dump_stream, "\tloop bounds not invariant\n");
      }
      return;
    }
  /* make sure that the increment is constant */
  if (GET_CODE (increment) != CONST_INT) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
	       "analyze_loop_iterations %d: instrumentation failed: not arithmetic loop\n",
	       loop_num);
    return;
  }

  /* make sure that the loop contains neither function call, nor jump on table.
     (the count register might be altered by the called function, and might
     be used for a branch on table).  */
  for (insn = loop_start; insn && insn != loop_end; insn = NEXT_INSN (insn)) {
    if (GET_CODE (insn) == CALL_INSN) {
      if (loop_dump_stream)
	fprintf (loop_dump_stream,
		 "analyze_loop_iterations %d: BCT instrumentation failed: function call in the loop\n",
		 loop_num);
      return;
    }

    if (GET_CODE (insn) == JUMP_INSN
	&& (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
	    || GET_CODE (PATTERN (insn)) == ADDR_VEC)) {
      if (loop_dump_stream)
	fprintf (loop_dump_stream,
		 "analyze_loop_iterations %d: BCT instrumentation failed: computed branch in the loop\n",
		 loop_num);
      return;
    }
  }
  /* At this point, we are sure that the loop can be instrumented with BCT.
     Some of the loops, however, will not be instrumented - the final decision
     is taken by insert_bct ().  */
  if (loop_dump_stream)
    fprintf (loop_dump_stream,
	     "analyze_loop_iterations: loop (luid =%d) can be BCT instrumented.\n",
	     loop_num);

  /* mark all enclosing loops that they cannot use count register */
  /* ???: In fact, since insert_bct may decide not to instrument this loop,
     marking here may prevent instrumenting an enclosing loop that could
     actually be instrumented.  But since this is rare, it is safer to mark
     here in case the order of calling (analyze/insert)_bct would be changed.  */
  for (i = loop_num; i != -1; i = loop_outer_loop[i])
    loop_used_count_register[i] = 1;

  /* Set data structures which will be used by the instrumentation phase */
  loop_start_value[loop_num] = initial_value;
  loop_comparison_value[loop_num] = comparison_value;
  loop_increment[loop_num] = increment;
  loop_comparison_code[loop_num] = comparison_code;
  loop_can_insert_bct[loop_num] = 1;
}
/* Instrument a loop for insertion of the bct instruction.  We distinguish
   between loops with compile-time bounds and those with run-time bounds.
   The loop behaviour is analyzed according to the following
   characteristics/variables:

   ; comparison-value: the value to which the iteration counter is compared.
   ; initial-value:    iteration-counter initial value.
   ; increment:        iteration-counter increment.

   Computed variables:

   ; increment-direction: the sign of the increment.
   ; compare-direction:   '1' for LT, LE, '-1' for GT, GE, '0' for NE.
   ; range-direction:     sign (comparison-value - initial-value)

   We give up on the following cases:

   ; loop variable overflow.
   ; run-time loop bounds with comparison code NE.  */
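/* Example (added for exposition): for a loop controlled by `i = 0; ...;
   i += 4' with continuation condition `i < n', the increment direction is
   positive and the comparison keeps the counter moving toward the bound,
   so the loop terminates normally.  If the same positive increment were
   paired with a `>' continuation test, the loop could only terminate once
   the counter wrapped around; that is the "loop variable overflow" case
   rejected below.  */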
static void
insert_bct (loop_start, loop_end)
     rtx loop_start, loop_end;
{
  rtx initial_value, comparison_value, increment;
  enum rtx_code comparison_code;

  int increment_direction, compare_direction;

  /* if the loop condition is <= or >=, the number of iterations
     is 1 more than the range of the bounds of the loop */
  int add_iteration = 0;

  /* the only machine mode we work with is the integer of the size that the
     machine has */
  enum machine_mode loop_var_mode = SImode;

  int loop_num = uid_loop_num[INSN_UID (loop_start)];
  /* get loop-variables.  No need to check that these are valid - already
     checked in analyze_loop_iterations ().  */
  comparison_code = loop_comparison_code[loop_num];
  initial_value = loop_start_value[loop_num];
  comparison_value = loop_comparison_value[loop_num];
  increment = loop_increment[loop_num];

  /* check analyze_loop_iterations decision for this loop.  */
  if (! loop_can_insert_bct[loop_num]) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
	       "insert_bct: [%d] - was decided not to instrument by analyze_loop_iterations ()\n",
	       loop_num);
    return;
  }

  /* It's impossible to instrument a completely unrolled loop.  */
  if (loop_unroll_factor[loop_num] == -1)
    return;
  /* make sure that the last loop insn is a conditional jump.
     This check is repeated from analyze_loop_iterations (),
     because unrolling might have changed that.  */
  if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
      || !condjump_p (PREV_INSN (loop_end))) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
	       "insert_bct: not instrumenting BCT because of invalid branch\n");
    return;
  }

  /* fix the increment in case the loop was unrolled.  */
  if (loop_unroll_factor[loop_num] > 1)
    increment = GEN_INT (INTVAL (increment) * loop_unroll_factor[loop_num]);
  /* determine properties and directions of the loop */
  increment_direction = (INTVAL (increment) > 0) ? 1 : -1;
  switch (comparison_code) {
  case LE:
    compare_direction = 1;
    add_iteration = 1;
    break;
  case GE:
    compare_direction = -1;
    add_iteration = 1;
    break;
  case EQ:
    /* in this case we cannot know the number of iterations */
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
	       "insert_bct: %d: loop cannot be instrumented: == in condition\n",
	       loop_num);
    return;
  case LT:
    compare_direction = 1;
    break;
  case GT:
    compare_direction = -1;
    break;
  case NE:
    compare_direction = 0;
    break;
  default:
    abort ();
  }
  /* make sure that the loop does not end by an overflow */
  if (compare_direction != increment_direction) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
	       "insert_bct: %d: loop cannot be instrumented: terminated by overflow\n",
	       loop_num);
    return;
  }

  /* try to instrument the loop.  */

  /* Handle the simpler case, where the bounds are known at compile time.  */
  if (GET_CODE (initial_value) == CONST_INT
      && GET_CODE (comparison_value) == CONST_INT)
    {
      int n_iterations;
      int increment_value_abs = INTVAL (increment) * increment_direction;

      /* check the relation between compare-val and initial-val */
      int difference = INTVAL (comparison_value) - INTVAL (initial_value);
      int range_direction = (difference > 0) ? 1 : -1;

      /* make sure the loop executes enough iterations to gain from BCT */
      if (difference > -3 && difference < 3) {
	if (loop_dump_stream)
	  fprintf (loop_dump_stream,
		   "insert_bct: loop %d not BCT instrumented: too small iteration count.\n",
		   loop_num);
	return;
      }

      /* make sure that the loop executes at least once */
      if ((range_direction == 1 && compare_direction == -1)
	  || (range_direction == -1 && compare_direction == 1))
	{
	  if (loop_dump_stream)
	    fprintf (loop_dump_stream,
		     "insert_bct: loop %d: does not iterate even once.  Not instrumenting.\n",
		     loop_num);
	  return;
	}

      /* make sure that the loop does not end by an overflow (with compile-time
	 bounds we must have an additional check for overflow, because here
	 we also support the compare code of 'NE').  */
      if (comparison_code == NE
	  && increment_direction != range_direction) {
	if (loop_dump_stream)
	  fprintf (loop_dump_stream,
		   "insert_bct (compile time bounds): %d: loop not instrumented: terminated by overflow\n",
		   loop_num);
	return;
      }
      /* Determine the number of iterations by:
	 ;
	 ;                  compare-val - initial-val + (increment -1) + additional-iteration
	 ; num_iterations = -----------------------------------------------------------------
	 ;                                           increment
       */
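      /* Worked example (added): for a loop counting from 0 toward a
	 compile-time bound of 9 by increments of 3, a `<' test gives
	 difference = 9, add_iteration = 0 and (9 + 3 - 1 + 0) / 3 = 3
	 iterations (i = 0, 3, 6); a `<=' test sets add_iteration = 1 and
	 yields (9 + 3 - 1 + 1) / 3 = 4 iterations (i = 0, 3, 6, 9).  */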
      difference = (range_direction > 0) ? difference : -difference;

#if 0
      fprintf (stderr, "difference is: %d\n", difference); /* @*/
      fprintf (stderr, "increment_value_abs is: %d\n", increment_value_abs); /* @*/
      fprintf (stderr, "add_iteration is: %d\n", add_iteration); /* @*/
      fprintf (stderr, "INTVAL (comparison_value) is: %d\n", INTVAL (comparison_value)); /* @*/
      fprintf (stderr, "INTVAL (initial_value) is: %d\n", INTVAL (initial_value)); /* @*/
#endif

      if (increment_value_abs == 0) {
	fprintf (stderr, "insert_bct: error: increment == 0 !!!\n");
	abort ();
      }

      n_iterations = (difference + increment_value_abs - 1 + add_iteration)
		     / increment_value_abs;

#if 0
      fprintf (stderr, "number of iterations is: %d\n", n_iterations); /* @*/
#endif

      instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));

      /* Done with this loop.  */
      return;
    }
  /* Handle the more complex case, where the bounds are NOT known at compile
     time.  In this case we generate a run-time calculation of the number of
     iterations.  */

  /* With run-time bounds, if the compare is of the form '!=' we give up */
  if (comparison_code == NE) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
	       "insert_bct: fail for loop %d: runtime bounds with != comparison\n",
	       loop_num);
    return;
  }

  /* We rely on the existence of the run-time guard to ensure that the
     loop executes at least once.  */
  {
    rtx temp_reg;
    rtx iterations_num_reg;
    rtx sequence;

    int increment_value_abs = INTVAL (increment) * increment_direction;
    /* make sure that the increment is a power of two, otherwise (an
       expensive) divide is needed.  */
    if (exact_log2 (increment_value_abs) == -1)
      {
	if (loop_dump_stream)
	  fprintf (loop_dump_stream,
		   "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
	return;
      }

    /* compute the number of iterations */
    start_sequence ();

    /* Again, the number of iterations is calculated by:
       ;
       ;                  compare-val - initial-val + (increment -1) + additional-iteration
       ; num_iterations = -----------------------------------------------------------------
       ;                                           increment
     */
    /* ??? Do we have to call copy_rtx here before passing rtx to
       expand_binop?  */
    if (compare_direction > 0) {
      /* <, <= : the loop variable is increasing */
      temp_reg = expand_binop (loop_var_mode, sub_optab, comparison_value,
			       initial_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
    }
    else {
      temp_reg = expand_binop (loop_var_mode, sub_optab, initial_value,
			       comparison_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
    }

    if (increment_value_abs - 1 + add_iteration != 0)
      temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
			       GEN_INT (increment_value_abs - 1 + add_iteration),
			       NULL_RTX, 0, OPTAB_LIB_WIDEN);

    if (increment_value_abs != 1)
      {
	/* ??? This will generate an expensive divide instruction for
	   most targets.  The original authors apparently expected this
	   to be a shift, since they test for power-of-2 divisors above,
	   but just naively generating a divide instruction will not give
	   a shift.  It happens to work for the PowerPC target because
	   the rs6000.md file has a divide pattern that emits shifts.
	   It will probably not work for any other target.  */
	iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
					   temp_reg,
					   GEN_INT (increment_value_abs),
					   NULL_RTX, 0, OPTAB_LIB_WIDEN);
      }
    else
      iterations_num_reg = temp_reg;

    sequence = gen_sequence ();
    end_sequence ();
    emit_insn_before (sequence, loop_start);
    instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
  }
}
/* Instrument a loop by inserting a bct in it.  This is done in the following way:
   1. A new register is created and assigned the hard register number of the
      count register.
   2. In the head of the loop the new variable is initialized with the value
      passed in the loop_num_iterations parameter.
   3. At the end of the loop, a comparison of the register with 0 is generated.
      The created comparison follows the pattern defined for the
      decrement_and_branch_on_count insn, so this insn will be generated in the
      assembly generation phase.
   4. The compare&branch on the old variable is deleted.  So, if the loop-variable
      was not used elsewhere, it will be eliminated by data-flow analysis.  */
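/* Sketch of the resulting loop shape (added; the pseudo register number is
   hypothetical):

       (set (reg:SI 200) <loop_num_iterations>)                  ; GPR copy
       (set (reg:SI COUNT_REGISTER_REGNUM) (reg:SI 200))         ; count register
     start_label:
       ... loop body ...
       decrement_and_branch_on_count (reg:SI COUNT_REGISTER_REGNUM) start_label

   The original compare and conditional branch at the loop end are the two
   insns deleted below before the label is inserted.  */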
static void
instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
     rtx loop_start, loop_end;
     rtx loop_num_iterations;
{
  rtx temp_reg1, temp_reg2;
  rtx start_label;
  rtx sequence;

  enum machine_mode loop_var_mode = SImode;

  if (HAVE_decrement_and_branch_on_count)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "Loop: Inserting BCT\n");

      /* eliminate the check on the old variable */
      delete_insn (PREV_INSN (loop_end));
      delete_insn (PREV_INSN (loop_end));

      /* insert the label which will delimit the start of the loop */
      start_label = gen_label_rtx ();
      emit_label_after (start_label, loop_start);

      /* insert initialization of the count register into the loop header */
      start_sequence ();
      temp_reg1 = gen_reg_rtx (loop_var_mode);
      emit_insn (gen_move_insn (temp_reg1, loop_num_iterations));

      /* this will be the count register */
      temp_reg2 = gen_rtx_REG (loop_var_mode, COUNT_REGISTER_REGNUM);
      /* we have to move the value to the count register from a GPR
	 because the rtx pointed to by loop_num_iterations could contain
	 an expression which cannot be moved into the count register */
      emit_insn (gen_move_insn (temp_reg2, temp_reg1));

      sequence = gen_sequence ();
      end_sequence ();
      emit_insn_after (sequence, loop_start);

      /* insert a new comparison on the count register instead of the
	 old one, generating the needed BCT pattern (that will be
	 later recognized by the assembly generation phase).  */
      emit_jump_insn_before (gen_decrement_and_branch_on_count (temp_reg2, start_label),
			     loop_end);
      LABEL_NUSES (start_label)++;
    }
}

#endif /* HAVE_decrement_and_branch_on_count */
/* Scan the function and determine whether it has indirect (computed) jumps.

   This is taken mostly from flow.c; similar code exists elsewhere
   in the compiler.  It may be useful to put this into rtlanal.c.  */

static int
indirect_jump_in_function_p (start)
     rtx start;
{
  rtx insn;

  for (insn = start; insn; insn = NEXT_INSN (insn))
    if (computed_jump_p (insn))
      return 1;

  return 0;
}