/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 88, 89, 91-97, 1998 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value and changes these to zero the entire
   register once before the loop and merely copy the low part within
   the loop.

   Most of the complexity is in heuristics to decide when it is worthwhile
   to do these things.  */
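
/* As an illustrative sketch (not code from this pass): strength reduction
   turns a loop where the address of x[i] is recomputed as x + i*4 each
   iteration,

	for (i = 0; i < n; i++)
	  x[i] = 0;

   into the equivalent

	for (p = x; p < x + n; p++)
	  *p = 0;

   so the multiply in the address computation becomes an addition.  The
   zero-extension case similarly replaces a widening copy executed every
   iteration with one clearing of the full register before the loop plus
   a narrow copy into its low part inside the loop.  */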
#include "insn-config.h"
#include "insn-flags.h"
#include "hard-reg-set.h"
/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

static int *uid_luid;
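
/* For example, a JUMP_INSN whose target label has a luid below that of
   the loop's NOTE_INSN_LOOP_BEG must enter from outside the loop; plain
   uids cannot be compared this way, since they do not increase
   monotonically through the insn stream.  */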
/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

static int *uid_loop_num;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;
72 /* Number of loops detected in current function. Used as index to the
75 static int max_loop_num
;
/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;
/* For each loop, gives the containing loop number, -1 if none.  */

static int *loop_outer_loop;

/* The main output of analyze_loop_iterations is placed here.  */

int *loop_can_insert_bct;
/* For each loop, determines whether some of its inner loops have used
   the count register.  */

int *loop_used_count_register;
/* loop parameters for arithmetic loops.  These loops have a loop variable
   which is initialized to loop_start_value, incremented in each iteration
   by "loop_increment".  At the end of the iteration the loop variable is
   compared to the loop_comparison_value (using loop_comparison_code).  */

rtx *loop_increment;
rtx *loop_comparison_value;
rtx *loop_start_value;
enum rtx_code *loop_comparison_code;
/* For each loop, keep track of its unrolling factor.
   Potential values:
     -1: completely unrolled
     >0: holds the exact unroll factor.  */

int *loop_unroll_factor;
/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;
/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include
   return instructions on this list.  */

rtx *loop_number_exit_labels;
/* Indexed by loop number, counts the number of LABEL_REFs on
   loop_number_exit_labels for this loop and all loops nested inside it.  */

int *loop_number_exit_count;
/* Holds the number of loop iterations.  It is zero if the number could not
   be calculated.  Must be unsigned since the number of iterations can
   be as high as 2^wordsize-1.  For loops with a wider iterator, this number
   will be zero if the number of loop iterations is too large for an
   unsigned integer to hold.  */

unsigned HOST_WIDE_INT loop_n_iterations;
/* Nonzero if there is a subroutine call in the current loop.  */

static int loop_has_call;

/* Nonzero if there is a volatile memory reference in the current
   loop.  */

static int loop_has_volatile;

/* Added loop_continue which is the NOTE_INSN_LOOP_CONT of the
   current loop.  A continue statement will generate a branch to
   NEXT_INSN (loop_continue).  */

static rtx loop_continue;
/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static int *n_times_set;
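
/* Summarizing the encoding above:

     n_times_set[R] >  0	R is set that many times, not invariant
     n_times_set[R] == 0	R is invariant
     n_times_set[R] == -1	R is a candidate not known constant
     n_times_set[R] == -2	R is a candidate known equal to a constant  */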
/* Original value of n_times_set; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static int *n_times_used;
/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */

static char *may_not_optimize;
/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;

/* Array of MEMs that are stored in this loop.  If there are too many to fit
   here, we just turn on unknown_address_altered.  */

#define NUM_STORES 30
static rtx loop_store_mems[NUM_STORES];

/* Index of first available slot in above array.  */
static int loop_store_mems_idx;
typedef struct loop_mem_info
{
  rtx mem;	/* The MEM itself.  */
  rtx reg;	/* Corresponding pseudo, if any.  */
  int optimize;	/* Nonzero if we can optimize access to this MEM.  */
} loop_mem_info;
/* Array of MEMs that are used (read or written) in this loop, but
   cannot be aliased by anything in this loop, except perhaps
   themselves.  In other words, if loop_mems[i] is altered during the
   loop, it is altered by an expression that is rtx_equal_p to it.  */

static loop_mem_info *loop_mems;

/* The index of the next available slot in LOOP_MEMS.  */

static int loop_mems_idx;

/* The number of elements allocated in LOOP_MEMS.  */

static int loop_mems_allocated;
/* Nonzero if we don't know what MEMs were changed in the current loop.
   This happens if the loop contains a call (in which case `loop_has_call'
   will also be set) or if we store into more than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Number of loops contained within the current one, including itself.  */
static int loops_enclosed;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;
/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  int regno;			/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */
  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
				    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;	/* Nonzero means it is a mode for a low part
				   that we should avoid changing when clearing
				   the rest of the reg.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};
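
/* The movables found in one loop live on a singly linked chain in insn
   order; ignore_some_movables, force_movables and combine_movables
   annotate that chain before move_movables decides which entries are
   actually worth moving.  */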
static struct movable *the_movables;

FILE *loop_dump_stream;

/* Forward declarations.  */
static void find_and_verify_loops PROTO((rtx));
static void mark_loop_jump PROTO((rtx, int));
static void prescan_loop PROTO((rtx, rtx));
static int reg_in_basic_block_p PROTO((rtx, rtx));
static int consec_sets_invariant_p PROTO((rtx, int, rtx));
static rtx libcall_other_reg PROTO((rtx, rtx));
static int labels_in_range_p PROTO((rtx, int));
static void count_loop_regs_set PROTO((rtx, rtx, char *, rtx *, int *, int));
static void note_addr_stored PROTO((rtx, rtx));
static int loop_reg_used_before_p PROTO((rtx, rtx, rtx, rtx, rtx));
static void scan_loop PROTO((rtx, rtx, int));
static void record_excess_regs PROTO((rtx, rtx, rtx *));
static void replace_call_address PROTO((rtx, rtx, rtx));
static rtx skip_consec_insns PROTO((rtx, int));
static int libcall_benefit PROTO((rtx));
static void ignore_some_movables PROTO((struct movable *));
static void force_movables PROTO((struct movable *));
static void combine_movables PROTO((struct movable *, int));
static int regs_match_p PROTO((rtx, rtx, struct movable *));
static int rtx_equal_for_loop_p PROTO((rtx, rtx, struct movable *));
static void add_label_notes PROTO((rtx, rtx));
static void move_movables PROTO((struct movable *, int, int, rtx, rtx, int));
static int count_nonfixed_reads PROTO((rtx));
static void strength_reduce PROTO((rtx, rtx, rtx, int, rtx, rtx, int));
static void find_single_use_in_loop PROTO((rtx, rtx, rtx *));
static int valid_initial_value_p PROTO((rtx, rtx, int, rtx));
static void find_mem_givs PROTO((rtx, rtx, int, rtx, rtx));
static void record_biv PROTO((struct induction *, rtx, rtx, rtx, rtx,
			      int, int));
static void check_final_value PROTO((struct induction *, rtx, rtx));
static void record_giv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx,
			      int, enum g_types, int, rtx *, rtx, rtx));
static void update_giv_derive PROTO((rtx));
static int basic_induction_var PROTO((rtx, enum machine_mode, rtx, rtx,
				      rtx *, rtx *));
static rtx simplify_giv_expr PROTO((rtx, int *));
static int general_induction_var PROTO((rtx, rtx *, rtx *, rtx *, int, int *));
static int consec_sets_giv PROTO((int, rtx, rtx, rtx, rtx *, rtx *));
static int check_dbra_loop PROTO((rtx, int, rtx));
static rtx express_from_1 PROTO((rtx, rtx, rtx));
static rtx express_from PROTO((struct induction *, struct induction *));
static rtx combine_givs_p PROTO((struct induction *, struct induction *));
static void combine_givs PROTO((struct iv_class *));
static int product_cheap_p PROTO((rtx, rtx));
static int maybe_eliminate_biv PROTO((struct iv_class *, rtx, rtx,
				      int, int, int));
static int maybe_eliminate_biv_1 PROTO((rtx, rtx, struct iv_class *, int,
					rtx));
static int last_use_this_basic_block PROTO((rtx, rtx));
static void record_initial PROTO((rtx, rtx));
static void update_reg_last_use PROTO((rtx, rtx));
static rtx next_insn_in_loop PROTO((rtx, rtx, rtx, rtx));
static void load_mems_and_recount_loop_regs_set PROTO((rtx, rtx, rtx,
						       rtx, int *));
static void load_mems PROTO((rtx, rtx, rtx, rtx));
static int insert_loop_mem PROTO((rtx *, void *));
static int replace_loop_mem PROTO((rtx *, void *));
static int replace_label PROTO((rtx *, void *));
typedef struct rtx_and_int
{
  rtx r;
  int i;
} rtx_and_int;

typedef struct rtx_pair
{
  rtx r1;
  rtx r2;
} rtx_pair;
/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END) \
  (INSN_UID (INSN) < max_uid_for_loop \
   && INSN_LUID (INSN) >= INSN_LUID (START) \
   && INSN_LUID (INSN) <= INSN_LUID (END))
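
/* The INSN_UID bound above guards against insns created by loop itself:
   their uids are at or above max_uid_for_loop, so they have no luids and
   the two luid comparisons would read outside the uid_luid mapping.  */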
/* This is extern from unroll.c */
extern void iteration_info PROTO((rtx, rtx *, rtx *, rtx, rtx));
/* Two main functions for implementing bct:
   the first to be called before loop unrolling, and the second after.  */
#ifdef HAVE_decrement_and_branch_on_count
static void analyze_loop_iterations PROTO((rtx, rtx));
static void insert_bct PROTO((rtx, rtx));

/* Auxiliary function that inserts the bct pattern into the loop.  */
static void instrument_loop_bct PROTO((rtx, rtx, rtx));
#endif /* HAVE_decrement_and_branch_on_count */
/* Indirect_jump_in_function is computed once per function.  */
int indirect_jump_in_function = 0;
static int indirect_jump_in_function_p PROTO((rtx));
/* Relative gain of eliminating various kinds of operations.  */

static int add_cost;
static int shift_cost;
static int mult_cost;

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;
void
init_loop ()
{
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

#ifdef ADDRESS_COST
  reg_address_cost = ADDRESS_COST (reg);
#else
  reg_address_cost = rtx_cost (reg, MEM);
#endif

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  copy_cost = 2 * 2;

  /* Free the objects we just allocated.  */
  obfree (free_point);

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
}
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile, unroll_p)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int unroll_p;
{
  register rtx insn;
  register int i;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);
  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;
  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
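
  /* The 32 extra slots per loop are slack for labels that
     find_and_verify_loops may create; the "See if we went too far" check
     below catches the unlikely case that even this slack is exhausted.  */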
  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
  bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));
  /* Allocate tables for recording each loop.  We set each entry, so they
     need not be initialized.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));
  /* This is initialized by the unrolling code, so we go ahead
     and clear them just in case we are not performing loop
     unrolling.  */
  loop_unroll_factor = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_unroll_factor, max_loop_num * sizeof (int));
  /* Allocate for BCT optimization.  */
  loop_can_insert_bct = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_can_insert_bct, max_loop_num * sizeof (int));

  loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));

  loop_increment = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_comparison_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_start_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  bzero ((char *) loop_increment, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_comparison_value, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_start_value, max_loop_num * sizeof (rtx));

  loop_comparison_code
    = (enum rtx_code *) alloca (max_loop_num * sizeof (enum rtx_code));
  bzero ((char *) loop_comparison_code, max_loop_num * sizeof (enum rtx_code));
  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();
  /* See if we went too far.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid () + 1;
  /* Compute the mapping from uids to luids.
     LUIDs are numbers assigned to insns, like uids,
     except that luids increase monotonically through the code.
     Don't assign luids to line-number NOTEs, so that the distance in luids
     between two insns is not affected by -g.  */

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }

  max_luid = i + 1;
  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];
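
  /* The first loop above seeds uid_luid[0] with the first nonzero luid,
     so that the backward fill always finds a nonzero value in
     uid_luid[i - 1] even when the leading entries are zero.  */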
  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);
  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
		 unroll_p);

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original one
     to one mapping will remain.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    unroll_block_trees ();

  end_alias_analysis ();
}
/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP_TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (insn, start, end, loop_top)
     rtx insn;
     rtx start;
     rtx end;
     rtx loop_top;
{
  insn = NEXT_INSN (insn);

  /* Go to the top of the loop, and continue there.  */
/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (loop_start, end, unroll_p)
     rtx loop_start, end;
     int unroll_p;
{
  register int i;
  register rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  rtx loop_top = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  rtx scan_start;
  /* Number of insns in the loop.  */
  int insn_count;
  int in_libcall = 0;
  int tem;
  rtx temp;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* If we have calls, contains the insn in which a register was used
     if it was used exactly once; contains const0_rtx if it was used more
     than once.  */
  rtx *reg_single_usage = 0;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;
  int nregs;
  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */

  for (p = NEXT_INSN (loop_start);
       p != end
	 && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
	 && (GET_CODE (p) != NOTE
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  scan_start = p;
  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end);
  threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
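
  /* The threshold trades register lifetime against saved insns: each
     unit of savings justifies a proportionally longer life span, and
     only half as much when the loop contains calls, since calls leave
     fewer registers free.  */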
  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN)  */
      if (simplejump_p (p)
	  && JUMP_LABEL (p) != 0
	  /* Check to see whether the jump actually
	     jumps out of the loop (meaning it's no loop).
	     This case can happen for things like
	     do {..} while (0).  If this label was generated previously
	     by loop, we can't tell anything about it and have to reject
	     the loop.  */
	  && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, end))
	{
	  loop_top = next_label (scan_start);
	  scan_start = JUMP_LABEL (p);
	}
    }
  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (end));
      return;
    }
  /* Count number of times each reg is set during this loop.
     Set may_not_optimize[I] if it is not safe to move out
     the setting of register I.  If this loop has calls, set
     reg_single_usage[I].  */

  /* Allocate extra space for REGS that might be created by
     load_mems.  */
  nregs = max_reg_num () + loop_mems_idx;
  n_times_set = (int *) alloca (nregs * sizeof (int));
  n_times_used = (int *) alloca (nregs * sizeof (int));
  may_not_optimize = (char *) alloca (nregs);
  bzero ((char *) n_times_set, nregs * sizeof (int));
  bzero (may_not_optimize, nregs);

  if (loop_has_call)
    {
      reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
      bzero ((char *) reg_single_usage, nregs * sizeof (rtx));
    }
  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
		       may_not_optimize, reg_single_usage, &insn_count, nregs);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    may_not_optimize[i] = 1, n_times_set[i] = 1;
  bcopy ((char *) n_times_set, (char *) n_times_used, nregs * sizeof (int));
  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	       INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_continue)
	fprintf (loop_dump_stream, "Continue at insn %d.\n",
		 INSN_UID (loop_continue));
    }
  /* Scan through the loop finding insns that are safe to move.
     Set n_times_set negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
       p != NULL_RTX;
       p = next_insn_in_loop (p, scan_start, end, loop_top))
    {
      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
	  && find_reg_note (p, REG_LIBCALL, NULL_RTX))
	in_libcall = 1;
      else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
	       && find_reg_note (p, REG_RETVAL, NULL_RTX))
	in_libcall = 0;
      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && ! may_not_optimize[REGNO (SET_DEST (set))])
	{
	  int tem1 = 0;
	  int tem2 = 0;
	  int move_insn = 0;
	  rtx src = SET_SRC (set);
	  rtx dependencies = 0;
	  /* Figure out what to use as a source of this insn.  If a REG_EQUIV
	     note is given or if a REG_EQUAL note with a constant operand is
	     specified, use it as the source and mark that we should move
	     this insn by calling emit_move_insn rather than duplicating the
	     insn.

	     Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
	     is present.  */
	  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	  if (temp)
	    src = XEXP (temp, 0), move_insn = 1;
	  else
	    {
	      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
	      if (temp && CONSTANT_P (XEXP (temp, 0)))
		src = XEXP (temp, 0), move_insn = 1;
	      if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		{
		  src = XEXP (temp, 0);
		  /* A libcall block can use regs that don't appear in
		     the equivalent expression.  To move the libcall,
		     we must move those regs too.  */
		  dependencies = libcall_other_reg (p, src);
		}
	    }
	  /* Don't try to optimize a register that was made
	     by loop-optimization for an inner loop.
	     We don't know its life-span, so we can't compute the benefit.  */
	  if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
	    ;
	  /* In order to move a register, we need to have one of three cases:
	     (1) it is used only in the same basic block as the set
	     (2) it is not a user variable and it is not used in the
		 exit test (this can cause the variable to be used
		 before it is set just like a user-variable).
	     (3) the set is guaranteed to be executed once the loop starts,
		 and the reg is not used until after that.  */
	  else if (! ((! maybe_never
		       && ! loop_reg_used_before_p (set, p, loop_start,
						    scan_start, end))
		      || (! REG_USERVAR_P (SET_DEST (set))
			  && ! REG_LOOP_TEST_P (SET_DEST (set)))
		      || reg_in_basic_block_p (p, SET_DEST (set))))
	    ;
	  else if ((tem = invariant_p (src))
		   && (dependencies == 0
		       || (tem2 = invariant_p (dependencies)) != 0)
		   && (n_times_set[REGNO (SET_DEST (set))] == 1
		       || (tem1
			   = consec_sets_invariant_p
			       (SET_DEST (set),
				n_times_set[REGNO (SET_DEST (set))],
				p)))
		   /* If the insn can cause a trap (such as divide by zero),
		      can't move it unless it's guaranteed to be executed
		      once loop is entered.  Even a function call might
		      prevent the trap insn from being reached
		      (since it might exit!)  */
		   && ! ((maybe_never || call_passed)
			 && may_trap_p (src)))
	    {
	      register struct movable *m;
	      register int regno = REGNO (SET_DEST (set));
	      /* A potential lossage is where we have a case where two insns
		 can be combined as long as they are both in the loop, but
		 we move one of them outside the loop.  For large loops,
		 this can lose.  The most common case of this is the address
		 of a function being called.

		 Therefore, if this register is marked as being used exactly
		 once if we are in a loop with calls (a "large loop"), see if
		 we can replace the usage of this register with the source
		 of this SET.  If we can, delete this insn.

		 Don't do this if P has a REG_RETVAL note or if we have
		 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

	      if (reg_single_usage && reg_single_usage[regno] != 0
		  && reg_single_usage[regno] != const0_rtx
		  && REGNO_FIRST_UID (regno) == INSN_UID (p)
		  && (REGNO_LAST_UID (regno)
		      == INSN_UID (reg_single_usage[regno]))
		  && n_times_set[REGNO (SET_DEST (set))] == 1
		  && ! side_effects_p (SET_SRC (set))
		  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
		  && (! SMALL_REGISTER_CLASSES
		      || (! (GET_CODE (SET_SRC (set)) == REG
			     && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
		  /* This test is not redundant; SET_SRC (set) might be
		     a call-clobbered register and the life of REGNO
		     might span a call.  */
		  && ! modified_between_p (SET_SRC (set), p,
					   reg_single_usage[regno])
		  && no_labels_between_p (p, reg_single_usage[regno])
		  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					   reg_single_usage[regno]))
		{
		  /* Replace any usage in a REG_EQUAL note.  Must copy the
		     new source, so that we don't get rtx sharing between the
		     SET_SOURCE and REG_NOTES of insn p.  */
		  REG_NOTES (reg_single_usage[regno])
		    = replace_rtx (REG_NOTES (reg_single_usage[regno]),
				   SET_DEST (set), copy_rtx (SET_SRC (set)));

		  PUT_CODE (p, NOTE);
		  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
		  NOTE_SOURCE_FILE (p) = 0;
		  n_times_set[regno] = 0;
		  continue;
		}
	      m = (struct movable *) alloca (sizeof (struct movable));
	      m->next = 0;
	      m->insn = p;
	      m->set_src = src;
	      m->dependencies = dependencies;
	      m->set_dest = SET_DEST (set);
	      m->force = 0;
	      m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
	      m->done = 0;
	      m->forces = 0;
	      m->partial = 0;
	      m->move_insn = move_insn;
	      m->move_insn_first = 0;
	      m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
	      m->savemode = VOIDmode;
	      m->regno = regno;
	      /* Set M->cond if either invariant_p or consec_sets_invariant_p
		 returned 2 (only conditionally invariant).  */
	      m->cond = ((tem | tem1 | tem2) > 1);
	      m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
			   || uid_luid[REGNO_FIRST_UID (regno)]
			      < INSN_LUID (loop_start));
	      m->match = 0;
	      m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
			     - uid_luid[REGNO_FIRST_UID (regno)]);
	      m->savings = n_times_used[regno];
	      if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		m->savings += libcall_benefit (p);
	      n_times_set[regno] = move_insn ? -2 : -1;
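	      /* This is the candidate marking described at n_times_set
		 above: -2 when the value is known equal to a constant
		 (we will re-emit it with emit_move_insn), -1 otherwise.  */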
	      /* Add M to the end of the chain MOVABLES.  */
	      if (movables == 0)
		movables = m;
	      else
		last_movable->next = m;
	      last_movable = m;
	      if (m->consec > 0)
		{
		  /* It is possible for the first instruction to have a
		     REG_EQUAL note but a non-invariant SET_SRC, so we must
		     remember the status of the first instruction in case
		     the last instruction doesn't have a REG_EQUAL note.  */
		  m->move_insn_first = m->move_insn;

		  /* Skip this insn, not checking REG_LIBCALL notes.  */
		  p = next_nonnote_insn (p);
		  /* Skip the consecutive insns, if there are any.  */
		  p = skip_consec_insns (p, m->consec);
		  /* Back up to the last insn of the consecutive group.  */
		  p = prev_nonnote_insn (p);
		  /* We must now reset m->move_insn, m->is_equiv, and possibly
		     m->set_src to correspond to the effects of all the
		     insns.  */
		  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		  if (temp)
		    m->set_src = XEXP (temp, 0), m->move_insn = 1;
		  else
		    {
		      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		      if (temp && CONSTANT_P (XEXP (temp, 0)))
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			m->move_insn = 0;
		    }
		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		}
	    }
	  /* If this register is always set within a STRICT_LOW_PART
	     or set to zero, then its high bytes are constant.
	     So clear them outside the loop and within the loop
	     just load the low bytes.
	     We must check that the machine has an instruction to do so.
	     Also, if the value loaded into the register
	     depends on the same register, this cannot be done.  */
	  else if (SET_SRC (set) == const0_rtx
		   && GET_CODE (NEXT_INSN (p)) == INSN
		   && (set1 = single_set (NEXT_INSN (p)))
		   && GET_CODE (set1) == SET
		   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
		       == SET_DEST (set))
		   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
	    {
	      register int regno = REGNO (SET_DEST (set));
	      if (n_times_set[regno] == 2)
		{
		  register struct movable *m;
		  m = (struct movable *) alloca (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_dest = SET_DEST (set);
		  m->dependencies = 0;
		  m->force = 0;
		  m->consec = 0;
		  m->done = 0;
		  m->forces = 0;
		  m->move_insn = 0;
		  m->move_insn_first = 0;
		  m->partial = 1;
		  /* If the insn may not be executed on some cycles,
		     we can't clear the whole reg; clear just high part.
		     Not even if the reg is used only within this loop.
		     Consider this:
		     while (1)
		       while (s != t) {
			 if (foo ()) x = *s;
			 use (x);
		       }
		     Clearing x before the inner loop could clobber a value
		     being saved from the last time around the outer loop.
		     However, if the reg is not used outside this loop
		     and all uses of the register are in the same
		     basic block as the store, there is no problem.

		     If this insn was made by loop, we don't know its
		     INSN_LUID and hence must make a conservative
		     assumption.  */
		  m->global = (INSN_UID (p) >= max_uid_for_loop
			       || (uid_luid[REGNO_LAST_UID (regno)]
				   > INSN_LUID (end))
			       || (uid_luid[REGNO_FIRST_UID (regno)]
				   < INSN_LUID (p))
			       || (labels_in_range_p
				   (p, uid_luid[REGNO_FIRST_UID (regno)])));
		  if (maybe_never && m->global)
		    m->savemode = GET_MODE (SET_SRC (set1));
		  else
		    m->savemode = VOIDmode;
		  m->regno = regno;
		  m->cond = 0;
		  m->match = 0;
		  m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
				 - uid_luid[REGNO_FIRST_UID (regno)]);
		  m->savings = 1;
		  n_times_set[regno] = -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  if (movables == 0)
		    movables = m;
		  else
		    last_movable->next = m;
		  last_movable = m;
		}
	    }
	}
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
	 so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier then the loop end.  */
	       && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
		     && NEXT_INSN (NEXT_INSN (p)) == end
		     && simplejump_p (p)))
	maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed: logically, the loop begins here even though the exit
	     code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    maybe_never = call_passed = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}
    }
  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads
     leads when it dies right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);
  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in n_times_set for each reg that is moved.

     Generally this increases code size, so do not move movables when
     optimizing for code size.  */

  if (! optimize_size)
    move_movables (movables, threshold,
		   insn_count, loop_start, end, nregs);
  /* Now candidates that still are negative are those not moved.
     Change n_times_set to indicate that those are not actually invariant.  */
  for (i = 0; i < nregs; i++)
    if (n_times_set[i] < 0)
      n_times_set[i] = n_times_used[i];
  /* Now that we've moved some things out of the loop, we are able to
     hoist even more memory references.  There's no need to pass
     reg_single_usage this time, since we're done with it.  */
  load_mems_and_recount_loop_regs_set (scan_start, end, loop_top,
				       loop_start, &insn_count);
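
  /* Strength reduction below runs last, on the insn stream as it stands
     after invariant motion and MEM hoisting.  */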
  if (flag_strength_reduce)
    {
      the_movables = movables;
      strength_reduce (scan_start, end, loop_top,
		       insn_count, loop_start, end, unroll_p);
    }
}
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

static void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
	  && ! reg_mentioned_p (in_this, not_in_this))
	*output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
	{
	case 'E':
	  for (j = 0; j < XVECLEN (in_this, i); j++)
	    record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
	  break;

	case 'e':
	  record_excess_regs (XEXP (in_this, i), not_in_this, output);
	  break;
	}
    }
}
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

static rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
	{
	case NOTE:
	  break;

	case INSN:
	case CALL_INSN:
	  /* Ordinary insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  break;

	case JUMP_INSN:
	  /* Jump insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  /* Otherwise, it's the end of the basic block, so we lose.  */
	  return 0;

	case CODE_LABEL:
	case BARRIER:
	  /* It's the end of the basic block, so we lose.  */
	  return 0;

	default:
	  break;
	}
    }

  /* The "last use" doesn't follow the "first use"??  */
  abort ();
}
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
	benefit += 10;		/* Assume at least this many insns in a
				   library routine.  */
      else if (GET_CODE (insn) == INSN
	       && GET_CODE (PATTERN (insn)) != USE
	       && GET_CODE (PATTERN (insn)) != CLOBBER)
	benefit++;
    }

  return benefit;
}
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
	 be an insn here.  */
      if (GET_CODE (insn) != NOTE
	  && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
	insn = XEXP (temp, 0);

      do insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}
/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
	{
	  rtx insn;
	  /* Check for earlier movables inside that range,
	     and mark them invalid.  We cannot use LUIDs here because
	     insns created by loop.c for prior loops don't have LUIDs.
	     Rather than reject all such insns from movables, we just
	     explicitly check each insn in the libcall (since invariant
	     libcalls aren't that common).  */
	  for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
	    for (m1 = movables; m1 != m; m1 = m1->next)
	      if (m1->insn == insn)
		m1->done = 1;
	}
    }
}
/* For each movable insn, see if the reg that it loads
   leads when it dies right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

static void
force_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m1 = movables; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
	int regno = m1->regno;
	for (m = m1->next; m; m = m->next)
	  /* ??? Could this be a bug?  What if CSE caused the
	     register of M1 to be used after this insn?
	     Since CSE does not update regno_last_uid,
	     this insn M->insn might not be where it dies.
	     But very likely this doesn't matter; what matters is
	     that M's reg is computed from M1's reg.  */
	  if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
	      && !m->done)
	    break;
	if (m != 0 && m->set_src == m1->set_dest
	    /* If m->consec, m->set_src isn't valid.  */
	    && m->consec == 0)
	  m = 0;

	/* Increase the priority of moving the first insn
	   since it permits the second to be moved as well.  */
	if (m != 0)
	  {
	    m->forces = m1;
	    m1->lifetime += m->lifetime;
	    m1->savings += m->savings;
	  }
      }
}
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, nregs)
     struct movable *movables;
     int nregs;
{
  register struct movable *m;
  char *matched_regs = (char *) alloca (nregs);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables; m; m = m->next)
    if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
      {
	register struct movable *m1;
	int regno = m->regno;

	bzero (matched_regs, nregs);
	matched_regs[regno] = 1;

	/* We want later insns to match the first one.  Don't make the first
	   one match any later ones.  So start this loop at m->next.  */
	for (m1 = m->next; m1; m1 = m1->next)
	  if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
	      /* A reg used outside the loop mustn't be eliminated.  */
	      && !m1->global
	      /* A reg used for zero-extending mustn't be eliminated.  */
	      && !m1->partial
	      && (matched_regs[m1->regno]
		  ||
		  (
		   /* Can combine regs with different modes loaded from the
		      same constant only if the modes are the same or
		      if both are integer modes with M wider or the same
		      width as M1.  The check for integer is redundant, but
		      safe, since the only case of differing destination
		      modes with equal sources is when both sources are
		      VOIDmode, i.e., CONST_INT.  */
		   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
		    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
			&& GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
			&& (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
			    >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
		   /* See if the source of M1 says it matches M.  */
		   && ((GET_CODE (m1->set_src) == REG
			&& matched_regs[REGNO (m1->set_src)])
		       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
						movables))))
	      && ((m->dependencies == m1->dependencies)
		  || rtx_equal_p (m->dependencies, m1->dependencies)))
	    {
	      m->lifetime += m1->lifetime;
	      m->savings += m1->savings;
	      m1->done = 1;
	      m1->match = m;
	      matched_regs[m1->regno] = 1;
	    }
      }
  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
	 Don't combine any that are used outside this loop.  */
      for (m = movables; m; m = m->next)
	if (m->partial && ! m->global
	    && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
	  {
	    register struct movable *m1;
	    int first = uid_luid[REGNO_FIRST_UID (m->regno)];
	    int last = uid_luid[REGNO_LAST_UID (m->regno)];

	    if (m0 == 0)
	      {
		/* First one: don't check for overlap, just record it.  */
		m0 = m;
	      }
	    else
	      {
		/* Make sure they extend to the same mode.
		   (Almost always true.)  */
		if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
		  continue;

		/* We already have one: check for overlap with those
		   already combined together.  */
		for (m1 = movables; m1 != m; m1 = m1->next)
		  if (m1 == m0 || (m1->partial && m1->match == m0))
		    if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
			   || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
		      goto overlap;

		/* No overlap: we can combine this with the others.  */
		m0->lifetime += m->lifetime;
		m0->savings += m->savings;
		m->done = 1;
		m->match = m0;

	      overlap:
		;
	      }
	  }
    }
}
/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  int xn = REGNO (x);
  int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
	  && ((mx->match == my->match && mx->match != 0)
	      || mx->match == my
	      || mx == my->match));
}
/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */

static int
rtx_equal_for_loop_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  register int i;
  register int j;
  register struct movable *m;
  register enum rtx_code code;
  register char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
      && CONSTANT_P (y))
    {
      for (m = movables; m; m = m->next)
	if (m->move_insn && m->regno == REGNO (x)
	    && rtx_equal_p (m->set_src, y))
	  return 1;
    }
  else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
	   && CONSTANT_P (x))
    {
      for (m = movables; m; m = m->next)
	if (m->move_insn && m->regno == REGNO (y)
	    && rtx_equal_p (m->set_src, x))
	  return 1;
    }

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_loop_p (XVECEXP (x, i, j),
				      XVECEXP (y, i, j), movables) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
	    return 0;
	  break;

	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }
  return 1;
}
/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use that reference.  */

static void
add_label_notes (x, insns)
     rtx x;
     rtx insns;
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  char *fmt;
  rtx insn;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      /* This code used to ignore labels that referred to dispatch tables to
	 avoid flow generating (slightly) worse code.

	 We no longer ignore such label references (see LABEL_REF handling in
	 mark_jump_label for additional information).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (reg_mentioned_p (XEXP (x, 0), insn))
	  REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
						REG_NOTES (insn));
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	add_label_notes (XEXP (x, i), insns);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  add_label_notes (XVECEXP (x, i, j), insns);
    }
}
/* Scan MOVABLES, and move the insns that deserve to be moved.
   If two matching movables are combined, replace one reg with the
   other throughout.  */

static void
move_movables (movables, threshold, insn_count, loop_start, end, nregs)
     struct movable *movables;
     int threshold;
     int insn_count;
     rtx loop_start;
     rtx end;
     int nregs;
{
  rtx new_start = 0;
  register struct movable *m;
  register rtx p;
  /* Map of pseudo-register replacements to handle combining
     when we move several insns that load the same value
     into different pseudo-registers.  */
  rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
  char *already_moved = (char *) alloca (nregs);

  bzero (already_moved, nregs);
  bzero ((char *) reg_map, nregs * sizeof (rtx));

  num_movables = 0;

  for (m = movables; m; m = m->next)
    {
      /* Describe this movable insn.  */

      if (loop_dump_stream)
	{
	  fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
		   INSN_UID (m->insn), m->regno, m->lifetime);
	  if (m->consec > 0)
	    fprintf (loop_dump_stream, "consec %d, ", m->consec);
	  if (m->cond)
	    fprintf (loop_dump_stream, "cond ");
	  if (m->force)
	    fprintf (loop_dump_stream, "force ");
	  if (m->global)
	    fprintf (loop_dump_stream, "global ");
	  if (m->done)
	    fprintf (loop_dump_stream, "done ");
	  if (m->move_insn)
	    fprintf (loop_dump_stream, "move-insn ");
	  if (m->match)
	    fprintf (loop_dump_stream, "matches %d ",
		     INSN_UID (m->match->insn));
	  if (m->forces)
	    fprintf (loop_dump_stream, "forces %d ",
		     INSN_UID (m->forces->insn));
	}
      /* Count movables.  Value used in heuristics in strength_reduce.  */
      num_movables++;

      /* Ignore the insn if it's already done (it matched something else).
	 Otherwise, see if it is now safe to move.  */

      if (!m->done
	  && (! m->cond
	      || (1 == invariant_p (m->set_src)
		  && (m->dependencies == 0
		      || 1 == invariant_p (m->dependencies))
		  && (m->consec == 0
		      || 1 == consec_sets_invariant_p (m->set_dest,
						       m->consec + 1,
						       m->insn))))
	  && (! m->forces || m->forces->done))
	{
	  register int regno;
	  register rtx p;
	  int savings = m->savings;
	  /* We have an insn that is safe to move.
	     Compute its desirability.  */

	  p = m->insn;
	  regno = m->regno;

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "savings %d ", savings);

	  if (moved_once[regno])
	    {
	      insn_count *= 2;

	      if (loop_dump_stream)
		fprintf (loop_dump_stream, "halved since already moved ");
	    }
	  /* An insn MUST be moved if we already moved something else
	     which is safe only if this one is moved too: that is,
	     if already_moved[REGNO] is nonzero.  */

	  /* An insn is desirable to move if the new lifetime of the
	     register is no more than THRESHOLD times the old lifetime.
	     If it's not desirable, it means the loop is so big
	     that moving won't speed things up much,
	     and it is liable to make register usage worse.  */

	  /* It is also desirable to move if it can be moved at no
	     extra cost because something else was already moved.  */

	  if (already_moved[regno]
	      || flag_move_all_movables
	      || (threshold * savings * m->lifetime) >= insn_count
	      || (m->forces && m->forces->done
		  && n_times_used[m->forces->regno] == 1))
	    {
	      int count;
	      register struct movable *m1;

	      /* Now move the insns that set the reg.  */

	      if (m->partial && m->match)
		{
		  rtx newpat, i1;
		  rtx r1, r2;

		  /* Find the end of this chain of matching regs.
		     Thus, we load each reg in the chain from that one reg.
		     And that reg is loaded with 0 directly,
		     since it has ->match == 0.  */
		  for (m1 = m; m1->match; m1 = m1->match);
		  newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
					  SET_DEST (PATTERN (m1->insn)));
		  i1 = emit_insn_before (newpat, loop_start);
		  /* Mark the moved, invariant reg as being allowed to
		     share a hard reg with the other matching invariant.  */
		  REG_NOTES (i1) = REG_NOTES (m->insn);
		  r1 = SET_DEST (PATTERN (m->insn));
		  r2 = SET_DEST (PATTERN (m1->insn));
		  regs_may_share
		    = gen_rtx_EXPR_LIST (VOIDmode, r1,
					 gen_rtx_EXPR_LIST (VOIDmode, r2,
							    regs_may_share));
		  delete_insn (m->insn);

		  if (new_start == 0)
		    new_start = i1;

		  if (loop_dump_stream)
		    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
		}
	      /* If we are to re-generate the item being moved with a
		 new move insn, first delete what we have and then emit
		 the move insn before the loop.  */
	      else if (m->move_insn)
		{
		  rtx i1, temp;

		  for (count = m->consec; count >= 0; count--)
		    {
		      /* If this is the first insn of a library call
			 sequence, skip to the end.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
			p = XEXP (temp, 0);

		      /* If this is the last insn of a libcall sequence, then
			 delete every insn in the sequence except the last.
			 The last insn is handled in the normal manner.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
			{
			  temp = XEXP (temp, 0);
			  while (temp != p)
			    temp = delete_insn (temp);
			}

		      p = delete_insn (p);
		      while (p && GET_CODE (p) == NOTE)
			p = NEXT_INSN (p);
		    }

		  start_sequence ();
		  emit_move_insn (m->set_dest, m->set_src);
		  temp = get_insns ();
		  end_sequence ();

		  add_label_notes (m->set_src, temp);

		  i1 = emit_insns_before (temp, loop_start);
		  if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
		    REG_NOTES (i1)
		      = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
					   m->set_src, REG_NOTES (i1));

		  if (loop_dump_stream)
		    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
		  /* The more regs we move, the less we like moving them.  */
		  threshold -= 3;
		}
	      else
		{
		  for (count = m->consec; count >= 0; count--)
		    {
		      rtx i1, temp;
		      /* If first insn of libcall sequence, skip to end.  */
		      /* Do this at start of loop, since p is guaranteed to
			 be an insn here.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
			p = XEXP (temp, 0);

		      /* If last insn of libcall sequence, move all
			 insns except the last before the loop.  The last
			 insn is handled in the normal manner.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
			{
			  rtx fn_address = 0;
			  rtx fn_reg = 0;
			  rtx fn_address_insn = 0;

			  first = 0;
			  for (temp = XEXP (temp, 0); temp != p;
			       temp = NEXT_INSN (temp))
			    {
			      rtx body;
			      rtx n;
			      rtx next;

			      if (GET_CODE (temp) == NOTE)
				continue;

			      body = PATTERN (temp);

			      /* Find the next insn after TEMP,
				 not counting USE or NOTE insns.  */
			      for (next = NEXT_INSN (temp); next != p;
				   next = NEXT_INSN (next))
				if (! (GET_CODE (next) == INSN
				       && GET_CODE (PATTERN (next)) == USE)
				    && GET_CODE (next) != NOTE)
				  break;

			      /* If that is the call, this may be the insn
				 that loads the function address.

				 Extract the function address from the insn
				 that loads it into a register.
				 If this insn was cse'd, we get incorrect code.

				 So emit a new move insn that copies the
				 function address into the register that the
				 call insn will use.  flow.c will delete any
				 redundant stores that we have created.  */
			      if (GET_CODE (next) == CALL_INSN
				  && GET_CODE (body) == SET
				  && GET_CODE (SET_DEST (body)) == REG
				  && (n = find_reg_note (temp, REG_EQUAL,
							 NULL_RTX)))
				{
				  fn_reg = SET_SRC (body);
				  if (GET_CODE (fn_reg) != REG)
				    fn_reg = SET_DEST (body);
				  fn_address = XEXP (n, 0);
				  fn_address_insn = temp;
				}
			      /* We have the call insn.
				 If it uses the register we suspect it might,
				 load it with the correct address directly.  */
			      if (GET_CODE (temp) == CALL_INSN
				  && fn_address != 0
				  && reg_referenced_p (fn_reg, body))
				emit_insn_after (gen_move_insn (fn_reg,
								fn_address),
						 fn_address_insn);

			      if (GET_CODE (temp) == CALL_INSN)
				{
				  i1 = emit_call_insn_before (body, loop_start);
				  /* Because the USAGE information potentially
				     contains objects other than hard registers
				     we need to copy it.  */
				  if (CALL_INSN_FUNCTION_USAGE (temp))
				    CALL_INSN_FUNCTION_USAGE (i1)
				      = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
				}
			      else
				i1 = emit_insn_before (body, loop_start);

			      if (first == 0)
				first = i1;
			      if (temp == fn_address_insn)
				fn_address_insn = i1;
			      REG_NOTES (i1) = REG_NOTES (temp);
			      delete_insn (temp);
			    }
			}
		      if (m->savemode != VOIDmode)
			{
			  /* P sets REG to zero; but we should clear only
			     the bits that are not covered by the mode
			     m->savemode.  */
			  rtx reg = m->set_dest;
			  rtx sequence;
			  rtx tem;

			  start_sequence ();
			  tem = expand_binop
			    (GET_MODE (reg), and_optab, reg,
			     GEN_INT ((((HOST_WIDE_INT) 1
					<< GET_MODE_BITSIZE (m->savemode)))
				      - 1),
			     reg, 1, OPTAB_LIB_WIDEN);
			  if (tem != reg)
			    emit_move_insn (reg, tem);
			  sequence = gen_sequence ();
			  end_sequence ();
			  i1 = emit_insn_before (sequence, loop_start);
			}
		      else if (GET_CODE (p) == CALL_INSN)
			{
			  i1 = emit_call_insn_before (PATTERN (p), loop_start);
			  /* Because the USAGE information potentially
			     contains objects other than hard registers
			     we need to copy it.  */
			  if (CALL_INSN_FUNCTION_USAGE (p))
			    CALL_INSN_FUNCTION_USAGE (i1)
			      = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
			}
		      else if (count == m->consec && m->move_insn_first)
			{
			  /* The SET_SRC might not be invariant, so we must
			     use the REG_EQUAL note.  */
			  start_sequence ();
			  emit_move_insn (m->set_dest, m->set_src);
			  temp = get_insns ();
			  end_sequence ();

			  add_label_notes (m->set_src, temp);

			  i1 = emit_insns_before (temp, loop_start);
			  if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
			    REG_NOTES (i1)
			      = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
						    : REG_EQUAL),
						   m->set_src, REG_NOTES (i1));
			}
		      else
			i1 = emit_insn_before (PATTERN (p), loop_start);
		      if (REG_NOTES (i1) == 0)
			{
			  REG_NOTES (i1) = REG_NOTES (p);

			  /* If there is a REG_EQUAL note present whose value
			     is not loop invariant, then delete it, since it
			     may cause problems with later optimization passes.
			     It is possible for cse to create such notes
			     like this as a result of record_jump_cond.  */

			  if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
			      && ! invariant_p (XEXP (temp, 0)))
			    remove_note (i1, temp);
			}

		      if (new_start == 0)
			new_start = i1;

		      if (loop_dump_stream)
			fprintf (loop_dump_stream, " moved to %d",
				 INSN_UID (i1));

		      /* If library call, now fix the REG_NOTES that contain
			 insn pointers, namely REG_LIBCALL on FIRST
			 and REG_RETVAL on I1.  */
		      if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
			{
			  XEXP (temp, 0) = first;
			  temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
			  XEXP (temp, 0) = i1;
			}

		      delete_insn (p);
		      do p = NEXT_INSN (p);
		      while (p && GET_CODE (p) == NOTE);
		    }
		  /* The more regs we move, the less we like moving them.  */
		  threshold -= 3;
		}

	      /* Any other movable that loads the same register
		 MUST be moved.  */
	      already_moved[regno] = 1;

	      /* This reg has been moved out of one loop.  */
	      moved_once[regno] = 1;

	      /* The reg set here is now invariant.  */
	      if (! m->partial)
		n_times_set[regno] = 0;

	      m->done = 1;

	      /* Change the length-of-life info for the register
		 to say it lives at least the full length of this loop.
		 This will help guide optimizations in outer loops.  */

	      if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
		/* This is the old insn before all the moved insns.
		   We can't use the moved insn because it is out of range
		   in uid_luid.  Only the old insns have luids.  */
		REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
	      if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
		REGNO_LAST_UID (regno) = INSN_UID (end);
	      /* Combine with this moved insn any other matching movables.  */
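	      /* Illustratively: if two insns in the loop each loaded the
		 same invariant value into different pseudos and both were
		 movable, the matching pseudo is simply redirected to M's
		 register via REG_MAP and its own load is deleted.  */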
	      if (! m->partial)
		for (m1 = movables; m1; m1 = m1->next)
		  if (m1->match == m)
		    {
		      rtx temp;

		      /* Schedule the reg loaded by M1
			 for replacement so that it shares the reg of M.
			 If the modes differ (only possible in restricted
			 circumstances), make a SUBREG.  */
		      if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
			reg_map[m1->regno] = m->set_dest;
		      else
			reg_map[m1->regno]
			  = gen_lowpart_common (GET_MODE (m1->set_dest),
						m->set_dest);

		      /* Get rid of the matching insn
			 and prevent further processing of it.  */
		      m1->done = 1;

		      /* If library call, delete all insns except the last,
			 which is deleted below.  */
		      if ((temp = find_reg_note (m1->insn, REG_RETVAL,
						 NULL_RTX)))
			{
			  for (temp = XEXP (temp, 0); temp != m1->insn;
			       temp = NEXT_INSN (temp))
			    delete_insn (temp);
			}
		      delete_insn (m1->insn);

		      /* Any other movable that loads the same register
			 MUST be moved.  */
		      already_moved[m1->regno] = 1;

		      /* The reg merged here is now invariant,
			 if the reg it matches is invariant.  */
		      if (! m->partial)
			n_times_set[m1->regno] = 0;
		    }
	    }
	  else if (loop_dump_stream)
	    fprintf (loop_dump_stream, "not desirable");
	}
      else if (loop_dump_stream && !m->match)
	fprintf (loop_dump_stream, "not safe");

      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\n");
    }

  if (new_start == 0)
    new_start = loop_start;

  /* Go through all the instructions in the loop, making
     all the register substitutions scheduled in REG_MAP.  */
  for (p = new_start; p != end; p = NEXT_INSN (p))
    if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	|| GET_CODE (p) == CALL_INSN)
      {
	replace_regs (PATTERN (p), reg_map, nregs, 0);
	replace_regs (REG_NOTES (p), reg_map, nregs, 0);
	INSN_CODE (p) = -1;
      }
}
/* Scan X and replace the address of any MEM in it with ADDR.
   REG is the address that MEM should have before the replacement.  */

static void
replace_call_address (x, reg, addr)
     rtx x, reg, addr;
{
  register enum rtx_code code;
  register int i;
  register char *fmt;

  if (x == 0)
    return;
  code = GET_CODE (x);
  switch (code)
    {
    case PC:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
    case REG:
      return;

    case SET:
      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 1), reg, addr);
      return;

    case CALL:
      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 0), reg, addr);
      return;

    case MEM:
      /* If this MEM uses a reg other than the one we expected,
	 something is wrong.  */
      if (XEXP (x, 0) != reg)
	abort ();
      XEXP (x, 0) = addr;
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	replace_call_address (XEXP (x, i), reg, addr);
      if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    replace_call_address (XVECEXP (x, i, j), reg, addr);
	}
    }
}
/* Return the number of memory refs to addresses that vary
   in the rtx X.  */

static int
count_nonfixed_reads (x)
     rtx x;
{
  register enum rtx_code code;
  register int i;
  register char *fmt;
  int value;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case PC:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
    case REG:
      return 0;

    case MEM:
      return ((invariant_p (XEXP (x, 0)) != 1)
	      + count_nonfixed_reads (XEXP (x, 0)));

    default:
      break;
    }

  value = 0;
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	value += count_nonfixed_reads (XEXP (x, i));
      if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    value += count_nonfixed_reads (XVECEXP (x, i, j));
	}
    }
  return value;
}
#if 0
/* P is an instruction that sets a register to the result of a ZERO_EXTEND.
   Replace it with an instruction to load just the low bytes
   if the machine supports such an instruction,
   and insert above LOOP_START an instruction to clear the register.  */

static void
constant_high_bytes (p, loop_start)
     rtx p, loop_start;
{
  register rtx new;
  register int insn_code_number;

  /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
     to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...).  */

  new = gen_rtx_SET (VOIDmode,
		     gen_rtx_STRICT_LOW_PART
		       (VOIDmode,
			gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)),
							0)),
					SET_DEST (PATTERN (p)),
					0)),
		     XEXP (SET_SRC (PATTERN (p)), 0));
  insn_code_number = recog (new, p);

  if (insn_code_number)
    {
      /* Clear destination register before the loop.  */
      emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
				     const0_rtx),
			loop_start);

      /* Inside the loop, just load the low part.  */
      PATTERN (p) = new;
    }
}
#endif
/* Scan a loop setting the variables `unknown_address_altered',
   `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
   and `loop_has_volatile'.  Also, fill in the arrays `loop_mems' and
   `loop_store_mems'.  */

static void
prescan_loop (start, end)
     rtx start, end;
{
  register int level = 1;
  register rtx insn;
  int loop_has_multiple_exit_targets = 0;
  /* The label after END.  Jumping here is just like falling off the
     end of the loop.  We use next_nonnote_insn instead of next_label
     as a hedge against the (pathological) case where some actual insn
     might end up between the two.  */
  rtx exit_target = next_nonnote_insn (end);
  if (exit_target == NULL_RTX || GET_CODE (exit_target) != CODE_LABEL)
    loop_has_multiple_exit_targets = 1;

  unknown_address_altered = 0;
  loop_has_call = 0;
  loop_has_volatile = 0;
  loop_store_mems_idx = 0;
  loops_enclosed = 1;
  loop_continue = 0;
  num_mem_sets = 0;

  for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
       insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE)
	{
	  if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	    {
	      ++level;
	      /* Count number of loops contained in this one.  */
	      loops_enclosed++;
	    }
	  else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
	    --level;
	  else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
	    {
	      if (level == 1)
		loop_continue = insn;
	    }
	}
      else if (GET_CODE (insn) == CALL_INSN)
	{
	  if (! CONST_CALL_P (insn))
	    unknown_address_altered = 1;
	  loop_has_call = 1;
	}
      else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
	{
	  rtx label1 = NULL_RTX;
	  rtx label2 = NULL_RTX;

	  if (volatile_refs_p (PATTERN (insn)))
	    loop_has_volatile = 1;

	  note_stores (PATTERN (insn), note_addr_stored);

	  if (!loop_has_multiple_exit_targets
	      && GET_CODE (insn) == JUMP_INSN
	      && GET_CODE (PATTERN (insn)) == SET
	      && SET_DEST (PATTERN (insn)) == pc_rtx)
	    {
	      if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE)
		{
		  label1 = XEXP (SET_SRC (PATTERN (insn)), 1);
		  label2 = XEXP (SET_SRC (PATTERN (insn)), 2);
		}
	      else
		label1 = SET_SRC (PATTERN (insn));

	      do
		{
		  if (label1 && label1 != pc_rtx)
		    {
		      if (GET_CODE (label1) != LABEL_REF)
			{
			  /* Something tricky.  */
			  loop_has_multiple_exit_targets = 1;
			  break;
			}
		      else if (XEXP (label1, 0) != exit_target
			       && LABEL_OUTSIDE_LOOP_P (label1))
			{
			  /* A jump outside the current loop.  */
			  loop_has_multiple_exit_targets = 1;
			  break;
			}
		    }

		  label1 = label2;
		  label2 = NULL_RTX;
		}
	      while (label1);
	    }
	}
      else if (GET_CODE (insn) == RETURN)
	loop_has_multiple_exit_targets = 1;
    }

  /* Now, rescan the loop, setting up the LOOP_MEMS array.  */
  if (/* We can't tell what MEMs are aliased by what.  */
      !unknown_address_altered
      /* An exception thrown by a called function might land us
	 anywhere.  */
      && !loop_has_call
      /* We don't want loads for MEMs moved to a location before the
	 one at which their stack memory becomes allocated.  (Note
	 that this is not a problem for malloc, etc., since those
	 require actual function calls.)  */
      && !current_function_calls_alloca
      /* There are ways to leave the loop other than falling off the
	 end.  */
      && !loop_has_multiple_exit_targets)
    for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
	 insn = NEXT_INSN (insn))
      for_each_rtx (&insn, insert_loop_mem, 0);
}
/* Scan the function looking for loops.  Record the start and end of each loop.
   Also mark as invalid loops any loops that contain a setjmp or are branched
   to from outside the loop.  */

static void
find_and_verify_loops (f)
     rtx f;
{
  rtx insn, label;
  int current_loop = -1;
  int next_loop = -1;
  int loop;

  /* If there are jumps to undefined labels,
     treat them as jumps out of any/all loops.
     This also avoids writing past end of tables when there are no loops.  */
  uid_loop_num[0] = -1;

  /* Find boundaries of loops, mark which loops are contained within
     loops, and invalidate loops that have setjmp.  */

  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE)
	switch (NOTE_LINE_NUMBER (insn))
	  {
	  case NOTE_INSN_LOOP_BEG:
	    loop_number_loop_starts[++next_loop] = insn;
	    loop_number_loop_ends[next_loop] = 0;
	    loop_outer_loop[next_loop] = current_loop;
	    loop_invalid[next_loop] = 0;
	    loop_number_exit_labels[next_loop] = 0;
	    loop_number_exit_count[next_loop] = 0;
	    current_loop = next_loop;
	    break;

	  case NOTE_INSN_SETJMP:
	    /* In this case, we must invalidate our current loop and any
	       enclosing loop.  */
	    for (loop = current_loop; loop != -1;
		 loop = loop_outer_loop[loop])
	      {
		loop_invalid[loop] = 1;
		if (loop_dump_stream)
		  fprintf (loop_dump_stream,
			   "\nLoop at %d ignored due to setjmp.\n",
			   INSN_UID (loop_number_loop_starts[loop]));
	      }
	    break;

	  case NOTE_INSN_LOOP_END:
	    if (current_loop == -1)
	      abort ();

	    loop_number_loop_ends[current_loop] = insn;
	    current_loop = loop_outer_loop[current_loop];
	    break;

	  default:
	    break;
	  }

      /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
	 enclosing loop, but this doesn't matter.  */
      uid_loop_num[INSN_UID (insn)] = current_loop;
    }

  /* Any loop containing a label used in an initializer must be invalidated,
     because it can be jumped into from anywhere.  */

  for (label = forced_labels; label; label = XEXP (label, 1))
    {
      int loop_num;

      for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
	   loop_num != -1;
	   loop_num = loop_outer_loop[loop_num])
	loop_invalid[loop_num] = 1;
    }

  /* Any loop containing a label used for an exception handler must be
     invalidated, because it can be jumped into from anywhere.  */

  for (label = exception_handler_labels; label; label = XEXP (label, 1))
    {
      int loop_num;

      for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
	   loop_num != -1;
	   loop_num = loop_outer_loop[loop_num])
	loop_invalid[loop_num] = 1;
    }

  /* Now scan all insn's in the function.  If any JUMP_INSN branches into a
     loop that it is not contained within, that loop is marked invalid.
     If any INSN or CALL_INSN uses a label's address, then the loop containing
     that label is marked invalid, because it could be jumped into from
     anywhere.

     Also look for blocks of code ending in an unconditional branch that
     exits the loop.  If such a block is surrounded by a conditional
     branch around the block, move the block elsewhere (see below) and
     invert the jump to point to the code block.  This may eliminate a
     label in our loop and will simplify processing by both us and a
     possible second cse pass.  */

  for (insn = f; insn; insn = NEXT_INSN (insn))
    if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
      {
	int this_loop_num = uid_loop_num[INSN_UID (insn)];

	if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
	  {
	    rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
	    if (note)
	      {
		int loop_num;

		for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
		     loop_num != -1;
		     loop_num = loop_outer_loop[loop_num])
		  loop_invalid[loop_num] = 1;
	      }
	  }

	if (GET_CODE (insn) != JUMP_INSN)
	  continue;

	mark_loop_jump (PATTERN (insn), this_loop_num);

	/* See if this is an unconditional branch outside the loop.  */
	if (this_loop_num != -1
	    && (GET_CODE (PATTERN (insn)) == RETURN
		|| (simplejump_p (insn)
		    && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
			!= this_loop_num)))
	    && get_max_uid () < max_uid_for_loop)
	  {
	    rtx p;
	    rtx our_next = next_real_insn (insn);
	    int dest_loop;
	    int outer_loop = -1;

	    /* Go backwards until we reach the start of the loop, a label,
	       or a JUMP_INSN.  */
	    for (p = PREV_INSN (insn);
		 GET_CODE (p) != CODE_LABEL
		 && ! (GET_CODE (p) == NOTE
		       && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
		 && GET_CODE (p) != JUMP_INSN;
		 p = PREV_INSN (p))
	      ;

	    /* Check for the case where we have a jump to an inner nested
	       loop, and do not perform the optimization in that case.  */

	    if (JUMP_LABEL (insn))
	      {
		dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
		if (dest_loop != -1)
		  {
		    for (outer_loop = dest_loop; outer_loop != -1;
			 outer_loop = loop_outer_loop[outer_loop])
		      if (outer_loop == this_loop_num)
			break;
		  }
	      }

	    /* Make sure that the target of P is within the current loop.  */

	    if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
		&& uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
	      outer_loop = this_loop_num;

	    /* If we stopped on a JUMP_INSN to the next insn after INSN,
	       we have a block of code to try to move.

	       We look backward and then forward from the target of INSN
	       to find a BARRIER at the same loop depth as the target.
	       If we find such a BARRIER, we make a new label for the start
	       of the block, invert the jump in P and point it to that label,
	       and move the block of code to the spot we found.  */

	    if (outer_loop == -1
		&& GET_CODE (p) == JUMP_INSN
		&& JUMP_LABEL (p) != 0
		/* Just ignore jumps to labels that were never emitted.
		   These always indicate compilation errors.  */
		&& INSN_UID (JUMP_LABEL (p)) != 0
		&& condjump_p (p)
		&& ! simplejump_p (p)
		&& next_real_insn (JUMP_LABEL (p)) == our_next)
	      {
		rtx target
		  = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
		int target_loop_num = uid_loop_num[INSN_UID (target)];
		rtx loc;

		for (loc = target; loc; loc = PREV_INSN (loc))
		  if (GET_CODE (loc) == BARRIER
		      && uid_loop_num[INSN_UID (loc)] == target_loop_num)
		    break;

		if (loc == 0)
		  for (loc = target; loc; loc = NEXT_INSN (loc))
		    if (GET_CODE (loc) == BARRIER
			&& uid_loop_num[INSN_UID (loc)] == target_loop_num)
		      break;

		if (loc)
		  {
		    rtx cond_label = JUMP_LABEL (p);
		    rtx new_label = get_label_after (p);

		    /* Ensure our label doesn't go away.  */
		    LABEL_NUSES (cond_label)++;

		    /* Verify that uid_loop_num is large enough and that
		       we can invert P.  */
		    if (invert_jump (p, new_label))
		      {
			rtx q, r;

			/* If no suitable BARRIER was found, create a suitable
			   one before TARGET.  Since TARGET is a fall through
			   path, we'll need to insert a jump around our block
			   and add a BARRIER before TARGET.

			   This creates an extra unconditional jump outside
			   the loop.  However, the benefits of removing rarely
			   executed instructions from inside the loop usually
			   outweighs the cost of the extra unconditional jump
			   outside the loop.  */
			if (loc == 0)
			  {
			    rtx temp;

			    temp = gen_jump (JUMP_LABEL (insn));
			    temp = emit_jump_insn_before (temp, target);
			    JUMP_LABEL (temp) = JUMP_LABEL (insn);
			    LABEL_NUSES (JUMP_LABEL (insn))++;
			    loc = emit_barrier_before (target);
			  }

			/* Include the BARRIER after INSN and copy the
			   block after LOC.  */
			new_label = squeeze_notes (new_label,
						   NEXT_INSN (insn));
			reorder_insns (new_label, NEXT_INSN (insn), loc);

			/* All those insns are now in TARGET_LOOP_NUM.  */
			for (q = new_label;
			     q != NEXT_INSN (NEXT_INSN (insn));
			     q = NEXT_INSN (q))
			  uid_loop_num[INSN_UID (q)] = target_loop_num;

			/* The label jumped to by INSN is no longer a loop
			   exit.  Unless INSN does not have a label (e.g.,
			   it is a RETURN insn), search
			   loop_number_exit_labels to find its label_ref,
			   and remove it.  Also turn off
			   LABEL_OUTSIDE_LOOP_P bit.  */
			if (JUMP_LABEL (insn))
			  {
			    int loop_num;

			    for (q = 0,
				 r = loop_number_exit_labels[this_loop_num];
				 r; q = r, r = LABEL_NEXTREF (r))
			      if (XEXP (r, 0) == JUMP_LABEL (insn))
				{
				  LABEL_OUTSIDE_LOOP_P (r) = 0;
				  if (q)
				    LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
				  else
				    loop_number_exit_labels[this_loop_num]
				      = LABEL_NEXTREF (r);
				  break;
				}

			    for (loop_num = this_loop_num;
				 loop_num != -1
				 && loop_num != target_loop_num;
				 loop_num = loop_outer_loop[loop_num])
			      loop_number_exit_count[loop_num]--;

			    /* If we didn't find it, then something is
			       wrong.  */
			    if (! r)
			      abort ();
			  }

			/* P is now a jump outside the loop, so it must be put
			   in loop_number_exit_labels, and marked as such.
			   The easiest way to do this is to just call
			   mark_loop_jump again for P.  */
			mark_loop_jump (PATTERN (p), this_loop_num);

			/* If INSN now jumps to the insn after it,
			   delete INSN.  */
			if (JUMP_LABEL (insn) != 0
			    && (next_real_insn (JUMP_LABEL (insn))
				== next_real_insn (insn)))
			  delete_insn (insn);
		      }

		    /* Continue the loop after where the conditional
		       branch used to jump, since the only branch insn
		       in the block (if it still remains) is an inter-loop
		       branch and hence needs no processing.  */
		    insn = NEXT_INSN (cond_label);

		    if (--LABEL_NUSES (cond_label) == 0)
		      delete_insn (cond_label);

		    /* This loop will be continued with NEXT_INSN (insn).  */
		    insn = PREV_INSN (insn);
		  }
	      }
	  }
      }
}
/* If any label in X jumps to a loop different from LOOP_NUM and any of the
   loops it is contained in, mark the target loop invalid.

   For speed, we assume that X is part of a pattern of a JUMP_INSN.  */

static void
mark_loop_jump (x, loop_num)
     rtx x;
     int loop_num;
{
  int dest_loop;
  int outer_loop;
  int i;

  switch (GET_CODE (x))
    {
    case PC:
    case USE:
    case CLOBBER:
    case REG:
    case MEM:
    case CONST_INT:
    case CONST_DOUBLE:
    case RETURN:
      return;

    case CONST:
      /* There could be a label reference in here.  */
      mark_loop_jump (XEXP (x, 0), loop_num);
      return;

    case PLUS:
    case MINUS:
    case MULT:
      mark_loop_jump (XEXP (x, 0), loop_num);
      mark_loop_jump (XEXP (x, 1), loop_num);
      return;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      mark_loop_jump (XEXP (x, 0), loop_num);
      return;

    case LABEL_REF:
      dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];

      /* Link together all labels that branch outside the loop.  This
	 is used by final_[bg]iv_value and the loop unrolling code.  Also
	 mark this LABEL_REF so we know that this branch should predict
	 false.  */

      /* A check to make sure the label is not in an inner nested loop,
	 since this does not count as a loop exit.  */
      if (dest_loop != -1)
	{
	  for (outer_loop = dest_loop; outer_loop != -1;
	       outer_loop = loop_outer_loop[outer_loop])
	    if (outer_loop == loop_num)
	      break;
	}
      else
	outer_loop = -1;

      if (loop_num != -1 && outer_loop == -1)
	{
	  LABEL_OUTSIDE_LOOP_P (x) = 1;
	  LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
	  loop_number_exit_labels[loop_num] = x;

	  for (outer_loop = loop_num;
	       outer_loop != -1 && outer_loop != dest_loop;
	       outer_loop = loop_outer_loop[outer_loop])
	    loop_number_exit_count[outer_loop]++;
	}

      /* If this is inside a loop, but not in the current loop or one
	 enclosed by it, it invalidates at least one loop.  */

      if (dest_loop == -1)
	return;

      /* We must invalidate every nested loop containing the target of this
	 label, except those that also contain the jump insn.  */

      for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
	{
	  /* Stop when we reach a loop that also contains the jump insn.  */
	  for (outer_loop = loop_num; outer_loop != -1;
	       outer_loop = loop_outer_loop[outer_loop])
	    if (dest_loop == outer_loop)
	      return;

	  /* If we get here, we know we need to invalidate a loop.  */
	  if (loop_dump_stream && ! loop_invalid[dest_loop])
	    fprintf (loop_dump_stream,
		     "\nLoop at %d ignored due to multiple entry points.\n",
		     INSN_UID (loop_number_loop_starts[dest_loop]));

	  loop_invalid[dest_loop] = 1;
	}
      return;

    case SET:
      /* If this is not setting pc, ignore.  */
      if (SET_DEST (x) == pc_rtx)
	mark_loop_jump (SET_SRC (x), loop_num);
      return;

    case IF_THEN_ELSE:
      mark_loop_jump (XEXP (x, 1), loop_num);
      mark_loop_jump (XEXP (x, 2), loop_num);
      return;

    case PARALLEL:
    case ADDR_VEC:
      for (i = 0; i < XVECLEN (x, 0); i++)
	mark_loop_jump (XVECEXP (x, 0, i), loop_num);
      return;

    case ADDR_DIFF_VEC:
      for (i = 0; i < XVECLEN (x, 1); i++)
	mark_loop_jump (XVECEXP (x, 1, i), loop_num);
      return;

    default:
      /* Treat anything else (such as a symbol_ref)
	 as a branch out of this loop, but not into any loop.  */

      if (loop_num != -1)
	{
	  LABEL_OUTSIDE_LOOP_P (x) = 1;
	  LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];

	  loop_number_exit_labels[loop_num] = x;

	  for (outer_loop = loop_num; outer_loop != -1;
	       outer_loop = loop_outer_loop[outer_loop])
	    loop_number_exit_count[outer_loop]++;
	}
      return;
    }
}
/* Return nonzero if there is a label in the range from
   insn INSN to and including the insn whose luid is END.
   INSN must have an assigned luid (i.e., it must not have
   been previously created by loop.c).  */

static int
labels_in_range_p (insn, end)
     rtx insn;
     int end;
{
  while (insn && INSN_LUID (insn) <= end)
    {
      if (GET_CODE (insn) == CODE_LABEL)
	return 1;
      insn = NEXT_INSN (insn);
    }

  return 0;
}
/* Record that a memory reference X is being set.  */

static void
note_addr_stored (x, y)
     rtx x;
     rtx y ATTRIBUTE_UNUSED;
{
  register int i;

  if (x == 0 || GET_CODE (x) != MEM)
    return;

  /* Count number of memory writes.
     This affects heuristics in strength_reduce.  */
  num_mem_sets++;

  /* BLKmode MEM means all memory is clobbered.  */
  if (GET_MODE (x) == BLKmode)
    unknown_address_altered = 1;

  if (unknown_address_altered)
    return;

  for (i = 0; i < loop_store_mems_idx; i++)
    if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
	&& MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
      {
	/* We are storing at the same address as previously noted.  Save
	   the wider reference.  */
	if (GET_MODE_SIZE (GET_MODE (x))
	    > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
	  loop_store_mems[i] = x;
	break;
      }

  if (i == NUM_STORES)
    unknown_address_altered = 1;
  else if (i == loop_store_mems_idx)
    loop_store_mems[loop_store_mems_idx++] = x;
}
/* Return nonzero if the rtx X is invariant over the current loop.

   The value is 2 if we refer to something only conditionally invariant.

   If `unknown_address_altered' is nonzero, no memory ref is invariant.
   Otherwise, a memory ref is invariant if it does not conflict with
   anything stored in `loop_store_mems'.  */
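/* As an illustration (not from the sources): in a loop whose only store
   is to a[i], a reference to a read-only constant-pool entry is invariant
   (value 1), while a load that true_dependence cannot disambiguate from
   the a[i] store is not (value 0).  */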
int
invariant_p (x)
     register rtx x;
{
  register int i;
  register enum rtx_code code;
  register char *fmt;
  int conditional = 0;

  if (x == 0)
    return 1;
  code = GET_CODE (x);
  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
      return 1;

    case LABEL_REF:
      /* A LABEL_REF is normally invariant, however, if we are unrolling
	 loops, and this label is inside the loop, then it isn't invariant.
	 This is because each unrolled copy of the loop body will have
	 a copy of this label.  If this was invariant, then an insn loading
	 the address of this label into a register might get moved outside
	 the loop, and then each loop body would end up using the same label.

	 We don't know the loop bounds here though, so just fail for all
	 labels.  */
      if (flag_unroll_loops)
	return 0;
      else
	return 1;

    case PC:
    case CC0:
    case UNSPEC_VOLATILE:
      return 0;

    case REG:
      /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
	 since the reg might be set by initialization within the loop.  */

      if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	   || x == arg_pointer_rtx)
	  && ! current_function_has_nonlocal_goto)
	return 1;

      if (loop_has_call
	  && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
	return 0;

      if (n_times_set[REGNO (x)] < 0)
	return 2;

      return n_times_set[REGNO (x)] == 0;

    case MEM:
      /* Volatile memory references must be rejected.  Do this before
	 checking for read-only items, so that volatile read-only items
	 will be rejected also.  */
      if (MEM_VOLATILE_P (x))
	return 0;

      /* Read-only items (such as constants in a constant pool) are
	 invariant if their address is.  */
      if (RTX_UNCHANGING_P (x))
	break;

      /* If we filled the table (or had a subroutine call), any location
	 in memory could have been clobbered.  */
      if (unknown_address_altered)
	return 0;

      /* See if there is any dependence between a store and this load.  */
      for (i = loop_store_mems_idx - 1; i >= 0; i--)
	if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
	  return 0;

      /* It's not invalidated by a store in memory
	 but we must still verify the address is invariant.  */
      break;

    case ASM_OPERANDS:
      /* Don't mess with insns declared volatile.  */
      if (MEM_VOLATILE_P (x))
	return 0;
      break;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  int tem = invariant_p (XEXP (x, i));
	  if (tem == 0)
	    return 0;
	  if (tem == 2)
	    conditional = 1;
	}
      else if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    {
	      int tem = invariant_p (XVECEXP (x, i, j));
	      if (tem == 0)
		return 0;
	      if (tem == 2)
		conditional = 1;
	    }
	}
    }

  return 1 + conditional;
}
/* Return nonzero if all the insns in the loop that set REG
   are INSN and the immediately following insns,
   and if each of those insns sets REG in an invariant way
   (not counting uses of REG in them).

   The value is 2 if some of these insns are only conditionally invariant.

   We assume that INSN itself is the first set of REG
   and that its source is invariant.  */

static int
consec_sets_invariant_p (reg, n_sets, insn)
     int n_sets;
     rtx reg, insn;
{
  register rtx p = insn;
  register int regno = REGNO (reg);
  rtx temp;
  /* Number of sets we have to insist on finding after INSN.  */
  int count = n_sets - 1;
  int old = n_times_set[regno];
  int value = 0;
  int this;

  /* If N_SETS hit the limit, we can't rely on its value.  */
  if (n_sets == 127)
    return 0;

  n_times_set[regno] = 0;

  while (count > 0)
    {
      register enum rtx_code code;
      rtx set;

      p = NEXT_INSN (p);
      code = GET_CODE (p);

      /* If library call, skip to end of it.  */
      if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
	p = XEXP (temp, 0);

      this = 0;
      if (code == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && REGNO (SET_DEST (set)) == regno)
	{
	  this = invariant_p (SET_SRC (set));
	  if (this != 0)
	    value |= this;
	  else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
	    {
	      /* If this is a libcall, then any invariant REG_EQUAL note
		 is OK.  If this is an ordinary insn, then only CONSTANT_P
		 REG_EQUAL notes are OK.  */
	      this = (CONSTANT_P (XEXP (temp, 0))
		      || (find_reg_note (p, REG_RETVAL, NULL_RTX)
			  && invariant_p (XEXP (temp, 0))));
	      if (this != 0)
		value |= this;
	    }
	}
      if (this != 0)
	count--;
      else if (code != NOTE)
	{
	  n_times_set[regno] = old;
	  return 0;
	}
    }

  n_times_set[regno] = old;
  /* If invariant_p ever returned 2, we return 2.  */
  return 1 + (value & 2);
}
#if 0
/* I don't think this condition is sufficient to allow INSN
   to be moved, so we no longer test it.  */

/* Return 1 if all insns in the basic block of INSN and following INSN
   that set REG are invariant according to TABLE.  */

static int
all_sets_invariant_p (reg, insn, table)
     rtx reg, insn;
     short *table;
{
  register rtx p = insn;
  register int regno = REGNO (reg);

  while (1)
    {
      register enum rtx_code code;
      p = NEXT_INSN (p);
      code = GET_CODE (p);
      if (code == CODE_LABEL || code == JUMP_INSN)
	return 1;
      if (code == INSN && GET_CODE (PATTERN (p)) == SET
	  && GET_CODE (SET_DEST (PATTERN (p))) == REG
	  && REGNO (SET_DEST (PATTERN (p))) == regno)
	{
	  if (!invariant_p (SET_SRC (PATTERN (p)), table))
	    return 0;
	}
    }
}
#endif /* 0 */
/* Look at all uses (not sets) of registers in X.  For each, if it is
   the single use, set USAGE[REGNO] to INSN; if there was a previous use in
   a different insn, set USAGE[REGNO] to const0_rtx.  */
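/* In other words, USAGE acts as a three-state map per register: 0 when no
   use has been seen, the using insn itself when exactly one use has been
   seen, and const0_rtx once a second use appears.  */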
static void
find_single_use_in_loop (insn, x, usage)
     rtx insn;
     rtx x;
     rtx *usage;
{
  enum rtx_code code = GET_CODE (x);
  char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    usage[REGNO (x)]
      = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
	? const0_rtx : insn;

  else if (code == SET)
    {
      /* Don't count SET_DEST if it is a REG; otherwise count things
	 in SET_DEST because if a register is partially modified, it won't
	 show up as a potential movable so we don't care how USAGE is set
	 for it.  */
      if (GET_CODE (SET_DEST (x)) != REG)
	find_single_use_in_loop (insn, SET_DEST (x), usage);
      find_single_use_in_loop (insn, SET_SRC (x), usage);
    }
  else
    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e' && XEXP (x, i) != 0)
	  find_single_use_in_loop (insn, XEXP (x, i), usage);
	else if (fmt[i] == 'E')
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
      }
}
/* Increment N_TIMES_SET at the index of each register
   that is modified by an insn between FROM and TO.
   If the value of an element of N_TIMES_SET becomes 127 or more,
   stop incrementing it, to avoid overflow.

   Store in SINGLE_USAGE[I] the single insn in which register I is
   used, if it is only used once.  Otherwise, it is set to 0 (for no
   uses) or const0_rtx for more than one use.  This parameter may be zero,
   in which case this processing is not done.

   Store in *COUNT_PTR the number of actual instructions
   in the loop.  We use this to decide what is worth moving out.  */
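/* For example (illustrative): a register set exactly once in the loop ends
   up with N_TIMES_SET[regno] == 1 and is a candidate movable; counts
   saturate at 127, so the char-sized bookkeeping never overflows.  */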
/* last_set[n] is nonzero iff reg n has been set in the current basic block.
   In that case, it is the insn that last set reg n.  */

static void
count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
     register rtx from, to;
     char *may_not_move;
     rtx *single_usage;
     int *count_ptr;
     int nregs;
{
  register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
  register rtx insn;
  register int count = 0;
  register rtx dest;

  bzero ((char *) last_set, nregs * sizeof (rtx));
  for (insn = from; insn != to; insn = NEXT_INSN (insn))
    {
      if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
	{
	  ++count;

	  /* If requested, record registers that have exactly one use.  */
	  if (single_usage)
	    {
	      find_single_use_in_loop (insn, PATTERN (insn), single_usage);

	      /* Include uses in REG_EQUAL notes.  */
	      if (REG_NOTES (insn))
		find_single_use_in_loop (insn, REG_NOTES (insn),
					 single_usage);
	    }

	  if (GET_CODE (PATTERN (insn)) == CLOBBER
	      && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
	    /* Don't move a reg that has an explicit clobber.
	       We might do so sometimes, but it's not worth the pain.  */
	    may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;

	  if (GET_CODE (PATTERN (insn)) == SET
	      || GET_CODE (PATTERN (insn)) == CLOBBER)
	    {
	      dest = SET_DEST (PATTERN (insn));
	      while (GET_CODE (dest) == SUBREG
		     || GET_CODE (dest) == ZERO_EXTRACT
		     || GET_CODE (dest) == SIGN_EXTRACT
		     || GET_CODE (dest) == STRICT_LOW_PART)
		dest = XEXP (dest, 0);
	      if (GET_CODE (dest) == REG)
		{
		  register int regno = REGNO (dest);
		  /* If this is the first setting of this reg
		     in current basic block, and it was set before,
		     it must be set in two basic blocks, so it cannot
		     be moved out of the loop.  */
		  if (n_times_set[regno] > 0 && last_set[regno] == 0)
		    may_not_move[regno] = 1;
		  /* If this is not first setting in current basic block,
		     see if reg was used in between previous one and this.
		     If so, neither one can be moved.  */
		  if (last_set[regno] != 0
		      && reg_used_between_p (dest, last_set[regno], insn))
		    may_not_move[regno] = 1;
		  if (n_times_set[regno] < 127)
		    ++n_times_set[regno];
		  last_set[regno] = insn;
		}
	    }
	  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
	    {
	      register int i;
	      for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
		{
		  register rtx x = XVECEXP (PATTERN (insn), 0, i);
		  if (GET_CODE (x) == CLOBBER
		      && GET_CODE (XEXP (x, 0)) == REG)
		    /* Don't move a reg that has an explicit clobber.
		       It's not worth the pain to try to do it correctly.  */
		    may_not_move[REGNO (XEXP (x, 0))] = 1;

		  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
		    {
		      dest = SET_DEST (x);
		      while (GET_CODE (dest) == SUBREG
			     || GET_CODE (dest) == ZERO_EXTRACT
			     || GET_CODE (dest) == SIGN_EXTRACT
			     || GET_CODE (dest) == STRICT_LOW_PART)
			dest = XEXP (dest, 0);
		      if (GET_CODE (dest) == REG)
			{
			  register int regno = REGNO (dest);
			  if (n_times_set[regno] > 0
			      && last_set[regno] == 0)
			    may_not_move[regno] = 1;
			  if (last_set[regno] != 0
			      && reg_used_between_p (dest,
						     last_set[regno], insn))
			    may_not_move[regno] = 1;
			  if (n_times_set[regno] < 127)
			    ++n_times_set[regno];
			  last_set[regno] = insn;
			}
		    }
		}
	    }
	}

      if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
	bzero ((char *) last_set, nregs * sizeof (rtx));
    }
  *count_ptr = count;
}
/* Given a loop that is bounded by LOOP_START and LOOP_END
   and that is entered at SCAN_START,
   return 1 if the register set in SET contained in insn INSN is used by
   any insn that precedes INSN in cyclic order starting
   from the loop entry point.

   We don't want to use INSN_LUID here because if we restrict INSN to those
   that have a valid INSN_LUID, it means we cannot move an invariant out
   from an inner loop past two loops.  */
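/* E.g. (illustrative): for a loop entered at the bottom, SCAN_START is the
   bottom test; the scan below starts there, wraps from LOOP_END back to
   LOOP_START, and stops when it reaches INSN.  */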
static int
loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
     rtx set, insn, loop_start, scan_start, loop_end;
{
  rtx reg = SET_DEST (set);
  rtx p;

  /* Scan forward checking for register usage.  If we hit INSN, we
     are done.  Otherwise, if we hit LOOP_END, wrap around to LOOP_START.  */
  for (p = scan_start; p != insn; p = NEXT_INSN (p))
    {
      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
	  && reg_overlap_mentioned_p (reg, PATTERN (p)))
	return 1;

      if (p == loop_end)
	p = loop_start;
    }

  return 0;
}
3457 /* A "basic induction variable" or biv is a pseudo reg that is set
3458 (within this loop) only by incrementing or decrementing it. */
3459 /* A "general induction variable" or giv is a pseudo reg whose
3460 value is a linear function of a biv. */
3462 /* Bivs are recognized by `basic_induction_var';
3463 Givs by `general_induction_var'. */
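/* For intuition (an illustrative source-level example, not from this
   file): in `for (i = 0; i < n; i++) a[i] = 0;' the counter `i' is a biv,
   since it is set only by `i++', while the address `a + i * sizeof (*a)'
   computed for the store is a giv, being a linear function of `i'.  */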
/* Indexed by register number, indicates whether or not register is an
   induction variable, and if so what type.  */

enum iv_mode *reg_iv_type;

/* Indexed by register number, contains pointer to `struct induction'
   if register is an induction variable.  This holds general info for
   all induction variables.  */

struct induction **reg_iv_info;

/* Indexed by register number, contains pointer to `struct iv_class'
   if register is a basic induction variable.  This holds info describing
   the class (a related group) of induction variables that the biv belongs
   to.  */

struct iv_class **reg_biv_class;

/* The head of a list which links together (via the next field)
   every iv class for the current loop.  */

struct iv_class *loop_iv_list;

/* Communication with routines called via `note_stores'.  */

static rtx note_insn;

/* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs.  */

static rtx addr_placeholder;

/* ??? Unfinished optimizations, and possible future optimizations,
   for the strength reduction code.  */

/* ??? The interaction of biv elimination, and recognition of 'constant'
   bivs, may cause problems.  */

/* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
   performance problems.

   Perhaps don't eliminate things that can be combined with an addressing
   mode.  Find all givs that have the same biv, mult_val, and add_val;
   then for each giv, check to see if its only use dies in a following
   memory address.  If so, generate a new memory address and check to see
   if it is valid.  If it is valid, then store the modified memory address,
   otherwise, mark the giv as not done so that it will get its own iv.  */

/* ??? Could try to optimize branches when it is known that a biv is
   always positive.  */

/* ??? When replace a biv in a compare insn, we should replace with closest
   giv so that an optimized branch can still be recognized by the combiner,
   e.g. the VAX acb insn.  */

/* ??? Many of the checks involving uid_luid could be simplified if regscan
   was rerun in loop_optimize whenever a register was added or moved.
   Also, some of the optimizations could be a little less conservative.  */
/* Perform strength reduction and induction variable elimination.

   Pseudo registers created during this function will be beyond the last
   valid index in several tables including n_times_set and regno_last_uid.
   This does not cause a problem here, because the added registers cannot be
   givs outside of their loop, and hence will never be reconsidered.
   But scan_loop must check regnos to make sure they are in bounds.

   SCAN_START is the first instruction in the loop, as the loop would
   actually be executed.  END is the NOTE_INSN_LOOP_END.  LOOP_TOP is
   the first instruction in the loop, as it is laid out in the
   instruction stream.  LOOP_START is the NOTE_INSN_LOOP_BEG.  */

static void
strength_reduce (scan_start, end, loop_top, insn_count,
		 loop_start, loop_end, unroll_p)
     rtx scan_start;
     rtx end;
     rtx loop_top;
     int insn_count;
     rtx loop_start;
     rtx loop_end;
     int unroll_p;
{
  rtx p;
  rtx set;
  rtx inc_val;
  rtx mult_val;
  rtx dest_reg;
  /* This is 1 if current insn is not executed at least once for every loop
     iteration.  */
  int not_every_iteration = 0;
  /* This is 1 if current insn may be executed more than once for every
     loop iteration.  */
  int maybe_multiple = 0;
  /* Temporary list pointers for traversing loop_iv_list.  */
  struct iv_class *bl, **backbl;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  /* ??? could set this to last value of threshold in move_movables */
  int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
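  /* For concreteness (illustrative numbers, not from the sources): with no
     call in the loop and, say, 10 non-fixed registers, threshold is
     2 * (3 + 10) = 26; a loop containing a call halves that to 13, making
     the pass less willing to extend register lifetimes.  */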
  /* Map of pseudo-register replacements.  */
  rtx *reg_map;
  int call_seen;
  rtx test;
  rtx end_insert_before;
  int loop_depth = 0;

  reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
					 * sizeof (enum iv_mode *));
  bzero ((char *) reg_iv_type,
	 max_reg_before_loop * sizeof (enum iv_mode *));
  reg_iv_info = (struct induction **)
    alloca (max_reg_before_loop * sizeof (struct induction *));
  bzero ((char *) reg_iv_info, (max_reg_before_loop
				* sizeof (struct induction *)));
  reg_biv_class = (struct iv_class **)
    alloca (max_reg_before_loop * sizeof (struct iv_class *));
  bzero ((char *) reg_biv_class, (max_reg_before_loop
				  * sizeof (struct iv_class *)));
  loop_iv_list = 0;
  addr_placeholder = gen_reg_rtx (Pmode);

  /* Save insn immediately after the loop_end.  Insns inserted after loop_end
     must be put before this insn, so that they will appear in the right
     order (i.e. loop order).

     If loop_end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set end_insert_before to the
     note.  */
  if (NEXT_INSN (loop_end) != 0)
    end_insert_before = NEXT_INSN (loop_end);
  else
    end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
  /* Scan through loop to find all possible bivs.  */

  for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
       p != NULL_RTX;
       p = next_insn_in_loop (p, scan_start, end, loop_top))
    {
      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG)
	{
	  dest_reg = SET_DEST (set);
	  if (REGNO (dest_reg) < max_reg_before_loop
	      && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
	      && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
	    {
	      if (basic_induction_var (SET_SRC (set),
				       GET_MODE (SET_SRC (set)),
				       dest_reg, p, &inc_val, &mult_val))
		{
		  /* It is a possible basic induction variable.
		     Create and initialize an induction structure for it.  */

		  struct induction *v
		    = (struct induction *) alloca (sizeof (struct induction));

		  record_biv (v, p, dest_reg, inc_val, mult_val,
			      not_every_iteration, maybe_multiple);
		  reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
		}
	      else if (REGNO (dest_reg) < max_reg_before_loop)
		reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
	    }
	}

      /* Past CODE_LABEL, we get to insns that may be executed multiple
	 times.  The only way we can be sure that they can't is if every
	 jump insn between here and the end of the loop either
	 returns, exits the loop, is a forward jump, or is a jump
	 to the loop start.  */

      if (GET_CODE (p) == CODE_LABEL)
	{
	  rtx insn = p;

	  maybe_multiple = 0;

	  while (1)
	    {
	      insn = NEXT_INSN (insn);
	      if (insn == scan_start)
		break;
	      if (insn == end)
		{
		  if (loop_top != 0)
		    insn = loop_top;
		  else
		    break;
		  if (insn == scan_start)
		    break;
		}

	      if (GET_CODE (insn) == JUMP_INSN
		  && GET_CODE (PATTERN (insn)) != RETURN
		  && (! condjump_p (insn)
		      || (JUMP_LABEL (insn) != 0
			  && JUMP_LABEL (insn) != scan_start
			  && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
			      || INSN_UID (insn) >= max_uid_for_loop
			      || (INSN_LUID (JUMP_LABEL (insn))
				  < INSN_LUID (insn))))))
		{
		  maybe_multiple = 1;
		  break;
		}
	    }
	}
      /* Past a jump, we get to insns for which we can't count
	 on whether they will be executed during each iteration.  */
      /* This code appears twice in strength_reduce.  There is also similar
	 code in scan_loop.  */
      if (GET_CODE (p) == JUMP_INSN
	  /* If we enter the loop in the middle, and scan around to the
	     beginning, don't set not_every_iteration for that.
	     This can be any kind of jump, since we want to know if insns
	     will be executed if the loop is executed.  */
	  && ! (JUMP_LABEL (p) == loop_top
		&& ((NEXT_INSN (NEXT_INSN (p)) == loop_end
		     && simplejump_p (p))
		    || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
	{
	  rtx label = 0;

	  /* If this is a jump outside the loop, then it also doesn't
	     matter.  Check to see if the target of this branch is on the
	     loop_number_exits_labels list.  */

	  for (label
		 = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
	       label;
	       label = LABEL_NEXTREF (label))
	    if (XEXP (label, 0) == JUMP_LABEL (p))
	      break;

	  if (! label)
	    not_every_iteration = 1;
	}
      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed each iteration: logically, the loop begins here
	     even though the exit code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    not_every_iteration = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}

      /* Unlike in the code motion pass where MAYBE_NEVER indicates that
	 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
	 or not an insn is known to be executed each iteration of the
	 loop, whether or not any iterations are known to occur.

	 Therefore, if we have just passed a label and have no more labels
	 between here and the test insn of the loop, we know these insns
	 will be executed each iteration.  */

      if (not_every_iteration && GET_CODE (p) == CODE_LABEL
	  && no_labels_between_p (p, loop_end))
	not_every_iteration = 0;
    }
  /* Scan loop_iv_list to remove all regs that proved not to be bivs.
     Make a sanity check against n_times_set.  */
  for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
    {
      if (reg_iv_type[bl->regno] != BASIC_INDUCT
	  /* Above happens if register modified by subreg, etc.  */
	  /* Make sure it is not recognized as a basic induction var: */
	  || n_times_set[bl->regno] != bl->biv_count
	  /* If never incremented, it is invariant that we decided not to
	     move.  So leave it alone.  */
	  || ! bl->incremented)
	{
	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
		     bl->regno,
		     (reg_iv_type[bl->regno] != BASIC_INDUCT
		      ? "not induction variable"
		      : (! bl->incremented ? "never incremented"
			 : "count error")));

	  reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
	  *backbl = bl->next;
	}
      else
	{
	  backbl = &bl->next;

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
	}
    }
  /* Exit if there are no bivs.  */
  if (! loop_iv_list)
    {
      /* Can still unroll the loop anyways, but indicate that there is no
	 strength reduction info available.  */
      if (unroll_p)
	unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);

      return;
    }
  /* Find initial value for each biv by searching backwards from loop_start,
     halting at first label.  Also record any test condition.  */

  call_seen = 0;
  for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
    {
      note_insn = p;

      if (GET_CODE (p) == CALL_INSN)
	call_seen = 1;

      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	note_stores (PATTERN (p), record_initial);

      /* Record any test of a biv that branches around the loop if no store
	 between it and the start of loop.  We only care about tests with
	 constants and registers and only certain of those.  */
      if (GET_CODE (p) == JUMP_INSN
	  && JUMP_LABEL (p) != 0
	  && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
	  && (test = get_condition_for_loop (p)) != 0
	  && GET_CODE (XEXP (test, 0)) == REG
	  && REGNO (XEXP (test, 0)) < max_reg_before_loop
	  && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
	  && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
	  && bl->init_insn == 0)
	{
	  /* If an NE test, we have an initial value!  */
	  if (GET_CODE (test) == NE)
	    {
	      bl->init_insn = p;
	      bl->init_set = gen_rtx_SET (VOIDmode,
					  XEXP (test, 0), XEXP (test, 1));
	    }
	  else
	    bl->initial_test = test;
	}
    }
  /* Look at each biv and see if we can say anything better about its
     initial value from any initializing insns set up above.  (This is done
     in two passes to avoid missing SETs in a PARALLEL.)  */
  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      rtx src;
      rtx note;

      if (! bl->init_insn)
	continue;

      /* IF INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
	 is a constant, use the value of that.  */
      if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
	   && CONSTANT_P (XEXP (note, 0)))
	  || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
	      && CONSTANT_P (XEXP (note, 0))))
	src = XEXP (note, 0);
      else
	src = SET_SRC (bl->init_set);

      if (loop_dump_stream)
	fprintf (loop_dump_stream,
		 "Biv %d initialized at insn %d: initial value ",
		 bl->regno, INSN_UID (bl->init_insn));

      if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
	   || GET_MODE (src) == VOIDmode)
	  && valid_initial_value_p (src, bl->init_insn, call_seen,
				    loop_start))
	{
	  bl->initial_value = src;

	  if (loop_dump_stream)
	    {
	      if (GET_CODE (src) == CONST_INT)
		{
		  fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC,
			   INTVAL (src));
		  fputc ('\n', loop_dump_stream);
		}
	      else
		{
		  print_rtl (loop_dump_stream, src);
		  fprintf (loop_dump_stream, "\n");
		}
	    }
	}
      else
	{
	  /* Biv initial value is not simple move,
	     so let it keep initial value of "itself".  */

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "is complex\n");
	}
    }
  /* Search the loop for general induction variables.  */

  /* A register is a giv if: it is only set once, it is a function of a
     biv and a constant (or invariant), and it is not a biv.  */

  not_every_iteration = 0;
  loop_depth = 0;
  p = scan_start;
  while (1)
    {
      p = NEXT_INSN (p);
      /* At end of a straight-in loop, we are done.
	 At end of a loop entered at the bottom, scan the top.  */
      if (p == scan_start)
	break;
      if (p == end)
	{
	  if (loop_top != 0)
	    p = loop_top;
	  else
	    break;
	  if (p == scan_start)
	    break;
	}
      /* Look for a general induction variable in a register.  */
      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && ! may_not_optimize[REGNO (SET_DEST (set))])
	{
	  rtx src_reg;
	  rtx add_val;
	  rtx mult_val;
	  int benefit;
	  rtx regnote = 0;

	  dest_reg = SET_DEST (set);
	  if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
	    continue;

	  if (/* SET_SRC is a giv.  */
	      (general_induction_var (SET_SRC (set), &src_reg, &add_val,
				      &mult_val, 0, &benefit)
	       /* Equivalent expression is a giv.  */
	       || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
		   && general_induction_var (XEXP (regnote, 0), &src_reg,
					     &add_val, &mult_val, 0,
					     &benefit)))
	      /* Don't try to handle any regs made by loop optimization.
		 We have nothing on them in regno_first_uid, etc.  */
	      && REGNO (dest_reg) < max_reg_before_loop
	      /* Don't recognize a BASIC_INDUCT_VAR here.  */
	      && dest_reg != src_reg
	      /* This must be the only place where the register is set.  */
	      && (n_times_set[REGNO (dest_reg)] == 1
		  /* or all sets must be consecutive and make a giv.  */
		  || (benefit = consec_sets_giv (benefit, p,
						 src_reg, dest_reg,
						 &add_val, &mult_val))))
	    {
	      int count;
	      struct induction *v
		= (struct induction *) alloca (sizeof (struct induction));
	      rtx temp;

	      /* If this is a library call, increase benefit.  */
	      if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		benefit += libcall_benefit (p);

	      /* Skip the consecutive insns, if there are any.  */
	      for (count = n_times_set[REGNO (dest_reg)] - 1;
		   count > 0; count--)
		{
		  /* If first insn of libcall sequence, skip to end.
		     Do this at start of loop, since INSN is guaranteed to
		     be an insn here.  */
		  if (GET_CODE (p) != NOTE
		      && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
		    p = XEXP (temp, 0);

		  do p = NEXT_INSN (p);
		  while (GET_CODE (p) == NOTE);
		}

	      record_giv (v, p, src_reg, dest_reg, mult_val, add_val,
			  benefit, DEST_REG, not_every_iteration, NULL_PTR,
			  loop_start, loop_end);
	    }
	}
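      /* A register giv recorded above looks like, e.g. (illustratively),
	 j = i * 4 + 10 with biv i: SRC_REG is i, MULT_VAL 4 and
	 ADD_VAL 10.  */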
#ifndef DONT_REDUCE_ADDR
      /* Look for givs which are memory addresses.  */
      /* This resulted in worse code on a VAX 8600.  I wonder if it
	 still does.  */
      if (GET_CODE (p) == INSN)
	find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
		       loop_end);
#endif

      /* Update the status of whether giv can derive other givs.  This can
	 change when we pass a label or an insn that updates a biv.  */
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CODE_LABEL)
	update_giv_derive (p);
      /* Past a jump, we get to insns for which we can't count
	 on whether they will be executed during each iteration.  */
      /* This code appears twice in strength_reduce.  There is also similar
	 code in scan_loop.  */
      if (GET_CODE (p) == JUMP_INSN
	  /* If we enter the loop in the middle, and scan around to the
	     beginning, don't set not_every_iteration for that.
	     This can be any kind of jump, since we want to know if insns
	     will be executed if the loop is executed.  */
	  && ! (JUMP_LABEL (p) == loop_top
		&& ((NEXT_INSN (NEXT_INSN (p)) == loop_end
		     && simplejump_p (p))
		    || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
	{
	  rtx label = 0;

	  /* If this is a jump outside the loop, then it also doesn't
	     matter.  Check to see if the target of this branch is on the
	     loop_number_exits_labels list.  */

	  for (label
		 = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
	       label;
	       label = LABEL_NEXTREF (label))
	    if (XEXP (label, 0) == JUMP_LABEL (p))
	      break;

	  if (! label)
	    not_every_iteration = 1;
	}
      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed each iteration: logically, the loop begins here
	     even though the exit code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    not_every_iteration = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}

      /* Unlike in the code motion pass where MAYBE_NEVER indicates that
	 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
	 or not an insn is known to be executed each iteration of the
	 loop, whether or not any iterations are known to occur.

	 Therefore, if we have just passed a label and have no more labels
	 between here and the test insn of the loop, we know these insns
	 will be executed each iteration.  */

      if (not_every_iteration && GET_CODE (p) == CODE_LABEL
	  && no_labels_between_p (p, loop_end))
	not_every_iteration = 0;
    }
  /* Try to calculate and save the number of loop iterations.  This is
     set to zero if the actual number can not be calculated.  This must
     be called after all giv's have been identified, since otherwise it may
     fail if the iteration variable is a giv.  */

  loop_n_iterations = loop_iterations (loop_start, loop_end);

  /* Now for each giv for which we still don't know whether or not it is
     replaceable, check to see if it is replaceable because its final value
     can be calculated.  This must be done after loop_iterations is called,
     so that final_giv_value will work correctly.  */

  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      struct induction *v;

      for (v = bl->giv; v; v = v->next_iv)
	if (! v->replaceable && ! v->not_replaceable)
	  check_final_value (v, loop_start, loop_end);
    }

  /* Try to prove that the loop counter variable (if any) is always
     nonnegative; if so, record that fact with a REG_NONNEG note
     so that "decrement and branch until zero" insn can be used.  */
  check_dbra_loop (loop_end, insn_count, loop_start);
4061 /* record loop-variables relevant for BCT optimization before unrolling
4062 the loop. Unrolling may update part of this information, and the
4063 correct data will be used for generating the BCT. */
4064 #ifdef HAVE_decrement_and_branch_on_count
4065 if (HAVE_decrement_and_branch_on_count
)
4066 analyze_loop_iterations (loop_start
, loop_end
);
4070 /* Create reg_map to hold substitutions for replaceable giv regs. */
4071 reg_map
= (rtx
*) alloca (max_reg_before_loop
* sizeof (rtx
));
4072 bzero ((char *) reg_map
, max_reg_before_loop
* sizeof (rtx
));
  /* Examine each iv class for feasibility of strength reduction/induction
     variable elimination.  */

  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      struct induction *v;
      int benefit;
      int all_reduced;
      rtx final_value = 0;

      /* Test whether it will be possible to eliminate this biv
	 provided all givs are reduced.  This is possible if either
	 the reg is not used outside the loop, or we can compute
	 what its final value will be.

	 For architectures with a decrement_and_branch_until_zero insn,
	 don't do this if we put a REG_NONNEG note on the endtest for
	 this biv.  */

      /* Compare against bl->init_insn rather than loop_start.
	 We aren't concerned with any uses of the biv between
	 init_insn and loop_start since these won't be affected
	 by the value of the biv elsewhere in the function, so
	 long as init_insn doesn't use the biv itself.
	 March 14, 1989 -- self@bayes.arc.nasa.gov  */

      if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
	   && bl->init_insn
	   && INSN_UID (bl->init_insn) < max_uid_for_loop
	   && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
#ifdef HAVE_decrement_and_branch_until_zero
	   && ! bl->nonneg
#endif
	   && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
	  || ((final_value = final_biv_value (bl, loop_start, loop_end))
#ifdef HAVE_decrement_and_branch_until_zero
	      && ! bl->nonneg
#endif
	      ))
	bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
					      threshold, insn_count);
      else
	{
	  if (loop_dump_stream)
	    {
	      fprintf (loop_dump_stream,
		       "Cannot eliminate biv %d.\n",
		       bl->regno);
	      fprintf (loop_dump_stream,
		       "First use: insn %d, last use: insn %d.\n",
		       REGNO_FIRST_UID (bl->regno),
		       REGNO_LAST_UID (bl->regno));
	    }
	}
      /* Combine all giv's for this iv_class.  */
      combine_givs (bl);

      /* This will be true at the end, if all givs which depend on this
	 biv have been strength reduced.
	 We can't (currently) eliminate the biv unless this is so.  */
      all_reduced = 1;

      /* Check each giv in this class to see if we will benefit by reducing
	 it.  Skip giv's combined with others.  */
      for (v = bl->giv; v; v = v->next_iv)
	{
	  struct induction *tv;

	  if (v->ignore || v->same)
	    continue;

	  benefit = v->benefit;

	  /* Reduce benefit if not replaceable, since we will insert
	     a move-insn to replace the insn that calculates this giv.
	     Don't do this unless the giv is a user variable, since it
	     will often be marked non-replaceable because of the duplication
	     of the exit code outside the loop.  In such a case, the copies
	     we insert are dead and will be deleted.  So they don't have
	     a cost.  Similar situations exist.  */
	  /* ??? The new final_[bg]iv_value code does a much better job
	     of finding replaceable giv's, and hence this code may no longer
	     be necessary.  */
	  if (! v->replaceable && ! bl->eliminable
	      && REG_USERVAR_P (v->dest_reg))
	    benefit -= copy_cost;

	  /* Decrease the benefit to count the add-insns that we will
	     insert to increment the reduced reg for the giv.  */
	  benefit -= add_cost * bl->biv_count;

	  /* Decide whether to strength-reduce this giv or to leave the code
	     unchanged (recompute it from the biv each time it is used).
	     This decision can be made independently for each giv.  */

#ifdef AUTO_INC_DEC
	  /* Attempt to guess whether autoincrement will handle some of the
	     new add insns; if so, increase BENEFIT (undo the subtraction of
	     add_cost that was done above).  */
	  if (v->giv_type == DEST_ADDR
	      && GET_CODE (v->mult_val) == CONST_INT)
	    {
#if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
	      if (INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
		benefit += add_cost * bl->biv_count;
#endif
#if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
	      if (-INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
		benefit += add_cost * bl->biv_count;
#endif
	    }
#endif

	  /* If an insn is not to be strength reduced, then set its ignore
	     flag, and clear all_reduced.  */

	  /* A giv that depends on a reversed biv must be reduced if it is
	     used after the loop exit, otherwise, it would have the wrong
	     value after the loop exit.  To make it simple, just reduce all
	     of such giv's whether or not we know they are used after the loop
	     exit.  */

	  if (! flag_reduce_all_givs
	      && v->lifetime * threshold * benefit < insn_count
	      && ! bl->reversed)
	    {
	      if (loop_dump_stream)
		fprintf (loop_dump_stream,
			 "giv of insn %d not worth while, %d vs %d.\n",
			 INSN_UID (v->insn),
			 v->lifetime * threshold * benefit, insn_count);
	      v->ignore = 1;
	      all_reduced = 0;
	    }
	  else
	    {
	      /* Check that we can increment the reduced giv without a
		 multiply insn.  If not, reject it.  */

	      for (tv = bl->biv; tv; tv = tv->next_iv)
		if (tv->mult_val == const1_rtx
		    && ! product_cheap_p (tv->add_val, v->mult_val))
		  {
		    if (loop_dump_stream)
		      fprintf (loop_dump_stream,
			       "giv of insn %d: would need a multiply.\n",
			       INSN_UID (v->insn));
		    v->ignore = 1;
		    all_reduced = 0;
		    break;
		  }
	    }
	}
      /* Reduce each giv that we decided to reduce.  */

      for (v = bl->giv; v; v = v->next_iv)
	{
	  struct induction *tv;
	  if (! v->ignore && v->same == 0)
	    {
	      int auto_inc_opt = 0;

	      v->new_reg = gen_reg_rtx (v->mode);

#ifdef AUTO_INC_DEC
	      /* If the target has auto-increment addressing modes, and
		 this is an address giv, then try to put the increment
		 immediately after its use, so that flow can create an
		 auto-increment addressing mode.  */
	      if (v->giv_type == DEST_ADDR && bl->biv_count == 1
		  && bl->biv->always_executed && ! bl->biv->maybe_multiple
		  /* We don't handle reversed biv's because bl->biv->insn
		     does not have a valid INSN_LUID.  */
		  && ! bl->reversed
		  && v->always_executed && ! v->maybe_multiple
		  && INSN_UID (v->insn) < max_uid_for_loop)
		{
		  /* If other giv's have been combined with this one, then
		     this will work only if all uses of the other giv's occur
		     before this giv's insn.  This is difficult to check.

		     We simplify this by looking for the common case where
		     there is one DEST_REG giv, and this giv's insn is the
		     last use of the dest_reg of that DEST_REG giv.  If the
		     increment occurs after the address giv, then we can
		     perform the optimization.  (Otherwise, the increment
		     would have to go before other_giv, and we would not be
		     able to combine it with the address giv to get an
		     auto-inc address.)  */
		  if (v->combined_with)
		    {
		      struct induction *other_giv = 0;

		      for (tv = bl->giv; tv; tv = tv->next_iv)
			if (tv != v && tv->same == v)
			  {
			    if (other_giv)
			      break;
			    else
			      other_giv = tv;
			  }
		      if (! tv && other_giv
			  && REGNO (other_giv->dest_reg) < max_reg_before_loop
			  && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
			      == INSN_UID (v->insn))
			  && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
			auto_inc_opt = 1;
		    }
		  /* Check for case where increment is before the address
		     giv.  Do this test in "loop order".  */
		  else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
			    && (INSN_LUID (v->insn) < INSN_LUID (scan_start)
				|| (INSN_LUID (bl->biv->insn)
				    > INSN_LUID (scan_start))))
			   || (INSN_LUID (v->insn) < INSN_LUID (scan_start)
			       && (INSN_LUID (scan_start)
				   < INSN_LUID (bl->biv->insn))))
		    auto_inc_opt = -1;
		  else
		    auto_inc_opt = 1;

#ifdef HAVE_cc0
		  {
		    rtx prev;

		    /* We can't put an insn immediately after one setting
		       cc0, or immediately before one using cc0.  */
		    if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
			|| (auto_inc_opt == -1
			    && (prev = prev_nonnote_insn (v->insn)) != 0
			    && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
			    && sets_cc0_p (PATTERN (prev))))
		      auto_inc_opt = 0;
		  }
#endif

		  if (auto_inc_opt)
		    v->auto_inc_opt = 1;
		}
#endif

	      /* For each place where the biv is incremented, add an insn
		 to increment the new, reduced reg for the giv.  */
	      for (tv = bl->biv; tv; tv = tv->next_iv)
		{
		  rtx insert_before;

		  if (! auto_inc_opt)
		    insert_before = tv->insn;
		  else if (auto_inc_opt == 1)
		    insert_before = NEXT_INSN (v->insn);
		  else
		    insert_before = v->insn;

		  if (tv->mult_val == const1_rtx)
		    emit_iv_add_mult (tv->add_val, v->mult_val,
				      v->new_reg, v->new_reg, insert_before);
		  else /* tv->mult_val == const0_rtx */
		    /* A multiply is acceptable here
		       since this is presumed to be seldom executed.  */
		    emit_iv_add_mult (tv->add_val, v->mult_val,
				      v->add_val, v->new_reg, insert_before);
		}

	      /* Add code at loop start to initialize giv's reduced reg.  */

	      emit_iv_add_mult (bl->initial_value, v->mult_val,
				v->add_val, v->new_reg, loop_start);
	    }
	}
      /* Rescan all givs.  If a giv is the same as a giv not reduced, mark it
	 as not reduced.

	 For each giv register that can be reduced now: if replaceable,
	 substitute reduced reg wherever the old giv occurs;
	 else add new move insn "giv_reg = reduced_reg".

	 Also check for givs whose first use is their definition and whose
	 last use is the definition of another giv.  If so, it is likely
	 dead and should not be used to eliminate a biv.  */
      for (v = bl->giv; v; v = v->next_iv)
	{
	  if (v->same && v->same->ignore)
	    v->ignore = 1;

	  if (v->ignore)
	    continue;

	  if (v->giv_type == DEST_REG
	      && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
	    {
	      struct induction *v1;

	      for (v1 = bl->giv; v1; v1 = v1->next_iv)
		if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
		  v->maybe_dead = 1;
	    }

	  /* Update expression if this was combined, in case other giv was
	     replaced.  */
	  if (v->same)
	    v->new_reg = replace_rtx (v->new_reg,
				      v->same->dest_reg, v->same->new_reg);

	  if (v->giv_type == DEST_ADDR)
	    /* Store reduced reg as the address in the memref where we found
	       this giv.  */
	    validate_change (v->insn, v->location, v->new_reg, 0);
	  else if (v->replaceable)
	    {
	      reg_map[REGNO (v->dest_reg)] = v->new_reg;

#if 0
	      /* I can no longer duplicate the original problem.  Perhaps
		 this is unnecessary now?  */

	      /* Replaceable; it isn't strictly necessary to delete the old
		 insn and emit a new one, because v->dest_reg is now dead.

		 However, especially when unrolling loops, the special
		 handling for (set REG0 REG1) in the second cse pass may
		 make v->dest_reg live again.  To avoid this problem, emit
		 an insn to set the original giv reg from the reduced giv.
		 We can not delete the original insn, since it may be part
		 of a LIBCALL, and the code in flow that eliminates dead
		 libcalls will fail if it is deleted.  */
	      emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
			       v->insn);
#endif
	    }
	  else
	    {
	      /* Not replaceable; emit an insn to set the original giv reg from
		 the reduced giv, same as above.  */
	      emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
			       v->insn);
	    }

	  /* When a loop is reversed, givs which depend on the reversed
	     biv, and which are live outside the loop, must be set to their
	     correct final value.  This insn is only needed if the giv is
	     not replaceable.  The correct final value is the same as the
	     value that the giv starts the reversed loop with.  */
	  if (bl->reversed && ! v->replaceable)
	    emit_iv_add_mult (bl->initial_value, v->mult_val,
			      v->add_val, v->dest_reg, end_insert_before);
	  else if (v->final_value)
	    {
	      rtx insert_before;

	      /* If the loop has multiple exits, emit the insn before the
		 loop to ensure that it will always be executed no matter
		 how the loop exits.  Otherwise, emit the insn after the loop,
		 since this is slightly more efficient.  */
	      if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
		insert_before = loop_start;
	      else
		insert_before = end_insert_before;
	      emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
				insert_before);

	      /* If the insn to set the final value of the giv was emitted
		 before the loop, then we must delete the insn inside the loop
		 that sets it.  If this is a LIBCALL, then we must delete
		 every insn in the libcall.  Note, however, that
		 final_giv_value will only succeed when there are multiple
		 exits if the giv is dead at each exit, hence it does not
		 matter that the original insn remains because it is dead
		 anyways.  */
	      /* Delete the insn inside the loop that sets the giv since
		 the giv is now set before (or after) the loop.  */
	      delete_insn (v->insn);
	    }

	  if (loop_dump_stream)
	    {
	      fprintf (loop_dump_stream, "giv at %d reduced to ",
		       INSN_UID (v->insn));
	      print_rtl (loop_dump_stream, v->new_reg);
	      fprintf (loop_dump_stream, "\n");
	    }
	}
      /* All the givs based on the biv bl have been reduced if they
	 merit it.  */

      /* For each giv not marked as maybe dead that has been combined with a
	 second giv, clear any "maybe dead" mark on that second giv.
	 v->new_reg will either be or refer to the register of the giv it
	 combined with.

	 Doing this clearing avoids problems in biv elimination where a
	 giv's new_reg is a complex value that can't be put in the insn but
	 the giv combined with (with a reg as new_reg) is marked maybe_dead.
	 Since the register will be used in either case, we'd prefer it be
	 used from the simpler giv.  */

      for (v = bl->giv; v; v = v->next_iv)
	if (! v->maybe_dead && v->same)
	  v->same->maybe_dead = 0;
      /* Try to eliminate the biv, if it is a candidate.
	 This won't work if ! all_reduced,
	 since the givs we planned to use might not have been reduced.

	 We have to be careful that we didn't initially think we could
	 eliminate this biv because of a giv that we now think may be dead
	 and shouldn't be used as a biv replacement.

	 Also, there is the possibility that we may have a giv that looks
	 like it can be used to eliminate a biv, but the resulting insn
	 isn't valid.  This can happen, for example, on the 88k, where a
	 JUMP_INSN can compare a register only with zero.  Attempts to
	 replace it with a compare with a constant will fail.

	 Note that in cases where this call fails, we may have replaced some
	 of the occurrences of the biv with a giv, but no harm was done in
	 doing so in the rare cases where it can occur.  */

      if (all_reduced == 1 && bl->eliminable
	  && maybe_eliminate_biv (bl, loop_start, end, 1,
				  threshold, insn_count))
	{
	  /* ?? If we created a new test to bypass the loop entirely,
	     or otherwise drop straight in, based on this test, then
	     we might want to rewrite it also.  This way some later
	     pass has more hope of removing the initialization of this
	     biv entirely.  */

	  /* If final_value != 0, then the biv may be used after loop end
	     and we must emit an insn to set it just in case.

	     Reversed bivs already have an insn after the loop setting their
	     value, so we don't need another one.  We can't calculate the
	     proper final value for such a biv here anyways.  */
	  if (final_value != 0 && ! bl->reversed)
	    {
	      rtx insert_before;

	      /* If the loop has multiple exits, emit the insn before the
		 loop to ensure that it will always be executed no matter
		 how the loop exits.  Otherwise, emit the insn after the
		 loop, since this is slightly more efficient.  */
	      if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
		insert_before = loop_start;
	      else
		insert_before = end_insert_before;

	      emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
				insert_before);
	    }

#if 0
	  /* Delete all of the instructions inside the loop which set
	     the biv, as they are all dead.  It is safe to delete them,
	     because an insn setting a biv will never be part of a libcall.  */
	  /* However, deleting them will invalidate the regno_last_uid info,
	     so keeping them around is more convenient.  Final_biv_value
	     will only succeed when there are multiple exits if the biv
	     is dead at each exit, hence it does not matter that the original
	     insn remains, because it is dead anyways.  */
	  for (v = bl->biv; v; v = v->next_iv)
	    delete_insn (v->insn);
#endif

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
		     bl->regno);
	}
    }
  /* Go through all the instructions in the loop, making all the
     register substitutions scheduled in REG_MAP.  */

  for (p = loop_start; p != end; p = NEXT_INSN (p))
    if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	|| GET_CODE (p) == CALL_INSN)
      {
	replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
	replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
	INSN_CODE (p) = -1;
      }

  /* Unroll loops from within strength reduction so that we can use the
     induction variable information that strength_reduce has already
     collected.  */
  if (unroll_p)
    unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);

  /* Instrument the loop with a BCT insn.  */
#ifdef HAVE_decrement_and_branch_on_count
  if (HAVE_decrement_and_branch_on_count)
    insert_bct (loop_start, loop_end);
#endif

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\n");
}
/* Return 1 if X is a valid source for an initial value (or as value being
   compared against in an initial test).

   X must be either a register or constant and must not be clobbered between
   the current insn and the start of the loop.

   INSN is the insn containing X.  */

static int
valid_initial_value_p (x, insn, call_seen, loop_start)
     rtx x;
     rtx insn;
     int call_seen;
     rtx loop_start;
{
  if (CONSTANT_P (x))
    return 1;

  /* Only consider pseudos we know about initialized in insns whose luids
     we know.  */
  if (GET_CODE (x) != REG
      || REGNO (x) >= max_reg_before_loop)
    return 0;

  /* Don't use call-clobbered registers across a call which clobbers it.  On
     some machines, don't use any hard registers at all.  */
  if (REGNO (x) < FIRST_PSEUDO_REGISTER
      && (SMALL_REGISTER_CLASSES
	  || (call_used_regs[REGNO (x)] && call_seen)))
    return 0;

  /* Don't use registers that have been clobbered before the start of the
     loop.  */
  if (reg_set_between_p (x, insn, loop_start))
    return 0;

  return 1;
}
/* Scan X for memory refs and check each memory address
   as a possible giv.  INSN is the insn whose pattern X comes from.
   NOT_EVERY_ITERATION is 1 if the insn might not be executed during
   every loop iteration.  */

static void
find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
     rtx x;
     rtx insn;
     int not_every_iteration;
     rtx loop_start, loop_end;
{
  register int i, j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
    case CONST_INT:
    case CONST:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case LABEL_REF:
    case PC:
    case CC0:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case USE:
    case CLOBBER:
      return;

    case MEM:
      {
	rtx src_reg;
	rtx add_val;
	rtx mult_val;
	int benefit;

	/* This code used to disable creating GIVs with mult_val == 1 and
	   add_val == 0.  However, this leads to lost optimizations when
	   it comes time to combine a set of related DEST_ADDR GIVs, since
	   this one would not be seen.  */

	if (general_induction_var (XEXP (x, 0), &src_reg, &add_val,
				   &mult_val, 1, &benefit))
	  {
	    /* Found one; record it.  */
	    struct induction *v
	      = (struct induction *) oballoc (sizeof (struct induction));

	    record_giv (v, insn, src_reg, addr_placeholder, mult_val,
			add_val, benefit, DEST_ADDR, not_every_iteration,
			&XEXP (x, 0), loop_start, loop_end);

	    v->mem_mode = GET_MODE (x);
	  }
      }
      return;

    default:
      break;
    }

  /* Recursively scan the subexpressions for other mem refs.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
		     loop_end);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
		       loop_start, loop_end);
}
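
/* Illustrative sketch only -- not part of the pass; the functions are
   hypothetical.  A DEST_ADDR giv is an address found inside a MEM, such
   as the address of a[i] below: a + i*sizeof (int), i.e. mult_val is
   the element size and add_val is `a'.  Reducing it yields a pointer
   that moves in step with the biv, the form that auto-increment
   addressing modes can pick up.  */

static int
example_addr_giv (a, n)
     int *a;
     int n;
{
  int i, sum = 0;
  for (i = 0; i < n; i++)
    sum += a[i];		/* MEM address a + i*4 is the DEST_ADDR giv */
  return sum;
}

static int
example_addr_giv_reduced (a, n)
     int *a;
     int n;
{
  int *p = a;			/* reduced register holds the address */
  int i, sum = 0;
  for (i = 0; i < n; i++)
    sum += *p++;		/* candidate for a post-increment mode */
  return sum;
}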
/* Fill in the data about one biv update.
   V is the `struct induction' in which we record the biv.  (It is
   allocated by the caller, with alloca.)
   INSN is the insn that sets it.
   DEST_REG is the biv's reg.

   MULT_VAL is const1_rtx if the biv is being incremented here, in which case
   INC_VAL is the increment.  Otherwise, MULT_VAL is const0_rtx and the biv is
   being set to INC_VAL.

   NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
   executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
   can be executed more than once per iteration.  If MAYBE_MULTIPLE
   and NOT_EVERY_ITERATION are both zero, we know that the biv update is
   executed exactly once per iteration.  */

static void
record_biv (v, insn, dest_reg, inc_val, mult_val,
	    not_every_iteration, maybe_multiple)
     struct induction *v;
     rtx insn;
     rtx dest_reg;
     rtx inc_val;
     rtx mult_val;
     int not_every_iteration;
     int maybe_multiple;
{
  struct iv_class *bl;

  v->insn = insn;
  v->src_reg = dest_reg;
  v->dest_reg = dest_reg;
  v->mult_val = mult_val;
  v->add_val = inc_val;
  v->mode = GET_MODE (dest_reg);
  v->always_computable = ! not_every_iteration;
  v->always_executed = ! not_every_iteration;
  v->maybe_multiple = maybe_multiple;

  /* Add this to the reg's iv_class, creating a class
     if this is the first incrementation of the reg.  */

  bl = reg_biv_class[REGNO (dest_reg)];
  if (bl == 0)
    {
      /* Create and initialize new iv_class.  */

      bl = (struct iv_class *) oballoc (sizeof (struct iv_class));

      bl->regno = REGNO (dest_reg);
      bl->biv = 0;
      bl->giv = 0;
      bl->biv_count = 0;
      bl->giv_count = 0;

      /* Set initial value to the reg itself.  */
      bl->initial_value = dest_reg;
      /* We haven't seen the initializing insn yet.  */
      bl->init_insn = 0;
      bl->init_set = 0;
      bl->initial_test = 0;
      bl->incremented = 0;
      bl->eliminable = 0;
      bl->nonneg = 0;
      bl->reversed = 0;
      bl->total_benefit = 0;

      /* Add this class to loop_iv_list.  */
      bl->next = loop_iv_list;
      loop_iv_list = bl;

      /* Put it in the array of biv register classes.  */
      reg_biv_class[REGNO (dest_reg)] = bl;
    }

  /* Update IV_CLASS entry for this biv.  */
  v->next_iv = bl->biv;
  bl->biv = v;
  bl->biv_count++;
  if (mult_val == const1_rtx)
    bl->incremented = 1;

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream,
	       "Insn %d: possible biv, reg %d,",
	       INSN_UID (insn), REGNO (dest_reg));
      if (GET_CODE (inc_val) == CONST_INT)
	{
	  fprintf (loop_dump_stream, " const =");
	  fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val));
	  fputc ('\n', loop_dump_stream);
	}
      else
	{
	  fprintf (loop_dump_stream, " const = ");
	  print_rtl (loop_dump_stream, inc_val);
	  fprintf (loop_dump_stream, "\n");
	}
    }
}
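
/* Illustrative sketch only -- not part of the pass; the function is
   hypothetical.  It contains, in source terms, both biv update shapes
   record_biv distinguishes for reg `i': an increment, recorded with
   mult_val == const1_rtx and the step as INC_VAL, and an assignment of
   a loop invariant, recorded with mult_val == const0_rtx and the
   invariant as INC_VAL.  */

static int
example_biv_updates (k, n)
     int k, n;
{
  int i = 0, count;
  for (count = 0; count < n; count++)
    {
      i = i + 2;		/* increment: mult_val 1, inc_val 2 */
      if (i > 100)
	i = k;			/* invariant store: mult_val 0, inc_val k */
    }
  return i;
}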
/* Fill in the data about one giv.
   V is the `struct induction' in which we record the giv.  (It is
   allocated by the caller, with alloca.)
   INSN is the insn that sets it.
   BENEFIT estimates the savings from deleting this insn.
   TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
   into a register or is used as a memory address.

   SRC_REG is the biv reg which the giv is computed from.
   DEST_REG is the giv's reg (if the giv is stored in a reg).
   MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
   LOCATION points to the place where this giv's value appears in INSN.  */

static void
record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
	    type, not_every_iteration, location, loop_start, loop_end)
     struct induction *v;
     rtx insn;
     rtx src_reg;
     rtx dest_reg;
     rtx mult_val, add_val;
     int benefit;
     enum g_types type;
     int not_every_iteration;
     rtx *location;
     rtx loop_start, loop_end;
{
  struct induction *b;
  struct iv_class *bl;
  rtx set = single_set (insn);

  v->insn = insn;
  v->src_reg = src_reg;
  v->giv_type = type;
  v->dest_reg = dest_reg;
  v->mult_val = mult_val;
  v->add_val = add_val;
  v->benefit = benefit;
  v->location = location;
  v->cant_derive = 0;
  v->combined_with = 0;
  v->maybe_multiple = 0;
  v->maybe_dead = 0;
  v->derive_adjustment = 0;
  v->same = 0;
  v->ignore = 0;
  v->new_reg = 0;
  v->final_value = 0;
  v->auto_inc_opt = 0;

  /* The v->always_computable field is used in update_giv_derive, to
     determine whether a giv can be used to derive another giv.  For a
     DEST_REG giv, INSN computes a new value for the giv, so its value
     isn't computable if INSN isn't executed every iteration.
     However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
     it does not compute a new value.  Hence the value is always computable
     regardless of whether INSN is executed each iteration.  */

  if (type == DEST_ADDR)
    v->always_computable = 1;
  else
    v->always_computable = ! not_every_iteration;

  v->always_executed = ! not_every_iteration;

  if (type == DEST_ADDR)
    {
      v->mode = GET_MODE (*location);
      v->lifetime = 1;
      v->times_used = 1;
    }
  else /* type == DEST_REG */
    {
      v->mode = GET_MODE (SET_DEST (set));

      v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
		     - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);

      v->times_used = n_times_used[REGNO (dest_reg)];

      /* If the lifetime is zero, it means that this register is
	 really a dead store.  So mark this as a giv that can be
	 ignored.  This will not prevent the biv from being eliminated.  */
      if (v->lifetime == 0)
	v->ignore = 1;

      reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
      reg_iv_info[REGNO (dest_reg)] = v;
    }

  /* Add the giv to the class of givs computed from one biv.  */

  bl = reg_biv_class[REGNO (src_reg)];
  if (bl)
    {
      v->next_iv = bl->giv;
      bl->giv = v;
      /* Don't count DEST_ADDR.  This is supposed to count the number of
	 insns that calculate givs.  */
      if (type == DEST_REG)
	bl->giv_count++;
      bl->total_benefit += benefit;
    }
  else
    /* Fatal error, biv missing for this giv?  */
    abort ();

  if (type == DEST_ADDR)
    v->replaceable = 1;
  else
    {
      /* The giv can be replaced outright by the reduced register only if all
	 of the following conditions are true:
	 - the insn that sets the giv is always executed on any iteration
	   on which the giv is used at all
	   (there are two ways to deduce this:
	    either the insn is executed on every iteration,
	    or all uses follow that insn in the same basic block),
	 - the giv is not used outside the loop
	 - no assignments to the biv occur during the giv's lifetime.  */

      if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
	  /* Previous line always fails if INSN was moved by loop opt.  */
	  && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
	  && (! not_every_iteration
	      || last_use_this_basic_block (dest_reg, insn)))
	{
	  /* Now check that there are no assignments to the biv within the
	     giv's lifetime.  This requires two separate checks.  */

	  /* Check each biv update, and fail if any are between the first
	     and last use of the giv.

	     If this loop contains an inner loop that was unrolled, then
	     the insn modifying the biv may have been emitted by the loop
	     unrolling code, and hence does not have a valid luid.  Just
	     mark the biv as not replaceable in this case.  It is not very
	     useful as a biv, because it is used in two different loops.
	     It is very unlikely that we would be able to optimize the giv
	     using this biv anyways.  */

	  v->replaceable = 1;
	  for (b = bl->biv; b; b = b->next_iv)
	    {
	      if (INSN_UID (b->insn) >= max_uid_for_loop
		  || ((uid_luid[INSN_UID (b->insn)]
		       >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
		      && (uid_luid[INSN_UID (b->insn)]
			  <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
		{
		  v->replaceable = 0;
		  v->not_replaceable = 1;
		  break;
		}
	    }

	  /* If there are any backwards branches that go from after the
	     biv update to before it, then this giv is not replaceable.  */
	  if (v->replaceable)
	    for (b = bl->biv; b; b = b->next_iv)
	      if (back_branch_in_range_p (b->insn, loop_start, loop_end))
		{
		  v->replaceable = 0;
		  v->not_replaceable = 1;
		  break;
		}
	}
      else
	{
	  /* May still be replaceable, we don't have enough info here to
	     decide.  */
	  v->replaceable = 0;
	  v->not_replaceable = 0;
	}
    }

  /* Record whether the add_val contains a const_int, for later use by
     combine_givs_p.  */
  {
    rtx tem = add_val;

    v->no_const_addval = 1;
    if (tem == const0_rtx)
      ;
    else if (GET_CODE (tem) == CONST_INT)
      v->no_const_addval = 0;
    else if (GET_CODE (tem) == PLUS)
      {
	while (1)
	  {
	    if (GET_CODE (XEXP (tem, 0)) == PLUS)
	      tem = XEXP (tem, 0);
	    else if (GET_CODE (XEXP (tem, 1)) == PLUS)
	      tem = XEXP (tem, 1);
	    else
	      break;
	  }
	if (GET_CODE (XEXP (tem, 1)) == CONST_INT)
	  v->no_const_addval = 0;
      }
  }

  if (loop_dump_stream)
    {
      if (type == DEST_REG)
	fprintf (loop_dump_stream, "Insn %d: giv reg %d",
		 INSN_UID (insn), REGNO (dest_reg));
      else
	fprintf (loop_dump_stream, "Insn %d: dest address",
		 INSN_UID (insn));

      fprintf (loop_dump_stream, " src reg %d benefit %d",
	       REGNO (src_reg), v->benefit);
      fprintf (loop_dump_stream, " used %d lifetime %d",
	       v->times_used, v->lifetime);

      if (v->replaceable)
	fprintf (loop_dump_stream, " replaceable");

      if (v->no_const_addval)
	fprintf (loop_dump_stream, " ncav");

      if (GET_CODE (mult_val) == CONST_INT)
	{
	  fprintf (loop_dump_stream, " mult ");
	  fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val));
	}
      else
	{
	  fprintf (loop_dump_stream, " mult ");
	  print_rtl (loop_dump_stream, mult_val);
	}

      if (GET_CODE (add_val) == CONST_INT)
	{
	  fprintf (loop_dump_stream, " add ");
	  fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val));
	}
      else
	{
	  fprintf (loop_dump_stream, " add ");
	  print_rtl (loop_dump_stream, add_val);
	}
    }

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\n");
}
/* All this does is determine whether a giv can be made replaceable because
   its final value can be calculated.  This code can not be part of record_giv
   above, because final_giv_value requires that the number of loop iterations
   be known, and that can not be accurately calculated until after all givs
   have been identified.  */

static void
check_final_value (v, loop_start, loop_end)
     struct induction *v;
     rtx loop_start, loop_end;
{
  struct iv_class *bl;
  rtx final_value = 0;

  bl = reg_biv_class[REGNO (v->src_reg)];

  /* DEST_ADDR givs will never reach here, because they are always marked
     replaceable above in record_giv.  */

  /* The giv can be replaced outright by the reduced register only if all
     of the following conditions are true:
     - the insn that sets the giv is always executed on any iteration
       on which the giv is used at all
       (there are two ways to deduce this:
	either the insn is executed on every iteration,
	or all uses follow that insn in the same basic block),
     - its final value can be calculated (this condition is different
       than the one above in record_giv)
     - no assignments to the biv occur during the giv's lifetime.  */

#if 0
  /* This is only called now when replaceable is known to be false.  */
  /* Clear replaceable, so that it won't confuse final_giv_value.  */
  v->replaceable = 0;
#endif

  if ((final_value = final_giv_value (v, loop_start, loop_end))
      && (v->always_computable
	  || last_use_this_basic_block (v->dest_reg, v->insn)))
    {
      int biv_increment_seen = 0;
      rtx p = v->insn;
      rtx last_giv_use;

      v->replaceable = 1;

      /* When trying to determine whether or not a biv increment occurs
	 during the lifetime of the giv, we can ignore uses of the variable
	 outside the loop because final_value is true.  Hence we can not
	 use regno_last_uid and regno_first_uid as above in record_giv.  */

      /* Search the loop to determine whether any assignments to the
	 biv occur during the giv's lifetime.  Start with the insn
	 that sets the giv, and search around the loop until we come
	 back to that insn again.

	 Also fail if there is a jump within the giv's lifetime that jumps
	 to somewhere outside the lifetime but still within the loop.  This
	 catches spaghetti code where the execution order is not linear, and
	 hence the above test fails.  Here we assume that the giv lifetime
	 does not extend from one iteration of the loop to the next, so as
	 to make the test easier.  Since the lifetime isn't known yet,
	 this requires two loops.  See also record_giv above.  */

      last_giv_use = v->insn;

      while (1)
	{
	  p = NEXT_INSN (p);
	  if (p == loop_end)
	    p = NEXT_INSN (loop_start);
	  if (p == v->insn)
	    break;

	  if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	      || GET_CODE (p) == CALL_INSN)
	    {
	      if (biv_increment_seen)
		{
		  if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
		    {
		      v->replaceable = 0;
		      v->not_replaceable = 1;
		      break;
		    }
		}
	      else if (reg_set_p (v->src_reg, PATTERN (p)))
		biv_increment_seen = 1;
	      else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
		last_giv_use = p;
	    }
	}

      /* Now that the lifetime of the giv is known, check for branches
	 from within the lifetime to outside the lifetime if it is still
	 replaceable.  */

      if (v->replaceable)
	{
	  p = v->insn;
	  while (1)
	    {
	      p = NEXT_INSN (p);
	      if (p == loop_end)
		p = NEXT_INSN (loop_start);
	      if (p == last_giv_use)
		break;

	      if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
		  && LABEL_NAME (JUMP_LABEL (p))
		  && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
		      || (INSN_UID (v->insn) >= max_uid_for_loop)
		      || (INSN_UID (last_giv_use) >= max_uid_for_loop)
		      || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
			  && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
		      || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
			  && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
		{
		  v->replaceable = 0;
		  v->not_replaceable = 1;

		  if (loop_dump_stream)
		    fprintf (loop_dump_stream,
			     "Found branch outside giv lifetime.\n");

		  break;
		}
	    }
	}

      /* If it is replaceable, then save the final value.  */
      if (v->replaceable)
	v->final_value = final_value;
    }

  if (loop_dump_stream && v->replaceable)
    fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
	     INSN_UID (v->insn), REGNO (v->dest_reg));
}
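
/* Illustrative sketch only -- not part of the pass; the function is
   hypothetical.  It shows why a known iteration count makes a giv's
   final value computable: if the biv starts at INIT and is bumped by
   INC once per iteration, and the giv MULT*biv+ADD is computed before
   the update, then after N iterations the giv holds
   MULT * (INIT + (N-1)*INC) + ADD.  */

static int
example_final_giv_value ()
{
  long init = 3, inc = 2, n = 10;	/* biv parameters and trip count */
  long mult = 5, add = 7;		/* giv coefficients */
  long biv = init, giv = 0, k;

  for (k = 0; k < n; k++)
    {
      giv = mult * biv + add;		/* giv recomputed from the biv */
      biv += inc;			/* biv update */
    }

  /* The value the loop left in `giv', predicted in closed form.  */
  return giv == mult * (init + (n - 1) * inc) + add;	/* yields 1 */
}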
/* Update the status of whether a giv can derive other givs.

   We need to do something special if there is or may be an update to the biv
   between the time the giv is defined and the time it is used to derive
   another giv.

   In addition, a giv that is only conditionally set is not allowed to
   derive another giv once a label has been passed.

   The cases we look at are when a label or an update to a biv is passed.  */

static void
update_giv_derive (p)
     rtx p;
{
  struct iv_class *bl;
  struct induction *biv, *giv;
  rtx tem;
  int dummy;

  /* Search all IV classes, then all bivs, and finally all givs.

     There are three cases we are concerned with.  First we have the situation
     of a giv that is only updated conditionally.  In that case, it may not
     derive any givs after a label is passed.

     The second case is when a biv update occurs, or may occur, after the
     definition of a giv.  For certain biv updates (see below) that are
     known to occur between the giv definition and use, we can adjust the
     giv definition.  For others, or when the biv update is conditional,
     we must prevent the giv from deriving any other givs.  There are two
     sub-cases within this case.

     If this is a label, we are concerned with any biv update that is done
     conditionally, since it may be done after the giv is defined followed by
     a branch here (actually, we need to pass both a jump and a label, but
     this extra tracking doesn't seem worth it).

     If this is a jump, we are concerned about any biv update that may be
     executed multiple times.  We are actually only concerned about
     backward jumps, but it is probably not worth performing the test
     on the jump again here.

     If this is a biv update, we must adjust the giv status to show that a
     subsequent biv update was performed.  If this adjustment cannot be done,
     the giv cannot derive further givs.  */

  for (bl = loop_iv_list; bl; bl = bl->next)
    for (biv = bl->biv; biv; biv = biv->next_iv)
      if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
	  || biv->insn == p)
	{
	  for (giv = bl->giv; giv; giv = giv->next_iv)
	    {
	      /* If cant_derive is already true, there is no point in
		 checking all of these conditions again.  */
	      if (giv->cant_derive)
		continue;

	      /* If this giv is conditionally set and we have passed a label,
		 it cannot derive anything.  */
	      if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
		giv->cant_derive = 1;

	      /* Skip givs that have mult_val == 0, since
		 they are really invariants.  Also skip those that are
		 replaceable, since we know their lifetime doesn't contain
		 this biv update.  */
	      else if (giv->mult_val == const0_rtx || giv->replaceable)
		continue;

	      /* The only way we can allow this giv to derive another
		 is if this is a biv increment and we can form the product
		 of biv->add_val and giv->mult_val.  In this case, we will
		 be able to compute a compensation.  */
	      else if (biv->insn == p)
		{
		  tem = 0;

		  if (biv->mult_val == const1_rtx)
		    tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
							   biv->add_val,
							   giv->mult_val),
					     &dummy);

		  if (tem && giv->derive_adjustment)
		    tem = simplify_giv_expr (gen_rtx_PLUS (giv->mode, tem,
							   giv->derive_adjustment),
					     &dummy);
		  if (tem)
		    giv->derive_adjustment = tem;
		  else
		    giv->cant_derive = 1;
		}
	      else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
		       || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
		giv->cant_derive = 1;
	    }
	}
}
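
/* Illustrative sketch only -- not part of the pass; the function is
   hypothetical.  The compensation recorded in derive_adjustment, in
   plain arithmetic: a giv G = M*B + A is defined, then the biv B is
   bumped by D before G is used to derive another giv.  At the use the
   biv already reads B+D, so recomputing M*biv + A there is M*D too
   large; the derived expression must subtract M*D -- the product of
   biv->add_val and giv->mult_val formed above.  */

static int
example_derive_adjustment ()
{
  long b = 11, m = 3, a = 4, d = 7;	/* biv, giv coefficients, biv step */
  long g = m * b + a;			/* giv defined from the biv */

  b += d;				/* intervening biv update */

  /* Recover the giv from the current biv value by compensating.  */
  return g == (m * b + a) - m * d;	/* yields 1 */
}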
/* Check whether an insn is an increment legitimate for a basic induction var.
   X is the source of insn P, or a part of it.
   MODE is the mode in which X should be interpreted.

   DEST_REG is the putative biv, also the destination of the insn.
   We accept patterns of these forms:
     REG = REG + INVARIANT (includes REG = REG - CONSTANT)
     REG = INVARIANT + REG

   If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
   and store the additive term into *INC_VAL.

   If X is an assignment of an invariant into DEST_REG, we set
   *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.

   We also want to detect a BIV when it corresponds to a variable
   whose mode was promoted via PROMOTED_MODE.  In that case, an increment
   of the variable may be a PLUS that adds a SUBREG of that variable to
   an invariant and then sign- or zero-extends the result of the PLUS
   into the variable.

   Most GIVs in such cases will be in the promoted mode, since that is
   probably the natural computation mode (and almost certainly the mode
   used for addresses) on the machine.  So we view the pseudo-reg containing
   the variable as the BIV, as if it were simply incremented.

   Note that treating the entire pseudo as a BIV will result in making
   simple increments to any GIVs based on it.  However, if the variable
   overflows in its declared mode but not its promoted mode, the result will
   be incorrect.  This is acceptable if the variable is signed, since
   overflows in such cases are undefined, but not if it is unsigned, since
   those overflows are defined.  So we only check for SIGN_EXTEND and
   not ZERO_EXTEND.

   If we cannot find a biv, we return 0.  */

static int
basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
     register rtx x;
     enum machine_mode mode;
     rtx dest_reg;
     rtx p;
     rtx *inc_val;
     rtx *mult_val;
{
  register enum rtx_code code;
  rtx arg;
  rtx insn, set = 0;

  code = GET_CODE (x);
  switch (code)
    {
    case PLUS:
      if (rtx_equal_p (XEXP (x, 0), dest_reg)
	  || (GET_CODE (XEXP (x, 0)) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
	      && SUBREG_REG (XEXP (x, 0)) == dest_reg))
	arg = XEXP (x, 1);
      else if (rtx_equal_p (XEXP (x, 1), dest_reg)
	       || (GET_CODE (XEXP (x, 1)) == SUBREG
		   && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
		   && SUBREG_REG (XEXP (x, 1)) == dest_reg))
	arg = XEXP (x, 0);
      else
	return 0;

      if (invariant_p (arg) != 1)
	return 0;

      *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
      *mult_val = const1_rtx;
      return 1;

    case SUBREG:
      /* If this is a SUBREG for a promoted variable, check the inner
	 value.  */
      if (SUBREG_PROMOTED_VAR_P (x))
	return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
				    dest_reg, p, inc_val, mult_val);
      return 0;

    case REG:
      /* If this register is assigned in a previous insn, look at its
	 source, but don't go outside the loop or past a label.  */

      insn = p;
      while (1)
	{
	  do
	    {
	      insn = PREV_INSN (insn);
	    }
	  while (insn && GET_CODE (insn) == NOTE
		 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);

	  if (!insn)
	    break;
	  set = single_set (insn);
	  if (set == 0)
	    break;

	  if ((SET_DEST (set) == x
	       || (GET_CODE (SET_DEST (set)) == SUBREG
		   && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
		       <= UNITS_PER_WORD)
		   && SUBREG_REG (SET_DEST (set)) == x))
	      && basic_induction_var (SET_SRC (set),
				      (GET_MODE (SET_SRC (set)) == VOIDmode
				       ? GET_MODE (x)
				       : GET_MODE (SET_SRC (set))),
				      dest_reg, insn,
				      inc_val, mult_val))
	    return 1;
	}
      /* ... fall through ...  */

      /* Can accept constant setting of biv only when inside inner most loop.
	 Otherwise, a biv of an inner loop may be incorrectly recognized
	 as a biv of the outer loop,
	 causing code to be moved INTO the inner loop.  */
    case MEM:
      if (invariant_p (x) != 1)
	return 0;
    case CONST_INT:
    case SYMBOL_REF:
    case CONST:
      /* convert_modes aborts if we try to convert to or from CCmode, so just
	 exclude that case.  It is very unlikely that a condition code value
	 would be a useful iterator anyways.  */
      if (loops_enclosed == 1
	  && GET_MODE_CLASS (mode) != MODE_CC
	  && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
	{
	  /* Possible bug here?  Perhaps we don't know the mode of X.  */
	  *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
	  *mult_val = const0_rtx;
	  return 1;
	}
      else
	return 0;

    case SIGN_EXTEND:
      return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
				  dest_reg, p, inc_val, mult_val);

    case ASHIFTRT:
      /* Similar, since this can be a sign extension.  */
      for (insn = PREV_INSN (p);
	   (insn && GET_CODE (insn) == NOTE
	    && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
	   insn = PREV_INSN (insn))
	;

      if (insn)
	set = single_set (insn);

      if (set && SET_DEST (set) == XEXP (x, 0)
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) >= 0
	  && GET_CODE (SET_SRC (set)) == ASHIFT
	  && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
	return basic_induction_var (XEXP (SET_SRC (set), 0),
				    GET_MODE (XEXP (x, 0)),
				    dest_reg, insn, inc_val, mult_val);
      return 0;

    default:
      return 0;
    }
}
/* A general induction variable (giv) is any quantity that is a linear
   function of a basic induction variable,
   i.e. giv = biv * mult_val + add_val.
   The coefficients can be any loop invariant quantity.
   A giv need not be computed directly from the biv;
   it can be computed by way of other givs.  */
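
/* A worked instance of that definition, as an illustrative sketch only
   (the function and variable names below are hypothetical, not part of
   the compiler).  With biv `i', each value below is a linear function
   of `i' with loop-invariant coefficients, hence a giv.  */

static int
example_givs (base, i)
     const int *base;
     int i;
{
  int j = 4 * i + 10;		/* giv: mult_val 4, add_val 10 */
  int k = j - 5;		/* giv by way of giv j: 4*i + 5 */
  const int *p = base + i;	/* address giv: mult_val == sizeof (int) */
  return j + k + *p;
}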
/* Determine whether X computes a giv.
   If it does, return a nonzero value
     which is the benefit from eliminating the computation of X;
   set *SRC_REG to the register of the biv that it is computed from;
   set *ADD_VAL and *MULT_VAL to the coefficients,
     such that the value of X is biv * mult + add;  */

static int
general_induction_var (x, src_reg, add_val, mult_val, is_addr, pbenefit)
     rtx x;
     rtx *src_reg;
     rtx *add_val;
     rtx *mult_val;
     int is_addr;
     int *pbenefit;
{
  rtx orig_x = x;
  char *storage;

  *pbenefit = 0;

  /* If this is an invariant, forget it, it isn't a giv.  */
  if (invariant_p (x) == 1)
    return 0;

  /* See if the expression could be a giv and get its form.
     Mark our place on the obstack in case we don't find a giv.  */
  storage = (char *) oballoc (0);
  x = simplify_giv_expr (x, pbenefit);
  if (x == 0)
    {
      obfree (storage);
      return 0;
    }

  switch (GET_CODE (x))
    {
    case USE:
    case CONST_INT:
      /* Since this is now an invariant and wasn't before, it must be a giv
	 with MULT_VAL == 0.  It doesn't matter which BIV we associate this
	 with.  */
      *src_reg = loop_iv_list->biv->dest_reg;
      *mult_val = const0_rtx;
      *add_val = x;
      break;

    case REG:
      /* This is equivalent to a BIV.  */
      *src_reg = x;
      *mult_val = const1_rtx;
      *add_val = const0_rtx;
      break;

    case PLUS:
      /* Either (plus (biv) (invar)) or
	 (plus (mult (biv) (invar_1)) (invar_2)).  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  *src_reg = XEXP (XEXP (x, 0), 0);
	  *mult_val = XEXP (XEXP (x, 0), 1);
	}
      else
	{
	  *src_reg = XEXP (x, 0);
	  *mult_val = const1_rtx;
	}
      *add_val = XEXP (x, 1);
      break;

    case MULT:
      /* ADD_VAL is zero.  */
      *src_reg = XEXP (x, 0);
      *mult_val = XEXP (x, 1);
      *add_val = const0_rtx;
      break;

    default:
      abort ();
    }

  /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
     one unless they are CONST_INT).  */
  if (GET_CODE (*add_val) == USE)
    *add_val = XEXP (*add_val, 0);
  if (GET_CODE (*mult_val) == USE)
    *mult_val = XEXP (*mult_val, 0);

  if (is_addr)
    {
#ifdef ADDRESS_COST
      *pbenefit += ADDRESS_COST (orig_x) - reg_address_cost;
#else
      *pbenefit += rtx_cost (orig_x, MEM) - reg_address_cost;
#endif
    }
  else
    *pbenefit += rtx_cost (orig_x, SET);

  /* Always return true if this is a giv so it will be detected as such,
     even if the benefit is zero or negative.  This allows elimination
     of bivs that might otherwise not be eliminated.  */
  return 1;
}
/* Given an expression, X, try to form it as a linear function of a biv.
   We will canonicalize it to be of the form
	(plus (mult (BIV) (invar_1))
	      (invar_2))
   with possible degeneracies.

   The invariant expressions must each be of a form that can be used as a
   machine operand.  We surround them with a USE rtx (a hack, but localized
   and certainly unambiguous!) if not a CONST_INT for simplicity in this
   routine; it is the caller's responsibility to strip them.

   If no such canonicalization is possible (i.e., two biv's are used or an
   expression that is neither invariant nor a biv or giv), this routine
   returns 0.

   For a non-zero return, the result will have a code of CONST_INT, USE,
   REG (for a BIV), PLUS, or MULT.  No other codes will occur.

   *BENEFIT will be incremented by the benefit of any sub-giv encountered.  */

static rtx sge_plus PROTO ((enum machine_mode, rtx, rtx));
static rtx sge_plus_constant PROTO ((rtx, rtx));

static rtx
simplify_giv_expr (x, benefit)
     rtx x;
     int *benefit;
{
  enum machine_mode mode = GET_MODE (x);
  rtx arg0, arg1;
  rtx tem;

  /* If this is not an integer mode, or if we cannot do arithmetic in this
     mode, this can't be a giv.  */
  if (mode != VOIDmode
      && (GET_MODE_CLASS (mode) != MODE_INT
	  || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
    return 0;

  switch (GET_CODE (x))
    {
    case PLUS:
      arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
      arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
      if (arg0 == 0 || arg1 == 0)
	return 0;

      /* Put constant last, CONST_INT last if both constant.  */
      if ((GET_CODE (arg0) == USE
	   || GET_CODE (arg0) == CONST_INT)
	  && ! ((GET_CODE (arg0) == USE
		 && GET_CODE (arg1) == USE)
		|| GET_CODE (arg1) == CONST_INT))
	tem = arg0, arg0 = arg1, arg1 = tem;

      /* Handle addition of zero, then addition of an invariant.  */
      if (arg1 == const0_rtx)
	return arg0;
      else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
	switch (GET_CODE (arg0))
	  {
	  case CONST_INT:
	  case USE:
	    /* Adding two invariants must result in an invariant, so enclose
	       addition operation inside a USE and return it.  */
	    if (GET_CODE (arg0) == USE)
	      arg0 = XEXP (arg0, 0);
	    if (GET_CODE (arg1) == USE)
	      arg1 = XEXP (arg1, 0);

	    if (GET_CODE (arg0) == CONST_INT)
	      tem = arg0, arg0 = arg1, arg1 = tem;
	    if (GET_CODE (arg1) == CONST_INT)
	      tem = sge_plus_constant (arg0, arg1);
	    else
	      tem = sge_plus (mode, arg0, arg1);

	    if (GET_CODE (tem) != CONST_INT)
	      tem = gen_rtx_USE (mode, tem);
	    return tem;

	  case REG:
	  case MULT:
	    /* biv + invar or mult + invar.  Return sum.  */
	    return gen_rtx_PLUS (mode, arg0, arg1);

	  case PLUS:
	    /* (a + invar_1) + invar_2.  Associate.  */
	    return simplify_giv_expr (
		gen_rtx_PLUS (mode, XEXP (arg0, 0),
			      gen_rtx_PLUS (mode, XEXP (arg0, 1), arg1)),
		benefit);

	  default:
	    abort ();
	  }

      /* Each argument must be either REG, PLUS, or MULT.  Convert REG to
	 MULT to reduce cases.  */
      if (GET_CODE (arg0) == REG)
	arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
      if (GET_CODE (arg1) == REG)
	arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);

      /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
	 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
	 Recurse to associate the second PLUS.  */
      if (GET_CODE (arg1) == MULT)
	tem = arg0, arg0 = arg1, arg1 = tem;

      if (GET_CODE (arg1) == PLUS)
	return simplify_giv_expr (gen_rtx_PLUS (mode,
						gen_rtx_PLUS (mode, arg0,
							      XEXP (arg1, 0)),
						XEXP (arg1, 1)),
				  benefit);

      /* Now must have MULT + MULT.  Distribute if same biv, else not giv.  */
      if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
	return 0;

      if (!rtx_equal_p (arg0, arg1))
	return 0;

      return simplify_giv_expr (gen_rtx_MULT (mode,
					      XEXP (arg0, 0),
					      gen_rtx_PLUS (mode,
							    XEXP (arg0, 1),
							    XEXP (arg1, 1))),
				benefit);

    case MINUS:
      /* Handle "a - b" as "a + b * (-1)".  */
      return simplify_giv_expr (gen_rtx_PLUS (mode,
					      XEXP (x, 0),
					      gen_rtx_MULT (mode, XEXP (x, 1),
							    constm1_rtx)),
				benefit);

    case MULT:
      arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
      arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
      if (arg0 == 0 || arg1 == 0)
	return 0;

      /* Put constant last, CONST_INT last if both constant.  */
      if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
	  && GET_CODE (arg1) != CONST_INT)
	tem = arg0, arg0 = arg1, arg1 = tem;

      /* If second argument is not now constant, not giv.  */
      if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
	return 0;

      /* Handle multiply by 0 or 1.  */
      if (arg1 == const0_rtx)
	return const0_rtx;

      else if (arg1 == const1_rtx)
	return arg0;

      switch (GET_CODE (arg0))
	{
	case REG:
	  /* biv * invar.  Done.  */
	  return gen_rtx_MULT (mode, arg0, arg1);

	case CONST_INT:
	  /* Product of two constants.  */
	  return GEN_INT (INTVAL (arg0) * INTVAL (arg1));

	case USE:
	  /* invar * invar.  It is a giv, but very few of these will
	     actually pay off, so limit to simple registers.  */
	  if (GET_CODE (arg1) != CONST_INT)
	    return 0;

	  arg0 = XEXP (arg0, 0);
	  if (GET_CODE (arg0) == REG)
	    tem = gen_rtx_MULT (mode, arg0, arg1);
	  else if (GET_CODE (arg0) == MULT
		   && GET_CODE (XEXP (arg0, 0)) == REG
		   && GET_CODE (XEXP (arg0, 1)) == CONST_INT)
	    {
	      tem = gen_rtx_MULT (mode, XEXP (arg0, 0),
				  GEN_INT (INTVAL (XEXP (arg0, 1))
					   * INTVAL (arg1)));
	    }
	  else
	    return 0;
	  return gen_rtx_USE (mode, tem);

	case MULT:
	  /* (a * invar_1) * invar_2.  Associate.  */
	  return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (arg0, 0),
						  gen_rtx_MULT (mode,
								XEXP (arg0, 1),
								arg1)),
				    benefit);

	case PLUS:
	  /* (a + invar_1) * invar_2.  Distribute.  */
	  return simplify_giv_expr (gen_rtx_PLUS (mode,
						  gen_rtx_MULT (mode,
								XEXP (arg0, 0),
								arg1),
						  gen_rtx_MULT (mode,
								XEXP (arg0, 1),
								arg1)),
				    benefit);

	default:
	  abort ();
	}

    case ASHIFT:
      /* Shift by constant is multiply by power of two.  */
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	return 0;

      return simplify_giv_expr (gen_rtx_MULT (mode,
					      XEXP (x, 0),
					      GEN_INT ((HOST_WIDE_INT) 1
						       << INTVAL (XEXP (x, 1)))),
				benefit);

    case NEG:
      /* "-a" is "a * (-1)" */
      return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
				benefit);

    case NOT:
      /* "~a" is "-a - 1".  Silly, but easy.  */
      return simplify_giv_expr (gen_rtx_MINUS (mode,
					       gen_rtx_NEG (mode, XEXP (x, 0)),
					       const1_rtx),
				benefit);

    case USE:
      /* Already in proper form for invariant.  */
      return x;

    case REG:
      /* If this is a new register, we can't deal with it.  */
      if (REGNO (x) >= max_reg_before_loop)
	return 0;

      /* Check for biv or giv.  */
      switch (reg_iv_type[REGNO (x)])
	{
	case BASIC_INDUCT:
	  return x;
	case GENERAL_INDUCT:
	  {
	    struct induction *v = reg_iv_info[REGNO (x)];

	    /* Form expression from giv and add benefit.  Ensure this giv
	       can derive another and subtract any needed adjustment if so.  */
	    *benefit += v->benefit;
	    if (v->cant_derive)
	      return 0;

	    tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, v->src_reg,
						    v->mult_val),
				v->add_val);
	    if (v->derive_adjustment)
	      tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
	    return simplify_giv_expr (tem, benefit);
	  }

	default:
	  /* If it isn't an induction variable, and it is invariant, we
	     may be able to simplify things further by looking through
	     the bits we just moved outside the loop.  */
	  if (invariant_p (x) == 1)
	    {
	      struct movable *m;

	      for (m = the_movables; m; m = m->next)
		if (rtx_equal_p (x, m->set_dest))
		  {
		    /* Ok, we found a match.  Substitute and simplify.  */

		    /* If we match another movable, we must use that, as
		       this one is going away.  */
		    if (m->match)
		      return simplify_giv_expr (m->match->set_dest, benefit);

		    /* If consec is non-zero, this is a member of a group of
		       instructions that were moved together.  We handle this
		       case only to the point of seeking to the last insn and
		       looking for a REG_EQUAL.  Fail if we don't find one.  */
		    if (m->consec != 0)
		      {
			int i = m->consec;
			tem = m->insn;
			do { tem = NEXT_INSN (tem); } while (--i > 0);

			tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
			if (tem)
			  tem = XEXP (tem, 0);
		      }
		    else
		      {
			tem = single_set (m->insn);
			if (tem)
			  tem = SET_SRC (tem);
		      }

		    if (tem)
		      {
			/* What we are most interested in is pointer
			   arithmetic on invariants -- only take
			   patterns we may be able to do something with.  */
			if (GET_CODE (tem) == PLUS
			    || GET_CODE (tem) == MULT
			    || GET_CODE (tem) == ASHIFT
			    || GET_CODE (tem) == CONST_INT
			    || GET_CODE (tem) == SYMBOL_REF)
			  {
			    tem = simplify_giv_expr (tem, benefit);
			    if (tem)
			      return tem;
			  }
			else if (GET_CODE (tem) == CONST
				 && GET_CODE (XEXP (tem, 0)) == PLUS
				 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
				 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
			  {
			    tem = simplify_giv_expr (XEXP (tem, 0), benefit);
			    if (tem)
			      return tem;
			  }
		      }
		    break;
		  }
	    }
	  break;
	}

      /* Fall through to general case.  */
    default:
      /* If invariant, return as USE (unless CONST_INT).
	 Otherwise, not giv.  */
      if (GET_CODE (x) == USE)
	x = XEXP (x, 0);

      if (invariant_p (x) == 1)
	{
	  if (GET_CODE (x) == CONST_INT)
	    return x;
	  if (GET_CODE (x) == CONST
	      && GET_CODE (XEXP (x, 0)) == PLUS
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
	    x = XEXP (x, 0);
	  return gen_rtx_USE (mode, x);
	}
      else
	return 0;
    }
}
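
/* Illustrative sketch only -- not part of the pass; the function is
   hypothetical.  These are the algebraic identities the rewrites above
   rely on, checked on ordinary integers (`i' stands in for the biv;
   the NOT identity assumes two's complement, as GCC targets do):  */

static int
example_simplify_identities (i, j)
     long i, j;
{
  return ((i << 2) == i * 4		/* ASHIFT: shift is multiply by 2^n */
	  && (i - j) == i + j * -1	/* MINUS: a - b == a + b*(-1) */
	  && (-i) == i * -1		/* NEG: -a == a*(-1) */
	  && (~i) == -i - 1		/* NOT: ~a == -a - 1 */
	  && (i + 3) * 5 == i * 5 + 15);	/* PLUS under MULT distributes */
}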
/* This routine folds invariants such that there is only ever one
   CONST_INT in the summation.  It is only used by simplify_giv_expr.  */

static rtx
sge_plus_constant (x, c)
     rtx x, c;
{
  if (GET_CODE (x) == CONST_INT)
    return GEN_INT (INTVAL (x) + INTVAL (c));
  else if (GET_CODE (x) != PLUS)
    return gen_rtx_PLUS (GET_MODE (x), x, c);
  else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
			   GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
    }
  else if (GET_CODE (XEXP (x, 0)) == PLUS
	   || GET_CODE (XEXP (x, 1)) != PLUS)
    {
      return gen_rtx_PLUS (GET_MODE (x),
			   sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
    }
  else
    {
      return gen_rtx_PLUS (GET_MODE (x),
			   sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
    }
}

static rtx
sge_plus (mode, x, y)
     enum machine_mode mode;
     rtx x, y;
{
  while (GET_CODE (y) == PLUS)
    {
      rtx a = XEXP (y, 0);
      if (GET_CODE (a) == CONST_INT)
	x = sge_plus_constant (x, a);
      else
	x = gen_rtx_PLUS (mode, x, a);
      y = XEXP (y, 1);
    }
  if (GET_CODE (y) == CONST_INT)
    x = sge_plus_constant (x, y);
  else
    x = gen_rtx_PLUS (mode, x, y);
  return x;
}
/* Help detect a giv that is calculated by several consecutive insns;
   for example,
      giv = biv * M
      giv = giv + A

   The caller has already identified the first insn P as having a giv as dest;
   we check that all other insns that set the same register follow
   immediately after P, that they alter nothing else,
   and that the result of the last is still a giv.

   The value is 0 if the reg set in P is not really a giv.
   Otherwise, the value is the amount gained by eliminating
   all the consecutive insns that compute the value.

   FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
   SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.

   The coefficients of the ultimate giv value are stored in
   *MULT_VAL and *ADD_VAL.  */

static int
consec_sets_giv (first_benefit, p, src_reg, dest_reg,
		 add_val, mult_val)
     int first_benefit;
     rtx p;
     rtx src_reg;
     rtx dest_reg;
     rtx *add_val;
     rtx *mult_val;
{
  int count;
  enum rtx_code code;
  int benefit;
  rtx temp;
  rtx set;

  /* Indicate that this is a giv so that we can update the value produced in
     each insn of the multi-insn sequence.

     This induction structure will be used only by the call to
     general_induction_var below, so we can allocate it on our stack.
     If this is a giv, our caller will replace the induct var entry with
     a new induction structure.  */
  struct induction *v
    = (struct induction *) alloca (sizeof (struct induction));
  v->src_reg = src_reg;
  v->mult_val = *mult_val;
  v->add_val = *add_val;
  v->benefit = first_benefit;
  v->cant_derive = 0;
  v->derive_adjustment = 0;

  reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
  reg_iv_info[REGNO (dest_reg)] = v;

  count = n_times_set[REGNO (dest_reg)] - 1;

  while (count > 0)
    {
      p = NEXT_INSN (p);
      code = GET_CODE (p);

      /* If libcall, skip to end of call sequence.  */
      if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
	p = XEXP (temp, 0);

      if (code == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && SET_DEST (set) == dest_reg
	  && (general_induction_var (SET_SRC (set), &src_reg,
				     add_val, mult_val, 0, &benefit)
	      /* Giv created by equivalent expression.  */
	      || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
		  && general_induction_var (XEXP (temp, 0), &src_reg,
					    add_val, mult_val, 0, &benefit)))
	  && src_reg == v->src_reg)
	{
	  if (find_reg_note (p, REG_RETVAL, NULL_RTX))
	    benefit += libcall_benefit (p);

	  count--;
	  v->mult_val = *mult_val;
	  v->add_val = *add_val;
	  v->benefit = benefit;
	}
      else if (code != NOTE)
	{
	  /* Allow insns that set something other than this giv to a
	     constant.  Such insns are needed on machines which cannot
	     include long constants and should not disqualify a giv.  */
	  if (code == INSN
	      && (set = single_set (p))
	      && SET_DEST (set) != dest_reg
	      && CONSTANT_P (SET_SRC (set)))
	    continue;

	  reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
	  return 0;
	}
    }

  return v->benefit;
}
/* Return an rtx, if any, that expresses giv G2 as a function of the register
   represented by G1.  If no such expression can be found, or it is clear that
   it cannot possibly be a valid address, 0 is returned.

   To perform the computation, we note that

	G1 = x * v + a		and	G2 = y * v + b

   where `v' is the biv.

   So G2 = (y/x) * G1 + (b - a*y/x).

   Note that MULT = y/x.

   Update: A and B are now allowed to be additive expressions such that
   B contains all variables in A.  That is, computing B-A will not require
   subtracting variables.  */

static rtx
express_from_1 (a, b, mult)
     rtx a, b, mult;
{
  /* If MULT is zero, then A*MULT is zero, and our expression is B.  */

  if (mult == const0_rtx)
    return b;

  /* If MULT is not 1, we cannot handle A with non-constants, since we
     would then be required to subtract multiples of the registers in A.
     This is theoretically possible, and may even apply to some Fortran
     constructs, but it is a lot of work and we do not attempt it here.  */

  if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
    return NULL_RTX;

  /* In general these structures are sorted top to bottom (down the PLUS
     chain), but not left to right across the PLUS.  If B is a higher
     order giv than A, we can strip one level and recurse.  If A is higher
     order, we'll eventually bail out, but won't know that until the end.
     If they are the same, we'll strip one level around this loop.  */

  while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
    {
      rtx ra, rb, oa, ob, tmp;

      ra = XEXP (a, 0), oa = XEXP (a, 1);
      if (GET_CODE (ra) == PLUS)
        tmp = ra, ra = oa, oa = tmp;

      rb = XEXP (b, 0), ob = XEXP (b, 1);
      if (GET_CODE (rb) == PLUS)
        tmp = rb, rb = ob, ob = tmp;

      if (rtx_equal_p (ra, rb))
        /* We matched: remove one reg completely.  */
        a = oa, b = ob;
      else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
        /* An alternate match.  */
        a = oa, b = rb;
      else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
        /* An alternate match.  */
        a = ra, b = ob;
      else
        {
          /* Indicates an extra register in B.  Strip one level from B and
             recurse, hoping B was the higher order expression.  */
          ob = express_from_1 (a, ob, mult);
          if (ob == NULL_RTX)
            return NULL_RTX;
          return gen_rtx_PLUS (GET_MODE (b), rb, ob);
        }
    }

  /* Here we are at the last level of A, go through the cases hoping to
     get rid of everything but a constant.  */

  if (GET_CODE (a) == PLUS)
    {
      rtx ra, oa;

      ra = XEXP (a, 0), oa = XEXP (a, 1);
      if (rtx_equal_p (oa, b))
        oa = ra;
      else if (!rtx_equal_p (ra, b))
        return NULL_RTX;

      if (GET_CODE (oa) != CONST_INT)
        return NULL_RTX;

      return GEN_INT (-INTVAL (oa) * INTVAL (mult));
    }
  else if (GET_CODE (a) == CONST_INT)
    {
      return plus_constant (b, -INTVAL (a) * INTVAL (mult));
    }
  else if (GET_CODE (b) == PLUS)
    {
      if (rtx_equal_p (a, XEXP (b, 0)))
        return XEXP (b, 1);
      else if (rtx_equal_p (a, XEXP (b, 1)))
        return XEXP (b, 0);
      else
        return NULL_RTX;
    }
  else if (rtx_equal_p (a, b))
    return const0_rtx;

  return NULL_RTX;
}
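/* Worked example (illustrative, not from the original source): with biv v,
   suppose G1 = 2*v + 4 and G2 = 6*v + 8.  Then MULT = y/x = 6/2 = 3 and
   b - a*y/x = 8 - 4*3 = -4, so G2 = 3*G1 - 4.  express_from_1 computes the
   additive part (-4 here); express_from below combines it with
   (mult G1 3) to form the full expression.  */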
static rtx
express_from (g1, g2)
     struct induction *g1, *g2;
{
  rtx mult, add;

  /* The value that G1 will be multiplied by must be a constant integer.  Also,
     the only chance we have of getting a valid address is if b*c/a (see above
     for notation) is also an integer.  */
  if (GET_CODE (g1->mult_val) == CONST_INT
      && GET_CODE (g2->mult_val) == CONST_INT)
    {
      if (g1->mult_val == const0_rtx
          || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
        return NULL_RTX;
      mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
    }
  else if (rtx_equal_p (g1->mult_val, g2->mult_val))
    mult = const1_rtx;
  else
    {
      /* ??? Find out if the one is a multiple of the other?  */
      return NULL_RTX;
    }

  add = express_from_1 (g1->add_val, g2->add_val, mult);
  if (add == NULL_RTX)
    return NULL_RTX;

  /* Form simplified final result.  */
  if (mult == const0_rtx)
    return add;
  else if (mult == const1_rtx)
    mult = g1->dest_reg;
  else
    mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);

  if (add == const0_rtx)
    return mult;
  else
    return gen_rtx_PLUS (g2->mode, mult, add);
}
/* Return, as an rtx, the value giv G2 should use if it can be combined with
   giv G1; return 0 if they cannot be combined.  Combining means that G2 can
   use (either directly or via an address expression) a register used to
   represent G1; the caller records the result in g2->new_reg (normally just
   g1->dest_reg).  */

static rtx
combine_givs_p (g1, g2)
     struct induction *g1, *g2;
{
  rtx tem = express_from (g1, g2);

  /* If these givs are identical, they can be combined.  We use the results
     of express_from because the addends are not in a canonical form, so
     rtx_equal_p is a weaker test.  */
  if (tem == const0_rtx)
    {
      return g1->dest_reg;
    }

  /* If G2 can be expressed as a function of G1 and that function is valid
     as an address and no more expensive than using a register for G2,
     the expression of G2 in terms of G1 can be used.  */
  if (tem != NULL_RTX
      && g2->giv_type == DEST_ADDR
      && memory_address_p (g2->mem_mode, tem)
      /* ??? Loses, especially with -fforce-addr, where *g2->location
         will always be a register, and so anything more complicated
         gets discarded.  */
#ifdef ADDRESS_COST
      && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
#else
      && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
#endif
      )
    {
      return tem;
    }

  return NULL_RTX;
}
struct combine_givs_stats
{
  int giv_number;
  int total_benefit;
};

static int
cmp_combine_givs_stats (x, y)
     struct combine_givs_stats *x, *y;
{
  int d;
  d = y->total_benefit - x->total_benefit;
  /* Stabilize the sort.  */
  if (!d)
    d = x->giv_number - y->giv_number;

  return d;
}
/* If one of these givs is a DEST_REG that was only used once, by the
   other giv, this is actually a single use.  Return 0 if this is not
   the case, -1 if g1 is the DEST_REG involved, and 1 if it was g2.  */

static int
combine_givs_used_once (g1, g2)
     struct induction *g1, *g2;
{
  if (g1->giv_type == DEST_REG
      && n_times_used[REGNO (g1->dest_reg)] == 1
      && reg_mentioned_p (g1->dest_reg, PATTERN (g2->insn)))
    return -1;

  if (g2->giv_type == DEST_REG
      && n_times_used[REGNO (g2->dest_reg)] == 1
      && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
    return 1;

  return 0;
}
static int
combine_givs_benefit_from (g1, g2)
     struct induction *g1, *g2;
{
  int tmp = combine_givs_used_once (g1, g2);
  if (tmp < 0)
    return 0;
  else if (tmp > 0)
    return g2->benefit - g1->benefit;
  else
    return g2->benefit;
}
/* Check all pairs of givs for iv_class BL and see if any can be combined with
   any other.  If so, point SAME to the giv combined with and set NEW_REG to
   be an expression (in terms of the other giv's DEST_REG) equivalent to the
   giv.  Also, update BENEFIT and related fields for cost/benefit analysis.  */

static void
combine_givs (bl)
     struct iv_class *bl;
{
  struct induction *g1, *g2, **giv_array;
  int i, j, k, giv_count;
  struct combine_givs_stats *stats;
  rtx *can_combine;

  /* Count givs, because bl->giv_count is incorrect here.  */
  giv_count = 0;
  for (g1 = bl->giv; g1; g1 = g1->next_iv)
    if (!g1->ignore)
      giv_count++;

  giv_array
    = (struct induction **) alloca (giv_count * sizeof (struct induction *));
  i = 0;
  for (g1 = bl->giv; g1; g1 = g1->next_iv)
    if (!g1->ignore)
      giv_array[i++] = g1;

  stats = (struct combine_givs_stats *) alloca (giv_count * sizeof (*stats));
  bzero ((char *) stats, giv_count * sizeof (*stats));

  can_combine = (rtx *) alloca (giv_count * giv_count * sizeof(rtx));
  bzero ((char *) can_combine, giv_count * giv_count * sizeof(rtx));

  for (i = 0; i < giv_count; i++)
    {
      int this_benefit;

      g1 = giv_array[i];

      this_benefit = g1->benefit;
      /* Add an additional weight for zero addends.  */
      if (g1->no_const_addval)
        this_benefit += 1;
      for (j = 0; j < giv_count; j++)
        {
          rtx this_combine;

          g2 = giv_array[j];
          if (g1 != g2
              && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
            {
              can_combine[i*giv_count + j] = this_combine;
              this_benefit += combine_givs_benefit_from (g1, g2);
              /* Add an additional weight for being reused more times.  */
              this_benefit += 3;
            }
        }
      stats[i].giv_number = i;
      stats[i].total_benefit = this_benefit;
    }

restart:
  /* Iterate, combining until we can't.  */
  qsort (stats, giv_count, sizeof(*stats), cmp_combine_givs_stats);

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "Sorted combine statistics:\n");
      for (k = 0; k < giv_count; k++)
        {
          g1 = giv_array[stats[k].giv_number];
          if (!g1->combined_with && !g1->same)
            fprintf (loop_dump_stream, " {%d, %d}",
                     INSN_UID (giv_array[stats[k].giv_number]->insn),
                     stats[k].total_benefit);
        }
      putc ('\n', loop_dump_stream);
    }

  for (k = 0; k < giv_count; k++)
    {
      int g1_add_benefit = 0;

      i = stats[k].giv_number;
      g1 = giv_array[i];

      /* If it has already been combined, skip.  */
      if (g1->combined_with || g1->same)
        continue;

      for (j = 0; j < giv_count; j++)
        {
          g2 = giv_array[j];
          if (g1 != g2 && can_combine[i*giv_count + j]
              /* If it has already been combined, skip.  */
              && ! g2->same && ! g2->combined_with)
            {
              int l;

              g2->new_reg = can_combine[i*giv_count + j];
              g2->same = g1;
              g1->combined_with = 1;
              if (!combine_givs_used_once (g1, g2))
                g1->times_used += 1;
              g1->lifetime += g2->lifetime;

              g1_add_benefit += combine_givs_benefit_from (g1, g2);

              /* ??? The new final_[bg]iv_value code does a much better job
                 of finding replaceable giv's, and hence this code may no
                 longer be necessary.  */
              if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
                g1_add_benefit -= copy_cost;

              /* To help optimize the next set of combinations, remove
                 this giv from the benefits of other potential mates.  */
              for (l = 0; l < giv_count; ++l)
                {
                  int m = stats[l].giv_number;
                  if (can_combine[m*giv_count + j])
                    {
                      /* Remove additional weight for being reused.  */
                      stats[l].total_benefit -= 3 +
                        combine_givs_benefit_from (giv_array[m], g2);
                    }
                }

              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         "giv at %d combined with giv at %d\n",
                         INSN_UID (g2->insn), INSN_UID (g1->insn));
            }
        }

      /* To help optimize the next set of combinations, remove
         this giv from the benefits of other potential mates.  */
      if (g1->combined_with)
        {
          for (j = 0; j < giv_count; ++j)
            {
              int m = stats[j].giv_number;
              if (can_combine[m*giv_count + j])
                {
                  /* Remove additional weight for being reused.  */
                  stats[j].total_benefit -= 3 +
                    combine_givs_benefit_from (giv_array[m], g1);
                }
            }

          g1->benefit += g1_add_benefit;

          /* We've finished with this giv, and everything it touched.
             Restart the combination so that proper weights for the
             rest of the givs are properly taken into account.  */
          /* ??? Ideally we would compact the arrays at this point, so
             as to not cover old ground.  But sanely compacting
             can_combine is tricky.  */
          goto restart;
        }
    }
}
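/* Implementation note (illustrative, not part of the original source):
   can_combine is a giv_count x giv_count matrix flattened into a vector,
   so the entry for the ordered pair (i,j) lives at

     can_combine[i*giv_count + j]

   and holds the rtx that lets giv j reuse giv i's reduced register, or 0.
   Walking column j, as the benefit-adjustment loops above do, visits every
   potential mate of giv j.  */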
/* Emit code before INSERT_BEFORE to set REG = B * M + A.  */

static void
emit_iv_add_mult (b, m, a, reg, insert_before)
     rtx b;			/* initial value of basic induction variable */
     rtx m;			/* multiplicative constant */
     rtx a;			/* additive constant */
     rtx reg;			/* destination register */
     rtx insert_before;
{
  rtx seq;
  rtx result;

  /* Prevent unexpected sharing of these rtx.  */
  a = copy_rtx (a);
  b = copy_rtx (b);

  /* Increase the lifetime of any invariants moved further in code.  */
  update_reg_last_use (a, insert_before);
  update_reg_last_use (b, insert_before);
  update_reg_last_use (m, insert_before);

  start_sequence ();
  result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
  if (reg != result)
    emit_move_insn (reg, result);
  seq = gen_sequence ();
  end_sequence ();

  emit_insn_before (seq, insert_before);

  /* It is entirely possible that the expansion created lots of new
     registers.  Iterate over the sequence we just created and
     record them all.  */

  if (GET_CODE (seq) == SEQUENCE)
    {
      int i;
      for (i = 0; i < XVECLEN (seq, 0); ++i)
        {
          rtx set = single_set (XVECEXP (seq, 0, i));
          if (set && GET_CODE (SET_DEST (set)) == REG)
            record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
        }
    }
  else if (GET_CODE (seq) == SET
           && GET_CODE (SET_DEST (seq)) == REG)
    record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
}
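/* Usage sketch (illustrative only): to materialize a reduced giv whose
   value is biv*5 + 16 in a fresh pseudo just before the loop, one would
   write something like

     tem = gen_reg_rtx (mode);
     emit_iv_add_mult (bl->initial_value, GEN_INT (5), GEN_INT (16),
                       tem, loop_start);

   expand_mult_add is free to emit shift/add sequences instead of a real
   multiply when that is cheaper.  */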
/* Test whether A * B can be computed without
   an actual multiply insn.  Value is 1 if so.  */

static int
product_cheap_p (a, b)
     rtx a;
     rtx b;
{
  int i;
  rtx tmp;
  struct obstack *old_rtl_obstack = rtl_obstack;
  char *storage = (char *) obstack_alloc (&temp_obstack, 0);
  int win = 1;

  /* If only one is constant, make it B.  */
  if (GET_CODE (a) == CONST_INT)
    tmp = a, a = b, b = tmp;

  /* If first constant, both constant, so don't need multiply.  */
  if (GET_CODE (a) == CONST_INT)
    return 1;

  /* If second not constant, neither is constant, so would need multiply.  */
  if (GET_CODE (b) != CONST_INT)
    return 0;

  /* One operand is constant, so might not need multiply insn.  Generate the
     code for the multiply and see if a call or multiply, or long sequence
     of insns is generated.  */

  rtl_obstack = &temp_obstack;
  start_sequence ();
  expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
  tmp = gen_sequence ();
  end_sequence ();

  if (GET_CODE (tmp) == SEQUENCE)
    {
      if (XVEC (tmp, 0) == 0)
        win = 1;
      else if (XVECLEN (tmp, 0) > 3)
        win = 0;
      else
        for (i = 0; i < XVECLEN (tmp, 0); i++)
          {
            rtx insn = XVECEXP (tmp, 0, i);

            if (GET_CODE (insn) != INSN
                || (GET_CODE (PATTERN (insn)) == SET
                    && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
                || (GET_CODE (PATTERN (insn)) == PARALLEL
                    && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
                    && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
              {
                win = 0;
                break;
              }
          }
    }
  else if (GET_CODE (tmp) == SET
           && GET_CODE (SET_SRC (tmp)) == MULT)
    win = 0;
  else if (GET_CODE (tmp) == PARALLEL
           && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
           && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
    win = 0;

  /* Free any storage we obtained in generating this multiply and restore rtl
     allocation to its normal obstack.  */
  obstack_free (&temp_obstack, storage);
  rtl_obstack = old_rtl_obstack;

  return win;
}
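/* Illustrative note (not from the original source): multiplying by a
   constant like 5 typically expands to a shift and an add
   (x*5 = (x << 2) + x), a short sequence with no MULT insn, so
   product_cheap_p returns 1; a constant such as 123 usually forces a real
   multiply or a long synthetic sequence, and the function returns 0.  */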
/* Check to see if the loop can be terminated by a "decrement and branch until
   zero" instruction.  If so, add a REG_NONNEG note to the branch insn.
   Also try reversing an increment loop to a decrement loop
   to see if the optimization can be performed.
   Value is nonzero if optimization was performed.  */

/* This is useful even if the architecture doesn't have such an insn,
   because it might change a loop which increments from 0 to n to a loop
   which decrements from n to 0.  A loop that decrements to zero is usually
   faster than one that increments from zero.  */

/* ??? This could be rewritten to use some of the loop unrolling procedures,
   such as approx_final_value, biv_total_increment, loop_iterations, and
   final_[bg]iv_value.  */

static int
check_dbra_loop (loop_end, insn_count, loop_start)
     rtx loop_end;
     int insn_count;
     rtx loop_start;
{
  struct iv_class *bl;
  rtx reg;
  rtx jump_label;
  rtx final_value;
  rtx start_value;
  rtx new_add_val;
  rtx comparison;
  rtx before_comparison;
  rtx p;
  rtx jump;
  rtx first_compare;
  int compare_and_branch;

  /* If last insn is a conditional branch, and the insn before tests a
     register value, try to optimize it.  Otherwise, we can't do anything.  */

  jump = PREV_INSN (loop_end);
  comparison = get_condition_for_loop (jump);
  if (comparison == 0)
    return 0;

  /* Try to compute whether the compare/branch at the loop end is one or
     two instructions.  */
  get_condition (jump, &first_compare);
  if (first_compare == jump)
    compare_and_branch = 1;
  else if (first_compare == prev_nonnote_insn (jump))
    compare_and_branch = 2;
  else
    return 0;

  /* Check all of the bivs to see if the compare uses one of them.
     Skip biv's set more than once because we can't guarantee that
     it will be zero on the last iteration.  Also skip if the biv is
     used between its update and the test insn.  */

  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      if (bl->biv_count == 1
          && bl->biv->dest_reg == XEXP (comparison, 0)
          && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
                                   first_compare))
        break;
    }

  if (! bl)
    return 0;

  /* Look for the case where the basic induction variable is always
     nonnegative, and equals zero on the last iteration.
     In this case, add a reg_note REG_NONNEG, which allows the
     m68k DBRA instruction to be used.  */

  if (((GET_CODE (comparison) == GT
        && GET_CODE (XEXP (comparison, 1)) == CONST_INT
        && INTVAL (XEXP (comparison, 1)) == -1)
       || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
      && GET_CODE (bl->biv->add_val) == CONST_INT
      && INTVAL (bl->biv->add_val) < 0)
    {
      /* Initial value must be greater than 0,
         init_val % -dec_value == 0 to ensure that it equals zero on
         the last iteration */

      if (GET_CODE (bl->initial_value) == CONST_INT
          && INTVAL (bl->initial_value) > 0
          && (INTVAL (bl->initial_value)
              % (-INTVAL (bl->biv->add_val))) == 0)
        {
          /* register always nonnegative, add REG_NOTE to branch */
          REG_NOTES (PREV_INSN (loop_end))
            = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
                                 REG_NOTES (PREV_INSN (loop_end)));
          bl->nonneg = 1;

          return 1;
        }

      /* If the decrement is 1 and the value was tested as >= 0 before
         the loop, then we can safely optimize.  */
      for (p = loop_start; p; p = PREV_INSN (p))
        {
          if (GET_CODE (p) == CODE_LABEL)
            break;
          if (GET_CODE (p) != JUMP_INSN)
            continue;

          before_comparison = get_condition_for_loop (p);
          if (before_comparison
              && XEXP (before_comparison, 0) == bl->biv->dest_reg
              && GET_CODE (before_comparison) == LT
              && XEXP (before_comparison, 1) == const0_rtx
              && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
              && INTVAL (bl->biv->add_val) == -1)
            {
              REG_NOTES (PREV_INSN (loop_end))
                = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
                                     REG_NOTES (PREV_INSN (loop_end)));
              bl->nonneg = 1;

              return 1;
            }
        }
    }
  else if (num_mem_sets <= 1)
    {
      /* Try to change inc to dec, so can apply above optimization.  */
      /* Can do this if:
         all registers modified are induction variables or invariant,
         all memory references have non-overlapping addresses
         (obviously true if only one write)
         allow 2 insns for the compare/jump at the end of the loop.  */
      /* Also, we must avoid any instructions which use both the reversed
         biv and another biv.  Such instructions will fail if the loop is
         reversed.  We meet this condition by requiring that either
         no_use_except_counting is true, or else that there is only
         one biv.  */
      int num_nonfixed_reads = 0;
      /* 1 if the iteration var is used only to count iterations.  */
      int no_use_except_counting = 0;
      /* 1 if the loop has no memory store, or it has a single memory store
         which is reversible.  */
      int reversible_mem_store = 1;

      for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
        if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
          num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));

      if (bl->giv_count == 0
          && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
        {
          rtx bivreg = regno_reg_rtx[bl->regno];

          /* If there are no givs for this biv, and the only exit is the
             fall through at the end of the loop, then
             see if perhaps there are no uses except to count.  */
          no_use_except_counting = 1;
          for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
            if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
              {
                rtx set = single_set (p);

                if (set && GET_CODE (SET_DEST (set)) == REG
                    && REGNO (SET_DEST (set)) == bl->regno)
                  /* An insn that sets the biv is okay.  */
                  ;
                else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
                         || p == prev_nonnote_insn (loop_end))
                  /* Don't bother about the end test.  */
                  ;
                else if (reg_mentioned_p (bivreg, PATTERN (p)))
                  {
                    /* Any other use of the biv is no good.  */
                    no_use_except_counting = 0;
                    break;
                  }
              }
        }

      /* If the loop has a single store, and the destination address is
         invariant, then we can't reverse the loop, because this address
         might then have the wrong value at loop exit.
         This would work if the source was invariant also, however, in that
         case, the insn should have been moved out of the loop.  */

      if (num_mem_sets == 1)
        reversible_mem_store
          = (! unknown_address_altered
             && ! invariant_p (XEXP (loop_store_mems[0], 0)));

      /* This code only acts for innermost loops.  Also it simplifies
         the memory address check by only reversing loops with
         zero or one memory access.
         Two memory accesses could involve parts of the same array,
         and that can't be reversed.  */

      if (num_nonfixed_reads <= 1
          && !loop_has_call
          && !loop_has_volatile
          && reversible_mem_store
          && (no_use_except_counting
              || ((bl->giv_count + bl->biv_count + num_mem_sets
                   + num_movables + compare_and_branch == insn_count)
                  && (bl == loop_iv_list && bl->next == 0))))
        {
          rtx tem;

          /* Loop can be reversed.  */
          if (loop_dump_stream)
            fprintf (loop_dump_stream, "Can reverse loop\n");

          /* Now check other conditions:

             The increment must be a constant, as must the initial value,
             and the comparison code must be LT.

             This test can probably be improved since +/- 1 in the constant
             can be obtained by changing LT to LE and vice versa; this is
             confusing.  */

          if (comparison
              && GET_CODE (XEXP (comparison, 1)) == CONST_INT
              /* LE gets turned into LT */
              && GET_CODE (comparison) == LT
              && GET_CODE (bl->initial_value) == CONST_INT)
            {
              HOST_WIDE_INT add_val, comparison_val;
              rtx initial_value;

              add_val = INTVAL (bl->biv->add_val);
              comparison_val = INTVAL (XEXP (comparison, 1));
              final_value = XEXP (comparison, 1);
              initial_value = bl->initial_value;

              /* Normalize the initial value if it is an integer and
                 has no other use except as a counter.  This will allow
                 a few more loops to be reversed.  */
              if (no_use_except_counting
                  && GET_CODE (initial_value) == CONST_INT)
                {
                  comparison_val = comparison_val - INTVAL (bl->initial_value);
                  /* Check for overflow.  If comparison_val ends up as a
                     negative value, then we can't reverse the loop.  */
                  if (comparison_val >= 0)
                    initial_value = const0_rtx;
                }

              /* If the initial value is not zero, or if the comparison
                 value is not an exact multiple of the increment, then we
                 can not reverse this loop.  */
              if (initial_value != const0_rtx
                  || (comparison_val % add_val) != 0)
                return 0;

              /* Reset these in case we normalized the initial value
                 and comparison value above.  */
              bl->initial_value = initial_value;
              XEXP (comparison, 1) = GEN_INT (comparison_val);

              /* Register will always be nonnegative, with value
                 0 on last iteration if loop reversed */

              /* Save some info needed to produce the new insns.  */
              reg = bl->biv->dest_reg;
              jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
              if (jump_label == pc_rtx)
                jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
              new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));

              start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
                                     - INTVAL (bl->biv->add_val));

              /* Initialize biv to start_value before loop start.
                 The old initializing insn will be deleted as a
                 dead store by flow.c.  */
              emit_insn_before (gen_move_insn (reg, start_value), loop_start);

              /* Add insn to decrement register, and delete insn
                 that incremented the register.  */
              p = emit_insn_before (gen_add2_insn (reg, new_add_val),
                                    bl->biv->insn);
              delete_insn (bl->biv->insn);

              /* Update biv info to reflect its new status.  */
              bl->biv->insn = p;
              bl->initial_value = start_value;
              bl->biv->add_val = new_add_val;

              /* Inc LABEL_NUSES so that delete_insn will
                 not delete the label.  */
              LABEL_NUSES (XEXP (jump_label, 0)) ++;

              /* Emit an insn after the end of the loop to set the biv's
                 proper exit value if it is used anywhere outside the loop.  */
              if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
                  || ! bl->init_insn
                  || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
                emit_insn_after (gen_move_insn (reg, final_value),
                                 loop_end);

              /* Delete compare/branch at end of loop.  */
              delete_insn (PREV_INSN (loop_end));
              if (compare_and_branch == 2)
                delete_insn (first_compare);

              /* Add new compare/branch insn at end of loop.  */
              start_sequence ();
              emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
                             GET_MODE (reg), 0, 0);
              emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
              tem = gen_sequence ();
              end_sequence ();
              emit_jump_insn_before (tem, loop_end);

              for (tem = PREV_INSN (loop_end);
                   tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
                ;
              if (tem)
                {
                  JUMP_LABEL (tem) = XEXP (jump_label, 0);

                  /* Increment of LABEL_NUSES done above.  */
                  /* Register is now always nonnegative,
                     so add REG_NONNEG note to the branch.  */
                  REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
                                                       REG_NOTES (tem));
                }

              bl->nonneg = 1;

              /* Mark that this biv has been reversed.  Each giv which depends
                 on this biv, and which is also live past the end of the loop
                 will have to be fixed up.  */

              bl->reversed = 1;

              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         "Reversed loop and added reg_nonneg\n");

              return 1;
            }
        }
    }

  return 0;
}
/* Verify whether the biv BL appears to be eliminable,
   based on the insns in the loop that refer to it.
   LOOP_START is the first insn of the loop, and END is the end insn.

   If ELIMINATE_P is non-zero, actually do the elimination.

   THRESHOLD and INSN_COUNT are from loop_optimize and are used to
   determine whether invariant insns should be placed inside or at the
   start of the loop.  */

static int
maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
     struct iv_class *bl;
     rtx loop_start;
     rtx end;
     int eliminate_p;
     int threshold, insn_count;
{
  rtx reg = bl->biv->dest_reg;
  rtx p;

  /* Scan all insns in the loop, stopping if we find one that uses the
     biv in a way that we cannot eliminate.  */

  for (p = loop_start; p != end; p = NEXT_INSN (p))
    {
      enum rtx_code code = GET_CODE (p);
      rtx where = threshold >= insn_count ? loop_start : p;

      if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
          && reg_mentioned_p (reg, PATTERN (p))
          && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
        {
          if (loop_dump_stream)
            fprintf (loop_dump_stream,
                     "Cannot eliminate biv %d: biv used in insn %d.\n",
                     bl->regno, INSN_UID (p));
          break;
        }
    }

  if (p == end)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
                 bl->regno, eliminate_p ? "was" : "can be");
      return 1;
    }

  return 0;
}
/* If BL appears in X (part of the pattern of INSN), see if we can
   eliminate its use.  If so, return 1.  If not, return 0.

   If BIV does not appear in X, return 1.

   If ELIMINATE_P is non-zero, actually do the elimination.  WHERE indicates
   where extra insns should be added.  Depending on how many items have been
   moved out of the loop, it will either be before INSN or at the start of
   the loop.  */

static int
maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
     rtx x, insn;
     struct iv_class *bl;
     int eliminate_p;
     rtx where;
{
  enum rtx_code code = GET_CODE (x);
  rtx reg = bl->biv->dest_reg;
  enum machine_mode mode = GET_MODE (reg);
  struct induction *v;
  rtx arg, tem;
#ifdef HAVE_cc0
  rtx new;
#endif
  int arg_operand;
  char *fmt;
  int i, j;

  switch (code)
    {
    case REG:
      /* If we haven't already been able to do something with this BIV,
         we can't eliminate it.  */
      if (x == reg)
        return 0;
      return 1;

    case SET:
      /* If this sets the BIV, it is not a problem.  */
      if (SET_DEST (x) == reg)
        return 1;

      /* If this is an insn that defines a giv, it is also ok because
         it will go away when the giv is reduced.  */
      for (v = bl->giv; v; v = v->next_iv)
        if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
          return 1;

#ifdef HAVE_cc0
      if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
        {
          /* Can replace with any giv that was reduced and
             that has (MULT_VAL != 0) and (ADD_VAL == 0).
             Require a constant for MULT_VAL, so we know it's nonzero.
             ??? We disable this optimization to avoid potential
             overflows.  */

          for (v = bl->giv; v; v = v->next_iv)
            if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
                && v->add_val == const0_rtx
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode
                && 0)
              {
                /* If the giv V had the auto-inc address optimization applied
                   to it, and INSN occurs between the giv insn and the biv
                   insn, then we must adjust the value used here.
                   This is rare, so we don't bother to do so.  */
                if (v->auto_inc_opt
                    && ((INSN_LUID (v->insn) < INSN_LUID (insn)
                         && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
                        || (INSN_LUID (v->insn) > INSN_LUID (insn)
                            && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
                  continue;

                if (! eliminate_p)
                  return 1;

                /* If the giv has the opposite direction of change,
                   then reverse the comparison.  */
                if (INTVAL (v->mult_val) < 0)
                  new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
                                         const0_rtx, v->new_reg);
                else
                  new = v->new_reg;

                /* We can probably test that giv's reduced reg.  */
                if (validate_change (insn, &SET_SRC (x), new, 0))
                  return 1;
              }

          /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
             replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
             Require a constant for MULT_VAL, so we know it's nonzero.
             ??? Do this only if ADD_VAL is a pointer to avoid a potential
             overflow problem.  */

          for (v = bl->giv; v; v = v->next_iv)
            if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode
                && (GET_CODE (v->add_val) == SYMBOL_REF
                    || GET_CODE (v->add_val) == LABEL_REF
                    || GET_CODE (v->add_val) == CONST
                    || (GET_CODE (v->add_val) == REG
                        && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
              {
                /* If the giv V had the auto-inc address optimization applied
                   to it, and INSN occurs between the giv insn and the biv
                   insn, then we must adjust the value used here.
                   This is rare, so we don't bother to do so.  */
                if (v->auto_inc_opt
                    && ((INSN_LUID (v->insn) < INSN_LUID (insn)
                         && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
                        || (INSN_LUID (v->insn) > INSN_LUID (insn)
                            && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
                  continue;

                if (! eliminate_p)
                  return 1;

                /* If the giv has the opposite direction of change,
                   then reverse the comparison.  */
                if (INTVAL (v->mult_val) < 0)
                  new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
                                         v->new_reg);
                else
                  new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
                                         copy_rtx (v->add_val));

                /* Replace biv with the giv's reduced register.  */
                update_reg_last_use (v->add_val, insn);
                if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
                  return 1;

                /* Insn doesn't support that constant or invariant.  Copy it
                   into a register (it will be a loop invariant.)  */
                tem = gen_reg_rtx (GET_MODE (v->new_reg));

                emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
                                  where);

                /* Substitute the new register for its invariant value in
                   the compare expression.  */
                XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
                if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
                  return 1;
              }
        }
#endif
      break;

    case COMPARE:
    case EQ:  case NE:
    case GT:  case GE:  case GTU:  case GEU:
    case LT:  case LE:  case LTU:  case LEU:
      /* See if either argument is the biv.  */
      if (XEXP (x, 0) == reg)
        arg = XEXP (x, 1), arg_operand = 1;
      else if (XEXP (x, 1) == reg)
        arg = XEXP (x, 0), arg_operand = 0;
      else
        break;

      if (CONSTANT_P (arg))
        {
          /* First try to replace with any giv that has constant positive
             mult_val and constant add_val.  We might be able to support
             negative mult_val, but it seems complex to do it in general.  */

          for (v = bl->giv; v; v = v->next_iv)
            if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
                && (GET_CODE (v->add_val) == SYMBOL_REF
                    || GET_CODE (v->add_val) == LABEL_REF
                    || GET_CODE (v->add_val) == CONST
                    || (GET_CODE (v->add_val) == REG
                        && REGNO_POINTER_FLAG (REGNO (v->add_val))))
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode)
              {
                /* If the giv V had the auto-inc address optimization applied
                   to it, and INSN occurs between the giv insn and the biv
                   insn, then we must adjust the value used here.
                   This is rare, so we don't bother to do so.  */
                if (v->auto_inc_opt
                    && ((INSN_LUID (v->insn) < INSN_LUID (insn)
                         && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
                        || (INSN_LUID (v->insn) > INSN_LUID (insn)
                            && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
                  continue;

                if (! eliminate_p)
                  return 1;

                /* Replace biv with the giv's reduced reg.  */
                XEXP (x, 1-arg_operand) = v->new_reg;

                /* If all constants are actually constant integers and
                   the derived constant can be directly placed in the COMPARE,
                   do so.  */
                if (GET_CODE (arg) == CONST_INT
                    && GET_CODE (v->mult_val) == CONST_INT
                    && GET_CODE (v->add_val) == CONST_INT
                    && validate_change (insn, &XEXP (x, arg_operand),
                                        GEN_INT (INTVAL (arg)
                                                 * INTVAL (v->mult_val)
                                                 + INTVAL (v->add_val)), 0))
                  return 1;

                /* Otherwise, load it into a register.  */
                tem = gen_reg_rtx (mode);
                emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
                if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
                  return 1;

                /* If that failed, put back the change we made above.  */
                XEXP (x, 1-arg_operand) = reg;
              }

          /* Look for giv with positive constant mult_val and nonconst add_val.
             Insert insns to calculate new compare value.
             ??? Turn this off due to possible overflow.  */

          for (v = bl->giv; v; v = v->next_iv)
            if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode
                && 0)
              {
                /* If the giv V had the auto-inc address optimization applied
                   to it, and INSN occurs between the giv insn and the biv
                   insn, then we must adjust the value used here.
                   This is rare, so we don't bother to do so.  */
                if (v->auto_inc_opt
                    && ((INSN_LUID (v->insn) < INSN_LUID (insn)
                         && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
                        || (INSN_LUID (v->insn) > INSN_LUID (insn)
                            && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
                  continue;

                if (! eliminate_p)
                  return 1;

                tem = gen_reg_rtx (mode);

                /* Replace biv with giv's reduced register.  */
                validate_change (insn, &XEXP (x, 1 - arg_operand),
                                 v->new_reg, 1);

                /* Compute value to compare against.  */
                emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
                /* Use it in this insn.  */
                validate_change (insn, &XEXP (x, arg_operand), tem, 1);
                if (apply_change_group ())
                  return 1;
              }
        }
      else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
        {
          if (invariant_p (arg) == 1)
            {
              /* Look for giv with constant positive mult_val and nonconst
                 add_val.  Insert insns to compute new compare value.
                 ??? Turn this off due to possible overflow.  */

              for (v = bl->giv; v; v = v->next_iv)
                if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
                    && ! v->ignore && ! v->maybe_dead && v->always_computable
                    && v->mode == mode
                    && 0)
                  {
                    /* If the giv V had the auto-inc address optimization applied
                       to it, and INSN occurs between the giv insn and the biv
                       insn, then we must adjust the value used here.
                       This is rare, so we don't bother to do so.  */
                    if (v->auto_inc_opt
                        && ((INSN_LUID (v->insn) < INSN_LUID (insn)
                             && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
                            || (INSN_LUID (v->insn) > INSN_LUID (insn)
                                && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
                      continue;

                    if (! eliminate_p)
                      return 1;

                    tem = gen_reg_rtx (mode);

                    /* Replace biv with giv's reduced register.  */
                    validate_change (insn, &XEXP (x, 1 - arg_operand),
                                     v->new_reg, 1);

                    /* Compute value to compare against.  */
                    emit_iv_add_mult (arg, v->mult_val, v->add_val,
                                      tem, where);
                    validate_change (insn, &XEXP (x, arg_operand), tem, 1);
                    if (apply_change_group ())
                      return 1;
                  }
            }

          /* This code has problems.  Basically, you can't know when
             seeing if we will eliminate BL, whether a particular giv
             of ARG will be reduced.  If it isn't going to be reduced,
             we can't eliminate BL.  We can try forcing it to be reduced,
             but that can generate poor code.

             The problem is that the benefit of reducing TV, below should
             be increased if BL can actually be eliminated, but this means
             we might have to do a topological sort of the order in which
             we try to process biv.  It doesn't seem worthwhile to do
             this sort of thing now.  */

#if 0
          /* Otherwise the reg compared with had better be a biv.  */
          if (GET_CODE (arg) != REG
              || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
            return 0;

          /* Look for a pair of givs, one for each biv,
             with identical coefficients.  */
          for (v = bl->giv; v; v = v->next_iv)
            {
              struct induction *tv;

              if (v->ignore || v->maybe_dead || v->mode != mode)
                continue;

              for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
                if (! tv->ignore && ! tv->maybe_dead
                    && rtx_equal_p (tv->mult_val, v->mult_val)
                    && rtx_equal_p (tv->add_val, v->add_val)
                    && tv->mode == mode)
                  {
                    /* If the giv V had the auto-inc address optimization applied
                       to it, and INSN occurs between the giv insn and the biv
                       insn, then we must adjust the value used here.
                       This is rare, so we don't bother to do so.  */
                    if (v->auto_inc_opt
                        && ((INSN_LUID (v->insn) < INSN_LUID (insn)
                             && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
                            || (INSN_LUID (v->insn) > INSN_LUID (insn)
                                && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
                      continue;

                    if (! eliminate_p)
                      return 1;

                    /* Replace biv with its giv's reduced reg.  */
                    XEXP (x, 1-arg_operand) = v->new_reg;
                    /* Replace other operand with the other giv's
                       reduced reg.  */
                    XEXP (x, arg_operand) = tv->new_reg;
                    return 1;
                  }
            }
#endif
        }

      /* If we get here, the biv can't be eliminated.  */
      return 0;

    case MEM:
      /* If this address is a DEST_ADDR giv, it doesn't matter if the
         biv is used in it, since it will be replaced.  */
      for (v = bl->giv; v; v = v->next_iv)
        if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
          return 1;
      break;

    default:
      break;
    }

  /* See if any subexpression fails elimination.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'e':
          if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
                                       eliminate_p, where))
            return 0;
          break;

        case 'E':
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
                                         eliminate_p, where))
              return 0;
          break;
        }
    }

  return 1;
}
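/* Worked example (illustrative, not from the original source): if the loop
   test is (lt biv (const_int 16)) and a reduced giv g = 4*biv + 8 exists,
   the biv operand is replaced by g's reduced register and the constant is
   rewritten to 4*16 + 8 = 72, giving (lt g 72); the comparison then no
   longer keeps the biv alive.  */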
/* Return nonzero if the last use of REG
   is in an insn following INSN in the same basic block.  */

static int
last_use_this_basic_block (reg, insn)
     rtx reg;
     rtx insn;
{
  rtx n;
  for (n = insn;
       n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
       n = NEXT_INSN (n))
    {
      if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
        return 1;
    }
  return 0;
}
/* Called via `note_stores' to record the initial value of a biv.  Here we
   just record the location of the set and process it later.  */

static void
record_initial (dest, set)
     rtx dest;
     rtx set;
{
  struct iv_class *bl;

  if (GET_CODE (dest) != REG
      || REGNO (dest) >= max_reg_before_loop
      || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
    return;

  bl = reg_biv_class[REGNO (dest)];

  /* If this is the first set found, record it.  */
  if (bl->init_insn == 0)
    {
      bl->init_insn = note_insn;
      bl->init_set = set;
    }
}
/* If any of the registers in X are "old" and currently have a last use earlier
   than INSN, update them to have a last use of INSN.  Their actual last use
   will be the previous insn but it will not have a valid uid_luid so we can't
   use it.  */

static void
update_reg_last_use (x, insn)
     rtx x;
     rtx insn;
{
  /* Check for the case where INSN does not have a valid luid.  In this case,
     there is no need to modify the regno_last_uid, as this can only happen
     when code is inserted after the loop_end to set a pseudo's final value,
     and hence this insn will never be the last use of x.  */
  if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
      && INSN_UID (insn) < max_uid_for_loop
      && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
    REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
  else
    {
      register int i, j;
      register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
        {
          if (fmt[i] == 'e')
            update_reg_last_use (XEXP (x, i), insn);
          else if (fmt[i] == 'E')
            for (j = XVECLEN (x, i) - 1; j >= 0; j--)
              update_reg_last_use (XVECEXP (x, i, j), insn);
        }
    }
}
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is non-zero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   The condition will be returned in a canonical form to simplify testing by
   callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.  */

rtx
get_condition (jump, earliest)
     rtx jump;
     rtx *earliest;
{
  enum rtx_code code;
  rtx prev = jump;
  rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  int did_reverse_condition = 0;
  enum machine_mode mode;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (GET_CODE (jump) != JUMP_INSN
      || ! condjump_p (jump) || simplejump_p (jump))
    return 0;

  code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
  mode = GET_MODE (XEXP (SET_SRC (PATTERN (jump)), 0));
  op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
  op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);

  if (earliest)
    *earliest = jump;

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
    code = reverse_condition (code), did_reverse_condition ^= 1;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
    {
      /* Set non-zero when we find something of interest.  */
      rtx x = 0;

#ifdef HAVE_cc0
      /* If comparison with cc0, import actual comparison from compare
         insn.  */
      if (op0 == cc0_rtx)
        {
          if ((prev = prev_nonnote_insn (prev)) == 0
              || GET_CODE (prev) != INSN
              || (set = single_set (prev)) == 0
              || SET_DEST (set) != cc0_rtx)
            return 0;

          op0 = SET_SRC (set);
          op1 = CONST0_RTX (GET_MODE (op0));
          if (earliest)
            *earliest = prev;
        }
#endif

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
        {
          op1 = XEXP (op0, 1);
          op0 = XEXP (op0, 0);
          continue;
        }
      else if (GET_CODE (op0) != REG)
        break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
         stop if it isn't a single set or if it has a REG_INC note because
         we don't want to bother dealing with it.  */

      if ((prev = prev_nonnote_insn (prev)) == 0
          || GET_CODE (prev) != INSN
          || FIND_REG_INC_NOTE (prev, 0)
          || (set = single_set (prev)) == 0)
        break;

      /* If this is setting OP0, get what it sets it to if it looks
         relevant.  */
      if (rtx_equal_p (SET_DEST (set), op0))
        {
          enum machine_mode inner_mode = GET_MODE (SET_SRC (set));

          /* ??? We may not combine comparisons done in a CCmode with
             comparisons not done in a CCmode.  This is to aid targets
             like Alpha that have an IEEE compliant EQ instruction, and
             a non-IEEE compliant BEQ instruction.  The use of CCmode is
             actually artificial, simply to prevent the combination, but
             should not affect other platforms.  */

          if ((GET_CODE (SET_SRC (set)) == COMPARE
               || (((code == NE
                     || (code == LT
                         && GET_MODE_CLASS (inner_mode) == MODE_INT
                         && (GET_MODE_BITSIZE (inner_mode)
                             <= HOST_BITS_PER_WIDE_INT)
                         && (STORE_FLAG_VALUE
                             & ((HOST_WIDE_INT) 1
                                << (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
                     || (code == LT
                         && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
                         && FLOAT_STORE_FLAG_VALUE < 0)
#endif
                     ))
                   && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
              && ((GET_MODE_CLASS (mode) == MODE_CC)
                  != (GET_MODE_CLASS (inner_mode) == MODE_CC)))
            x = SET_SRC (set);
          else if (((code == EQ
                     || (code == GE
                         && (GET_MODE_BITSIZE (inner_mode)
                             <= HOST_BITS_PER_WIDE_INT)
                         && GET_MODE_CLASS (inner_mode) == MODE_INT
                         && (STORE_FLAG_VALUE
                             & ((HOST_WIDE_INT) 1
                                << (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
                     || (code == GE
                         && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
                         && FLOAT_STORE_FLAG_VALUE < 0)
#endif
                     )
                    && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
                    && ((GET_MODE_CLASS (mode) == MODE_CC)
                        != (GET_MODE_CLASS (inner_mode) == MODE_CC))))
            {
              /* We might have reversed a LT to get a GE here.  But this wasn't
                 actually the comparison of data, so we don't flag that we
                 have had to reverse the condition.  */
              did_reverse_condition ^= 1;
              reverse_code = 1;
              x = SET_SRC (set);
            }
          else
            break;
        }

      else if (reg_set_p (op0, prev))
        /* If this sets OP0, but not directly, we have to give up.  */
        break;

      if (x)
        {
          if (GET_RTX_CLASS (GET_CODE (x)) == '<')
            code = GET_CODE (x);
          if (reverse_code)
            {
              code = reverse_condition (code);
              did_reverse_condition ^= 1;
              reverse_code = 0;
            }

          op0 = XEXP (x, 0), op1 = XEXP (x, 1);
          if (earliest)
            *earliest = prev;
        }
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
        = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
        {
        case LE:
          if (const_val != max_val >> 1)
            code = LT, op1 = GEN_INT (const_val + 1);
          break;

        case GE:
          /* When cross-compiling, const_val might be sign-extended from
             BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
          if ((const_val & max_val)
              != (((HOST_WIDE_INT) 1
                   << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
            code = GT, op1 = GEN_INT (const_val - 1);
          break;

        case LEU:
          if (uconst_val < max_val)
            code = LTU, op1 = GEN_INT (uconst_val + 1);
          break;

        case GEU:
          if (uconst_val != 0)
            code = GTU, op1 = GEN_INT (uconst_val - 1);
          break;

        default:
          break;
        }
    }

  /* If this was floating-point and we reversed anything other than an
     EQ or NE, return zero.  */
  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
      && did_reverse_condition && code != NE && code != EQ
      && ! flag_fast_math
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    return 0;

#ifdef HAVE_cc0
  /* Never return CC0; return zero instead.  */
  if (op0 == cc0_rtx)
    return 0;
#endif

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
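/* Illustrative examples of the canonical form (not part of the original
   source): a branch taken when i <= 4 (SImode) comes back as (lt i 5) by
   rule (4); (geu i 1) becomes (gtu i 0); and (le (const_int 3) i), with
   the constant first, is swapped to (ge i 3) and then canonicalized to
   (gt i 2).  */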
/* Similar to above routine, except that we also put an invariant last
   unless both operands are invariants.  */

rtx
get_condition_for_loop (x)
     rtx x;
{
  rtx comparison = get_condition (x, NULL_PTR);

  if (comparison == 0
      || ! invariant_p (XEXP (comparison, 0))
      || invariant_p (XEXP (comparison, 1)))
    return comparison;

  return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
                         XEXP (comparison, 1), XEXP (comparison, 0));
}
/* Analyze a loop in order to instrument it with the use of count register.
   loop_start and loop_end are the first and last insns of the loop.
   This function works in cooperation with insert_bct ().
   loop_can_insert_bct[loop_num] is set according to whether the optimization
   is applicable to the loop.  When it is applicable, the following variables
   are also set:
    loop_start_value[loop_num]
    loop_comparison_value[loop_num]
    loop_increment[loop_num]
    loop_comparison_code[loop_num]  */

#ifdef HAVE_decrement_and_branch_on_count
static void
analyze_loop_iterations (loop_start, loop_end)
     rtx loop_start, loop_end;
{
  rtx comparison, comparison_value;
  rtx iteration_var, initial_value, increment;
  enum rtx_code comparison_code;

  rtx last_loop_insn;
  rtx insn;
  int i;

  /* loop_variable mode */
  enum machine_mode original_mode;

  /* find the number of the loop */
  int loop_num = uid_loop_num [INSN_UID (loop_start)];

  /* we change our mind only when we are sure that loop will be instrumented */
  loop_can_insert_bct[loop_num] = 0;

  /* is the optimization suppressed?  */
  if ( !flag_branch_on_count_reg )
    return;

  /* make sure that count-reg is not in use */
  if (loop_used_count_register[loop_num]){
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
               "analyze_loop_iterations %d: BCT instrumentation failed: count register already in use\n",
               loop_num);
    return;
  }

  /* make sure that the function has no indirect jumps.  */
  if (indirect_jump_in_function){
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
               "analyze_loop_iterations %d: BCT instrumentation failed: indirect jump in function\n",
               loop_num);
    return;
  }

  /* make sure that the last loop insn is a conditional jump */
  last_loop_insn = PREV_INSN (loop_end);
  if (GET_CODE (last_loop_insn) != JUMP_INSN || !condjump_p (last_loop_insn)) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
               "analyze_loop_iterations %d: BCT instrumentation failed: invalid jump at loop end\n",
               loop_num);
    return;
  }

  /* First find the iteration variable.  If the last insn is a conditional
     branch, and the insn preceding it tests a register value, make that
     register the iteration variable.  */

  /* We used to use prev_nonnote_insn here, but that fails because it might
     accidentally get the branch for a contained loop if the branch for this
     loop was deleted.  We can only trust branches immediately before the
     loop_end.  */

  comparison = get_condition_for_loop (last_loop_insn);
  /* ??? Get_condition may switch position of induction variable and
     invariant register when it canonicalizes the comparison.  */
  if (comparison == 0) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
               "analyze_loop_iterations %d: BCT instrumentation failed: comparison not found\n",
               loop_num);
    return;
  }

  comparison_code = GET_CODE (comparison);
  iteration_var = XEXP (comparison, 0);
  comparison_value = XEXP (comparison, 1);

  original_mode = GET_MODE (iteration_var);
  if (GET_MODE_CLASS (original_mode) != MODE_INT
      || GET_MODE_SIZE (original_mode) != UNITS_PER_WORD) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
               "analyze_loop_iterations %d: BCT instrumentation failed: loop variable not integer\n",
               loop_num);
    return;
  }

  /* get info about loop bounds and increment */
  iteration_info (iteration_var, &initial_value, &increment,
                  loop_start, loop_end);

  /* make sure that all required loop data were found */
  if (!(initial_value && increment && comparison_value
        && invariant_p (comparison_value) && invariant_p (increment)
        && ! indirect_jump_in_function))
    {
      if (loop_dump_stream) {
        fprintf (loop_dump_stream,
                 "analyze_loop_iterations %d: BCT instrumentation failed because of wrong loop: ",
                 loop_num);
        if (!(initial_value && increment && comparison_value)) {
          fprintf (loop_dump_stream, "\tbounds not available: ");
          if ( ! initial_value )
            fprintf (loop_dump_stream, "initial ");
          if ( ! increment )
            fprintf (loop_dump_stream, "increment ");
          if ( ! comparison_value )
            fprintf (loop_dump_stream, "comparison ");
          fprintf (loop_dump_stream, "\n");
        }
        if (!invariant_p (comparison_value) || !invariant_p (increment))
          fprintf (loop_dump_stream, "\tloop bounds not invariant\n");
      }
      return;
    }

  /* make sure that the increment is constant */
  if (GET_CODE (increment) != CONST_INT) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
               "analyze_loop_iterations %d: instrumentation failed: not arithmetic loop\n",
               loop_num);
    return;
  }

  /* make sure that the loop contains neither function call, nor jump on table.
     (the count register might be altered by the called function, and might
     be used for a branch on table).  */
  for (insn = loop_start; insn && insn != loop_end; insn = NEXT_INSN (insn)) {
    if (GET_CODE (insn) == CALL_INSN){
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "analyze_loop_iterations %d: BCT instrumentation failed: function call in the loop\n",
                 loop_num);
      return;
    }

    if (GET_CODE (insn) == JUMP_INSN
        && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
            || GET_CODE (PATTERN (insn)) == ADDR_VEC)){
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "analyze_loop_iterations %d: BCT instrumentation failed: computed branch in the loop\n",
                 loop_num);
      return;
    }
  }

  /* At this point, we are sure that the loop can be instrumented with BCT.
     Some of the loops, however, will not be instrumented - the final decision
     is taken by insert_bct ().  */
  if (loop_dump_stream)
    fprintf (loop_dump_stream,
             "analyze_loop_iterations: loop %d can be BCT instrumented.\n",
             loop_num);

  /* mark all enclosing loops that they cannot use count register */
  /* ??? In fact, since insert_bct may decide not to instrument this loop,
     marking here may prevent instrumenting an enclosing loop that could
     actually be instrumented.  But since this is rare, it is safer to mark
     here in case the order of calling (analyze/insert)_bct would be changed.  */
  for (i = loop_num; i != -1; i = loop_outer_loop[i])
    loop_used_count_register[i] = 1;

  /* Set data structures which will be used by the instrumentation phase.  */
  loop_start_value[loop_num] = initial_value;
  loop_comparison_value[loop_num] = comparison_value;
  loop_increment[loop_num] = increment;
  loop_comparison_code[loop_num] = comparison_code;
  loop_can_insert_bct[loop_num] = 1;
}
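/* Illustrative note (not part of the original source): a loop of the shape

     for (i = 0; i < n; i += 4)  ...no calls, no dispatch tables...

   passes every check above (word-sized integer variable, constant
   increment, invariant bound), so it is marked as a BCT candidate; a body
   containing a CALL_INSN is rejected because the callee might clobber the
   count register.  */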
7964 /* instrument loop for insertion of bct instruction. We distinguish between
7965 loops with compile-time bounds, to those with run-time bounds. The loop
7966 behaviour is analized according to the following characteristics/variables:
7968 ; comparison-value: the value to which the iteration counter is compared.
7969 ; initial-value: iteration-counter initial value.
7970 ; increment: iteration-counter increment.
7971 ; Computed variables:
7972 ; increment-direction: the sign of the increment.
7973 ; compare-direction: '1' for GT, GTE, '-1' for LT, LTE, '0' for NE.
7974 ; range-direction: sign (comparison-value - initial-value)
7975 We give up on the following cases:
7976 ; loop variable overflow.
7977 ; run-time loop bounds with comparison code NE.
7981 insert_bct (loop_start
, loop_end
)
7982 rtx loop_start
, loop_end
;
7984 rtx initial_value
, comparison_value
, increment
;
7985 enum rtx_code comparison_code
;
7987 int increment_direction
, compare_direction
;
7990 /* if the loop condition is <= or >=, the number of iteration
7991 is 1 more than the range of the bounds of the loop */
7992 int add_iteration
= 0;
7994 /* the only machine mode we work with - is the integer of the size that the
7996 enum machine_mode loop_var_mode
= SImode
;
7998 int loop_num
= uid_loop_num
[INSN_UID (loop_start
)];
8000 /* get loop-variables. No need to check that these are valid - already
8001 checked in analyze_loop_iterations (). */
8002 comparison_code
= loop_comparison_code
[loop_num
];
8003 initial_value
= loop_start_value
[loop_num
];
8004 comparison_value
= loop_comparison_value
[loop_num
];
8005 increment
= loop_increment
[loop_num
];
8007 /* check analyze_loop_iterations decision for this loop. */
8008 if (! loop_can_insert_bct
[loop_num
]){
8009 if (loop_dump_stream
)
8010 fprintf (loop_dump_stream
,
8011 "insert_bct: [%d] - was decided not to instrument by analyze_loop_iterations ()\n",
8016 /* It's impossible to instrument a competely unrolled loop. */
8017 if (loop_unroll_factor
[loop_num
] == -1)
8020 /* make sure that the last loop insn is a conditional jump .
8021 This check is repeated from analyze_loop_iterations (),
8022 because unrolling might have changed that. */
8023 if (GET_CODE (PREV_INSN (loop_end
)) != JUMP_INSN
8024 || !condjump_p (PREV_INSN (loop_end
))) {
8025 if (loop_dump_stream
)
8026 fprintf (loop_dump_stream
,
8027 "insert_bct: not instrumenting BCT because of invalid branch\n");
8031 /* fix increment in case loop was unrolled. */
8032 if (loop_unroll_factor
[loop_num
] > 1)
8033 increment
= GEN_INT ( INTVAL (increment
) * loop_unroll_factor
[loop_num
] );
8035 /* determine properties and directions of the loop */
8036 increment_direction
= (INTVAL (increment
) > 0) ? 1:-1;
8037 switch ( comparison_code
) {
8042 compare_direction
= 1;
8049 compare_direction
= -1;
8053 /* in this case we cannot know the number of iterations */
8054 if (loop_dump_stream
)
8055 fprintf (loop_dump_stream
,
8056 "insert_bct: %d: loop cannot be instrumented: == in condition\n",
8063 compare_direction
= 1;
8069 compare_direction
= -1;
8072 compare_direction
= 0;
8079 /* make sure that the loop does not end by an overflow */
8080 if (compare_direction
!= increment_direction
) {
8081 if (loop_dump_stream
)
8082 fprintf (loop_dump_stream
,
8083 "insert_bct: %d: loop cannot be instrumented: terminated by overflow\n",

  /* Try to instrument the loop.  */

  /* Handle the simpler case, where the bounds are known at compile time.  */
  if (GET_CODE (initial_value) == CONST_INT
      && GET_CODE (comparison_value) == CONST_INT)
    {
      int n_iterations;
      int increment_value_abs = INTVAL (increment) * increment_direction;

      /* Check the relation between compare-val and initial-val.  */
      int difference = INTVAL (comparison_value) - INTVAL (initial_value);
      int range_direction = (difference > 0) ? 1 : -1;

      /* Make sure the loop executes enough iterations to gain from BCT.  */
      if (difference > -3 && difference < 3) {
	if (loop_dump_stream)
	  fprintf (loop_dump_stream,
		   "insert_bct: loop %d not BCT instrumented: too small iteration count.\n",
		   loop_num);
	return;
      }

      /* Make sure that the loop executes at least once.  */
      if ((range_direction == 1 && compare_direction == -1)
	  || (range_direction == -1 && compare_direction == 1))
	{
	  if (loop_dump_stream)
	    fprintf (loop_dump_stream,
		     "insert_bct: loop %d: does not iterate even once. Not instrumenting.\n",
		     loop_num);
	  return;
	}

      /* Make sure that the loop does not end via an overflow (with
	 compile-time bounds we must have an additional check for overflow,
	 because here we also support the compare code of 'NE').  */
      if (comparison_code == NE
	  && increment_direction != range_direction) {
	if (loop_dump_stream)
	  fprintf (loop_dump_stream,
		   "insert_bct (compile time bounds): %d: loop not instrumented: terminated by overflow\n",
		   loop_num);
	return;
      }

      /* Determine the number of iterations by:

	 ;                  compare-val - initial-val + (increment - 1) + additional-iteration
	 ; num_iterations = -----------------------------------------------------------------
	 ;                                           increment
      */
      difference = (range_direction > 0) ? difference : -difference;

#if 0
      fprintf (stderr, "difference is: %d\n", difference); /* @*/
      fprintf (stderr, "increment_value_abs is: %d\n", increment_value_abs); /* @*/
      fprintf (stderr, "add_iteration is: %d\n", add_iteration); /* @*/
      fprintf (stderr, "INTVAL (comparison_value) is: %d\n", INTVAL (comparison_value)); /* @*/
      fprintf (stderr, "INTVAL (initial_value) is: %d\n", INTVAL (initial_value)); /* @*/
#endif

      if (increment_value_abs == 0) {
	fprintf (stderr, "insert_bct: error: increment == 0 !!!\n");
	abort ();
      }

      n_iterations = (difference + increment_value_abs - 1 + add_iteration)
	/ increment_value_abs;

#if 0
      fprintf (stderr, "number of iterations is: %d\n", n_iterations); /* @*/
#endif

      instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));

      /* Done with this loop.  */
      return;
    }
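
  /* Worked example of the formula above (illustrative only): for
     "for (i = 0; i < 10; i += 3)" we have difference = 10,
     increment_value_abs = 3 and add_iteration = 0, giving
     n_iterations = (10 + 3 - 1 + 0) / 3 = 4, which matches the
     executed values i = 0, 3, 6, 9.  */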

  /* Handle the more complex case, where the bounds are NOT known at compile
     time.  In this case we generate a run-time calculation of the number of
     iterations.  */

  /* With run-time bounds, if the compare is of the form '!=' we give up.  */
  if (comparison_code == NE) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
	       "insert_bct: fail for loop %d: runtime bounds with != comparison\n",
	       loop_num);
    return;
  }
  else {
    /* We rely on the existence of a run-time guard to ensure that the
       loop executes at least once.  */
    rtx sequence;
    rtx iterations_num_reg;
    rtx temp_reg;

    int increment_value_abs = INTVAL (increment) * increment_direction;

    /* Make sure that the increment is a power of two; otherwise (an
       expensive) divide is needed.  */
    if (exact_log2 (increment_value_abs) == -1)
      {
	if (loop_dump_stream)
	  fprintf (loop_dump_stream,
		   "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
	return;
      }

    /* Compute the number of iterations.  */
    start_sequence ();

    /* Again, the number of iterations is calculated by:

       ;                  compare-val - initial-val + (increment - 1) + additional-iteration
       ; num_iterations = -----------------------------------------------------------------
       ;                                           increment
    */
    /* ??? Do we have to call copy_rtx here before passing rtx to
       expand_binop?  */
    if (compare_direction > 0) {
      /* <, <= : the loop variable is increasing.  */
      temp_reg = expand_binop (loop_var_mode, sub_optab, comparison_value,
			       initial_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
    }
    else {
      /* >, >= : the loop variable is decreasing.  */
      temp_reg = expand_binop (loop_var_mode, sub_optab, initial_value,
			       comparison_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
    }

    if (increment_value_abs - 1 + add_iteration != 0)
      temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
			       GEN_INT (increment_value_abs - 1 + add_iteration),
			       NULL_RTX, 0, OPTAB_LIB_WIDEN);

    if (increment_value_abs != 1)
      {
	/* ??? This will generate an expensive divide instruction for
	   most targets.  The original authors apparently expected this
	   to be a shift, since they test for power-of-2 divisors above,
	   but just naively generating a divide instruction will not give
	   a shift.  It happens to work for the PowerPC target because
	   the rs6000.md file has a divide pattern that emits shifts.
	   It will probably not work for any other target.  */
	iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
					   temp_reg,
					   GEN_INT (increment_value_abs),
					   NULL_RTX, 0, OPTAB_LIB_WIDEN);
      }
    else
      iterations_num_reg = temp_reg;

    sequence = gen_sequence ();
    end_sequence ();
    emit_insn_before (sequence, loop_start);
    instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
  }
}

/* Instrument loop by inserting a BCT in it.  This is done in the following way:
   1. A new register is created and assigned the hard register number of the
      count register.
   2. In the head of the loop the new variable is initialized by the value
      passed in the loop_num_iterations parameter.
   3. At the end of the loop, a comparison of the register with 0 is generated.
      The created comparison follows the pattern defined for the
      decrement_and_branch_on_count insn, so this insn will be generated in the
      assembly generation phase.
   4. The compare&branch on the old variable is deleted.  So, if the
      loop-variable was not used elsewhere, it will be eliminated by data-flow
      analysis.  */
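
/* Schematically (an illustrative sketch, not code from this file), a loop

	    i = init;
     top:   body; i += inc;
	    if (i < limit) goto top;

   is rewritten as

	    ctr = n_iterations;
     top:   body;
	    if (--ctr != 0) goto top;

   with ctr living in the target's count register, so the loop-closing
   test no longer depends on the original counter.  */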

static void
instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
     rtx loop_start, loop_end;
     rtx loop_num_iterations;
{
  rtx temp_reg1, temp_reg2;
  rtx start_label;

  rtx sequence;
  enum machine_mode loop_var_mode = SImode;

#ifdef HAVE_decrement_and_branch_on_count
  if (HAVE_decrement_and_branch_on_count)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "Loop: Inserting BCT\n");

      /* Eliminate the check on the old variable: delete the conditional
	 jump at the end of the loop, then the compare that feeds it.  */
      delete_insn (PREV_INSN (loop_end));
      delete_insn (PREV_INSN (loop_end));

      /* Insert the label which will delimit the start of the loop.  */
      start_label = gen_label_rtx ();
      emit_label_after (start_label, loop_start);

      /* Insert initialization of the count register into the loop header.  */
      start_sequence ();

      temp_reg1 = gen_reg_rtx (loop_var_mode);
      emit_insn (gen_move_insn (temp_reg1, loop_num_iterations));

      /* This will be the count register.  */
      temp_reg2 = gen_rtx_REG (loop_var_mode, COUNT_REGISTER_REGNUM);
      /* We have to move the value to the count register from a GPR
	 because the rtx pointed to by loop_num_iterations could contain
	 an expression which cannot be moved into the count register.  */
      emit_insn (gen_move_insn (temp_reg2, temp_reg1));

      sequence = gen_sequence ();
      end_sequence ();
      emit_insn_after (sequence, loop_start);

      /* Insert a new comparison on the count register instead of the
	 old one, generating the needed BCT pattern (that will be
	 later recognized by the assembly generation phase).  */
      emit_jump_insn_before (gen_decrement_and_branch_on_count (temp_reg2,
								start_label),
			     loop_end);
      LABEL_NUSES (start_label)++;
    }
#endif /* HAVE_decrement_and_branch_on_count */
}
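
/* On a target such as PowerPC, the decrement_and_branch_on_count pattern
   is typically matched by a "bdnz" (decrement CTR and branch if nonzero)
   instruction, which is why the value is staged into COUNT_REGISTER_REGNUM
   above.  (Target-specific illustration; the machine description is
   authoritative.)  */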

/* Scan the function and determine whether it has indirect (computed) jumps.

   This is taken mostly from flow.c; similar code exists elsewhere
   in the compiler.  It may be useful to put this into rtlanal.c.  */

static int
indirect_jump_in_function_p (start)
     rtx start;
{
  rtx insn;

  for (insn = start; insn; insn = NEXT_INSN (insn))
    if (computed_jump_p (insn))
      return 1;

  return 0;
}
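
/* A computed jump is one whose target is not a fixed label, as in the
   code generated for the GNU extension "goto *jump_table[i];".
   (Illustrative example only; computed_jump_p makes the actual
   determination.)  */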

/* Add MEM to the LOOP_MEMS array, if appropriate.  See the
   documentation for LOOP_MEMS for the definition of `appropriate'.
   This function is called from prescan_loop via for_each_rtx.  */

static int
insert_loop_mem (mem, data)
     rtx *mem;
     void *data;
{
  int i;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
	 CONST_DOUBLE, so there's no need to traverse into this.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  /* See if we've already seen this MEM.  */
  for (i = 0; i < loop_mems_idx; ++i)
    if (rtx_equal_p (m, loop_mems[i].mem))
      {
	if (GET_MODE (m) != GET_MODE (loop_mems[i].mem))
	  /* The modes of the two memory accesses are different.  If
	     this happens, something tricky is going on, and we just
	     don't optimize accesses to this MEM.  */
	  loop_mems[i].optimize = 0;

	return 0;
      }

  /* Resize the array, if necessary.  */
  if (loop_mems_idx == loop_mems_allocated)
    {
      if (loop_mems_allocated != 0)
	loop_mems_allocated *= 2;
      else
	loop_mems_allocated = 32;

      loop_mems = (loop_mem_info *)
	xrealloc (loop_mems,
		  loop_mems_allocated * sizeof (loop_mem_info));
    }

  /* Actually insert the MEM.  */
  loop_mems[loop_mems_idx].mem = m;
  /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
     because we can't put it in a register.  We still store it in the
     table, though, so that if we see the same address later, but in a
     non-BLK mode, we'll not think we can optimize it at that point.  */
  loop_mems[loop_mems_idx].optimize = (GET_MODE (m) != BLKmode);
  loop_mems[loop_mems_idx].reg = NULL_RTX;
  ++loop_mems_idx;

  return 0;
}

/* Like load_mems, but also ensures that N_TIMES_SET,
   MAY_NOT_OPTIMIZE, REG_SINGLE_USAGE, and INSN_COUNT have the correct
   values after load_mems.  */

static void
load_mems_and_recount_loop_regs_set (scan_start, end, loop_top, start,
				     reg_single_usage, insn_count)
     rtx scan_start;
     rtx end;
     rtx loop_top;
     rtx start;
     rtx *reg_single_usage;
     int *insn_count;
{
  int nregs = max_reg_num ();

  load_mems (scan_start, end, loop_top, start);

  /* Recalculate n_times_set and friends since load_mems may have
     created new registers.  */
  if (max_reg_num () > nregs)
    {
      int old_nregs = nregs;
      int i;

      nregs = max_reg_num ();

      /* Note that we assume here that enough room was allocated in
	 the various arrays to accommodate the extra registers created
	 by load_mems.  */
      bzero ((char *) n_times_set, nregs * sizeof (int));
      bzero (may_not_optimize, nregs);
      if (loop_has_call && reg_single_usage)
	bzero ((char *) reg_single_usage, nregs * sizeof (rtx));

      count_loop_regs_set (loop_top ? loop_top : start, end,
			   may_not_optimize, reg_single_usage,
			   insn_count, nregs);

      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	may_not_optimize[i] = 1, n_times_set[i] = 1;

      /* Set n_times_used for the new registers.  */
      bcopy ((char *) (n_times_set + old_nregs),
	     (char *) (n_times_used + old_nregs),
	     (nregs - old_nregs) * sizeof (int));
    }
}

/* Move MEMs into registers for the duration of the loop.  SCAN_START
   is the first instruction in the loop (as it is executed).  The
   other parameters are as for next_insn_in_loop.  */

static void
load_mems (scan_start, end, loop_top, start)
     rtx scan_start;
     rtx end;
     rtx loop_top;
     rtx start;
{
  int maybe_never = 0;
  int i;
  rtx p;
  rtx label = NULL_RTX;
  rtx end_label;

  if (loop_mems_idx > 0)
    {
      /* Nonzero if the next instruction may never be executed.  */
      int next_maybe_never = 0;

      /* Check to see if it's possible that some instructions in the
	 loop are never executed.  */
      for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
	   p != NULL_RTX && !maybe_never;
	   p = next_insn_in_loop (p, scan_start, end, loop_top))
	{
	  if (GET_CODE (p) == CODE_LABEL)
	    maybe_never = 1;
	  else if (GET_CODE (p) == JUMP_INSN
		   /* If we enter the loop in the middle, and scan
		      around to the beginning, don't set maybe_never
		      for that.  This must be an unconditional jump,
		      otherwise the code at the top of the loop might
		      never be executed.  Unconditional jumps are
		      followed by a barrier then the loop end.  */
		   && ! (GET_CODE (p) == JUMP_INSN
			 && JUMP_LABEL (p) == loop_top
			 && NEXT_INSN (NEXT_INSN (p)) == end
			 && simplejump_p (p)))
	    {
	      if (!condjump_p (p))
		/* Something complicated.  */
		maybe_never = 1;
	      else
		/* If there are any more instructions in the loop, they
		   might not be reached.  */
		next_maybe_never = 1;
	    }
	  else if (next_maybe_never)
	    maybe_never = 1;
	}
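
      /* For instance (an illustrative note, not original code): in a body
	 containing "if (cond) *q = 0;" the store may be skipped on some
	 iterations.  Scanning past the conditional jump sets
	 next_maybe_never, and the next insn encountered then sets
	 maybe_never, so MEMs that may trap will not be hoisted out (see
	 the may_trap_p check below).  */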

      /* Actually move the MEMs.  */
      for (i = 0; i < loop_mems_idx; ++i)
	{
	  int j;
	  int written = 0;
	  rtx reg;
	  rtx mem = loop_mems[i].mem;

	  if (MEM_VOLATILE_P (mem)
	      || invariant_p (XEXP (mem, 0)) != 1)
	    /* There's no telling whether or not MEM is modified.  */
	    loop_mems[i].optimize = 0;

	  /* Go through the MEMs written to in the loop to see if this
	     one is aliased by one of them.  */
	  for (j = 0; j < loop_store_mems_idx; ++j)
	    {
	      if (rtx_equal_p (mem, loop_store_mems[j]))
		written = 1;
	      else if (true_dependence (loop_store_mems[j], VOIDmode,
					mem, rtx_varies_p))
		{
		  /* MEM is indeed aliased by this store.  */
		  loop_mems[i].optimize = 0;
		  break;
		}
	    }

	  /* If this MEM is written to, we must be sure that there
	     are no reads from another MEM that aliases this one.  */
	  if (loop_mems[i].optimize && written)
	    {
	      for (j = 0; j < loop_mems_idx; ++j)
		{
		  if (j == i)
		    continue;
		  else if (true_dependence (mem,
					    VOIDmode,
					    loop_mems[j].mem,
					    rtx_varies_p))
		    {
		      /* It's not safe to hoist loop_mems[i] out of
			 the loop because writes to it might not be
			 seen by reads from loop_mems[j].  */
		      loop_mems[i].optimize = 0;
		      break;
		    }
		}
	    }

	  if (maybe_never && may_trap_p (mem))
	    /* We can't access the MEM outside the loop; it might
	       cause a trap that wouldn't have happened otherwise.  */
	    loop_mems[i].optimize = 0;

	  if (!loop_mems[i].optimize)
	    /* We thought we were going to lift this MEM out of the
	       loop, but later discovered that we could not.  */
	    continue;

	  /* Allocate a pseudo for this MEM.  We set REG_USERVAR_P in
	     order to keep scan_loop from moving stores to this MEM
	     out of the loop just because this REG is neither a
	     user-variable nor used in the loop test.  */
	  reg = gen_reg_rtx (GET_MODE (mem));
	  REG_USERVAR_P (reg) = 1;
	  loop_mems[i].reg = reg;

	  /* Now, replace all references to the MEM with the
	     corresponding pseudos.  */
	  for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
	       p != NULL_RTX;
	       p = next_insn_in_loop (p, scan_start, end, loop_top))
	    {
	      rtx_and_int ri = { p, i };
	      for_each_rtx (&p, replace_loop_mem, &ri);
	    }

	  if (!apply_change_group ())
	    /* We couldn't replace all occurrences of the MEM.  */
	    loop_mems[i].optimize = 0;
	  else
	    {
	      rtx set;

	      /* Load the memory immediately before START, which is
		 the NOTE_LOOP_BEG.  */
	      set = gen_rtx_SET (GET_MODE (reg), reg, mem);
	      emit_insn_before (set, start);

	      if (written)
		{
		  if (label == NULL_RTX)
		    {
		      /* We must compute the former
			 right-after-the-end label before we insert
			 the new one.  */
		      end_label = next_label (end);
		      label = gen_label_rtx ();
		      emit_label_after (label, end);
		    }

		  /* Store the memory immediately after END, which is
		     the NOTE_LOOP_END.  */
		  set = gen_rtx_SET (GET_MODE (reg), mem, reg);
		  emit_insn_after (set, label);
		}

	      if (loop_dump_stream)
		{
		  fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
			   REGNO (reg), (written ? "r/w" : "r/o"));
		  print_rtl (loop_dump_stream, mem);
		  fputc ('\n', loop_dump_stream);
		}
	    }
	}

      if (label != NULL_RTX)
	{
	  /* Now, we need to replace all references to the previous exit
	     label with the new one.  */
	  rtx_pair rr = { end_label, label };

	  for (p = start; p != end; p = NEXT_INSN (p))
	    for_each_rtx (&p, replace_label, &rr);
	}
    }
}
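
/* Illustrative effect of load_mems (a sketch, not code from this file):
   given a loop-invariant address p, a loop

     loop:  ... sum += *p; ...

   becomes

	    reg = *p;              <- emitted before the NOTE_LOOP_BEG
     loop:  ... sum += reg; ...
	    *p = reg;              <- only for r/w MEMs, emitted after the
				      new label past the loop end  */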

/* Replace MEM with its associated pseudo register.  This function is
   called from load_mems via for_each_rtx.  DATA is actually an
   rtx_and_int * describing the instruction currently being scanned
   and the MEM we are currently replacing.  */

static int
replace_loop_mem (mem, data)
     rtx *mem;
     void *data;
{
  rtx_and_int *ri;
  rtx insn;
  int i;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
	 CONST_DOUBLE, so there's no need to traverse into one.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  ri = (rtx_and_int *) data;
  i = ri->i;
  insn = ri->r;

  if (!rtx_equal_p (loop_mems[i].mem, m))
    /* This is not the MEM we are currently replacing.  */
    return 0;

  /* Actually replace the MEM.  */
  validate_change (insn, mem, loop_mems[i].reg, 1);

  return 0;
}

/* Replace occurrences of the old exit label for the loop with the new
   one.  DATA is an rtx_pair containing the old and new labels,
   respectively.  */

static int
replace_label (x, data)
     rtx *x;
     void *data;
{
  rtx l = *x;
  rtx old_label = ((rtx_pair *) data)->r1;
  rtx new_label = ((rtx_pair *) data)->r2;

  if (l == NULL_RTX)
    return 0;

  if (GET_CODE (l) != LABEL_REF)
    return 0;

  if (XEXP (l, 0) != old_label)
    return 0;

  XEXP (l, 0) = new_label;
  ++LABEL_NUSES (new_label);
  --LABEL_NUSES (old_label);

  return 0;
}
);