/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 88, 89, 91-97, 1998 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value and changes these to zero the entire
   register once before the loop and merely copy the low part within
   the loop.

   Most of the complexity is in heuristics to decide when it is
   worthwhile to do these things.  */
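
/* Illustrative sketch only (source-level pseudo-code, not actual compiler
   output; the names n, a, x and y are made up for the example).  Given

       for (i = 0; i < n; i++)
         a[i] = x * y;

   invariant code motion hoists the computation of x * y, and strength
   reduction replaces the address computation a + i*4 (a general induction
   variable derived from the basic induction variable i) with a pointer
   that is simply incremented:

       t = x * y;                     -- hoisted invariant
       p = &a[0];                     -- strength-reduced address giv
       for (i = 0; i < n; i++, p++)
         *p = t;

   Induction variable elimination may then remove i entirely by rewriting
   the exit test in terms of p.  */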
#include "insn-config.h"
#include "insn-flags.h"
#include "hard-reg-set.h"
/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

/* 1 + largest uid of any insn.  */

/* 1 + luid of last insn.  */

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;
/* For each loop, gives the containing loop number, -1 if none.  */

/* The main output of analyze_loop_iterations is placed here.  */

int *loop_can_insert_bct;

/* For each loop, determines whether some of its inner loops have used
   the count register.  */

int *loop_used_count_register;
/* Loop parameters for arithmetic loops.  These loops have a loop variable
   which is initialized to loop_start_value, incremented in each iteration
   by "loop_increment".  At the end of the iteration the loop variable is
   compared to the loop_comparison_value (using loop_comparison_code).  */

rtx *loop_comparison_value;
rtx *loop_start_value;
enum rtx_code *loop_comparison_code;
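
/* Illustrative correspondence only (made-up source loop): a loop such as

       for (i = start; i < limit; i += step)
         ...

   has loop_start_value[n] describing START, loop_increment[n] describing
   STEP, loop_comparison_value[n] describing LIMIT, and
   loop_comparison_code[n] == LT, for the loop numbered n.  */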
/* For each loop, keep track of its unrolling factor.
     -1: completely unrolled
     >0: holds the exact unroll factor.  */
int *loop_unroll_factor;

/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;

/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include
   return instructions on this list.  */

rtx *loop_number_exit_labels;

/* Indexed by loop number, counts the number of LABEL_REFs on
   loop_number_exit_labels for this loop and all loops nested inside it.  */

int *loop_number_exit_count;

/* Holds the number of loop iterations.  It is zero if the number could not
   be calculated.  Must be unsigned since the number of iterations can
   be as high as 2^wordsize-1.  For loops with a wider iterator, this number
   will be zero if the number of loop iterations is too large for an
   unsigned integer to hold.  */

unsigned HOST_WIDE_INT loop_n_iterations;
/* Nonzero if there is a subroutine call in the current loop.  */

static int loop_has_call;

/* Nonzero if there is a volatile memory reference in the current
   loop.  */

static int loop_has_volatile;

/* Added loop_continue which is the NOTE_INSN_LOOP_CONT of the
   current loop.  A continue statement will generate a branch to
   NEXT_INSN (loop_continue).  */

static rtx loop_continue;

/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static varray_type n_times_set;

/* Original value of n_times_set; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static varray_type n_times_used;

/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */

static varray_type may_not_optimize;

/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;
/* Array of MEMs that are stored in this loop.  If there are too many to fit
   here, we just turn on unknown_address_altered.  */

#define NUM_STORES 30
static rtx loop_store_mems[NUM_STORES];

/* Index of first available slot in above array.  */
static int loop_store_mems_idx;

typedef struct loop_mem_info
{
  rtx mem;       /* The MEM itself.  */
  rtx reg;       /* Corresponding pseudo, if any.  */
  int optimize;  /* Nonzero if we can optimize access to this MEM.  */
} loop_mem_info;
/* Array of MEMs that are used (read or written) in this loop, but
   cannot be aliased by anything in this loop, except perhaps
   themselves.  In other words, if loop_mems[i] is altered during the
   loop, it is altered by an expression that is rtx_equal_p to it.  */

static loop_mem_info *loop_mems;

/* The index of the next available slot in LOOP_MEMS.  */

static int loop_mems_idx;

/* The number of elements allocated in LOOP_MEMs.  */

static int loop_mems_allocated;

/* Nonzero if we don't know what MEMs were changed in the current loop.
   This happens if the loop contains a call (in which case `loop_has_call'
   will also be set) or if we store into more than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Number of loops contained within the current one, including itself.  */
static int loops_enclosed;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;
/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;                     /* A movable insn */
  rtx set_src;                  /* The expression this reg is set from.  */
  rtx set_dest;                 /* The destination of this SET.  */
  rtx dependencies;             /* When INSN is libcall, this is an EXPR_LIST
                                   of any registers used within the LIBCALL.  */
  int consec;                   /* Number of consecutive following insns
                                   that must be moved with this one.  */
  int regno;                    /* The register it sets */
  short lifetime;               /* lifetime of that register;
                                   may be adjusted when matching movables
                                   that load the same value are found.  */
  short savings;                /* Number of insns we can move for this reg,
                                   including other movables that force this
                                   or match this one.  */
  unsigned int cond : 1;        /* 1 if only conditionally movable */
  unsigned int force : 1;       /* 1 means MUST move this insn */
  unsigned int global : 1;      /* 1 means reg is live outside this loop */
                /* If PARTIAL is 1, GLOBAL means something different:
                   that the reg is live outside the range from where it is set
                   to the following label.  */
  unsigned int done : 1;        /* 1 inhibits further processing of this */

  unsigned int partial : 1;     /* 1 means this reg is used for zero-extending.
                                   In particular, moving it does not make it
                                   invariant.  */
  unsigned int move_insn : 1;   /* 1 means that we call emit_move_insn to
                                   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
                                    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;    /* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;   /* Nonzero means it is a mode for a low part
                                   that we should avoid changing when clearing
                                   the rest of the reg.  */
  struct movable *match;        /* First entry for same value */
  struct movable *forces;       /* An insn that must be moved if this is */
  struct movable *next;
};

static struct movable *the_movables;

FILE *loop_dump_stream;
/* Forward declarations.  */

static void find_and_verify_loops PROTO((rtx));
static void mark_loop_jump PROTO((rtx, int));
static void prescan_loop PROTO((rtx, rtx));
static int reg_in_basic_block_p PROTO((rtx, rtx));
static int consec_sets_invariant_p PROTO((rtx, int, rtx));
static rtx libcall_other_reg PROTO((rtx, rtx));
static int labels_in_range_p PROTO((rtx, int));
static void count_loop_regs_set PROTO((rtx, rtx, varray_type, varray_type,
                                       int *, int));
static void note_addr_stored PROTO((rtx, rtx));
static int loop_reg_used_before_p PROTO((rtx, rtx, rtx, rtx, rtx));
static void scan_loop PROTO((rtx, rtx, int, int));
static void replace_call_address PROTO((rtx, rtx, rtx));
static rtx skip_consec_insns PROTO((rtx, int));
static int libcall_benefit PROTO((rtx));
static void ignore_some_movables PROTO((struct movable *));
static void force_movables PROTO((struct movable *));
static void combine_movables PROTO((struct movable *, int));
static int regs_match_p PROTO((rtx, rtx, struct movable *));
static int rtx_equal_for_loop_p PROTO((rtx, rtx, struct movable *));
static void add_label_notes PROTO((rtx, rtx));
static void move_movables PROTO((struct movable *, int, int, rtx, rtx, int));
static int count_nonfixed_reads PROTO((rtx));
static void strength_reduce PROTO((rtx, rtx, rtx, int, rtx, rtx, int, int));
static void find_single_use_in_loop PROTO((rtx, rtx, varray_type));
static int valid_initial_value_p PROTO((rtx, rtx, int, rtx));
static void find_mem_givs PROTO((rtx, rtx, int, rtx, rtx));
static void record_biv PROTO((struct induction *, rtx, rtx, rtx, rtx,
                              int, int));
static void check_final_value PROTO((struct induction *, rtx, rtx));
static void record_giv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx,
                              int, enum g_types, int, rtx *, rtx, rtx));
static void update_giv_derive PROTO((rtx));
static int basic_induction_var PROTO((rtx, enum machine_mode, rtx, rtx,
                                      rtx *, rtx *));
static rtx simplify_giv_expr PROTO((rtx, int *));
static int general_induction_var PROTO((rtx, rtx *, rtx *, rtx *, int, int *));
static int consec_sets_giv PROTO((int, rtx, rtx, rtx, rtx *, rtx *));
static int check_dbra_loop PROTO((rtx, int, rtx));
static rtx express_from_1 PROTO((rtx, rtx, rtx));
static rtx express_from PROTO((struct induction *, struct induction *));
static rtx combine_givs_p PROTO((struct induction *, struct induction *));
static void combine_givs PROTO((struct iv_class *));
static int product_cheap_p PROTO((rtx, rtx));
static int maybe_eliminate_biv PROTO((struct iv_class *, rtx, rtx, int,
                                      int, int));
static int maybe_eliminate_biv_1 PROTO((rtx, rtx, struct iv_class *, int,
                                        rtx));
static int last_use_this_basic_block PROTO((rtx, rtx));
static void record_initial PROTO((rtx, rtx));
static void update_reg_last_use PROTO((rtx, rtx));
static rtx next_insn_in_loop PROTO((rtx, rtx, rtx, rtx));
static void load_mems_and_recount_loop_regs_set PROTO((rtx, rtx, rtx,
static void load_mems PROTO((rtx, rtx, rtx, rtx));
static int insert_loop_mem PROTO((rtx *, void *));
static int replace_loop_mem PROTO((rtx *, void *));
static int replace_label PROTO((rtx *, void *));
typedef struct rtx_and_int {
  rtx r;
  int i;
} rtx_and_int;

typedef struct rtx_pair {
  rtx r1;
  rtx r2;
} rtx_pair;
/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END) \
  (INSN_UID (INSN) < max_uid_for_loop \
   && INSN_LUID (INSN) >= INSN_LUID (START) \
   && INSN_LUID (INSN) <= INSN_LUID (END))

/* This is extern from unroll.c */
extern void iteration_info PROTO((rtx, rtx *, rtx *, rtx, rtx));
/* Two main functions for implementing bct:
   the first is to be called before loop unrolling, and the second after.  */
#ifdef HAVE_decrement_and_branch_on_count
static void analyze_loop_iterations PROTO((rtx, rtx));
static void insert_bct PROTO((rtx, rtx));

/* Auxiliary function that inserts the bct pattern into the loop.  */
static void instrument_loop_bct PROTO((rtx, rtx, rtx));
#endif /* HAVE_decrement_and_branch_on_count */
/* Indirect_jump_in_function is computed once per function.  */
int indirect_jump_in_function = 0;
static int indirect_jump_in_function_p PROTO((rtx));

/* Relative gain of eliminating various kinds of operations.  */

static int shift_cost;
static int mult_cost;

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */

static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */

static int reg_address_cost;
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

#ifdef ADDRESS_COST
  reg_address_cost = ADDRESS_COST (reg);
#else
  reg_address_cost = rtx_cost (reg, MEM);
#endif

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  /* Free the objects we just allocated.  */

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

loop_optimize (f, dumpfile, unroll_p, bct_p)
     /* f is the first instruction of a chain of insns for one function */

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);
  /* Count the number of loops.  */

  for (insn = f; insn; insn = NEXT_INSN (insn))
    if (GET_CODE (insn) == NOTE
        && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
      max_loop_num++;
  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
  bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));
  /* Allocate tables for recording each loop.  We set each entry, so they need
     not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));

  /* This is initialized by the unrolling code, so we go ahead
     and clear them just in case we are not performing loop
     unrolling.  */
  loop_unroll_factor = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_unroll_factor, max_loop_num * sizeof (int));
  /* Allocate for BCT optimization.  */
  loop_can_insert_bct = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_can_insert_bct, max_loop_num * sizeof (int));

  loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));

  loop_increment = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_comparison_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_start_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  bzero ((char *) loop_increment, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_comparison_value, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_start_value, max_loop_num * sizeof (rtx));

  loop_comparison_code
    = (enum rtx_code *) alloca (max_loop_num * sizeof (enum rtx_code));
  bzero ((char *) loop_comparison_code, max_loop_num * sizeof (enum rtx_code));
  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid () + 1;

  /* Compute the mapping from uids to luids.
     LUIDs are numbers assigned to insns, like uids,
     except that luids increase monotonically through the code.
     Don't assign luids to line-number NOTEs, so that the distance in luids
     between two insns is not affected by -g.  */
  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) != NOTE
          || NOTE_LINE_NUMBER (insn) <= 0)
        uid_luid[INSN_UID (insn)] = ++i;
      else
        /* Give a line number note the same luid as preceding insn.  */
        uid_luid[INSN_UID (insn)] = i;
    }
  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
        break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];
  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Determine if the function has an indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);
  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
                 unroll_p, bct_p);

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original
     one-to-one mapping will remain.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    unroll_block_trees ();

  end_alias_analysis ();
/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP_TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

next_insn_in_loop (insn, start, end, loop_top)

  insn = NEXT_INSN (insn);

  /* Go to the top of the loop, and continue there.  */
/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */
scan_loop (loop_start, end, unroll_p, bct_p)

  /* 1 if we are scanning insns that could be executed zero times.  */

  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */

  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */

  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */

  /* Number of insns in the loop.  */

  /* The SET from an insn, if it is the only SET in the insn.  */

  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */

  /* If we have calls, contains the insn in which a register was used
     if it was used exactly once; contains const0_rtx if it was used more
     than once.  */
  varray_type reg_single_usage = 0;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
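
  /* The insn-stream shape being detected is, schematically (illustration
     only, not actual RTL):

        NOTE_INSN_LOOP_BEG
        (jump to TEST)                  -- the entry jump
     TOP:                               -- LOOP_TOP: top of the loop body
        ... loop body ...
     TEST:                              -- exit test; SCAN_START points here
        (conditional jump back to TOP)
        NOTE_INSN_LOOP_END

     When a loop is laid out this way, scanning begins at the exit-test
     label rather than at the first insn after NOTE_INSN_LOOP_BEG.  */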
  for (p = NEXT_INSN (loop_start);
       p != end
         && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
         && (GET_CODE (p) != NOTE
             || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
                 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  scan_start = p;

  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end);
  threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN)  */
      if (simplejump_p (p)
          && JUMP_LABEL (p) != 0
          /* Check to see whether the jump actually
             jumps out of the loop (meaning it's no loop).
             This case can happen for things like
             do {..} while (0).  If this label was generated previously
             by loop, we can't tell anything about it and have to reject
             the loop.  */
          && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, end))
        {
          loop_top = next_label (scan_start);
          scan_start = JUMP_LABEL (p);
        }
    }
  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
                 INSN_UID (loop_start), INSN_UID (end));
      return;
    }
  /* Count number of times each reg is set during this loop.
     Set VARRAY_CHAR (may_not_optimize, I) if it is not safe to move out
     the setting of register I.  If this loop has calls, set
     VARRAY_RTX (reg_single_usage, I).  */

  /* Allocate extra space for REGS that might be created by
     load_mems.  We allocate a little extra slop as well, in the hopes
     that even after the moving of movables creates some new registers
     we won't have to reallocate these arrays.  However, we do grow
     the arrays, if necessary, in load_mems_recount_loop_regs_set.  */
  nregs = max_reg_num () + loop_mems_idx + 16;
  VARRAY_INT_INIT (n_times_set, nregs, "n_times_set");
  VARRAY_INT_INIT (n_times_used, nregs, "n_times_used");
  VARRAY_CHAR_INIT (may_not_optimize, nregs, "may_not_optimize");

  if (loop_has_call)
    VARRAY_RTX_INIT (reg_single_usage, nregs, "reg_single_usage");

  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
                       may_not_optimize, reg_single_usage, &insn_count,
                       nregs);
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      VARRAY_CHAR (may_not_optimize, i) = 1;
      VARRAY_INT (n_times_set, i) = 1;
    }

#ifdef AVOID_CCMODE_COPIES
  /* Don't try to move insns which set CC registers if we should not
     create CCmode register copies.  */
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
      VARRAY_CHAR (may_not_optimize, i) = 1;
#endif
  bcopy ((char *) &n_times_set->data,
         (char *) &n_times_used->data, nregs * sizeof (int));

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
               INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_continue)
        fprintf (loop_dump_stream, "Continue at insn %d.\n",
                 INSN_UID (loop_continue));
    }
  /* Scan through the loop finding insns that are safe to move.
     Set n_times_set negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
       p != NULL_RTX;
       p = next_insn_in_loop (p, scan_start, end, loop_top))
    {
      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
          && find_reg_note (p, REG_LIBCALL, NULL_RTX))
        in_libcall = 1;
      else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
               && find_reg_note (p, REG_RETVAL, NULL_RTX))
        in_libcall = 0;
      if (GET_CODE (p) == INSN
          && (set = single_set (p))
          && GET_CODE (SET_DEST (set)) == REG
          && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set))))
        {
          rtx src = SET_SRC (set);
          rtx dependencies = 0;
          /* Figure out what to use as a source of this insn.  If a REG_EQUIV
             note is given or if a REG_EQUAL note with a constant operand is
             specified, use it as the source and mark that we should move
             this insn by calling emit_move_insn rather than duplicating the
             insn.

             Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
             is present.  */
          temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
          if (temp)
            src = XEXP (temp, 0), move_insn = 1;
          else
            {
              temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
              if (temp && CONSTANT_P (XEXP (temp, 0)))
                src = XEXP (temp, 0), move_insn = 1;
              if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
                {
                  src = XEXP (temp, 0);
                  /* A libcall block can use regs that don't appear in
                     the equivalent expression.  To move the libcall,
                     we must move those regs too.  */
                  dependencies = libcall_other_reg (p, src);
                }
            }
          /* Don't try to optimize a register that was made
             by loop-optimization for an inner loop.
             We don't know its life-span, so we can't compute the benefit.  */
          if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
            ;
          /* In order to move a register, we need to have one of three cases:
             (1) it is used only in the same basic block as the set
             (2) it is not a user variable and it is not used in the
                 exit test (this can cause the variable to be used
                 before it is set just like a user-variable).
             (3) the set is guaranteed to be executed once the loop starts,
                 and the reg is not used until after that.  */
          else if (! ((! maybe_never
                       && ! loop_reg_used_before_p (set, p, loop_start,
                                                    scan_start, end))
                      || (! REG_USERVAR_P (SET_DEST (set))
                          && ! REG_LOOP_TEST_P (SET_DEST (set)))
                      || reg_in_basic_block_p (p, SET_DEST (set))))
            ;
          else if ((tem = invariant_p (src))
                   && (dependencies == 0
                       || (tem2 = invariant_p (dependencies)) != 0)
                   && (VARRAY_INT (n_times_set,
                                   REGNO (SET_DEST (set))) == 1
                       || (tem1
                           = consec_sets_invariant_p
                           (SET_DEST (set),
                            VARRAY_INT (n_times_set, REGNO (SET_DEST (set))),
                            p)))
                   /* If the insn can cause a trap (such as divide by zero),
                      can't move it unless it's guaranteed to be executed
                      once loop is entered.  Even a function call might
                      prevent the trap insn from being reached
                      (since it might exit!)  */
                   && ! ((maybe_never || call_passed)
                         && may_trap_p (src)))
            {
              register struct movable *m;
              register int regno = REGNO (SET_DEST (set));
              /* A potential lossage is where we have a case where two insns
                 can be combined as long as they are both in the loop, but
                 we move one of them outside the loop.  For large loops,
                 this can lose.  The most common case of this is the address
                 of a function being called.

                 Therefore, if this register is marked as being used exactly
                 once if we are in a loop with calls (a "large loop"), see if
                 we can replace the usage of this register with the source
                 of this SET.  If we can, delete this insn.

                 Don't do this if P has a REG_RETVAL note or if we have
                 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */
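
              /* Illustrative sketch (the register number and callee name are
                 made up): in a loop containing calls, a sequence such as

                     (set (reg 117) (symbol_ref "foo"))
                     ...
                     (call_insn ... (mem (reg 117)) ...)  -- only use of reg 117

                 is usually better handled by substituting the SYMBOL_REF
                 directly into the call and deleting the SET, rather than
                 hoisting the SET and tying up a register for the whole
                 loop.  */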
              if (reg_single_usage && VARRAY_RTX (reg_single_usage, regno) != 0
                  && VARRAY_RTX (reg_single_usage, regno) != const0_rtx
                  && REGNO_FIRST_UID (regno) == INSN_UID (p)
                  && (REGNO_LAST_UID (regno)
                      == INSN_UID (VARRAY_RTX (reg_single_usage, regno)))
                  && VARRAY_INT (n_times_set, regno) == 1
                  && ! side_effects_p (SET_SRC (set))
                  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
                  && (! SMALL_REGISTER_CLASSES
                      || (! (GET_CODE (SET_SRC (set)) == REG
                             && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
                  /* This test is not redundant; SET_SRC (set) might be
                     a call-clobbered register and the life of REGNO
                     might span a call.  */
                  && ! modified_between_p (SET_SRC (set), p,
                                           VARRAY_RTX
                                           (reg_single_usage, regno))
                  && no_labels_between_p (p, VARRAY_RTX (reg_single_usage, regno))
                  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
                                           VARRAY_RTX
                                           (reg_single_usage, regno)))
                  /* Replace any usage in a REG_EQUAL note.  Must copy the
                     new source, so that we don't get rtx sharing between the
                     SET_SOURCE and REG_NOTES of insn p.  */
                  REG_NOTES (VARRAY_RTX (reg_single_usage, regno))
                    = replace_rtx (REG_NOTES (VARRAY_RTX
                                              (reg_single_usage, regno)),
                                   SET_DEST (set), copy_rtx (SET_SRC (set)));

                  PUT_CODE (p, NOTE);
                  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
                  NOTE_SOURCE_FILE (p) = 0;
                  VARRAY_INT (n_times_set, regno) = 0;
                  continue;
              m = (struct movable *) alloca (sizeof (struct movable));
              m->dependencies = dependencies;
              m->set_dest = SET_DEST (set);
              m->consec = VARRAY_INT (n_times_set,
                                      REGNO (SET_DEST (set))) - 1;
              m->move_insn = move_insn;
              m->move_insn_first = 0;
              m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
              m->savemode = VOIDmode;
              /* Set M->cond if either invariant_p or consec_sets_invariant_p
                 returned 2 (only conditionally invariant).  */
              m->cond = ((tem | tem1 | tem2) > 1);
              m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
                           || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
              m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                             - uid_luid[REGNO_FIRST_UID (regno)]);
              m->savings = VARRAY_INT (n_times_used, regno);
              if (find_reg_note (p, REG_RETVAL, NULL_RTX))
                m->savings += libcall_benefit (p);
              VARRAY_INT (n_times_set, regno) = move_insn ? -2 : -1;
              /* Add M to the end of the chain MOVABLES.  */
              if (movables == 0)
                movables = m;
              else
                last_movable->next = m;
              last_movable = m;
                  /* It is possible for the first instruction to have a
                     REG_EQUAL note but a non-invariant SET_SRC, so we must
                     remember the status of the first instruction in case
                     the last instruction doesn't have a REG_EQUAL note.  */
                  m->move_insn_first = m->move_insn;

                  /* Skip this insn, not checking REG_LIBCALL notes.  */
                  p = next_nonnote_insn (p);
                  /* Skip the consecutive insns, if there are any.  */
                  p = skip_consec_insns (p, m->consec);
                  /* Back up to the last insn of the consecutive group.  */
                  p = prev_nonnote_insn (p);
                  /* We must now reset m->move_insn, m->is_equiv, and possibly
                     m->set_src to correspond to the effects of all the
                     insns.  */
                  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
                  if (temp)
                    m->set_src = XEXP (temp, 0), m->move_insn = 1;
                  else
                    {
                      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
                      if (temp && CONSTANT_P (XEXP (temp, 0)))
                        m->set_src = XEXP (temp, 0), m->move_insn = 1;
                    }

                  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
          /* If this register is always set within a STRICT_LOW_PART
             or set to zero, then its high bytes are constant.
             So clear them outside the loop and within the loop
             just load the low bytes.
             We must check that the machine has an instruction to do so.
             Also, if the value loaded into the register
             depends on the same register, this cannot be done.  */
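          /* Illustrative sketch (source level, made-up variable names):
             inside the loop, a zero-extension such as

                 unsigned short s;
                 ...
                 x = (unsigned int) s;

             is emitted as (set (reg x) (const_int 0)) followed by a
             STRICT_LOW_PART store of s into the low part of x.  The
             clearing insn can be moved before the loop, leaving only the
             narrow low-part store inside it.  */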
          else if (SET_SRC (set) == const0_rtx
                   && GET_CODE (NEXT_INSN (p)) == INSN
                   && (set1 = single_set (NEXT_INSN (p)))
                   && GET_CODE (set1) == SET
                   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
                   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
                   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
                       == SET_DEST (set))
                   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
            {
              register int regno = REGNO (SET_DEST (set));
              if (VARRAY_INT (n_times_set, regno) == 2)
                {
                  register struct movable *m;
                  m = (struct movable *) alloca (sizeof (struct movable));
                  m->set_dest = SET_DEST (set);
                  m->dependencies = 0;
                  m->move_insn_first = 0;
                  /* If the insn may not be executed on some cycles,
                     we can't clear the whole reg; clear just high part.
                     Not even if the reg is used only within this loop.
                     Consider this:
                     while (1)
                       while (s != end) {
                         if (foo ()) x = *s;
                         use (x);
                       }
                     Clearing x before the inner loop could clobber a value
                     being saved from the last time around the outer loop.
                     However, if the reg is not used outside this loop
                     and all uses of the register are in the same
                     basic block as the store, there is no problem.

                     If this insn was made by loop, we don't know its
                     INSN_LUID and hence must make a conservative
                     assumption.  */
                  m->global = (INSN_UID (p) >= max_uid_for_loop
                               || (uid_luid[REGNO_LAST_UID (regno)]
                                   > INSN_LUID (end))
                               || (uid_luid[REGNO_FIRST_UID (regno)]
                                   < INSN_LUID (p))
                               || (labels_in_range_p
                                   (p, uid_luid[REGNO_FIRST_UID (regno)])));
                  if (maybe_never && m->global)
                    m->savemode = GET_MODE (SET_SRC (set1));
                  else
                    m->savemode = VOIDmode;

                  m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                                 - uid_luid[REGNO_FIRST_UID (regno)]);

                  VARRAY_INT (n_times_set, regno) = -1;
                  /* Add M to the end of the chain MOVABLES.  */
                  if (movables == 0)
                    movables = m;
                  else
                    last_movable->next = m;
                  last_movable = m;
      /* Past a call insn, we get to insns which might not be executed
         because the call might exit.  This matters for insns that trap.
         Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
         so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
        call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
         can't count on whether or how many times they will be
         executed during each iteration.  Therefore, we can
         only move out sets of trivial variables
         (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
               /* If we enter the loop in the middle, and scan around to the
                  beginning, don't set maybe_never for that.  This must be an
                  unconditional jump, otherwise the code at the top of the
                  loop might never be executed.  Unconditional jumps are
                  followed by a barrier then the loop end.  */
               && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
                     && NEXT_INSN (NEXT_INSN (p)) == end
                     && simplejump_p (p)))
        maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
        {
          /* At the virtual top of a converted loop, insns are again known to
             be executed: logically, the loop begins here even though the exit
             code has been duplicated.  */
          if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
            maybe_never = call_passed = 0;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
            loop_depth++;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
            loop_depth--;
        }
  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads
     leads when it dies right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in n_times_set for each reg that is moved.

     Generally this increases code size, so do not move movables when
     optimizing for code size.  */

  if (! optimize_size)
    move_movables (movables, threshold,
                   insn_count, loop_start, end, nregs);
  /* Now candidates that still are negative are those not moved.
     Change n_times_set to indicate that those are not actually invariant.  */
  for (i = 0; i < nregs; i++)
    if (VARRAY_INT (n_times_set, i) < 0)
      VARRAY_INT (n_times_set, i) = VARRAY_INT (n_times_used, i);
  /* Now that we've moved some things out of the loop, we may be able to
     hoist even more memory references.  There's no need to pass
     reg_single_usage this time, since we're done with it.  */
  load_mems_and_recount_loop_regs_set (scan_start, end, loop_top,

  if (flag_strength_reduce)
    {
      the_movables = movables;
      strength_reduce (scan_start, end, loop_top,
                       insn_count, loop_start, end, unroll_p, bct_p);
    }

  VARRAY_FREE (n_times_set);
  VARRAY_FREE (n_times_used);
  VARRAY_FREE (may_not_optimize);
  VARRAY_FREE (reg_single_usage);
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;

  code = GET_CODE (in_this);

      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
          && ! reg_mentioned_p (in_this, not_in_this))
        *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)

        for (j = 0; j < XVECLEN (in_this, i); j++)
          record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);

        record_excess_regs (XEXP (in_this, i), not_in_this, output);
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

libcall_other_reg (insn, equiv)

  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
          || GET_CODE (p) == CALL_INSN)
        record_excess_regs (PATTERN (p), equiv, &output);
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

reg_in_basic_block_p (insn, reg)

  int regno = REGNO (reg);

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))

          /* Ordinary insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;

          /* Jump insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          /* Otherwise, it's the end of the basic block, so we lose.  */

          /* It's the end of the basic block, so we lose.  */

  /* The "last use" doesn't follow the "first use"??  */
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

libcall_benefit (last)

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
        benefit += 10;          /* Assume at least this many insns in a library
                                   routine.  */
      else if (GET_CODE (insn) == INSN
               && GET_CODE (PATTERN (insn)) != USE
               && GET_CODE (PATTERN (insn)) != CLOBBER)
        benefit++;
    }
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

skip_consec_insns (insn, count)

  for (; count > 0; count--)
    {
      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
         be an insn here.  */
      if (GET_CODE (insn) != NOTE
          && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
        insn = XEXP (temp, 0);

      do insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }
/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

ignore_some_movables (movables)
     struct movable *movables;

  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);

      /* Check for earlier movables inside that range,
         and mark them invalid.  We cannot use LUIDs here because
         insns created by loop.c for prior loops don't have LUIDs.
         Rather than reject all such insns from movables, we just
         explicitly check each insn in the libcall (since invariant
         libcalls aren't that common).  */
      for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
        for (m1 = movables; m1 != m; m1 = m1->next)
          if (m1->insn == insn)
            m1->done = 1;
    }
/* For each movable insn, see if the reg that it loads
   leads when it dies right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

force_movables (movables)
     struct movable *movables;

  register struct movable *m, *m1;
  for (m1 = movables; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
        int regno = m1->regno;
        for (m = m1->next; m; m = m->next)
          /* ??? Could this be a bug?  What if CSE caused the
             register of M1 to be used after this insn?
             Since CSE does not update regno_last_uid,
             this insn M->insn might not be where it dies.
             But very likely this doesn't matter; what matters is
             that M's reg is computed from M1's reg.  */
          if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
              && !m->done)
            break;

        if (m != 0 && m->set_src == m1->set_dest
            /* If m->consec, m->set_src isn't valid.  */
            && m->consec == 0)
          m->forces = m1;

        /* Increase the priority of moving the first insn
           since it permits the second to be moved as well.  */
        m1->lifetime += m->lifetime;
        m1->savings += m->savings;
      }
/* Find invariant expressions that are equal and can be combined into
   one register.  */

combine_movables (movables, nregs)
     struct movable *movables;

  register struct movable *m;
  char *matched_regs = (char *) alloca (nregs);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables; m; m = m->next)
    if (m->match == 0 && VARRAY_INT (n_times_used, m->regno) == 1
        && !m->partial)
      {
        register struct movable *m1;
        int regno = m->regno;

        bzero (matched_regs, nregs);
        matched_regs[regno] = 1;

        /* We want later insns to match the first one.  Don't make the first
           one match any later ones.  So start this loop at m->next.  */
        for (m1 = m->next; m1; m1 = m1->next)
          if (m != m1 && m1->match == 0
              && VARRAY_INT (n_times_used, m1->regno) == 1
              /* A reg used outside the loop mustn't be eliminated.  */
              && !m1->global
              /* A reg used for zero-extending mustn't be eliminated.  */
              && !m1->partial
              && (matched_regs[m1->regno]
                  ||
                  (
                   /* Can combine regs with different modes loaded from the
                      same constant only if the modes are the same or
                      if both are integer modes with M wider or the same
                      width as M1.  The check for integer is redundant, but
                      safe, since the only case of differing destination
                      modes with equal sources is when both sources are
                      VOIDmode, i.e., CONST_INT.  */
                   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
                    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
                        && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
                        && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
                            >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
                   /* See if the source of M1 says it matches M.  */
                   && ((GET_CODE (m1->set_src) == REG
                        && matched_regs[REGNO (m1->set_src)])
                       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
                                                movables))))
              && ((m->dependencies == m1->dependencies)
                  || rtx_equal_p (m->dependencies, m1->dependencies)))
            {
              m->lifetime += m1->lifetime;
              m->savings += m1->savings;
              m1->match = m;
              matched_regs[m1->regno] = 1;
            }
      }
  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
         Don't combine any that are used outside this loop.  */
      for (m = movables; m; m = m->next)
        if (m->partial && ! m->global
            && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
          {
            register struct movable *m1;
            int first = uid_luid[REGNO_FIRST_UID (m->regno)];
            int last = uid_luid[REGNO_LAST_UID (m->regno)];

            /* First one: don't check for overlap, just record it.  */

            /* Make sure they extend to the same mode.
               (Almost always true.)  */
            if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
              continue;

            /* We already have one: check for overlap with those
               already combined together.  */
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1 == m0 || (m1->partial && m1->match == m0))
                if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
                       || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
                  goto overlap;

            /* No overlap: we can combine this with the others.  */
            m0->lifetime += m->lifetime;
            m0->savings += m->savings;
/* Return 1 if regs X and Y will become the same if moved.  */

regs_match_p (x, y, movables)
     struct movable *movables;

  struct movable *mx, *my;

  for (mx = movables; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
          && ((mx->match == my->match && mx->match != 0)
              || mx->match == my
              || mx == my->match));
/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */

rtx_equal_for_loop_p (x, y, movables)
     struct movable *movables;

  register struct movable *m;
  register enum rtx_code code;

  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && VARRAY_INT (n_times_set, REGNO (x)) == -2
      && CONSTANT_P (y))
    {
      for (m = movables; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (x)
            && rtx_equal_p (m->set_src, y))
          return 1;
    }
  else if (GET_CODE (y) == REG && VARRAY_INT (n_times_set, REGNO (y)) == -2
           && CONSTANT_P (x))
    {
      for (m = movables; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (y)
            && rtx_equal_p (m->set_src, x))
          return 1;
    }

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole things.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (XWINT (x, i) != XWINT (y, i))
        return 0;

      if (XINT (x, i) != XINT (y, i))
        return 0;

      /* Two vectors must have the same length.  */
      if (XVECLEN (x, i) != XVECLEN (y, i))
        return 0;

      /* And the corresponding elements must match.  */
      for (j = 0; j < XVECLEN (x, i); j++)
        if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
                                  movables) == 0)
          return 0;

      if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
        return 0;

      if (strcmp (XSTR (x, i), XSTR (y, i)))
        return 0;

      /* These are just backpointers, so they don't matter.  */

      /* It is believed that rtx's at this level will never
         contain anything but integers and other rtx's,
         except for within LABEL_REFs and SYMBOL_REFs.  */
/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use the reference.  */

add_label_notes (x, insns)

  enum rtx_code code = GET_CODE (x);

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      /* This code used to ignore labels that referred to dispatch tables to
         avoid flow generating (slightly) worse code.

         We no longer ignore such label references (see LABEL_REF handling in
         mark_jump_label for additional information).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (reg_mentioned_p (XEXP (x, 0), insn))
          REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
                                                REG_NOTES (insn));
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        add_label_notes (XEXP (x, i), insns);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          add_label_notes (XVECEXP (x, i, j), insns);
    }
/* Scan MOVABLES, and move the insns that deserve to be moved.
   If two matching movables are combined, replace one reg with the
   other throughout.  */

move_movables (movables, threshold, insn_count, loop_start, end, nregs)
     struct movable *movables;

  register struct movable *m;
  /* Map of pseudo-register replacements to handle combining
     when we move several insns that load the same value
     into different pseudo-registers.  */
  rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
  char *already_moved = (char *) alloca (nregs);

  bzero (already_moved, nregs);
  bzero ((char *) reg_map, nregs * sizeof (rtx));

  for (m = movables; m; m = m->next)
      /* Describe this movable insn.  */

      if (loop_dump_stream)
        {
          fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
                   INSN_UID (m->insn), m->regno, m->lifetime);
          if (m->consec > 0)
            fprintf (loop_dump_stream, "consec %d, ", m->consec);
          if (m->cond)
            fprintf (loop_dump_stream, "cond ");
          if (m->force)
            fprintf (loop_dump_stream, "force ");
          if (m->global)
            fprintf (loop_dump_stream, "global ");
          if (m->done)
            fprintf (loop_dump_stream, "done ");
          if (m->move_insn)
            fprintf (loop_dump_stream, "move-insn ");
          if (m->match)
            fprintf (loop_dump_stream, "matches %d ",
                     INSN_UID (m->match->insn));
          if (m->forces)
            fprintf (loop_dump_stream, "forces %d ",
                     INSN_UID (m->forces->insn));
        }
1800 /* Count movables. Value used in heuristics in strength_reduce. */
1803 /* Ignore the insn if it's already done (it matched something else).
1804 Otherwise, see if it is now safe to move. */
1808 || (1 == invariant_p (m
->set_src
)
1809 && (m
->dependencies
== 0
1810 || 1 == invariant_p (m
->dependencies
))
1812 || 1 == consec_sets_invariant_p (m
->set_dest
,
1815 && (! m
->forces
|| m
->forces
->done
))
1819 int savings
= m
->savings
;
1821 /* We have an insn that is safe to move.
1822 Compute its desirability. */
1827 if (loop_dump_stream
)
1828 fprintf (loop_dump_stream
, "savings %d ", savings
);
1830 if (moved_once
[regno
])
1834 if (loop_dump_stream
)
1835 fprintf (loop_dump_stream
, "halved since already moved ");
          /* An insn MUST be moved if we already moved something else
             which is safe only if this one is moved too: that is,
             if already_moved[REGNO] is nonzero.  */

          /* An insn is desirable to move if the new lifetime of the
             register is no more than THRESHOLD times the old lifetime.
             If it's not desirable, it means the loop is so big
             that moving won't speed things up much,
             and it is liable to make register usage worse.  */

          /* It is also desirable to move if it can be moved at no
             extra cost because something else was already moved.  */
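          /* Illustrative note (editorial, not part of the original logic):
             the profitability test below is roughly

                 threshold * savings * m->lifetime >= insn_count

             so with, say, a hypothetical threshold of 6, savings of 2 and a
             register lifetime of 10 insns, the movable is still considered
             profitable for loops of up to 6 * 2 * 10 = 120 insns; in larger
             loops it is moved only if it is forced or already paid for.  */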
          if (already_moved[regno]
              || flag_move_all_movables
              || (threshold * savings * m->lifetime) >= insn_count
              || (m->forces && m->forces->done
                  && VARRAY_INT (n_times_used, m->forces->regno) == 1))
            {
              int count;
              register struct movable *m1;
              rtx first;

              /* Now move the insns that set the reg.  */

              if (m->partial && m->match)
                {
                  rtx newpat, i1;
                  rtx r1, r2;

                  /* Find the end of this chain of matching regs.
                     Thus, we load each reg in the chain from that one reg.
                     And that reg is loaded with 0 directly,
                     since it has ->match == 0.  */
                  for (m1 = m; m1->match; m1 = m1->match);
                  newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
                                          SET_DEST (PATTERN (m1->insn)));
                  i1 = emit_insn_before (newpat, loop_start);

                  /* Mark the moved, invariant reg as being allowed to
                     share a hard reg with the other matching invariant.  */
                  REG_NOTES (i1) = REG_NOTES (m->insn);
                  r1 = SET_DEST (PATTERN (m->insn));
                  r2 = SET_DEST (PATTERN (m1->insn));
                  regs_may_share
                    = gen_rtx_EXPR_LIST (VOIDmode, r1,
                                         gen_rtx_EXPR_LIST (VOIDmode, r2,
                                                            regs_may_share));
                  delete_insn (m->insn);

                  if (new_start == 0)
                    new_start = i1;

                  if (loop_dump_stream)
                    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
                }
              /* If we are to re-generate the item being moved with a
                 new move insn, first delete what we have and then emit
                 the move insn before the loop.  */
              else if (m->move_insn)
                {
                  for (count = m->consec; count >= 0; count--)
                    {
                      /* If this is the first insn of a library call sequence,
                         skip to the end of the sequence.  */
                      if (GET_CODE (p) != NOTE
                          && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
                        p = XEXP (temp, 0);

                      /* If this is the last insn of a libcall sequence, then
                         delete every insn in the sequence except the last.
                         The last insn is handled in the normal manner.  */
                      if (GET_CODE (p) != NOTE
                          && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
                        {
                          temp = XEXP (temp, 0);
                          while (temp != p)
                            temp = delete_insn (temp);
                        }

                      temp = p;
                      p = delete_insn (p);

                      /* simplify_giv_expr expects that it can walk the insns
                         at m->insn forwards and see this old sequence we are
                         tossing here.  delete_insn does preserve the next
                         pointers, but when we skip over a NOTE we must fix
                         it up.  Otherwise that code walks into the non-deleted
                         insn stream.  */
                      while (p && GET_CODE (p) == NOTE)
                        p = NEXT_INSN (temp) = NEXT_INSN (p);
                    }

                  start_sequence ();
                  emit_move_insn (m->set_dest, m->set_src);
                  temp = get_insns ();
                  end_sequence ();

                  add_label_notes (m->set_src, temp);
                  i1 = emit_insns_before (temp, loop_start);
                  if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
                    REG_NOTES (i1)
                      = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
                                           m->set_src, REG_NOTES (i1));

                  if (loop_dump_stream)
                    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1948 /* The more regs we move, the less we like moving them. */
1953 for (count
= m
->consec
; count
>= 0; count
--)
1957 /* If first insn of libcall sequence, skip to end. */
1958 /* Do this at start of loop, since p is guaranteed to
1960 if (GET_CODE (p
) != NOTE
1961 && (temp
= find_reg_note (p
, REG_LIBCALL
, NULL_RTX
)))
1964 /* If last insn of libcall sequence, move all
1965 insns except the last before the loop. The last
1966 insn is handled in the normal manner. */
1967 if (GET_CODE (p
) != NOTE
1968 && (temp
= find_reg_note (p
, REG_RETVAL
, NULL_RTX
)))
1972 rtx fn_address_insn
= 0;
1975 for (temp
= XEXP (temp
, 0); temp
!= p
;
1976 temp
= NEXT_INSN (temp
))
1982 if (GET_CODE (temp
) == NOTE
)
1985 body
= PATTERN (temp
);
1987 /* Find the next insn after TEMP,
1988 not counting USE or NOTE insns. */
1989 for (next
= NEXT_INSN (temp
); next
!= p
;
1990 next
= NEXT_INSN (next
))
1991 if (! (GET_CODE (next
) == INSN
1992 && GET_CODE (PATTERN (next
)) == USE
)
1993 && GET_CODE (next
) != NOTE
)
1996 /* If that is the call, this may be the insn
1997 that loads the function address.
1999 Extract the function address from the insn
2000 that loads it into a register.
2001 If this insn was cse'd, we get incorrect code.
2003 So emit a new move insn that copies the
2004 function address into the register that the
2005 call insn will use. flow.c will delete any
2006 redundant stores that we have created. */
2007 if (GET_CODE (next
) == CALL_INSN
2008 && GET_CODE (body
) == SET
2009 && GET_CODE (SET_DEST (body
)) == REG
2010 && (n
= find_reg_note (temp
, REG_EQUAL
,
2013 fn_reg
= SET_SRC (body
);
2014 if (GET_CODE (fn_reg
) != REG
)
2015 fn_reg
= SET_DEST (body
);
2016 fn_address
= XEXP (n
, 0);
2017 fn_address_insn
= temp
;
2019 /* We have the call insn.
2020 If it uses the register we suspect it might,
2021 load it with the correct address directly. */
2022 if (GET_CODE (temp
) == CALL_INSN
2024 && reg_referenced_p (fn_reg
, body
))
2025 emit_insn_after (gen_move_insn (fn_reg
,
2029 if (GET_CODE (temp
) == CALL_INSN
)
2031 i1
= emit_call_insn_before (body
, loop_start
);
2032 /* Because the USAGE information potentially
2033 contains objects other than hard registers
2034 we need to copy it. */
2035 if (CALL_INSN_FUNCTION_USAGE (temp
))
2036 CALL_INSN_FUNCTION_USAGE (i1
)
2037 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp
));
2040 i1
= emit_insn_before (body
, loop_start
);
2043 if (temp
== fn_address_insn
)
2044 fn_address_insn
= i1
;
2045 REG_NOTES (i1
) = REG_NOTES (temp
);
2049 if (m
->savemode
!= VOIDmode
)
2051 /* P sets REG to zero; but we should clear only
2052 the bits that are not covered by the mode
2054 rtx reg
= m
->set_dest
;
2060 (GET_MODE (reg
), and_optab
, reg
,
2061 GEN_INT ((((HOST_WIDE_INT
) 1
2062 << GET_MODE_BITSIZE (m
->savemode
)))
2064 reg
, 1, OPTAB_LIB_WIDEN
);
2068 emit_move_insn (reg
, tem
);
2069 sequence
= gen_sequence ();
2071 i1
= emit_insn_before (sequence
, loop_start
);
2073 else if (GET_CODE (p
) == CALL_INSN
)
2075 i1
= emit_call_insn_before (PATTERN (p
), loop_start
);
2076 /* Because the USAGE information potentially
2077 contains objects other than hard registers
2078 we need to copy it. */
2079 if (CALL_INSN_FUNCTION_USAGE (p
))
2080 CALL_INSN_FUNCTION_USAGE (i1
)
2081 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p
));
2083 else if (count
== m
->consec
&& m
->move_insn_first
)
2085 /* The SET_SRC might not be invariant, so we must
2086 use the REG_EQUAL note. */
2088 emit_move_insn (m
->set_dest
, m
->set_src
);
2089 temp
= get_insns ();
2092 add_label_notes (m
->set_src
, temp
);
2094 i1
= emit_insns_before (temp
, loop_start
);
2095 if (! find_reg_note (i1
, REG_EQUAL
, NULL_RTX
))
2097 = gen_rtx_EXPR_LIST ((m
->is_equiv
? REG_EQUIV
2099 m
->set_src
, REG_NOTES (i1
));
2102 i1
= emit_insn_before (PATTERN (p
), loop_start
);
2104 if (REG_NOTES (i1
) == 0)
2106 REG_NOTES (i1
) = REG_NOTES (p
);
                      /* If there is a REG_EQUAL note present whose value
                         is not loop invariant, then delete it, since it
                         may cause problems with later optimization passes.
                         It is possible for cse to create such notes
                         as a result of record_jump_cond.  */

                      if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
                          && ! invariant_p (XEXP (temp, 0)))
                        remove_note (i1, temp);
2122 if (loop_dump_stream
)
2123 fprintf (loop_dump_stream
, " moved to %d",
                  /* If library call, now fix the REG_NOTES that contain
                     insn pointers, namely REG_LIBCALL on FIRST
                     and REG_RETVAL on I1.  */
                  if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
                    {
                      XEXP (temp, 0) = first;
                      temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
                      XEXP (temp, 0) = i1;
                    }
              /* simplify_giv_expr expects that it can walk the insns
                 at m->insn forwards and see this old sequence we are
                 tossing here.  delete_insn does preserve the next
                 pointers, but when we skip over a NOTE we must fix
                 it up.  Otherwise that code walks into the non-deleted
                 insn stream.  */
              while (p && GET_CODE (p) == NOTE)
                p = NEXT_INSN (temp) = NEXT_INSN (p);
              /* The more regs we move, the less we like moving them.  */
              threshold -= 3;

              /* Any other movable that loads the same register
                 MUST be moved.  */
              already_moved[regno] = 1;

              /* This reg has been moved out of one loop.  */
              moved_once[regno] = 1;

              /* The reg set here is now invariant.  */
              if (! m->partial)
                VARRAY_INT (n_times_set, regno) = 0;

              m->done = 1;

              /* Change the length-of-life info for the register
                 to say it lives at least the full length of this loop.
                 This will help guide optimizations in outer loops.  */

              if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
                /* This is the old insn before all the moved insns.
                   We can't use the moved insn because it is out of range
                   in uid_luid.  Only the old insns have luids.  */
                REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
              if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
                REGNO_LAST_UID (regno) = INSN_UID (end);
              /* Combine with this moved insn any other matching movables.  */

              if (! m->partial)
                for (m1 = movables; m1; m1 = m1->next)
                  if (m1->match == m)
                    {
                      rtx temp;

                      /* Schedule the reg loaded by M1
                         for replacement so that it shares the reg of M.
                         If the modes differ (only possible in restricted
                         circumstances), make a SUBREG.  */
                      if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
                        reg_map[m1->regno] = m->set_dest;
                      else
                        reg_map[m1->regno]
                          = gen_lowpart_common (GET_MODE (m1->set_dest),
                                                m->set_dest);

                      /* Get rid of the matching insn
                         and prevent further processing of it.  */
                      m1->done = 1;

                      /* If library call, delete all insns except the last,
                         which is deleted below.  */
                      if ((temp = find_reg_note (m1->insn, REG_RETVAL,
                                                 NULL_RTX)))
                        for (temp = XEXP (temp, 0); temp != m1->insn;
                             temp = NEXT_INSN (temp))
                          delete_insn (temp);

                      delete_insn (m1->insn);

                      /* Any other movable that loads the same register
                         MUST be moved.  */
                      already_moved[m1->regno] = 1;

                      /* The reg merged here is now invariant,
                         if the reg it matches is invariant.  */
                      if (! m->partial)
                        VARRAY_INT (n_times_set, m1->regno) = 0;
                    }
            }
          else if (loop_dump_stream)
            fprintf (loop_dump_stream, "not desirable");
        }
      else if (loop_dump_stream && !m->match)
        fprintf (loop_dump_stream, "not safe");

      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\n");
    }
  if (new_start == 0)
    new_start = loop_start;

  /* Go through all the instructions in the loop, making
     all the register substitutions scheduled in REG_MAP.  */
  for (p = new_start; p != end; p = NEXT_INSN (p))
    if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
        || GET_CODE (p) == CALL_INSN)
      {
        replace_regs (PATTERN (p), reg_map, nregs, 0);
        replace_regs (REG_NOTES (p), reg_map, nregs, 0);
        INSN_CODE (p) = -1;
      }
}
/* Scan X and replace the address of any MEM in it with ADDR.
   REG is the address that MEM should have before the replacement.  */

static void
replace_call_address (x, reg, addr)
     rtx x, reg, addr;
{
  register enum rtx_code code;
  register int i;
  register char *fmt;

  if (x == 0)
    return;
  code = GET_CODE (x);
  switch (code)
    {
    case SET:
      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 1), reg, addr);
      return;

    case CALL:
      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 0), reg, addr);
      return;

    case MEM:
      /* If this MEM uses a reg other than the one we expected,
         something is wrong.  */
      if (XEXP (x, 0) != reg)
        abort ();
      XEXP (x, 0) = addr;
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        replace_call_address (XEXP (x, i), reg, addr);
      else if (fmt[i] == 'E')
        {
          register int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            replace_call_address (XVECEXP (x, i, j), reg, addr);
        }
    }
}
/* Return the number of memory refs to addresses that vary
   in the rtx X.  */

static int
count_nonfixed_reads (x)
     rtx x;
{
  register enum rtx_code code;
  register int i, j;
  register char *fmt;
  int value;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return ((invariant_p (XEXP (x, 0)) != 1)
            + count_nonfixed_reads (XEXP (x, 0)));

  value = 0;
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        value += count_nonfixed_reads (XEXP (x, i));
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          value += count_nonfixed_reads (XVECEXP (x, i, j));
    }
  return value;
}
/* P is an instruction that sets a register to the result of a ZERO_EXTEND.
   Replace it with an instruction to load just the low bytes
   if the machine supports such an instruction,
   and insert above LOOP_START an instruction to clear the register.  */
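/* Illustrative sketch (editorial, not part of the original sources): for a
   hypothetical loop body containing

       (set (reg:SI 100) (zero_extend:SI (mem:QI ...)))

   the intent is to emit (set (reg:SI 100) (const_int 0)) once before the
   loop and rewrite the in-loop insn along the lines of

       (set (strict_low_part (subreg:QI (reg:SI 100) 0)) (mem:QI ...))

   so only the low byte is reloaded on every iteration.  */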
static void
constant_high_bytes (p, loop_start)
     rtx p, loop_start;
{
  register rtx new;
  register int insn_code_number;

  /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
     to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...).  */

  new = gen_rtx_SET (VOIDmode,
                     gen_rtx_STRICT_LOW_PART (VOIDmode,
                        gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
                                        SET_DEST (PATTERN (p)), 0)),
                     XEXP (SET_SRC (PATTERN (p)), 0));
  insn_code_number = recog (new, p);

  if (insn_code_number)
    {
      /* Clear destination register before the loop.  */
      emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
                                     const0_rtx),
                        loop_start);

      /* Inside the loop, just load the low part.  */
      PATTERN (p) = new;
    }
}
/* Scan a loop setting the variables `unknown_address_altered',
   `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
   and `loop_has_volatile'.  Also, fill in the arrays `loop_mems' and
   `loop_store_mems'.  */

static void
prescan_loop (start, end)
     rtx start, end;
{
  register int level = 1;
  register rtx insn;
  int loop_has_multiple_exit_targets = 0;
  /* The label after END.  Jumping here is just like falling off the
     end of the loop.  We use next_nonnote_insn instead of next_label
     as a hedge against the (pathological) case where some actual insn
     might end up between the two.  */
  rtx exit_target = next_nonnote_insn (end);
2421 if (exit_target
== NULL_RTX
|| GET_CODE (exit_target
) != CODE_LABEL
)
2422 loop_has_multiple_exit_targets
= 1;
2424 unknown_address_altered
= 0;
2426 loop_has_volatile
= 0;
2427 loop_store_mems_idx
= 0;
2434 for (insn
= NEXT_INSN (start
); insn
!= NEXT_INSN (end
);
2435 insn
= NEXT_INSN (insn
))
2437 if (GET_CODE (insn
) == NOTE
)
2439 if (NOTE_LINE_NUMBER (insn
) == NOTE_INSN_LOOP_BEG
)
2442 /* Count number of loops contained in this one. */
2445 else if (NOTE_LINE_NUMBER (insn
) == NOTE_INSN_LOOP_END
)
2454 else if (NOTE_LINE_NUMBER (insn
) == NOTE_INSN_LOOP_CONT
)
2457 loop_continue
= insn
;
2460 else if (GET_CODE (insn
) == CALL_INSN
)
2462 if (! CONST_CALL_P (insn
))
2463 unknown_address_altered
= 1;
2466 else if (GET_CODE (insn
) == INSN
|| GET_CODE (insn
) == JUMP_INSN
)
2468 rtx label1
= NULL_RTX
;
2469 rtx label2
= NULL_RTX
;
2471 if (volatile_refs_p (PATTERN (insn
)))
2472 loop_has_volatile
= 1;
2474 note_stores (PATTERN (insn
), note_addr_stored
);
2476 if (!loop_has_multiple_exit_targets
2477 && GET_CODE (insn
) == JUMP_INSN
2478 && GET_CODE (PATTERN (insn
)) == SET
2479 && SET_DEST (PATTERN (insn
)) == pc_rtx
)
2481 if (GET_CODE (SET_SRC (PATTERN (insn
))) == IF_THEN_ELSE
)
2483 label1
= XEXP (SET_SRC (PATTERN (insn
)), 1);
2484 label2
= XEXP (SET_SRC (PATTERN (insn
)), 2);
2488 label1
= SET_SRC (PATTERN (insn
));
2492 if (label1
&& label1
!= pc_rtx
)
2494 if (GET_CODE (label1
) != LABEL_REF
)
2496 /* Something tricky. */
2497 loop_has_multiple_exit_targets
= 1;
2500 else if (XEXP (label1
, 0) != exit_target
2501 && LABEL_OUTSIDE_LOOP_P (label1
))
2503 /* A jump outside the current loop. */
2504 loop_has_multiple_exit_targets
= 1;
2514 else if (GET_CODE (insn
) == RETURN
)
2515 loop_has_multiple_exit_targets
= 1;
  /* Now, rescan the loop, setting up the LOOP_MEMS array.  */
  if (/* We can't tell what MEMs are aliased by what.  */
      !unknown_address_altered
      /* An exception thrown by a called function might land us
         anywhere.  */
      && !loop_has_call
      /* We don't want loads for MEMs moved to a location before the
         one at which their stack memory becomes allocated.  (Note
         that this is not a problem for malloc, etc., since those
         require actual function calls.)  */
      && !current_function_calls_alloca
      /* There are ways to leave the loop other than falling off the
         end.  */
      && !loop_has_multiple_exit_targets)
    for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
         insn = NEXT_INSN (insn))
      for_each_rtx (&insn, insert_loop_mem, 0);
}
/* Scan the function looking for loops.  Record the start and end of each loop.
   Also mark as invalid loops any loops that contain a setjmp or are branched
   to from outside the loop.  */

static void
find_and_verify_loops (f)
     rtx f;
{
  rtx insn, label;
  int current_loop = -1;
  int next_loop = -1;
  int loop;

  /* If there are jumps to undefined labels,
     treat them as jumps out of any/all loops.
     This also avoids writing past end of tables when there are no loops.  */
  uid_loop_num[0] = -1;

  /* Find boundaries of loops, mark which loops are contained within
     loops, and invalidate loops that have setjmp.  */
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE)
        switch (NOTE_LINE_NUMBER (insn))
          {
          case NOTE_INSN_LOOP_BEG:
            loop_number_loop_starts[++next_loop] = insn;
            loop_number_loop_ends[next_loop] = 0;
            loop_outer_loop[next_loop] = current_loop;
            loop_invalid[next_loop] = 0;
            loop_number_exit_labels[next_loop] = 0;
            loop_number_exit_count[next_loop] = 0;
            current_loop = next_loop;
            break;

          case NOTE_INSN_SETJMP:
            /* In this case, we must invalidate our current loop and any
               enclosing loop.  */
            for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
              {
                loop_invalid[loop] = 1;
                if (loop_dump_stream)
                  fprintf (loop_dump_stream,
                           "\nLoop at %d ignored due to setjmp.\n",
                           INSN_UID (loop_number_loop_starts[loop]));
              }
            break;

          case NOTE_INSN_LOOP_END:
            if (current_loop == -1)
              abort ();

            loop_number_loop_ends[current_loop] = insn;
            current_loop = loop_outer_loop[current_loop];
            break;

          default:
            break;
          }

      /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
         enclosing loop, but this doesn't matter.  */
      uid_loop_num[INSN_UID (insn)] = current_loop;
    }
  /* Any loop containing a label used in an initializer must be invalidated,
     because it can be jumped into from anywhere.  */

  for (label = forced_labels; label; label = XEXP (label, 1))
    {
      int loop_num;

      for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
           loop_num != -1;
           loop_num = loop_outer_loop[loop_num])
        loop_invalid[loop_num] = 1;
    }

  /* Any loop containing a label used for an exception handler must be
     invalidated, because it can be jumped into from anywhere.  */

  for (label = exception_handler_labels; label; label = XEXP (label, 1))
    {
      int loop_num;

      for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
           loop_num != -1;
           loop_num = loop_outer_loop[loop_num])
        loop_invalid[loop_num] = 1;
    }
  /* Now scan all insn's in the function.  If any JUMP_INSN branches into a
     loop that it is not contained within, that loop is marked invalid.
     If any INSN or CALL_INSN uses a label's address, then the loop containing
     that label is marked invalid, because it could be jumped into from
     anywhere.

     Also look for blocks of code ending in an unconditional branch that
     exits the loop.  If such a block is surrounded by a conditional
     branch around the block, move the block elsewhere (see below) and
     invert the jump to point to the code block.  This may eliminate a
     label in our loop and will simplify processing by both us and a
     possible second cse pass.  */
2642 for (insn
= f
; insn
; insn
= NEXT_INSN (insn
))
2643 if (GET_RTX_CLASS (GET_CODE (insn
)) == 'i')
2645 int this_loop_num
= uid_loop_num
[INSN_UID (insn
)];
2647 if (GET_CODE (insn
) == INSN
|| GET_CODE (insn
) == CALL_INSN
)
2649 rtx note
= find_reg_note (insn
, REG_LABEL
, NULL_RTX
);
2654 for (loop_num
= uid_loop_num
[INSN_UID (XEXP (note
, 0))];
2656 loop_num
= loop_outer_loop
[loop_num
])
2657 loop_invalid
[loop_num
] = 1;
2661 if (GET_CODE (insn
) != JUMP_INSN
)
2664 mark_loop_jump (PATTERN (insn
), this_loop_num
);
2666 /* See if this is an unconditional branch outside the loop. */
2667 if (this_loop_num
!= -1
2668 && (GET_CODE (PATTERN (insn
)) == RETURN
2669 || (simplejump_p (insn
)
2670 && (uid_loop_num
[INSN_UID (JUMP_LABEL (insn
))]
2672 && get_max_uid () < max_uid_for_loop
)
2675 rtx our_next
= next_real_insn (insn
);
2677 int outer_loop
= -1;
2679 /* Go backwards until we reach the start of the loop, a label,
2681 for (p
= PREV_INSN (insn
);
2682 GET_CODE (p
) != CODE_LABEL
2683 && ! (GET_CODE (p
) == NOTE
2684 && NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_BEG
)
2685 && GET_CODE (p
) != JUMP_INSN
;
2689 /* Check for the case where we have a jump to an inner nested
2690 loop, and do not perform the optimization in that case. */
2692 if (JUMP_LABEL (insn
))
2694 dest_loop
= uid_loop_num
[INSN_UID (JUMP_LABEL (insn
))];
2695 if (dest_loop
!= -1)
2697 for (outer_loop
= dest_loop
; outer_loop
!= -1;
2698 outer_loop
= loop_outer_loop
[outer_loop
])
2699 if (outer_loop
== this_loop_num
)
2704 /* Make sure that the target of P is within the current loop. */
2706 if (GET_CODE (p
) == JUMP_INSN
&& JUMP_LABEL (p
)
2707 && uid_loop_num
[INSN_UID (JUMP_LABEL (p
))] != this_loop_num
)
2708 outer_loop
= this_loop_num
;
2710 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2711 we have a block of code to try to move.
2713 We look backward and then forward from the target of INSN
2714 to find a BARRIER at the same loop depth as the target.
2715 If we find such a BARRIER, we make a new label for the start
2716 of the block, invert the jump in P and point it to that label,
2717 and move the block of code to the spot we found. */
2719 if (outer_loop
== -1
2720 && GET_CODE (p
) == JUMP_INSN
2721 && JUMP_LABEL (p
) != 0
2722 /* Just ignore jumps to labels that were never emitted.
2723 These always indicate compilation errors. */
2724 && INSN_UID (JUMP_LABEL (p
)) != 0
2726 && ! simplejump_p (p
)
2727 && next_real_insn (JUMP_LABEL (p
)) == our_next
)
2730 = JUMP_LABEL (insn
) ? JUMP_LABEL (insn
) : get_last_insn ();
2731 int target_loop_num
= uid_loop_num
[INSN_UID (target
)];
2734 for (loc
= target
; loc
; loc
= PREV_INSN (loc
))
2735 if (GET_CODE (loc
) == BARRIER
2736 && uid_loop_num
[INSN_UID (loc
)] == target_loop_num
)
2740 for (loc
= target
; loc
; loc
= NEXT_INSN (loc
))
2741 if (GET_CODE (loc
) == BARRIER
2742 && uid_loop_num
[INSN_UID (loc
)] == target_loop_num
)
2747 rtx cond_label
= JUMP_LABEL (p
);
2748 rtx new_label
= get_label_after (p
);
2750 /* Ensure our label doesn't go away. */
2751 LABEL_NUSES (cond_label
)++;
2753 /* Verify that uid_loop_num is large enough and that
2755 if (invert_jump (p
, new_label
))
                      /* If no suitable BARRIER was found, create a suitable
                         one before TARGET.  Since TARGET is a fall through
                         path, we'll need to insert a jump around our block
                         and add a BARRIER before TARGET.

                         This creates an extra unconditional jump outside
                         the loop.  However, the benefits of removing rarely
                         executed instructions from inside the loop usually
                         outweigh the cost of the extra unconditional jump
                         outside the loop.  */
2773 temp
= gen_jump (JUMP_LABEL (insn
));
2774 temp
= emit_jump_insn_before (temp
, target
);
2775 JUMP_LABEL (temp
) = JUMP_LABEL (insn
);
2776 LABEL_NUSES (JUMP_LABEL (insn
))++;
2777 loc
= emit_barrier_before (target
);
2780 /* Include the BARRIER after INSN and copy the
2782 new_label
= squeeze_notes (new_label
, NEXT_INSN (insn
));
2783 reorder_insns (new_label
, NEXT_INSN (insn
), loc
);
2785 /* All those insns are now in TARGET_LOOP_NUM. */
2786 for (q
= new_label
; q
!= NEXT_INSN (NEXT_INSN (insn
));
2788 uid_loop_num
[INSN_UID (q
)] = target_loop_num
;
2790 /* The label jumped to by INSN is no longer a loop exit.
2791 Unless INSN does not have a label (e.g., it is a
2792 RETURN insn), search loop_number_exit_labels to find
2793 its label_ref, and remove it. Also turn off
2794 LABEL_OUTSIDE_LOOP_P bit. */
2795 if (JUMP_LABEL (insn
))
2800 r
= loop_number_exit_labels
[this_loop_num
];
2801 r
; q
= r
, r
= LABEL_NEXTREF (r
))
2802 if (XEXP (r
, 0) == JUMP_LABEL (insn
))
2804 LABEL_OUTSIDE_LOOP_P (r
) = 0;
2806 LABEL_NEXTREF (q
) = LABEL_NEXTREF (r
);
2808 loop_number_exit_labels
[this_loop_num
]
2809 = LABEL_NEXTREF (r
);
2813 for (loop_num
= this_loop_num
;
2814 loop_num
!= -1 && loop_num
!= target_loop_num
;
2815 loop_num
= loop_outer_loop
[loop_num
])
2816 loop_number_exit_count
[loop_num
]--;
2818 /* If we didn't find it, then something is wrong. */
2823 /* P is now a jump outside the loop, so it must be put
2824 in loop_number_exit_labels, and marked as such.
2825 The easiest way to do this is to just call
2826 mark_loop_jump again for P. */
2827 mark_loop_jump (PATTERN (p
), this_loop_num
);
2829 /* If INSN now jumps to the insn after it,
2831 if (JUMP_LABEL (insn
) != 0
2832 && (next_real_insn (JUMP_LABEL (insn
))
2833 == next_real_insn (insn
)))
2837 /* Continue the loop after where the conditional
2838 branch used to jump, since the only branch insn
2839 in the block (if it still remains) is an inter-loop
2840 branch and hence needs no processing. */
2841 insn
= NEXT_INSN (cond_label
);
2843 if (--LABEL_NUSES (cond_label
) == 0)
2844 delete_insn (cond_label
);
2846 /* This loop will be continued with NEXT_INSN (insn). */
2847 insn
= PREV_INSN (insn
);
2854 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2855 loops it is contained in, mark the target loop invalid.
2857 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2860 mark_loop_jump (x
, loop_num
)
2868 switch (GET_CODE (x
))
2881 /* There could be a label reference in here. */
2882 mark_loop_jump (XEXP (x
, 0), loop_num
);
2888 mark_loop_jump (XEXP (x
, 0), loop_num
);
2889 mark_loop_jump (XEXP (x
, 1), loop_num
);
2894 mark_loop_jump (XEXP (x
, 0), loop_num
);
2898 dest_loop
= uid_loop_num
[INSN_UID (XEXP (x
, 0))];
2900 /* Link together all labels that branch outside the loop. This
2901 is used by final_[bg]iv_value and the loop unrolling code. Also
2902 mark this LABEL_REF so we know that this branch should predict
2905 /* A check to make sure the label is not in an inner nested loop,
2906 since this does not count as a loop exit. */
2907 if (dest_loop
!= -1)
2909 for (outer_loop
= dest_loop
; outer_loop
!= -1;
2910 outer_loop
= loop_outer_loop
[outer_loop
])
2911 if (outer_loop
== loop_num
)
2917 if (loop_num
!= -1 && outer_loop
== -1)
2919 LABEL_OUTSIDE_LOOP_P (x
) = 1;
2920 LABEL_NEXTREF (x
) = loop_number_exit_labels
[loop_num
];
2921 loop_number_exit_labels
[loop_num
] = x
;
2923 for (outer_loop
= loop_num
;
2924 outer_loop
!= -1 && outer_loop
!= dest_loop
;
2925 outer_loop
= loop_outer_loop
[outer_loop
])
2926 loop_number_exit_count
[outer_loop
]++;
2929 /* If this is inside a loop, but not in the current loop or one enclosed
2930 by it, it invalidates at least one loop. */
2932 if (dest_loop
== -1)
2935 /* We must invalidate every nested loop containing the target of this
2936 label, except those that also contain the jump insn. */
2938 for (; dest_loop
!= -1; dest_loop
= loop_outer_loop
[dest_loop
])
2940 /* Stop when we reach a loop that also contains the jump insn. */
2941 for (outer_loop
= loop_num
; outer_loop
!= -1;
2942 outer_loop
= loop_outer_loop
[outer_loop
])
2943 if (dest_loop
== outer_loop
)
2946 /* If we get here, we know we need to invalidate a loop. */
2947 if (loop_dump_stream
&& ! loop_invalid
[dest_loop
])
2948 fprintf (loop_dump_stream
,
2949 "\nLoop at %d ignored due to multiple entry points.\n",
2950 INSN_UID (loop_number_loop_starts
[dest_loop
]));
2952 loop_invalid
[dest_loop
] = 1;
2957 /* If this is not setting pc, ignore. */
2958 if (SET_DEST (x
) == pc_rtx
)
2959 mark_loop_jump (SET_SRC (x
), loop_num
);
2963 mark_loop_jump (XEXP (x
, 1), loop_num
);
2964 mark_loop_jump (XEXP (x
, 2), loop_num
);
2969 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
2970 mark_loop_jump (XVECEXP (x
, 0, i
), loop_num
);
2974 for (i
= 0; i
< XVECLEN (x
, 1); i
++)
2975 mark_loop_jump (XVECEXP (x
, 1, i
), loop_num
);
2979 /* Treat anything else (such as a symbol_ref)
2980 as a branch out of this loop, but not into any loop. */
2985 LABEL_OUTSIDE_LOOP_P (x
) = 1;
2986 LABEL_NEXTREF (x
) = loop_number_exit_labels
[loop_num
];
2989 loop_number_exit_labels
[loop_num
] = x
;
2991 for (outer_loop
= loop_num
; outer_loop
!= -1;
2992 outer_loop
= loop_outer_loop
[outer_loop
])
2993 loop_number_exit_count
[outer_loop
]++;
/* Return nonzero if there is a label in the range from
   insn INSN to and including the insn whose luid is END.
   INSN must have an assigned luid (i.e., it must not have
   been previously created by loop.c).  */

static int
labels_in_range_p (insn, end)
     rtx insn;
     int end;
{
  while (insn && INSN_LUID (insn) <= end)
    {
      if (GET_CODE (insn) == CODE_LABEL)
        return 1;
      insn = NEXT_INSN (insn);
    }

  return 0;
}
/* Record that a memory reference X is being set.  */

static void
note_addr_stored (x, y)
     rtx x;
     rtx y ATTRIBUTE_UNUSED;
{
  register int i;

  if (x == 0 || GET_CODE (x) != MEM)
    return;

  /* Count number of memory writes.
     This affects heuristics in strength_reduce.  */
  num_mem_sets++;

  /* BLKmode MEM means all memory is clobbered.  */
  if (GET_MODE (x) == BLKmode)
    unknown_address_altered = 1;

  if (unknown_address_altered)
    return;

  for (i = 0; i < loop_store_mems_idx; i++)
    if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
        && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
      {
        /* We are storing at the same address as previously noted.  Save the
           wider reference.  */
        if (GET_MODE_SIZE (GET_MODE (x))
            > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
          loop_store_mems[i] = x;
        break;
      }

  if (i == NUM_STORES)
    unknown_address_altered = 1;
  else if (i == loop_store_mems_idx)
    loop_store_mems[loop_store_mems_idx++] = x;
}
/* Return nonzero if the rtx X is invariant over the current loop.

   The value is 2 if we refer to something only conditionally invariant.

   If `unknown_address_altered' is nonzero, no memory ref is invariant.
   Otherwise, a memory ref is invariant if it does not conflict with
   anything stored in `loop_store_mems'.  */
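/* Illustrative note (editorial): under these rules a read-only reference
   such as a hypothetical (mem (symbol_ref X)) marked RTX_UNCHANGING_P is
   invariant whenever its address is, while every MEM is treated as variant
   once `unknown_address_altered' is set (for instance after a non-const
   call or after the store table overflows), and a pseudo register is
   invariant only if n_times_set shows it is never set inside the loop.  */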
static int
invariant_p (x)
     register rtx x;
{
  register int i;
  register enum rtx_code code;
  register char *fmt;
  int conditional = 0;

  if (x == 0)
    return 1;
  code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
      /* A LABEL_REF is normally invariant, however, if we are unrolling
         loops, and this label is inside the loop, then it isn't invariant.
         This is because each unrolled copy of the loop body will have
         a copy of this label.  If this was invariant, then an insn loading
         the address of this label into a register might get moved outside
         the loop, and then each loop body would end up using the same label.

         We don't know the loop bounds here though, so just fail for all
         labels.  */
      if (flag_unroll_loops)
        return 0;
      else
        return 1;

    case UNSPEC_VOLATILE:
      return 0;

    case REG:
      /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
         since the reg might be set by initialization within the loop.  */

      if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
           || x == arg_pointer_rtx)
          && ! current_function_has_nonlocal_goto)
        return 1;

      if (loop_has_call
          && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
        return 0;

      if (VARRAY_INT (n_times_set, REGNO (x)) < 0)
        return 2;

      return VARRAY_INT (n_times_set, REGNO (x)) == 0;

    case MEM:
      /* Volatile memory references must be rejected.  Do this before
         checking for read-only items, so that volatile read-only items
         will be rejected also.  */
      if (MEM_VOLATILE_P (x))
        return 0;

      /* Read-only items (such as constants in a constant pool) are
         invariant if their address is.  */
      if (RTX_UNCHANGING_P (x))
        break;

      /* If we filled the table (or had a subroutine call), any location
         in memory could have been clobbered.  */
      if (unknown_address_altered)
        return 0;

      /* See if there is any dependence between a store and this load.  */
      for (i = loop_store_mems_idx - 1; i >= 0; i--)
        if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
          return 0;

      /* It's not invalidated by a store in memory
         but we must still verify the address is invariant.  */
      break;

    case ASM_OPERANDS:
      /* Don't mess with insns declared volatile.  */
      if (MEM_VOLATILE_P (x))
        return 0;
      break;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          int tem = invariant_p (XEXP (x, i));
          if (tem == 0)
            return 0;
          if (tem == 2)
            conditional = 1;
        }
      else if (fmt[i] == 'E')
        {
          register int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            {
              int tem = invariant_p (XVECEXP (x, i, j));
              if (tem == 0)
                return 0;
              if (tem == 2)
                conditional = 1;
            }
        }
    }

  return 1 + conditional;
}
/* Return nonzero if all the insns in the loop that set REG
   are INSN and the immediately following insns,
   and if each of those insns sets REG in an invariant way
   (not counting uses of REG in them).

   The value is 2 if some of these insns are only conditionally invariant.

   We assume that INSN itself is the first set of REG
   and that its source is invariant.  */

static int
consec_sets_invariant_p (reg, n_sets, insn)
     int n_sets;
     rtx reg, insn;
{
  register rtx p = insn;
  register int regno = REGNO (reg);
  rtx temp;
  /* Number of sets we have to insist on finding after INSN.  */
  int count = n_sets - 1;
  int old = VARRAY_INT (n_times_set, regno);
  int value = 0;
  int this;

  /* If N_SETS hit the limit, we can't rely on its value.  */
  if (n_sets == 127)
    return 0;

  VARRAY_INT (n_times_set, regno) = 0;

  while (count > 0)
    {
      register enum rtx_code code;
      rtx set;

      p = NEXT_INSN (p);
      code = GET_CODE (p);

      /* If library call, skip to end of it.  */
      if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
        p = XEXP (temp, 0);

      this = 0;
      if (code == INSN
          && (set = single_set (p))
          && GET_CODE (SET_DEST (set)) == REG
          && REGNO (SET_DEST (set)) == regno)
        {
          this = invariant_p (SET_SRC (set));
          if (this != 0)
            value |= this;
          else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
            {
              /* If this is a libcall, then any invariant REG_EQUAL note is OK.
                 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
                 notes are OK.  */
              this = (CONSTANT_P (XEXP (temp, 0))
                      || (find_reg_note (p, REG_RETVAL, NULL_RTX)
                          && invariant_p (XEXP (temp, 0))));
              if (this != 0)
                value |= this;
            }
        }
      if (this != 0)
        count--;
      else if (code != NOTE)
        {
          VARRAY_INT (n_times_set, regno) = old;
          return 0;
        }
    }

  VARRAY_INT (n_times_set, regno) = old;
  /* If invariant_p ever returned 2, we return 2.  */
  return 1 + (value & 2);
}
/* I don't think this condition is sufficient to allow INSN
   to be moved, so we no longer test it.  */

/* Return 1 if all insns in the basic block of INSN and following INSN
   that set REG are invariant according to TABLE.  */

static int
all_sets_invariant_p (reg, insn, table)
     rtx reg, insn;
     short *table;
{
  register rtx p = insn;
  register int regno = REGNO (reg);

  while (1)
    {
      register enum rtx_code code;
      p = NEXT_INSN (p);
      code = GET_CODE (p);
      if (code == CODE_LABEL || code == JUMP_INSN)
        return 1;
      if (code == INSN && GET_CODE (PATTERN (p)) == SET
          && GET_CODE (SET_DEST (PATTERN (p))) == REG
          && REGNO (SET_DEST (PATTERN (p))) == regno)
        {
          if (!invariant_p (SET_SRC (PATTERN (p)), table))
            return 0;
        }
    }
}
/* Look at all uses (not sets) of registers in X.  For each, if it is
   the single use, set USAGE[REGNO] to INSN; if there was a previous use in
   a different insn, set USAGE[REGNO] to const0_rtx.  */

static void
find_single_use_in_loop (insn, x, usage)
     rtx insn;
     rtx x;
     varray_type usage;
{
  enum rtx_code code = GET_CODE (x);
  char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    VARRAY_RTX (usage, REGNO (x))
      = (VARRAY_RTX (usage, REGNO (x)) != 0
         && VARRAY_RTX (usage, REGNO (x)) != insn)
        ? const0_rtx : insn;

  else if (code == SET)
    {
      /* Don't count SET_DEST if it is a REG; otherwise count things
         in SET_DEST because if a register is partially modified, it won't
         show up as a potential movable so we don't care how USAGE is set
         for it.  */
      if (GET_CODE (SET_DEST (x)) != REG)
        find_single_use_in_loop (insn, SET_DEST (x), usage);
      find_single_use_in_loop (insn, SET_SRC (x), usage);
    }
  else
    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e' && XEXP (x, i) != 0)
          find_single_use_in_loop (insn, XEXP (x, i), usage);
        else if (fmt[i] == 'E')
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
      }
}
/* Increment N_TIMES_SET at the index of each register
   that is modified by an insn between FROM and TO.
   If the value of an element of N_TIMES_SET becomes 127 or more,
   stop incrementing it, to avoid overflow.

   Store in SINGLE_USAGE[I] the single insn in which register I is
   used, if it is only used once.  Otherwise, it is set to 0 (for no
   uses) or const0_rtx for more than one use.  This parameter may be zero,
   in which case this processing is not done.

   Store in *COUNT_PTR the number of actual instructions
   in the loop.  We use this to decide what is worth moving out.  */

/* last_set[n] is nonzero iff reg n has been set in the current basic block.
   In that case, it is the insn that last set reg n.  */
static void
count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
     register rtx from, to;
     varray_type may_not_move;
     varray_type single_usage;
     int *count_ptr;
     int nregs;
{
  register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
  register rtx insn;
  register rtx dest;
  register int count = 0;

  bzero ((char *) last_set, nregs * sizeof (rtx));
  for (insn = from; insn != to; insn = NEXT_INSN (insn))
3376 if (GET_RTX_CLASS (GET_CODE (insn
)) == 'i')
3380 /* If requested, record registers that have exactly one use. */
3383 find_single_use_in_loop (insn
, PATTERN (insn
), single_usage
);
3385 /* Include uses in REG_EQUAL notes. */
3386 if (REG_NOTES (insn
))
3387 find_single_use_in_loop (insn
, REG_NOTES (insn
), single_usage
);
3390 if (GET_CODE (PATTERN (insn
)) == CLOBBER
3391 && GET_CODE (XEXP (PATTERN (insn
), 0)) == REG
)
3392 /* Don't move a reg that has an explicit clobber.
3393 We might do so sometimes, but it's not worth the pain. */
3394 VARRAY_CHAR (may_not_move
, REGNO (XEXP (PATTERN (insn
), 0))) = 1;
3396 if (GET_CODE (PATTERN (insn
)) == SET
3397 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
3399 dest
= SET_DEST (PATTERN (insn
));
3400 while (GET_CODE (dest
) == SUBREG
3401 || GET_CODE (dest
) == ZERO_EXTRACT
3402 || GET_CODE (dest
) == SIGN_EXTRACT
3403 || GET_CODE (dest
) == STRICT_LOW_PART
)
3404 dest
= XEXP (dest
, 0);
3405 if (GET_CODE (dest
) == REG
)
3407 register int regno
= REGNO (dest
);
3408 /* If this is the first setting of this reg
3409 in current basic block, and it was set before,
3410 it must be set in two basic blocks, so it cannot
3411 be moved out of the loop. */
3412 if (VARRAY_INT (n_times_set
, regno
) > 0
3413 && last_set
[regno
] == 0)
3414 VARRAY_CHAR (may_not_move
, regno
) = 1;
3415 /* If this is not first setting in current basic block,
3416 see if reg was used in between previous one and this.
3417 If so, neither one can be moved. */
3418 if (last_set
[regno
] != 0
3419 && reg_used_between_p (dest
, last_set
[regno
], insn
))
3420 VARRAY_CHAR (may_not_move
, regno
) = 1;
3421 if (VARRAY_INT (n_times_set
, regno
) < 127)
3422 ++VARRAY_INT (n_times_set
, regno
);
3423 last_set
[regno
] = insn
;
3426 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
3429 for (i
= XVECLEN (PATTERN (insn
), 0) - 1; i
>= 0; i
--)
3431 register rtx x
= XVECEXP (PATTERN (insn
), 0, i
);
3432 if (GET_CODE (x
) == CLOBBER
&& GET_CODE (XEXP (x
, 0)) == REG
)
3433 /* Don't move a reg that has an explicit clobber.
3434 It's not worth the pain to try to do it correctly. */
3435 VARRAY_CHAR (may_not_move
, REGNO (XEXP (x
, 0))) = 1;
3437 if (GET_CODE (x
) == SET
|| GET_CODE (x
) == CLOBBER
)
3439 dest
= SET_DEST (x
);
3440 while (GET_CODE (dest
) == SUBREG
3441 || GET_CODE (dest
) == ZERO_EXTRACT
3442 || GET_CODE (dest
) == SIGN_EXTRACT
3443 || GET_CODE (dest
) == STRICT_LOW_PART
)
3444 dest
= XEXP (dest
, 0);
3445 if (GET_CODE (dest
) == REG
)
3447 register int regno
= REGNO (dest
);
3448 if (VARRAY_INT (n_times_set
, regno
) > 0
3449 && last_set
[regno
] == 0)
3450 VARRAY_CHAR (may_not_move
, regno
) = 1;
3451 if (last_set
[regno
] != 0
3452 && reg_used_between_p (dest
, last_set
[regno
], insn
))
3453 VARRAY_CHAR (may_not_move
, regno
) = 1;
3454 if (VARRAY_INT (n_times_set
, regno
) < 127)
3455 ++VARRAY_INT (n_times_set
, regno
);
3456 last_set
[regno
] = insn
;
3463 if (GET_CODE (insn
) == CODE_LABEL
|| GET_CODE (insn
) == JUMP_INSN
)
3464 bzero ((char *) last_set
, nregs
* sizeof (rtx
));
/* Given a loop that is bounded by LOOP_START and LOOP_END
   and that is entered at SCAN_START,
   return 1 if the register set in SET contained in insn INSN is used by
   any insn that precedes INSN in cyclic order starting
   from the loop entry point.

   We don't want to use INSN_LUID here because if we restrict INSN to those
   that have a valid INSN_LUID, it means we cannot move an invariant out
   from an inner loop past two loops.  */
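/* Illustrative note (editorial): with a hypothetical loop entered at its
   bottom test, SCAN_START is that test; an insn near the top of the body
   comes after the entry point in cyclic order, so a register it sets is
   "used before" it if the use shows up among the insns scanned from
   SCAN_START up to (but not including) the setter, wrapping from LOOP_END
   back around to LOOP_START.  */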
static int
loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
     rtx set, insn, loop_start, scan_start, loop_end;
{
  rtx reg = SET_DEST (set);
  rtx p;

  /* Scan forward checking for register usage.  If we hit INSN, we
     are done.  Otherwise, if we hit LOOP_END, wrap around to LOOP_START.  */
  for (p = scan_start; p != insn; p = NEXT_INSN (p))
    {
      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
          && reg_overlap_mentioned_p (reg, PATTERN (p)))
        return 1;

      if (p == loop_end)
        p = loop_start;
    }

  return 0;
}
/* A "basic induction variable" or biv is a pseudo reg that is set
   (within this loop) only by incrementing or decrementing it.  */
/* A "general induction variable" or giv is a pseudo reg whose
   value is a linear function of a biv.  */

/* Bivs are recognized by `basic_induction_var';
   Givs by `general_induction_var'.  */
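/* Illustrative example (editorial, not from the original sources): in a
   hypothetical source loop

       for (i = 0; i < n; i++)
         a[i] = 0;

   the register holding `i' is a biv, since its only sets inside the loop
   look like i = i + 1, while a register computed as `base_of_a + i * 4'
   for the store address is a giv, since it is a linear function
   mult * biv + add of the biv with loop-invariant mult and add.  */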
/* Indexed by register number, indicates whether or not register is an
   induction variable, and if so what type.  */

enum iv_mode *reg_iv_type;

/* Indexed by register number, contains pointer to `struct induction'
   if register is an induction variable.  This holds general info for
   all induction variables.  */

struct induction **reg_iv_info;

/* Indexed by register number, contains pointer to `struct iv_class'
   if register is a basic induction variable.  This holds info describing
   the class (a related group) of induction variables that the biv belongs
   to.  */

struct iv_class **reg_biv_class;

/* The head of a list which links together (via the next field)
   every iv class for the current loop.  */

struct iv_class *loop_iv_list;

/* Communication with routines called via `note_stores'.  */

static rtx note_insn;

/* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs.  */

static rtx addr_placeholder;
/* ??? Unfinished optimizations, and possible future optimizations,
   for the strength reduction code.  */

/* ??? The interaction of biv elimination, and recognition of 'constant'
   bivs, may cause problems.  */

/* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
   performance problems.

   Perhaps don't eliminate things that can be combined with an addressing
   mode.  Find all givs that have the same biv, mult_val, and add_val;
   then for each giv, check to see if its only use dies in a following
   memory address.  If so, generate a new memory address and check to see
   if it is valid.  If it is valid, then store the modified memory address,
   otherwise, mark the giv as not done so that it will get its own iv.  */

/* ??? Could try to optimize branches when it is known that a biv is always
   positive.  */

/* ??? When replacing a biv in a compare insn, we should replace with the
   closest giv so that an optimized branch can still be recognized by the
   combiner, e.g. the VAX acb insn.  */

/* ??? Many of the checks involving uid_luid could be simplified if regscan
   was rerun in loop_optimize whenever a register was added or moved.
   Also, some of the optimizations could be a little less conservative.  */
/* Perform strength reduction and induction variable elimination.

   Pseudo registers created during this function will be beyond the last
   valid index in several tables including n_times_set and regno_last_uid.
   This does not cause a problem here, because the added registers cannot be
   givs outside of their loop, and hence will never be reconsidered.
   But scan_loop must check regnos to make sure they are in bounds.

   SCAN_START is the first instruction in the loop, as the loop would
   actually be executed.  END is the NOTE_INSN_LOOP_END.  LOOP_TOP is
   the first instruction in the loop, as it is laid out in the
   instruction stream.  LOOP_START is the NOTE_INSN_LOOP_BEG.  */
static void
strength_reduce (scan_start, end, loop_top, insn_count,
                 loop_start, loop_end, unroll_p, bct_p)
     rtx scan_start;
     rtx end;
     rtx loop_top;
     int insn_count;
     rtx loop_start;
     rtx loop_end;
     int unroll_p, bct_p;
{
  /* This is 1 if current insn is not executed at least once for every loop
     iteration.  */
  int not_every_iteration = 0;
  /* This is 1 if current insn may be executed more than once for every
     loop iteration.  */
  int maybe_multiple = 0;
  /* Temporary list pointers for traversing loop_iv_list.  */
  struct iv_class *bl, **backbl;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  /* ??? could set this to last value of threshold in move_movables */
  int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
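  /* Illustrative numbers (editorial): on a hypothetical target with
     n_non_fixed_regs == 29, threshold is 2 * (3 + 29) = 64 for a loop
     without calls and 1 * (3 + 29) = 32 for a loop with calls, i.e. we
     tolerate a much smaller extension of register lifetimes per saved
     insn when a call already ties up the call-used registers.  */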
3610 /* Map of pseudo-register replacements. */
3614 rtx end_insert_before
;
3617 reg_iv_type
= (enum iv_mode
*) alloca (max_reg_before_loop
3618 * sizeof (enum iv_mode
*));
3619 bzero ((char *) reg_iv_type
, max_reg_before_loop
* sizeof (enum iv_mode
*));
3620 reg_iv_info
= (struct induction
**)
3621 alloca (max_reg_before_loop
* sizeof (struct induction
*));
3622 bzero ((char *) reg_iv_info
, (max_reg_before_loop
3623 * sizeof (struct induction
*)));
3624 reg_biv_class
= (struct iv_class
**)
3625 alloca (max_reg_before_loop
* sizeof (struct iv_class
*));
3626 bzero ((char *) reg_biv_class
, (max_reg_before_loop
3627 * sizeof (struct iv_class
*)));
3630 addr_placeholder
= gen_reg_rtx (Pmode
);
3632 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3633 must be put before this insn, so that they will appear in the right
3634 order (i.e. loop order).
3636 If loop_end is the end of the current function, then emit a
3637 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3639 if (NEXT_INSN (loop_end
) != 0)
3640 end_insert_before
= NEXT_INSN (loop_end
);
3642 end_insert_before
= emit_note_after (NOTE_INSN_DELETED
, loop_end
);
3644 /* Scan through loop to find all possible bivs. */
3646 for (p
= next_insn_in_loop (scan_start
, scan_start
, end
, loop_top
);
3648 p
= next_insn_in_loop (p
, scan_start
, end
, loop_top
))
3650 if (GET_CODE (p
) == INSN
3651 && (set
= single_set (p
))
3652 && GET_CODE (SET_DEST (set
)) == REG
)
3654 dest_reg
= SET_DEST (set
);
3655 if (REGNO (dest_reg
) < max_reg_before_loop
3656 && REGNO (dest_reg
) >= FIRST_PSEUDO_REGISTER
3657 && reg_iv_type
[REGNO (dest_reg
)] != NOT_BASIC_INDUCT
)
3659 if (basic_induction_var (SET_SRC (set
), GET_MODE (SET_SRC (set
)),
3660 dest_reg
, p
, &inc_val
, &mult_val
))
3662 /* It is a possible basic induction variable.
3663 Create and initialize an induction structure for it. */
3666 = (struct induction
*) alloca (sizeof (struct induction
));
3668 record_biv (v
, p
, dest_reg
, inc_val
, mult_val
,
3669 not_every_iteration
, maybe_multiple
);
3670 reg_iv_type
[REGNO (dest_reg
)] = BASIC_INDUCT
;
3672 else if (REGNO (dest_reg
) < max_reg_before_loop
)
3673 reg_iv_type
[REGNO (dest_reg
)] = NOT_BASIC_INDUCT
;
3677 /* Past CODE_LABEL, we get to insns that may be executed multiple
3678 times. The only way we can be sure that they can't is if every
3679 jump insn between here and the end of the loop either
3680 returns, exits the loop, is a forward jump, or is a jump
3681 to the loop start. */
3683 if (GET_CODE (p
) == CODE_LABEL
)
3691 insn
= NEXT_INSN (insn
);
3692 if (insn
== scan_start
)
3700 if (insn
== scan_start
)
3704 if (GET_CODE (insn
) == JUMP_INSN
3705 && GET_CODE (PATTERN (insn
)) != RETURN
3706 && (! condjump_p (insn
)
3707 || (JUMP_LABEL (insn
) != 0
3708 && JUMP_LABEL (insn
) != scan_start
3709 && (INSN_UID (JUMP_LABEL (insn
)) >= max_uid_for_loop
3710 || INSN_UID (insn
) >= max_uid_for_loop
3711 || (INSN_LUID (JUMP_LABEL (insn
))
3712 < INSN_LUID (insn
))))))
3720 /* Past a jump, we get to insns for which we can't count
3721 on whether they will be executed during each iteration. */
3722 /* This code appears twice in strength_reduce. There is also similar
3723 code in scan_loop. */
3724 if (GET_CODE (p
) == JUMP_INSN
3725 /* If we enter the loop in the middle, and scan around to the
3726 beginning, don't set not_every_iteration for that.
3727 This can be any kind of jump, since we want to know if insns
3728 will be executed if the loop is executed. */
3729 && ! (JUMP_LABEL (p
) == loop_top
3730 && ((NEXT_INSN (NEXT_INSN (p
)) == loop_end
&& simplejump_p (p
))
3731 || (NEXT_INSN (p
) == loop_end
&& condjump_p (p
)))))
3735 /* If this is a jump outside the loop, then it also doesn't
3736 matter. Check to see if the target of this branch is on the
3737 loop_number_exits_labels list. */
3739 for (label
= loop_number_exit_labels
[uid_loop_num
[INSN_UID (loop_start
)]];
3741 label
= LABEL_NEXTREF (label
))
3742 if (XEXP (label
, 0) == JUMP_LABEL (p
))
3746 not_every_iteration
= 1;
3749 else if (GET_CODE (p
) == NOTE
)
3751 /* At the virtual top of a converted loop, insns are again known to
3752 be executed each iteration: logically, the loop begins here
3753 even though the exit code has been duplicated. */
3754 if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_VTOP
&& loop_depth
== 0)
3755 not_every_iteration
= 0;
3756 else if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_BEG
)
3758 else if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_END
)
3762 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3763 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3764 or not an insn is known to be executed each iteration of the
3765 loop, whether or not any iterations are known to occur.
3767 Therefore, if we have just passed a label and have no more labels
3768 between here and the test insn of the loop, we know these insns
3769 will be executed each iteration. */
3771 if (not_every_iteration
&& GET_CODE (p
) == CODE_LABEL
3772 && no_labels_between_p (p
, loop_end
))
3773 not_every_iteration
= 0;
3776 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3777 Make a sanity check against n_times_set. */
3778 for (backbl
= &loop_iv_list
, bl
= *backbl
; bl
; bl
= bl
->next
)
3780 if (reg_iv_type
[bl
->regno
] != BASIC_INDUCT
3781 /* Above happens if register modified by subreg, etc. */
3782 /* Make sure it is not recognized as a basic induction var: */
3783 || VARRAY_INT (n_times_set
, bl
->regno
) != bl
->biv_count
3784 /* If never incremented, it is invariant that we decided not to
3785 move. So leave it alone. */
3786 || ! bl
->incremented
)
3788 if (loop_dump_stream
)
3789 fprintf (loop_dump_stream
, "Reg %d: biv discarded, %s\n",
3791 (reg_iv_type
[bl
->regno
] != BASIC_INDUCT
3792 ? "not induction variable"
3793 : (! bl
->incremented
? "never incremented"
3796 reg_iv_type
[bl
->regno
] = NOT_BASIC_INDUCT
;
3803 if (loop_dump_stream
)
3804 fprintf (loop_dump_stream
, "Reg %d: biv verified\n", bl
->regno
);
3808 /* Exit if there are no bivs. */
  /* Can still unroll the loop anyway, but indicate that there is no
     strength reduction info available.  */
3814 unroll_loop (loop_end
, insn_count
, loop_start
, end_insert_before
, 0);
3819 /* Find initial value for each biv by searching backwards from loop_start,
3820 halting at first label. Also record any test condition. */
3823 for (p
= loop_start
; p
&& GET_CODE (p
) != CODE_LABEL
; p
= PREV_INSN (p
))
3827 if (GET_CODE (p
) == CALL_INSN
)
3830 if (GET_CODE (p
) == INSN
|| GET_CODE (p
) == JUMP_INSN
3831 || GET_CODE (p
) == CALL_INSN
)
3832 note_stores (PATTERN (p
), record_initial
);
3834 /* Record any test of a biv that branches around the loop if no store
3835 between it and the start of loop. We only care about tests with
3836 constants and registers and only certain of those. */
3837 if (GET_CODE (p
) == JUMP_INSN
3838 && JUMP_LABEL (p
) != 0
3839 && next_real_insn (JUMP_LABEL (p
)) == next_real_insn (loop_end
)
3840 && (test
= get_condition_for_loop (p
)) != 0
3841 && GET_CODE (XEXP (test
, 0)) == REG
3842 && REGNO (XEXP (test
, 0)) < max_reg_before_loop
3843 && (bl
= reg_biv_class
[REGNO (XEXP (test
, 0))]) != 0
3844 && valid_initial_value_p (XEXP (test
, 1), p
, call_seen
, loop_start
)
3845 && bl
->init_insn
== 0)
3847 /* If an NE test, we have an initial value! */
3848 if (GET_CODE (test
) == NE
)
3851 bl
->init_set
= gen_rtx_SET (VOIDmode
,
3852 XEXP (test
, 0), XEXP (test
, 1));
3855 bl
->initial_test
= test
;
  /* Look at each biv and see if we can say anything better about its
     initial value from any initializing insns set up above.  (This is done
     in two passes to avoid missing SETs in a PARALLEL.)  */
3862 for (bl
= loop_iv_list
; bl
; bl
= bl
->next
)
3867 if (! bl
->init_insn
)
3870 /* IF INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3871 is a constant, use the value of that. */
3872 if (((note
= find_reg_note (bl
->init_insn
, REG_EQUAL
, 0)) != NULL
3873 && CONSTANT_P (XEXP (note
, 0)))
3874 || ((note
= find_reg_note (bl
->init_insn
, REG_EQUIV
, 0)) != NULL
3875 && CONSTANT_P (XEXP (note
, 0))))
3876 src
= XEXP (note
, 0);
3878 src
= SET_SRC (bl
->init_set
);
3880 if (loop_dump_stream
)
3881 fprintf (loop_dump_stream
,
3882 "Biv %d initialized at insn %d: initial value ",
3883 bl
->regno
, INSN_UID (bl
->init_insn
));
3885 if ((GET_MODE (src
) == GET_MODE (regno_reg_rtx
[bl
->regno
])
3886 || GET_MODE (src
) == VOIDmode
)
3887 && valid_initial_value_p (src
, bl
->init_insn
, call_seen
, loop_start
))
3889 bl
->initial_value
= src
;
3891 if (loop_dump_stream
)
3893 if (GET_CODE (src
) == CONST_INT
)
3895 fprintf (loop_dump_stream
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (src
));
3896 fputc ('\n', loop_dump_stream
);
3900 print_rtl (loop_dump_stream
, src
);
3901 fprintf (loop_dump_stream
, "\n");
3907 /* Biv initial value is not simple move,
3908 so let it keep initial value of "itself". */
3910 if (loop_dump_stream
)
3911 fprintf (loop_dump_stream
, "is complex\n");
3915 /* Search the loop for general induction variables. */
3917 /* A register is a giv if: it is only set once, it is a function of a
3918 biv and a constant (or invariant), and it is not a biv. */
3920 not_every_iteration
= 0;
3926 /* At end of a straight-in loop, we are done.
3927 At end of a loop entered at the bottom, scan the top. */
3928 if (p
== scan_start
)
3936 if (p
== scan_start
)
3940 /* Look for a general induction variable in a register. */
3941 if (GET_CODE (p
) == INSN
3942 && (set
= single_set (p
))
3943 && GET_CODE (SET_DEST (set
)) == REG
3944 && ! VARRAY_CHAR (may_not_optimize
, REGNO (SET_DEST (set
))))
3952 dest_reg
= SET_DEST (set
);
3953 if (REGNO (dest_reg
) < FIRST_PSEUDO_REGISTER
)
3956 if (/* SET_SRC is a giv. */
3957 (general_induction_var (SET_SRC (set
), &src_reg
, &add_val
,
3958 &mult_val
, 0, &benefit
)
3959 /* Equivalent expression is a giv. */
3960 || ((regnote
= find_reg_note (p
, REG_EQUAL
, NULL_RTX
))
3961 && general_induction_var (XEXP (regnote
, 0), &src_reg
,
3962 &add_val
, &mult_val
, 0,
3964 /* Don't try to handle any regs made by loop optimization.
3965 We have nothing on them in regno_first_uid, etc. */
3966 && REGNO (dest_reg
) < max_reg_before_loop
3967 /* Don't recognize a BASIC_INDUCT_VAR here. */
3968 && dest_reg
!= src_reg
3969 /* This must be the only place where the register is set. */
3970 && (VARRAY_INT (n_times_set
, REGNO (dest_reg
)) == 1
3971 /* or all sets must be consecutive and make a giv. */
3972 || (benefit
= consec_sets_giv (benefit
, p
,
3974 &add_val
, &mult_val
))))
3978 = (struct induction
*) alloca (sizeof (struct induction
));
3981 /* If this is a library call, increase benefit. */
3982 if (find_reg_note (p
, REG_RETVAL
, NULL_RTX
))
3983 benefit
+= libcall_benefit (p
);
3985 /* Skip the consecutive insns, if there are any. */
3986 for (count
= VARRAY_INT (n_times_set
, REGNO (dest_reg
)) - 1;
3989 /* If first insn of libcall sequence, skip to end.
3990 Do this at start of loop, since INSN is guaranteed to
3992 if (GET_CODE (p
) != NOTE
3993 && (temp
= find_reg_note (p
, REG_LIBCALL
, NULL_RTX
)))
3996 do p
= NEXT_INSN (p
);
3997 while (GET_CODE (p
) == NOTE
);
4000 record_giv (v
, p
, src_reg
, dest_reg
, mult_val
, add_val
, benefit
,
4001 DEST_REG
, not_every_iteration
, NULL_PTR
, loop_start
,
#ifndef DONT_REDUCE_ADDR
      /* Look for givs which are memory addresses.  */
      /* This resulted in worse code on a VAX 8600.  I wonder if it
	 still does.  */
      if (GET_CODE (p) == INSN)
	find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
		       loop_end);
#endif

      /* Update the status of whether giv can derive other givs.  This can
	 change when we pass a label or an insn that updates a biv.  */
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CODE_LABEL)
	update_giv_derive (p);
      /* Past a jump, we get to insns for which we can't count
	 on whether they will be executed during each iteration.  */
      /* This code appears twice in strength_reduce.  There is also similar
	 code in scan_loop.  */
      if (GET_CODE (p) == JUMP_INSN
	  /* If we enter the loop in the middle, and scan around to the
	     beginning, don't set not_every_iteration for that.
	     This can be any kind of jump, since we want to know if insns
	     will be executed if the loop is executed.  */
	  && ! (JUMP_LABEL (p) == loop_top
		&& ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
		    || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
	{
	  rtx label;

	  /* If this is a jump outside the loop, then it also doesn't
	     matter.  Check to see if the target of this branch is on the
	     loop_number_exits_labels list.  */

	  for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
	       label;
	       label = LABEL_NEXTREF (label))
	    if (XEXP (label, 0) == JUMP_LABEL (p))
	      break;

	  if (! label)
	    not_every_iteration = 1;
	}

      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed each iteration: logically, the loop begins here
	     even though the exit code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    not_every_iteration = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}
      /* Unlike in the code motion pass where MAYBE_NEVER indicates that
	 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
	 or not an insn is known to be executed each iteration of the
	 loop, whether or not any iterations are known to occur.

	 Therefore, if we have just passed a label and have no more labels
	 between here and the test insn of the loop, we know these insns
	 will be executed each iteration.  */
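      /* For example, in a hypothetical source loop like

	     for (i = 0; i < n; i++)
	       {
		 if (c[i])
		   x = i * 3;		-- reached only on some iterations
		 y = i * 5;		-- reached on every iteration
	       }

	 the insn computing X follows a conditional jump, so it is scanned
	 with not_every_iteration set, while the insn computing Y comes
	 after the join label with no further labels before the loop's exit
	 test, so it is scanned with not_every_iteration cleared again.  */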
      if (not_every_iteration && GET_CODE (p) == CODE_LABEL
	  && no_labels_between_p (p, loop_end))
	not_every_iteration = 0;
    }
  /* Try to calculate and save the number of loop iterations.  This is
     set to zero if the actual number can not be calculated.  This must
     be called after all giv's have been identified, since otherwise it may
     fail if the iteration variable is a giv.  */

  loop_n_iterations = loop_iterations (loop_start, loop_end);

  /* Now for each giv for which we still don't know whether or not it is
     replaceable, check to see if it is replaceable because its final value
     can be calculated.  This must be done after loop_iterations is called,
     so that final_giv_value will work correctly.  */

  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      struct induction *v;

      for (v = bl->giv; v; v = v->next_iv)
	if (! v->replaceable && ! v->not_replaceable)
	  check_final_value (v, loop_start, loop_end);
    }

  /* Try to prove that the loop counter variable (if any) is always
     nonnegative; if so, record that fact with a REG_NONNEG note
     so that "decrement and branch until zero" insn can be used.  */
  check_dbra_loop (loop_end, insn_count, loop_start);

  /* Record loop-variables relevant for BCT optimization before unrolling
     the loop.  Unrolling may update part of this information, and the
     correct data will be used for generating the BCT.  */
#ifdef HAVE_decrement_and_branch_on_count
  if (HAVE_decrement_and_branch_on_count && bct_p)
    analyze_loop_iterations (loop_start, loop_end);
#endif /* HAVE_decrement_and_branch_on_count */

  /* Create reg_map to hold substitutions for replaceable giv regs.  */
  reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
  bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
  /* Examine each iv class for feasibility of strength reduction/induction
     variable elimination.  */

  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      struct induction *v;
      int benefit;
      int all_reduced;
      rtx final_value = 0;
      /* Test whether it will be possible to eliminate this biv
	 provided all givs are reduced.  This is possible if either
	 the reg is not used outside the loop, or we can compute
	 what its final value will be.

	 For architectures with a decrement_and_branch_until_zero insn,
	 don't do this if we put a REG_NONNEG note on the endtest for
	 this biv.  */

      /* Compare against bl->init_insn rather than loop_start.
	 We aren't concerned with any uses of the biv between
	 init_insn and loop_start since these won't be affected
	 by the value of the biv elsewhere in the function, so
	 long as init_insn doesn't use the biv itself.
	 March 14, 1989 -- self@bayes.arc.nasa.gov */

      if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
	   && bl->init_insn
	   && INSN_UID (bl->init_insn) < max_uid_for_loop
	   && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
#ifdef HAVE_decrement_and_branch_until_zero
	   && ! bl->nonneg
#endif
	   && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
	  || ((final_value = final_biv_value (bl, loop_start, loop_end))
#ifdef HAVE_decrement_and_branch_until_zero
	      && ! bl->nonneg
#endif
	      ))
	bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
					      threshold, insn_count);
      else
	{
	  if (loop_dump_stream)
	    {
	      fprintf (loop_dump_stream,
		       "Cannot eliminate biv %d.\n",
		       bl->regno);
	      fprintf (loop_dump_stream,
		       "First use: insn %d, last use: insn %d.\n",
		       REGNO_FIRST_UID (bl->regno),
		       REGNO_LAST_UID (bl->regno));
	    }
	}
      /* Combine all giv's for this iv_class.  */
      combine_givs (bl);

      /* This will be true at the end, if all givs which depend on this
	 biv have been strength reduced.
	 We can't (currently) eliminate the biv unless this is so.  */
      all_reduced = 1;

      /* Check each giv in this class to see if we will benefit by reducing
	 it.  Skip giv's combined with others.  */
      for (v = bl->giv; v; v = v->next_iv)
	{
	  struct induction *tv;

	  if (v->ignore || v->same)
	    continue;

	  benefit = v->benefit;
	  /* Reduce benefit if not replaceable, since we will insert
	     a move-insn to replace the insn that calculates this giv.
	     Don't do this unless the giv is a user variable, since it
	     will often be marked non-replaceable because of the duplication
	     of the exit code outside the loop.  In such a case, the copies
	     we insert are dead and will be deleted.  So they don't have
	     a cost.  Similar situations exist.  */
	  /* ??? The new final_[bg]iv_value code does a much better job
	     of finding replaceable giv's, and hence this code may no longer
	     be necessary.  */
	  if (! v->replaceable && ! bl->eliminable
	      && REG_USERVAR_P (v->dest_reg))
	    benefit -= copy_cost;

	  /* Decrease the benefit to count the add-insns that we will
	     insert to increment the reduced reg for the giv.  */
	  benefit -= add_cost * bl->biv_count;

	  /* Decide whether to strength-reduce this giv or to leave the code
	     unchanged (recompute it from the biv each time it is used).
	     This decision can be made independently for each giv.  */

	  /* Attempt to guess whether autoincrement will handle some of the
	     new add insns; if so, increase BENEFIT (undo the subtraction of
	     add_cost that was done above).  */
	  if (v->giv_type == DEST_ADDR
	      && GET_CODE (v->mult_val) == CONST_INT)
	    {
#if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
	      if (INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
		benefit += add_cost * bl->biv_count;
#endif
#if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
	      if (-INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
		benefit += add_cost * bl->biv_count;
#endif
	    }
	  /* If an insn is not to be strength reduced, then set its ignore
	     flag, and clear all_reduced.  */

	  /* A giv that depends on a reversed biv must be reduced if it is
	     used after the loop exit; otherwise, it would have the wrong
	     value after the loop exit.  To make it simple, just reduce all
	     such giv's whether or not we know they are used after the loop
	     exit.  */
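	  /* As a rough illustration (with made-up numbers): a giv whose
	     BENEFIT works out to 4 and whose register is live for 6 insns,
	     with a THRESHOLD of 2, scores 6 * 2 * 4 = 48; if the loop
	     contains more than 48 insns the giv is left un-reduced, since
	     the extra register and add insns would likely cost more than
	     deleting its computation saves.  */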
	  if (! flag_reduce_all_givs
	      && v->lifetime * threshold * benefit < insn_count
	      && ! bl->reversed)
	    {
	      if (loop_dump_stream)
		fprintf (loop_dump_stream,
			 "giv of insn %d not worth while, %d vs %d.\n",
			 INSN_UID (v->insn),
			 v->lifetime * threshold * benefit, insn_count);
	      v->ignore = 1;
	      all_reduced = 0;
	    }
	  else
	    {
	      /* Check that we can increment the reduced giv without a
		 multiply insn.  If not, reject it.  */

	      for (tv = bl->biv; tv; tv = tv->next_iv)
		if (tv->mult_val == const1_rtx
		    && ! product_cheap_p (tv->add_val, v->mult_val))
		  {
		    if (loop_dump_stream)
		      fprintf (loop_dump_stream,
			       "giv of insn %d: would need a multiply.\n",
			       INSN_UID (v->insn));
		    v->ignore = 1;
		    all_reduced = 0;
		    break;
		  }
	    }
	}
      /* Reduce each giv that we decided to reduce.  */

      for (v = bl->giv; v; v = v->next_iv)
	{
	  struct induction *tv;
	  if (! v->ignore && v->same == 0)
	    {
	      int auto_inc_opt = 0;

	      v->new_reg = gen_reg_rtx (v->mode);
4284 /* If the target has auto-increment addressing modes, and
4285 this is an address giv, then try to put the increment
4286 immediately after its use, so that flow can create an
4287 auto-increment addressing mode. */
4288 if (v
->giv_type
== DEST_ADDR
&& bl
->biv_count
== 1
4289 && bl
->biv
->always_executed
&& ! bl
->biv
->maybe_multiple
4290 /* We don't handle reversed biv's because bl->biv->insn
4291 does not have a valid INSN_LUID. */
4293 && v
->always_executed
&& ! v
->maybe_multiple
4294 && INSN_UID (v
->insn
) < max_uid_for_loop
)
4296 /* If other giv's have been combined with this one, then
4297 this will work only if all uses of the other giv's occur
4298 before this giv's insn. This is difficult to check.
4300 We simplify this by looking for the common case where
4301 there is one DEST_REG giv, and this giv's insn is the
4302 last use of the dest_reg of that DEST_REG giv. If the
4303 increment occurs after the address giv, then we can
4304 perform the optimization. (Otherwise, the increment
4305 would have to go before other_giv, and we would not be
4306 able to combine it with the address giv to get an
4307 auto-inc address.) */
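	      /* Schematically (a purely illustrative fragment): if the only
		 other giv is a DEST_REG giv R whose last use is in this
		 address giv's insn, and the biv increment comes after that
		 insn, as in

		     R = <expression based on the biv>
		     ... = MEM (addr) ... R ...	-- this address giv's insn
		     biv = biv + 4		-- increment afterwards

		 then placing the new add just after the use still leaves all
		 uses of R before it, and flow can later fold the pair into an
		 auto-increment address.  */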
4308 if (v
->combined_with
)
4310 struct induction
*other_giv
= 0;
4312 for (tv
= bl
->giv
; tv
; tv
= tv
->next_iv
)
4320 if (! tv
&& other_giv
4321 && REGNO (other_giv
->dest_reg
) < max_reg_before_loop
4322 && (REGNO_LAST_UID (REGNO (other_giv
->dest_reg
))
4323 == INSN_UID (v
->insn
))
4324 && INSN_LUID (v
->insn
) < INSN_LUID (bl
->biv
->insn
))
4327 /* Check for case where increment is before the address
4328 giv. Do this test in "loop order". */
4329 else if ((INSN_LUID (v
->insn
) > INSN_LUID (bl
->biv
->insn
)
4330 && (INSN_LUID (v
->insn
) < INSN_LUID (scan_start
)
4331 || (INSN_LUID (bl
->biv
->insn
)
4332 > INSN_LUID (scan_start
))))
4333 || (INSN_LUID (v
->insn
) < INSN_LUID (scan_start
)
4334 && (INSN_LUID (scan_start
)
4335 < INSN_LUID (bl
->biv
->insn
))))
4344 /* We can't put an insn immediately after one setting
4345 cc0, or immediately before one using cc0. */
4346 if ((auto_inc_opt
== 1 && sets_cc0_p (PATTERN (v
->insn
)))
4347 || (auto_inc_opt
== -1
4348 && (prev
= prev_nonnote_insn (v
->insn
)) != 0
4349 && GET_RTX_CLASS (GET_CODE (prev
)) == 'i'
4350 && sets_cc0_p (PATTERN (prev
))))
4356 v
->auto_inc_opt
= 1;
4360 /* For each place where the biv is incremented, add an insn
4361 to increment the new, reduced reg for the giv. */
4362 for (tv
= bl
->biv
; tv
; tv
= tv
->next_iv
)
4367 insert_before
= tv
->insn
;
4368 else if (auto_inc_opt
== 1)
4369 insert_before
= NEXT_INSN (v
->insn
);
4371 insert_before
= v
->insn
;
4373 if (tv
->mult_val
== const1_rtx
)
4374 emit_iv_add_mult (tv
->add_val
, v
->mult_val
,
4375 v
->new_reg
, v
->new_reg
, insert_before
);
4376 else /* tv->mult_val == const0_rtx */
4377 /* A multiply is acceptable here
4378 since this is presumed to be seldom executed. */
4379 emit_iv_add_mult (tv
->add_val
, v
->mult_val
,
4380 v
->add_val
, v
->new_reg
, insert_before
);
4383 /* Add code at loop start to initialize giv's reduced reg. */
4385 emit_iv_add_mult (bl
->initial_value
, v
->mult_val
,
4386 v
->add_val
, v
->new_reg
, loop_start
);
      /* Rescan all givs.  If a giv is the same as a giv not reduced, mark it
	 as not reduced.
4393 For each giv register that can be reduced now: if replaceable,
4394 substitute reduced reg wherever the old giv occurs;
4395 else add new move insn "giv_reg = reduced_reg".
4397 Also check for givs whose first use is their definition and whose
4398 last use is the definition of another giv. If so, it is likely
4399 dead and should not be used to eliminate a biv. */
4400 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
4402 if (v
->same
&& v
->same
->ignore
)
4408 if (v
->giv_type
== DEST_REG
4409 && REGNO_FIRST_UID (REGNO (v
->dest_reg
)) == INSN_UID (v
->insn
))
4411 struct induction
*v1
;
4413 for (v1
= bl
->giv
; v1
; v1
= v1
->next_iv
)
4414 if (REGNO_LAST_UID (REGNO (v
->dest_reg
)) == INSN_UID (v1
->insn
))
4418 /* Update expression if this was combined, in case other giv was
4421 v
->new_reg
= replace_rtx (v
->new_reg
,
4422 v
->same
->dest_reg
, v
->same
->new_reg
);
4424 if (v
->giv_type
== DEST_ADDR
)
4425 /* Store reduced reg as the address in the memref where we found
4427 validate_change (v
->insn
, v
->location
, v
->new_reg
, 0);
4428 else if (v
->replaceable
)
4430 reg_map
[REGNO (v
->dest_reg
)] = v
->new_reg
;
4433 /* I can no longer duplicate the original problem. Perhaps
4434 this is unnecessary now? */
4436 /* Replaceable; it isn't strictly necessary to delete the old
4437 insn and emit a new one, because v->dest_reg is now dead.
4439 However, especially when unrolling loops, the special
4440 handling for (set REG0 REG1) in the second cse pass may
4441 make v->dest_reg live again. To avoid this problem, emit
4442 an insn to set the original giv reg from the reduced giv.
4443 We can not delete the original insn, since it may be part
4444 of a LIBCALL, and the code in flow that eliminates dead
4445 libcalls will fail if it is deleted. */
4446 emit_insn_after (gen_move_insn (v
->dest_reg
, v
->new_reg
),
4452 /* Not replaceable; emit an insn to set the original giv reg from
4453 the reduced giv, same as above. */
4454 emit_insn_after (gen_move_insn (v
->dest_reg
, v
->new_reg
),
4458 /* When a loop is reversed, givs which depend on the reversed
4459 biv, and which are live outside the loop, must be set to their
4460 correct final value. This insn is only needed if the giv is
4461 not replaceable. The correct final value is the same as the
4462 value that the giv starts the reversed loop with. */
4463 if (bl
->reversed
&& ! v
->replaceable
)
4464 emit_iv_add_mult (bl
->initial_value
, v
->mult_val
,
4465 v
->add_val
, v
->dest_reg
, end_insert_before
);
4466 else if (v
->final_value
)
4470 /* If the loop has multiple exits, emit the insn before the
4471 loop to ensure that it will always be executed no matter
4472 how the loop exits. Otherwise, emit the insn after the loop,
4473 since this is slightly more efficient. */
4474 if (loop_number_exit_count
[uid_loop_num
[INSN_UID (loop_start
)]])
4475 insert_before
= loop_start
;
4477 insert_before
= end_insert_before
;
4478 emit_insn_before (gen_move_insn (v
->dest_reg
, v
->final_value
),
4482 /* If the insn to set the final value of the giv was emitted
4483 before the loop, then we must delete the insn inside the loop
4484 that sets it. If this is a LIBCALL, then we must delete
4485 every insn in the libcall. Note, however, that
4486 final_giv_value will only succeed when there are multiple
4487 exits if the giv is dead at each exit, hence it does not
4488 matter that the original insn remains because it is dead
4490 /* Delete the insn inside the loop that sets the giv since
4491 the giv is now set before (or after) the loop. */
4492 delete_insn (v
->insn
);
4496 if (loop_dump_stream
)
4498 fprintf (loop_dump_stream
, "giv at %d reduced to ",
4499 INSN_UID (v
->insn
));
4500 print_rtl (loop_dump_stream
, v
->new_reg
);
4501 fprintf (loop_dump_stream
, "\n");
      /* All the givs based on the biv bl have been reduced if they
	 merit it.  */
4508 /* For each giv not marked as maybe dead that has been combined with a
4509 second giv, clear any "maybe dead" mark on that second giv.
	 v->new_reg will either be or refer to the register of the giv it
	 combined with.
4513 Doing this clearing avoids problems in biv elimination where a
4514 giv's new_reg is a complex value that can't be put in the insn but
4515 the giv combined with (with a reg as new_reg) is marked maybe_dead.
4516 Since the register will be used in either case, we'd prefer it be
4517 used from the simpler giv. */
4519 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
4520 if (! v
->maybe_dead
&& v
->same
)
4521 v
->same
->maybe_dead
= 0;
4523 /* Try to eliminate the biv, if it is a candidate.
4524 This won't work if ! all_reduced,
4525 since the givs we planned to use might not have been reduced.
4527 We have to be careful that we didn't initially think we could eliminate
4528 this biv because of a giv that we now think may be dead and shouldn't
4529 be used as a biv replacement.
4531 Also, there is the possibility that we may have a giv that looks
4532 like it can be used to eliminate a biv, but the resulting insn
4533 isn't valid. This can happen, for example, on the 88k, where a
4534 JUMP_INSN can compare a register only with zero. Attempts to
4535 replace it with a compare with a constant will fail.
4537 Note that in cases where this call fails, we may have replaced some
4538 of the occurrences of the biv with a giv, but no harm was done in
4539 doing so in the rare cases where it can occur. */
4541 if (all_reduced
== 1 && bl
->eliminable
4542 && maybe_eliminate_biv (bl
, loop_start
, end
, 1,
4543 threshold
, insn_count
))
4546 /* ?? If we created a new test to bypass the loop entirely,
4547 or otherwise drop straight in, based on this test, then
4548 we might want to rewrite it also. This way some later
4549 pass has more hope of removing the initialization of this
4552 /* If final_value != 0, then the biv may be used after loop end
4553 and we must emit an insn to set it just in case.
4555 Reversed bivs already have an insn after the loop setting their
4556 value, so we don't need another one. We can't calculate the
4557 proper final value for such a biv here anyways. */
4558 if (final_value
!= 0 && ! bl
->reversed
)
4562 /* If the loop has multiple exits, emit the insn before the
4563 loop to ensure that it will always be executed no matter
4564 how the loop exits. Otherwise, emit the insn after the
4565 loop, since this is slightly more efficient. */
4566 if (loop_number_exit_count
[uid_loop_num
[INSN_UID (loop_start
)]])
4567 insert_before
= loop_start
;
4569 insert_before
= end_insert_before
;
4571 emit_insn_before (gen_move_insn (bl
->biv
->dest_reg
, final_value
),
	  /* Delete all of the instructions inside the loop which set
	     the biv, as they are all dead.  It is safe to delete them,
	     because an insn setting a biv will never be part of a libcall.  */
4579 /* However, deleting them will invalidate the regno_last_uid info,
4580 so keeping them around is more convenient. Final_biv_value
4581 will only succeed when there are multiple exits if the biv
4582 is dead at each exit, hence it does not matter that the original
4583 insn remains, because it is dead anyways. */
4584 for (v
= bl
->biv
; v
; v
= v
->next_iv
)
4585 delete_insn (v
->insn
);
4588 if (loop_dump_stream
)
4589 fprintf (loop_dump_stream
, "Reg %d: biv eliminated\n",
4594 /* Go through all the instructions in the loop, making all the
4595 register substitutions scheduled in REG_MAP. */
4597 for (p
= loop_start
; p
!= end
; p
= NEXT_INSN (p
))
4598 if (GET_CODE (p
) == INSN
|| GET_CODE (p
) == JUMP_INSN
4599 || GET_CODE (p
) == CALL_INSN
)
4601 replace_regs (PATTERN (p
), reg_map
, max_reg_before_loop
, 0);
4602 replace_regs (REG_NOTES (p
), reg_map
, max_reg_before_loop
, 0);
4606 /* Unroll loops from within strength reduction so that we can use the
4607 induction variable information that strength_reduce has already
4611 unroll_loop (loop_end
, insn_count
, loop_start
, end_insert_before
, 1);
4614 /* instrument the loop with bct insn */
4615 #ifdef HAVE_decrement_and_branch_on_count
4616 if (HAVE_decrement_and_branch_on_count
&& bct_p
)
4617 insert_bct (loop_start
, loop_end
);
4621 if (loop_dump_stream
)
4622 fprintf (loop_dump_stream
, "\n");
/* Return 1 if X is a valid source for an initial value (or as value being
   compared against in an initial test).

   X must be either a register or constant and must not be clobbered between
   the current insn and the start of the loop.

   INSN is the insn containing X.  */

static int
valid_initial_value_p (x, insn, call_seen, loop_start)
     rtx x;
     rtx insn;
     int call_seen;
     rtx loop_start;
{
  if (CONSTANT_P (x))
    return 1;

  /* Only consider pseudos we know about initialized in insns whose luids
     we know.  */
  if (GET_CODE (x) != REG
      || REGNO (x) >= max_reg_before_loop)
    return 0;

  /* Don't use call-clobbered registers across a call which clobbers it.  On
     some machines, don't use any hard registers at all.  */
  if (REGNO (x) < FIRST_PSEUDO_REGISTER
      && (SMALL_REGISTER_CLASSES
	  || (call_used_regs[REGNO (x)] && call_seen)))
    return 0;

  /* Don't use registers that have been clobbered before the start of the
     loop.  */
  if (reg_set_between_p (x, insn, loop_start))
    return 0;

  return 1;
}
/* Scan X for memory refs and check each memory address
   as a possible giv.  INSN is the insn whose pattern X comes from.
   NOT_EVERY_ITERATION is 1 if the insn might not be executed during
   every loop iteration.  */

static void
find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
     rtx x;
     rtx insn;
     int not_every_iteration;
     rtx loop_start, loop_end;
{
  register int i, j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);
      /* This code used to disable creating GIVs with mult_val == 1 and
	 add_val == 0.  However, this leads to lost optimizations when
	 it comes time to combine a set of related DEST_ADDR GIVs, since
	 this one would not be seen.  */

      if (general_induction_var (XEXP (x, 0), &src_reg, &add_val,
				 &mult_val, 1, &benefit))
	{
	  /* Found one; record it.  */
	  struct induction *v
	    = (struct induction *) oballoc (sizeof (struct induction));

	  record_giv (v, insn, src_reg, addr_placeholder, mult_val,
		      add_val, benefit, DEST_ADDR, not_every_iteration,
		      &XEXP (x, 0), loop_start, loop_end);

	  v->mem_mode = GET_MODE (x);
	}
  /* Recursively scan the subexpressions for other mem refs.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
		     loop_end);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
		       loop_start, loop_end);
}
/* Fill in the data about one biv update.
   V is the `struct induction' in which we record the biv.  (It is
   allocated by the caller, with alloca.)
   INSN is the insn that sets it.
   DEST_REG is the biv's reg.

   MULT_VAL is const1_rtx if the biv is being incremented here, in which case
   INC_VAL is the increment.  Otherwise, MULT_VAL is const0_rtx and the biv is
   being set to INC_VAL.

   NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
   executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
   can be executed more than once per iteration.  If MAYBE_MULTIPLE
   and NOT_EVERY_ITERATION are both zero, we know that the biv update is
   executed exactly once per iteration.  */
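/* For instance (illustrative only), a biv update of the form

       i = i + 4

   is recorded with MULT_VAL == const1_rtx and INC_VAL == (const_int 4),
   while a plain assignment of a loop invariant such as

       i = n

   is recorded with MULT_VAL == const0_rtx and INC_VAL == the register
   holding N.  */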
4762 record_biv (v
, insn
, dest_reg
, inc_val
, mult_val
,
4763 not_every_iteration
, maybe_multiple
)
4764 struct induction
*v
;
4769 int not_every_iteration
;
4772 struct iv_class
*bl
;
4775 v
->src_reg
= dest_reg
;
4776 v
->dest_reg
= dest_reg
;
4777 v
->mult_val
= mult_val
;
4778 v
->add_val
= inc_val
;
4779 v
->mode
= GET_MODE (dest_reg
);
4780 v
->always_computable
= ! not_every_iteration
;
4781 v
->always_executed
= ! not_every_iteration
;
4782 v
->maybe_multiple
= maybe_multiple
;
4784 /* Add this to the reg's iv_class, creating a class
4785 if this is the first incrementation of the reg. */
4787 bl
= reg_biv_class
[REGNO (dest_reg
)];
4790 /* Create and initialize new iv_class. */
4792 bl
= (struct iv_class
*) oballoc (sizeof (struct iv_class
));
4794 bl
->regno
= REGNO (dest_reg
);
4800 /* Set initial value to the reg itself. */
4801 bl
->initial_value
= dest_reg
;
4802 /* We haven't seen the initializing insn yet */
4805 bl
->initial_test
= 0;
4806 bl
->incremented
= 0;
4810 bl
->total_benefit
= 0;
4812 /* Add this class to loop_iv_list. */
4813 bl
->next
= loop_iv_list
;
4816 /* Put it in the array of biv register classes. */
4817 reg_biv_class
[REGNO (dest_reg
)] = bl
;
4820 /* Update IV_CLASS entry for this biv. */
4821 v
->next_iv
= bl
->biv
;
4824 if (mult_val
== const1_rtx
)
4825 bl
->incremented
= 1;
4827 if (loop_dump_stream
)
4829 fprintf (loop_dump_stream
,
4830 "Insn %d: possible biv, reg %d,",
4831 INSN_UID (insn
), REGNO (dest_reg
));
4832 if (GET_CODE (inc_val
) == CONST_INT
)
4834 fprintf (loop_dump_stream
, " const =");
4835 fprintf (loop_dump_stream
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (inc_val
));
4836 fputc ('\n', loop_dump_stream
);
4840 fprintf (loop_dump_stream
, " const = ");
4841 print_rtl (loop_dump_stream
, inc_val
);
4842 fprintf (loop_dump_stream
, "\n");
/* Fill in the data about one giv.
   V is the `struct induction' in which we record the giv.  (It is
   allocated by the caller, with alloca.)
   INSN is the insn that sets it.
   BENEFIT estimates the savings from deleting this insn.
   TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
   into a register or is used as a memory address.

   SRC_REG is the biv reg which the giv is computed from.
   DEST_REG is the giv's reg (if the giv is stored in a reg).
   MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
   LOCATION points to the place where this giv's value appears in INSN.  */
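/* For example (hypothetical loop, 4-byte array elements assumed): if I is
   a biv, an insn computing

       q = &a[i]	i.e.  q = i * 4 + &a[0]

   is recorded as a DEST_REG giv with SRC_REG the register holding I,
   MULT_VAL (const_int 4) and ADD_VAL the address of A; the same address
   appearing directly inside a memory reference would instead be recorded
   as a DEST_ADDR giv, with LOCATION pointing at the address within the
   MEM.  */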
4861 record_giv (v
, insn
, src_reg
, dest_reg
, mult_val
, add_val
, benefit
,
4862 type
, not_every_iteration
, location
, loop_start
, loop_end
)
4863 struct induction
*v
;
4867 rtx mult_val
, add_val
;
4870 int not_every_iteration
;
4872 rtx loop_start
, loop_end
;
4874 struct induction
*b
;
4875 struct iv_class
*bl
;
4876 rtx set
= single_set (insn
);
4879 v
->src_reg
= src_reg
;
4881 v
->dest_reg
= dest_reg
;
4882 v
->mult_val
= mult_val
;
4883 v
->add_val
= add_val
;
4884 v
->benefit
= benefit
;
4885 v
->location
= location
;
4887 v
->combined_with
= 0;
4888 v
->maybe_multiple
= 0;
4890 v
->derive_adjustment
= 0;
4896 v
->auto_inc_opt
= 0;
  /* The v->always_computable field is used in update_giv_derive, to
     determine whether a giv can be used to derive another giv.  For a
     DEST_REG giv, INSN computes a new value for the giv, so its value
     isn't computable if INSN isn't executed every iteration.
     However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
     it does not compute a new value.  Hence the value is always computable
     regardless of whether INSN is executed each iteration.  */
4908 if (type
== DEST_ADDR
)
4909 v
->always_computable
= 1;
4911 v
->always_computable
= ! not_every_iteration
;
4913 v
->always_executed
= ! not_every_iteration
;
4915 if (type
== DEST_ADDR
)
4917 v
->mode
= GET_MODE (*location
);
4921 else /* type == DEST_REG */
4923 v
->mode
= GET_MODE (SET_DEST (set
));
4925 v
->lifetime
= (uid_luid
[REGNO_LAST_UID (REGNO (dest_reg
))]
4926 - uid_luid
[REGNO_FIRST_UID (REGNO (dest_reg
))]);
4928 v
->times_used
= VARRAY_INT (n_times_used
, REGNO (dest_reg
));
4930 /* If the lifetime is zero, it means that this register is
4931 really a dead store. So mark this as a giv that can be
4932 ignored. This will not prevent the biv from being eliminated. */
4933 if (v
->lifetime
== 0)
4936 reg_iv_type
[REGNO (dest_reg
)] = GENERAL_INDUCT
;
4937 reg_iv_info
[REGNO (dest_reg
)] = v
;
4940 /* Add the giv to the class of givs computed from one biv. */
4942 bl
= reg_biv_class
[REGNO (src_reg
)];
4945 v
->next_iv
= bl
->giv
;
4947 /* Don't count DEST_ADDR. This is supposed to count the number of
4948 insns that calculate givs. */
4949 if (type
== DEST_REG
)
4951 bl
->total_benefit
+= benefit
;
4954 /* Fatal error, biv missing for this giv? */
4957 if (type
== DEST_ADDR
)
  /* The giv can be replaced outright by the reduced register only if all
     of the following conditions are true:
     - the insn that sets the giv is always executed on any iteration
       on which the giv is used at all
       (there are two ways to deduce this:
	either the insn is executed on every iteration,
	or all uses follow that insn in the same basic block),
     - the giv is not used outside the loop
     - no assignments to the biv occur during the giv's lifetime.  */
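  /* As a hypothetical illustration: if the giv is computed and used only
     inside the loop, e.g.

	 q = i * 4 + base	-- computed every iteration
	 ... = MEM (q)		-- only use, in the same basic block
	 i = i + 1		-- the only biv update, after that use

     then every use of Q can simply be rewritten in terms of the reduced
     register.  If instead Q were also read after the loop, or the biv were
     updated between the two insns above, the giv would keep its own
     register and be set from the reduced register with a move.  */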
4971 if (REGNO_FIRST_UID (REGNO (dest_reg
)) == INSN_UID (insn
)
4972 /* Previous line always fails if INSN was moved by loop opt. */
4973 && uid_luid
[REGNO_LAST_UID (REGNO (dest_reg
))] < INSN_LUID (loop_end
)
4974 && (! not_every_iteration
4975 || last_use_this_basic_block (dest_reg
, insn
)))
4977 /* Now check that there are no assignments to the biv within the
4978 giv's lifetime. This requires two separate checks. */
4980 /* Check each biv update, and fail if any are between the first
4981 and last use of the giv.
4983 If this loop contains an inner loop that was unrolled, then
4984 the insn modifying the biv may have been emitted by the loop
4985 unrolling code, and hence does not have a valid luid. Just
4986 mark the biv as not replaceable in this case. It is not very
4987 useful as a biv, because it is used in two different loops.
4988 It is very unlikely that we would be able to optimize the giv
4989 using this biv anyways. */
4992 for (b
= bl
->biv
; b
; b
= b
->next_iv
)
4994 if (INSN_UID (b
->insn
) >= max_uid_for_loop
4995 || ((uid_luid
[INSN_UID (b
->insn
)]
4996 >= uid_luid
[REGNO_FIRST_UID (REGNO (dest_reg
))])
4997 && (uid_luid
[INSN_UID (b
->insn
)]
4998 <= uid_luid
[REGNO_LAST_UID (REGNO (dest_reg
))])))
5001 v
->not_replaceable
= 1;
5006 /* If there are any backwards branches that go from after the
5007 biv update to before it, then this giv is not replaceable. */
5009 for (b
= bl
->biv
; b
; b
= b
->next_iv
)
5010 if (back_branch_in_range_p (b
->insn
, loop_start
, loop_end
))
5013 v
->not_replaceable
= 1;
5019 /* May still be replaceable, we don't have enough info here to
5022 v
->not_replaceable
= 0;
5026 /* Record whether the add_val contains a const_int, for later use by
5031 v
->no_const_addval
= 1;
5032 if (tem
== const0_rtx
)
5034 else if (GET_CODE (tem
) == CONST_INT
)
5035 v
->no_const_addval
= 0;
5036 else if (GET_CODE (tem
) == PLUS
)
5040 if (GET_CODE (XEXP (tem
, 0)) == PLUS
)
5041 tem
= XEXP (tem
, 0);
5042 else if (GET_CODE (XEXP (tem
, 1)) == PLUS
)
5043 tem
= XEXP (tem
, 1);
5047 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
)
5048 v
->no_const_addval
= 0;
5052 if (loop_dump_stream
)
5054 if (type
== DEST_REG
)
5055 fprintf (loop_dump_stream
, "Insn %d: giv reg %d",
5056 INSN_UID (insn
), REGNO (dest_reg
));
5058 fprintf (loop_dump_stream
, "Insn %d: dest address",
5061 fprintf (loop_dump_stream
, " src reg %d benefit %d",
5062 REGNO (src_reg
), v
->benefit
);
5063 fprintf (loop_dump_stream
, " used %d lifetime %d",
5064 v
->times_used
, v
->lifetime
);
5067 fprintf (loop_dump_stream
, " replaceable");
5069 if (v
->no_const_addval
)
5070 fprintf (loop_dump_stream
, " ncav");
5072 if (GET_CODE (mult_val
) == CONST_INT
)
5074 fprintf (loop_dump_stream
, " mult ");
5075 fprintf (loop_dump_stream
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (mult_val
));
5079 fprintf (loop_dump_stream
, " mult ");
5080 print_rtl (loop_dump_stream
, mult_val
);
5083 if (GET_CODE (add_val
) == CONST_INT
)
5085 fprintf (loop_dump_stream
, " add ");
5086 fprintf (loop_dump_stream
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (add_val
));
5090 fprintf (loop_dump_stream
, " add ");
5091 print_rtl (loop_dump_stream
, add_val
);
5095 if (loop_dump_stream
)
5096 fprintf (loop_dump_stream
, "\n");
5101 /* All this does is determine whether a giv can be made replaceable because
5102 its final value can be calculated. This code can not be part of record_giv
5103 above, because final_giv_value requires that the number of loop iterations
5104 be known, and that can not be accurately calculated until after all givs
5105 have been identified. */
5108 check_final_value (v
, loop_start
, loop_end
)
5109 struct induction
*v
;
5110 rtx loop_start
, loop_end
;
5112 struct iv_class
*bl
;
5113 rtx final_value
= 0;
5115 bl
= reg_biv_class
[REGNO (v
->src_reg
)];
5117 /* DEST_ADDR givs will never reach here, because they are always marked
5118 replaceable above in record_giv. */
5120 /* The giv can be replaced outright by the reduced register only if all
5121 of the following conditions are true:
5122 - the insn that sets the giv is always executed on any iteration
5123 on which the giv is used at all
5124 (there are two ways to deduce this:
5125 either the insn is executed on every iteration,
5126 or all uses follow that insn in the same basic block),
5127 - its final value can be calculated (this condition is different
5128 than the one above in record_giv)
5129 - no assignments to the biv occur during the giv's lifetime. */
5132 /* This is only called now when replaceable is known to be false. */
5133 /* Clear replaceable, so that it won't confuse final_giv_value. */
5137 if ((final_value
= final_giv_value (v
, loop_start
, loop_end
))
5138 && (v
->always_computable
|| last_use_this_basic_block (v
->dest_reg
, v
->insn
)))
5140 int biv_increment_seen
= 0;
5146 /* When trying to determine whether or not a biv increment occurs
5147 during the lifetime of the giv, we can ignore uses of the variable
5148 outside the loop because final_value is true. Hence we can not
5149 use regno_last_uid and regno_first_uid as above in record_giv. */
5151 /* Search the loop to determine whether any assignments to the
5152 biv occur during the giv's lifetime. Start with the insn
5153 that sets the giv, and search around the loop until we come
5154 back to that insn again.
5156 Also fail if there is a jump within the giv's lifetime that jumps
5157 to somewhere outside the lifetime but still within the loop. This
5158 catches spaghetti code where the execution order is not linear, and
5159 hence the above test fails. Here we assume that the giv lifetime
5160 does not extend from one iteration of the loop to the next, so as
5161 to make the test easier. Since the lifetime isn't known yet,
5162 this requires two loops. See also record_giv above. */
5164 last_giv_use
= v
->insn
;
5170 p
= NEXT_INSN (loop_start
);
5174 if (GET_CODE (p
) == INSN
|| GET_CODE (p
) == JUMP_INSN
5175 || GET_CODE (p
) == CALL_INSN
)
5177 if (biv_increment_seen
)
5179 if (reg_mentioned_p (v
->dest_reg
, PATTERN (p
)))
5182 v
->not_replaceable
= 1;
5186 else if (reg_set_p (v
->src_reg
, PATTERN (p
)))
5187 biv_increment_seen
= 1;
5188 else if (reg_mentioned_p (v
->dest_reg
, PATTERN (p
)))
	  /* Now that the lifetime of the giv is known, check for branches
	     from within the lifetime to outside the lifetime if it is still
	     replaceable.  */
5204 p
= NEXT_INSN (loop_start
);
5205 if (p
== last_giv_use
)
5208 if (GET_CODE (p
) == JUMP_INSN
&& JUMP_LABEL (p
)
5209 && LABEL_NAME (JUMP_LABEL (p
))
5210 && ((INSN_UID (JUMP_LABEL (p
)) >= max_uid_for_loop
)
5211 || (INSN_UID (v
->insn
) >= max_uid_for_loop
)
5212 || (INSN_UID (last_giv_use
) >= max_uid_for_loop
)
5213 || (INSN_LUID (JUMP_LABEL (p
)) < INSN_LUID (v
->insn
)
5214 && INSN_LUID (JUMP_LABEL (p
)) > INSN_LUID (loop_start
))
5215 || (INSN_LUID (JUMP_LABEL (p
)) > INSN_LUID (last_giv_use
)
5216 && INSN_LUID (JUMP_LABEL (p
)) < INSN_LUID (loop_end
))))
5219 v
->not_replaceable
= 1;
5221 if (loop_dump_stream
)
5222 fprintf (loop_dump_stream
,
5223 "Found branch outside giv lifetime.\n");
5230 /* If it is replaceable, then save the final value. */
5232 v
->final_value
= final_value
;
5235 if (loop_dump_stream
&& v
->replaceable
)
5236 fprintf (loop_dump_stream
, "Insn %d: giv reg %d final_value replaceable\n",
5237 INSN_UID (v
->insn
), REGNO (v
->dest_reg
));
5240 /* Update the status of whether a giv can derive other givs.
5242 We need to do something special if there is or may be an update to the biv
5243 between the time the giv is defined and the time it is used to derive
5246 In addition, a giv that is only conditionally set is not allowed to
5247 derive another giv once a label has been passed.
5249 The cases we look at are when a label or an update to a biv is passed. */
5252 update_giv_derive (p
)
5255 struct iv_class
*bl
;
5256 struct induction
*biv
, *giv
;
5260 /* Search all IV classes, then all bivs, and finally all givs.
5262 There are three cases we are concerned with. First we have the situation
5263 of a giv that is only updated conditionally. In that case, it may not
5264 derive any givs after a label is passed.
5266 The second case is when a biv update occurs, or may occur, after the
5267 definition of a giv. For certain biv updates (see below) that are
5268 known to occur between the giv definition and use, we can adjust the
5269 giv definition. For others, or when the biv update is conditional,
5270 we must prevent the giv from deriving any other givs. There are two
5271 sub-cases within this case.
5273 If this is a label, we are concerned with any biv update that is done
5274 conditionally, since it may be done after the giv is defined followed by
5275 a branch here (actually, we need to pass both a jump and a label, but
5276 this extra tracking doesn't seem worth it).
5278 If this is a jump, we are concerned about any biv update that may be
5279 executed multiple times. We are actually only concerned about
5280 backward jumps, but it is probably not worth performing the test
5281 on the jump again here.
5283 If this is a biv update, we must adjust the giv status to show that a
5284 subsequent biv update was performed. If this adjustment cannot be done,
5285 the giv cannot derive further givs. */
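  /* As a hypothetical numeric illustration of the adjustment: if giv G was
     recorded as  G = 4 * B + 10  and the biv update  B = B + 1  occurs
     between G's definition and a later use of G to derive another giv,
     then at that later point  G = 4 * B - 4 + 10,  so a derive_adjustment
     of  4 * 1 = 4  must be subtracted when expressing G in terms of the
     updated biv.  */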
5287 for (bl
= loop_iv_list
; bl
; bl
= bl
->next
)
5288 for (biv
= bl
->biv
; biv
; biv
= biv
->next_iv
)
5289 if (GET_CODE (p
) == CODE_LABEL
|| GET_CODE (p
) == JUMP_INSN
5292 for (giv
= bl
->giv
; giv
; giv
= giv
->next_iv
)
5294 /* If cant_derive is already true, there is no point in
5295 checking all of these conditions again. */
5296 if (giv
->cant_derive
)
5299 /* If this giv is conditionally set and we have passed a label,
5300 it cannot derive anything. */
5301 if (GET_CODE (p
) == CODE_LABEL
&& ! giv
->always_computable
)
5302 giv
->cant_derive
= 1;
5304 /* Skip givs that have mult_val == 0, since
5305 they are really invariants. Also skip those that are
5306 replaceable, since we know their lifetime doesn't contain
5308 else if (giv
->mult_val
== const0_rtx
|| giv
->replaceable
)
5311 /* The only way we can allow this giv to derive another
5312 is if this is a biv increment and we can form the product
5313 of biv->add_val and giv->mult_val. In this case, we will
5314 be able to compute a compensation. */
5315 else if (biv
->insn
== p
)
5319 if (biv
->mult_val
== const1_rtx
)
5320 tem
= simplify_giv_expr (gen_rtx_MULT (giv
->mode
,
5325 if (tem
&& giv
->derive_adjustment
)
5326 tem
= simplify_giv_expr (gen_rtx_PLUS (giv
->mode
, tem
,
5327 giv
->derive_adjustment
),
5330 giv
->derive_adjustment
= tem
;
5332 giv
->cant_derive
= 1;
5334 else if ((GET_CODE (p
) == CODE_LABEL
&& ! biv
->always_computable
)
5335 || (GET_CODE (p
) == JUMP_INSN
&& biv
->maybe_multiple
))
5336 giv
->cant_derive
= 1;
5341 /* Check whether an insn is an increment legitimate for a basic induction var.
5342 X is the source of insn P, or a part of it.
5343 MODE is the mode in which X should be interpreted.
5345 DEST_REG is the putative biv, also the destination of the insn.
5346 We accept patterns of these forms:
5347 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5348 REG = INVARIANT + REG
5350 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5351 and store the additive term into *INC_VAL.
5353 If X is an assignment of an invariant into DEST_REG, we set
5354 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5356 We also want to detect a BIV when it corresponds to a variable
5357 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5358 of the variable may be a PLUS that adds a SUBREG of that variable to
5359 an invariant and then sign- or zero-extends the result of the PLUS
   Most GIVs in such cases will be in the promoted mode, since that is
   probably the natural computation mode (and almost certainly the mode
5364 used for addresses) on the machine. So we view the pseudo-reg containing
5365 the variable as the BIV, as if it were simply incremented.
5367 Note that treating the entire pseudo as a BIV will result in making
5368 simple increments to any GIVs based on it. However, if the variable
5369 overflows in its declared mode but not its promoted mode, the result will
5370 be incorrect. This is acceptable if the variable is signed, since
5371 overflows in such cases are undefined, but not if it is unsigned, since
5372 those overflows are defined. So we only check for SIGN_EXTEND and
5375 If we cannot find a biv, we return 0. */
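/* For example (illustrative only), with DEST_REG the register holding I,
   both of

       i = i + 4	SET_SRC is (plus (reg i) (const_int 4))
       i = n		SET_SRC is a loop-invariant register

   are accepted: the first yields *MULT_VAL == const1_rtx and
   *INC_VAL == (const_int 4), the second yields *MULT_VAL == const0_rtx
   and *INC_VAL == the invariant.  */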
5378 basic_induction_var (x
, mode
, dest_reg
, p
, inc_val
, mult_val
)
5380 enum machine_mode mode
;
5386 register enum rtx_code code
;
5390 code
= GET_CODE (x
);
5394 if (rtx_equal_p (XEXP (x
, 0), dest_reg
)
5395 || (GET_CODE (XEXP (x
, 0)) == SUBREG
5396 && SUBREG_PROMOTED_VAR_P (XEXP (x
, 0))
5397 && SUBREG_REG (XEXP (x
, 0)) == dest_reg
))
5399 else if (rtx_equal_p (XEXP (x
, 1), dest_reg
)
5400 || (GET_CODE (XEXP (x
, 1)) == SUBREG
5401 && SUBREG_PROMOTED_VAR_P (XEXP (x
, 1))
5402 && SUBREG_REG (XEXP (x
, 1)) == dest_reg
))
5407 if (invariant_p (arg
) != 1)
5410 *inc_val
= convert_modes (GET_MODE (dest_reg
), GET_MODE (x
), arg
, 0);
5411 *mult_val
= const1_rtx
;
5415 /* If this is a SUBREG for a promoted variable, check the inner
5417 if (SUBREG_PROMOTED_VAR_P (x
))
5418 return basic_induction_var (SUBREG_REG (x
), GET_MODE (SUBREG_REG (x
)),
5419 dest_reg
, p
, inc_val
, mult_val
);
5423 /* If this register is assigned in a previous insn, look at its
5424 source, but don't go outside the loop or past a label. */
5430 insn
= PREV_INSN (insn
);
5431 } while (insn
&& GET_CODE (insn
) == NOTE
5432 && NOTE_LINE_NUMBER (insn
) != NOTE_INSN_LOOP_BEG
);
5436 set
= single_set (insn
);
5440 if ((SET_DEST (set
) == x
5441 || (GET_CODE (SET_DEST (set
)) == SUBREG
5442 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set
)))
5444 && SUBREG_REG (SET_DEST (set
)) == x
))
5445 && basic_induction_var (SET_SRC (set
),
5446 (GET_MODE (SET_SRC (set
)) == VOIDmode
5448 : GET_MODE (SET_SRC (set
))),
5453 /* ... fall through ... */
5455 /* Can accept constant setting of biv only when inside inner most loop.
5456 Otherwise, a biv of an inner loop may be incorrectly recognized
5457 as a biv of the outer loop,
5458 causing code to be moved INTO the inner loop. */
5460 if (invariant_p (x
) != 1)
5465 /* convert_modes aborts if we try to convert to or from CCmode, so just
5466 exclude that case. It is very unlikely that a condition code value
5467 would be a useful iterator anyways. */
5468 if (loops_enclosed
== 1
5469 && GET_MODE_CLASS (mode
) != MODE_CC
5470 && GET_MODE_CLASS (GET_MODE (dest_reg
)) != MODE_CC
)
5472 /* Possible bug here? Perhaps we don't know the mode of X. */
5473 *inc_val
= convert_modes (GET_MODE (dest_reg
), mode
, x
, 0);
5474 *mult_val
= const0_rtx
;
5481 return basic_induction_var (XEXP (x
, 0), GET_MODE (XEXP (x
, 0)),
5482 dest_reg
, p
, inc_val
, mult_val
);
5485 /* Similar, since this can be a sign extension. */
5486 for (insn
= PREV_INSN (p
);
5487 (insn
&& GET_CODE (insn
) == NOTE
5488 && NOTE_LINE_NUMBER (insn
) != NOTE_INSN_LOOP_BEG
);
5489 insn
= PREV_INSN (insn
))
5493 set
= single_set (insn
);
5495 if (set
&& SET_DEST (set
) == XEXP (x
, 0)
5496 && GET_CODE (XEXP (x
, 1)) == CONST_INT
5497 && INTVAL (XEXP (x
, 1)) >= 0
5498 && GET_CODE (SET_SRC (set
)) == ASHIFT
5499 && XEXP (x
, 1) == XEXP (SET_SRC (set
), 1))
5500 return basic_induction_var (XEXP (SET_SRC (set
), 0),
5501 GET_MODE (XEXP (x
, 0)),
5502 dest_reg
, insn
, inc_val
, mult_val
);
/* A general induction variable (giv) is any quantity that is a linear
   function of a basic induction variable,
   i.e. giv = biv * mult_val + add_val.
   The coefficients can be any loop invariant quantity.
   A giv need not be computed directly from the biv;
   it can be computed by way of other givs.  */

/* Determine whether X computes a giv.
   If it does, return a nonzero value
   which is the benefit from eliminating the computation of X;
   set *SRC_REG to the register of the biv that it is computed from;
   set *ADD_VAL and *MULT_VAL to the coefficients,
   such that the value of X is biv * mult + add;  */
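/* For example (hypothetical source, 4-byte elements assumed), with biv I in

       for (i = 0; i < n; i++)
	 s += a[i];

   the address of a[i] is a giv: it is computed as  i * 4 + &a[0],
   i.e. mult_val is (const_int 4) and add_val is the address of A.  */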
5525 general_induction_var (x
, src_reg
, add_val
, mult_val
, is_addr
, pbenefit
)
5536 /* If this is an invariant, forget it, it isn't a giv. */
5537 if (invariant_p (x
) == 1)
5540 /* See if the expression could be a giv and get its form.
5541 Mark our place on the obstack in case we don't find a giv. */
5542 storage
= (char *) oballoc (0);
5544 x
= simplify_giv_expr (x
, pbenefit
);
5551 switch (GET_CODE (x
))
5555 /* Since this is now an invariant and wasn't before, it must be a giv
5556 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5558 *src_reg
= loop_iv_list
->biv
->dest_reg
;
5559 *mult_val
= const0_rtx
;
5564 /* This is equivalent to a BIV. */
5566 *mult_val
= const1_rtx
;
5567 *add_val
= const0_rtx
;
5571 /* Either (plus (biv) (invar)) or
5572 (plus (mult (biv) (invar_1)) (invar_2)). */
5573 if (GET_CODE (XEXP (x
, 0)) == MULT
)
5575 *src_reg
= XEXP (XEXP (x
, 0), 0);
5576 *mult_val
= XEXP (XEXP (x
, 0), 1);
5580 *src_reg
= XEXP (x
, 0);
5581 *mult_val
= const1_rtx
;
5583 *add_val
= XEXP (x
, 1);
5587 /* ADD_VAL is zero. */
5588 *src_reg
= XEXP (x
, 0);
5589 *mult_val
= XEXP (x
, 1);
5590 *add_val
= const0_rtx
;
  /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
     unless they are CONST_INT).  */
5599 if (GET_CODE (*add_val
) == USE
)
5600 *add_val
= XEXP (*add_val
, 0);
5601 if (GET_CODE (*mult_val
) == USE
)
5602 *mult_val
= XEXP (*mult_val
, 0);
5607 *pbenefit
+= ADDRESS_COST (orig_x
) - reg_address_cost
;
5609 *pbenefit
+= rtx_cost (orig_x
, MEM
) - reg_address_cost
;
5613 *pbenefit
+= rtx_cost (orig_x
, SET
);
5615 /* Always return true if this is a giv so it will be detected as such,
5616 even if the benefit is zero or negative. This allows elimination
5617 of bivs that might otherwise not be eliminated. */
/* Given an expression, X, try to form it as a linear function of a biv.
   We will canonicalize it to be of the form
	(plus (mult (BIV) (invar_1))
	      (invar_2))
   with possible degeneracies.

   The invariant expressions must each be of a form that can be used as a
   machine operand.  We surround them with a USE rtx (a hack, but localized
   and certainly unambiguous!) if not a CONST_INT for simplicity in this
   routine; it is the caller's responsibility to strip them.

   If no such canonicalization is possible (i.e., two biv's are used or an
   expression that is neither invariant nor a biv or giv), this routine
   returns 0.

   For a non-zero return, the result will have a code of CONST_INT, USE,
   REG (for a BIV), PLUS, or MULT.  No other codes will occur.

   *BENEFIT will be incremented by the benefit of any sub-giv encountered.  */
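/* For example (illustrative), an expression computing  (i + 2) * 4,  where
   I is a biv, canonicalizes to

       (plus (mult (reg i) (const_int 4)) (const_int 8))

   and an invariant but non-constant coefficient, say the sum of two
   loop-invariant registers, would appear wrapped in a USE, e.g.
   (mult (reg i) (use (plus (reg a) (reg b)))).  */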
5641 static rtx sge_plus
PROTO ((enum machine_mode
, rtx
, rtx
));
5642 static rtx sge_plus_constant
PROTO ((rtx
, rtx
));
5645 simplify_giv_expr (x
, benefit
)
5649 enum machine_mode mode
= GET_MODE (x
);
5653 /* If this is not an integer mode, or if we cannot do arithmetic in this
5654 mode, this can't be a giv. */
5655 if (mode
!= VOIDmode
5656 && (GET_MODE_CLASS (mode
) != MODE_INT
5657 || GET_MODE_BITSIZE (mode
) > HOST_BITS_PER_WIDE_INT
))
5660 switch (GET_CODE (x
))
5663 arg0
= simplify_giv_expr (XEXP (x
, 0), benefit
);
5664 arg1
= simplify_giv_expr (XEXP (x
, 1), benefit
);
5665 if (arg0
== 0 || arg1
== 0)
5668 /* Put constant last, CONST_INT last if both constant. */
5669 if ((GET_CODE (arg0
) == USE
5670 || GET_CODE (arg0
) == CONST_INT
)
5671 && ! ((GET_CODE (arg0
) == USE
5672 && GET_CODE (arg1
) == USE
)
5673 || GET_CODE (arg1
) == CONST_INT
))
5674 tem
= arg0
, arg0
= arg1
, arg1
= tem
;
5676 /* Handle addition of zero, then addition of an invariant. */
5677 if (arg1
== const0_rtx
)
5679 else if (GET_CODE (arg1
) == CONST_INT
|| GET_CODE (arg1
) == USE
)
5680 switch (GET_CODE (arg0
))
5684 /* Adding two invariants must result in an invariant, so enclose
5685 addition operation inside a USE and return it. */
5686 if (GET_CODE (arg0
) == USE
)
5687 arg0
= XEXP (arg0
, 0);
5688 if (GET_CODE (arg1
) == USE
)
5689 arg1
= XEXP (arg1
, 0);
5691 if (GET_CODE (arg0
) == CONST_INT
)
5692 tem
= arg0
, arg0
= arg1
, arg1
= tem
;
5693 if (GET_CODE (arg1
) == CONST_INT
)
5694 tem
= sge_plus_constant (arg0
, arg1
);
5696 tem
= sge_plus (mode
, arg0
, arg1
);
5698 if (GET_CODE (tem
) != CONST_INT
)
5699 tem
= gen_rtx_USE (mode
, tem
);
5704 /* biv + invar or mult + invar. Return sum. */
5705 return gen_rtx_PLUS (mode
, arg0
, arg1
);
5708 /* (a + invar_1) + invar_2. Associate. */
5709 return simplify_giv_expr (
5710 gen_rtx_PLUS (mode
, XEXP (arg0
, 0),
5711 gen_rtx_PLUS (mode
, XEXP (arg0
, 1), arg1
)),
5718 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5719 MULT to reduce cases. */
5720 if (GET_CODE (arg0
) == REG
)
5721 arg0
= gen_rtx_MULT (mode
, arg0
, const1_rtx
);
5722 if (GET_CODE (arg1
) == REG
)
5723 arg1
= gen_rtx_MULT (mode
, arg1
, const1_rtx
);
5725 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5726 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5727 Recurse to associate the second PLUS. */
5728 if (GET_CODE (arg1
) == MULT
)
5729 tem
= arg0
, arg0
= arg1
, arg1
= tem
;
5731 if (GET_CODE (arg1
) == PLUS
)
5732 return simplify_giv_expr (gen_rtx_PLUS (mode
,
5733 gen_rtx_PLUS (mode
, arg0
,
5738 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5739 if (GET_CODE (arg0
) != MULT
|| GET_CODE (arg1
) != MULT
)
5742 if (!rtx_equal_p (arg0
, arg1
))
5745 return simplify_giv_expr (gen_rtx_MULT (mode
,
5753 /* Handle "a - b" as "a + b * (-1)". */
5754 return simplify_giv_expr (gen_rtx_PLUS (mode
,
5756 gen_rtx_MULT (mode
, XEXP (x
, 1),
5761 arg0
= simplify_giv_expr (XEXP (x
, 0), benefit
);
5762 arg1
= simplify_giv_expr (XEXP (x
, 1), benefit
);
5763 if (arg0
== 0 || arg1
== 0)
5766 /* Put constant last, CONST_INT last if both constant. */
5767 if ((GET_CODE (arg0
) == USE
|| GET_CODE (arg0
) == CONST_INT
)
5768 && GET_CODE (arg1
) != CONST_INT
)
5769 tem
= arg0
, arg0
= arg1
, arg1
= tem
;
5771 /* If second argument is not now constant, not giv. */
5772 if (GET_CODE (arg1
) != USE
&& GET_CODE (arg1
) != CONST_INT
)
5775 /* Handle multiply by 0 or 1. */
5776 if (arg1
== const0_rtx
)
5779 else if (arg1
== const1_rtx
)
5782 switch (GET_CODE (arg0
))
5785 /* biv * invar. Done. */
5786 return gen_rtx_MULT (mode
, arg0
, arg1
);
5789 /* Product of two constants. */
5790 return GEN_INT (INTVAL (arg0
) * INTVAL (arg1
));
5793 /* invar * invar. It is a giv, but very few of these will
5794 actually pay off, so limit to simple registers. */
5795 if (GET_CODE (arg1
) != CONST_INT
)
5798 arg0
= XEXP (arg0
, 0);
5799 if (GET_CODE (arg0
) == REG
)
5800 tem
= gen_rtx_MULT (mode
, arg0
, arg1
);
5801 else if (GET_CODE (arg0
) == MULT
5802 && GET_CODE (XEXP (arg0
, 0)) == REG
5803 && GET_CODE (XEXP (arg0
, 1)) == CONST_INT
)
5805 tem
= gen_rtx_MULT (mode
, XEXP (arg0
, 0),
5806 GEN_INT (INTVAL (XEXP (arg0
, 1))
5811 return gen_rtx_USE (mode
, tem
);
5814 /* (a * invar_1) * invar_2. Associate. */
5815 return simplify_giv_expr (gen_rtx_MULT (mode
, XEXP (arg0
, 0),
5822 /* (a + invar_1) * invar_2. Distribute. */
5823 return simplify_giv_expr (gen_rtx_PLUS (mode
,
5837 /* Shift by constant is multiply by power of two. */
5838 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
)
5841 return simplify_giv_expr (gen_rtx_MULT (mode
,
5843 GEN_INT ((HOST_WIDE_INT
) 1
5844 << INTVAL (XEXP (x
, 1)))),
5848 /* "-a" is "a * (-1)" */
5849 return simplify_giv_expr (gen_rtx_MULT (mode
, XEXP (x
, 0), constm1_rtx
),
5853 /* "~a" is "-a - 1". Silly, but easy. */
5854 return simplify_giv_expr (gen_rtx_MINUS (mode
,
5855 gen_rtx_NEG (mode
, XEXP (x
, 0)),
5860 /* Already in proper form for invariant. */
5864 /* If this is a new register, we can't deal with it. */
5865 if (REGNO (x
) >= max_reg_before_loop
)
5868 /* Check for biv or giv. */
5869 switch (reg_iv_type
[REGNO (x
)])
5873 case GENERAL_INDUCT
:
5875 struct induction
*v
= reg_iv_info
[REGNO (x
)];
5877 /* Form expression from giv and add benefit. Ensure this giv
5878 can derive another and subtract any needed adjustment if so. */
5879 *benefit
+= v
->benefit
;
5883 tem
= gen_rtx_PLUS (mode
, gen_rtx_MULT (mode
, v
->src_reg
,
5886 if (v
->derive_adjustment
)
5887 tem
= gen_rtx_MINUS (mode
, tem
, v
->derive_adjustment
);
5888 return simplify_giv_expr (tem
, benefit
);
5892 /* If it isn't an induction variable, and it is invariant, we
5893 may be able to simplify things further by looking through
5894 the bits we just moved outside the loop. */
5895 if (invariant_p (x
) == 1)
5899 for (m
= the_movables
; m
; m
= m
->next
)
5900 if (rtx_equal_p (x
, m
->set_dest
))
5902 /* Ok, we found a match. Substitute and simplify. */
5904 /* If we match another movable, we must use that, as
5905 this one is going away. */
5907 return simplify_giv_expr (m
->match
->set_dest
, benefit
);
5909 /* If consec is non-zero, this is a member of a group of
5910 instructions that were moved together. We handle this
5911 case only to the point of seeking to the last insn and
5912 looking for a REG_EQUAL. Fail if we don't find one. */
5917 do { tem
= NEXT_INSN (tem
); } while (--i
> 0);
5919 tem
= find_reg_note (tem
, REG_EQUAL
, NULL_RTX
);
5921 tem
= XEXP (tem
, 0);
5925 tem
= single_set (m
->insn
);
5927 tem
= SET_SRC (tem
);
5932 /* What we are most interested in is pointer
5933 arithmetic on invariants -- only take
5934 patterns we may be able to do something with. */
5935 if (GET_CODE (tem
) == PLUS
5936 || GET_CODE (tem
) == MULT
5937 || GET_CODE (tem
) == ASHIFT
5938 || GET_CODE (tem
) == CONST_INT
5939 || GET_CODE (tem
) == SYMBOL_REF
)
5941 tem
= simplify_giv_expr (tem
, benefit
);
5945 else if (GET_CODE (tem
) == CONST
5946 && GET_CODE (XEXP (tem
, 0)) == PLUS
5947 && GET_CODE (XEXP (XEXP (tem
, 0), 0)) == SYMBOL_REF
5948 && GET_CODE (XEXP (XEXP (tem
, 0), 1)) == CONST_INT
)
5950 tem
= simplify_giv_expr (XEXP (tem
, 0), benefit
);
5961 /* Fall through to general case. */
5963 /* If invariant, return as USE (unless CONST_INT).
5964 Otherwise, not giv. */
5965 if (GET_CODE (x
) == USE
)
5968 if (invariant_p (x
) == 1)
5970 if (GET_CODE (x
) == CONST_INT
)
5972 if (GET_CODE (x
) == CONST
5973 && GET_CODE (XEXP (x
, 0)) == PLUS
5974 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
5975 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)
5977 return gen_rtx_USE (mode
, x
);
/* This routine folds invariants such that there is only ever one
   CONST_INT in the summation.  It is only used by simplify_giv_expr.  */
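/* E.g. (illustrative): folding (const_int 5) into
   (plus (plus (reg a) (reg b)) (const_int 3)) yields
   (plus (plus (reg a) (reg b)) (const_int 8)), so the sum keeps a single
   CONST_INT.  */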
5988 sge_plus_constant (x
, c
)
5991 if (GET_CODE (x
) == CONST_INT
)
5992 return GEN_INT (INTVAL (x
) + INTVAL (c
));
5993 else if (GET_CODE (x
) != PLUS
)
5994 return gen_rtx_PLUS (GET_MODE (x
), x
, c
);
5995 else if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
5997 return gen_rtx_PLUS (GET_MODE (x
), XEXP (x
, 0),
5998 GEN_INT (INTVAL (XEXP (x
, 1)) + INTVAL (c
)));
6000 else if (GET_CODE (XEXP (x
, 0)) == PLUS
6001 || GET_CODE (XEXP (x
, 1)) != PLUS
)
6003 return gen_rtx_PLUS (GET_MODE (x
),
6004 sge_plus_constant (XEXP (x
, 0), c
), XEXP (x
, 1));
6008 return gen_rtx_PLUS (GET_MODE (x
),
6009 sge_plus_constant (XEXP (x
, 1), c
), XEXP (x
, 0));
6014 sge_plus (mode
, x
, y
)
6015 enum machine_mode mode
;
6018 while (GET_CODE (y
) == PLUS
)
6020 rtx a
= XEXP (y
, 0);
6021 if (GET_CODE (a
) == CONST_INT
)
6022 x
= sge_plus_constant (x
, a
);
6024 x
= gen_rtx_PLUS (mode
, x
, a
);
6027 if (GET_CODE (y
) == CONST_INT
)
6028 x
= sge_plus_constant (x
, y
);
6030 x
= gen_rtx_PLUS (mode
, x
, y
);
6034 /* Help detect a giv that is calculated by several consecutive insns;
6038    The caller has already identified the first insn P as having a giv as dest;
6039    we check that all other insns that set the same register follow
6040    immediately after P, that they alter nothing else,
6041    and that the result of the last is still a giv.
6043    The value is 0 if the reg set in P is not really a giv.
6044    Otherwise, the value is the amount gained by eliminating
6045    all the consecutive insns that compute the value.
6047    FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6048    SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6050    The coefficients of the ultimate giv value are stored in
6051    *MULT_VAL and *ADD_VAL.  */
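/* A hypothetical instance of such a multi-insn giv, e.g. on a machine that
   cannot add a large constant in a single insn:

       (set (reg 5) (mult (reg 3) (const_int 4)))      <- P, giv of biv r3
       (set (reg 5) (plus (reg 5) (const_int 12345)))  <- consecutive insn

   The final value 4*r3 + 12345 is still a giv, and the benefit returned
   accounts for eliminating both insns.  */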
6054 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
6069   /* Indicate that this is a giv so that we can update the value produced in
6070      each insn of the multi-insn sequence.
6072      This induction structure will be used only by the call to
6073      general_induction_var below, so we can allocate it on our stack.
6074      If this is a giv, our caller will replace the induct var entry with
6075      a new induction structure.  */
6077     = (struct induction *) alloca (sizeof (struct induction));
6078   v->src_reg = src_reg;
6079   v->mult_val = *mult_val;
6080   v->add_val = *add_val;
6081   v->benefit = first_benefit;
6083   v->derive_adjustment = 0;
6085   reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
6086   reg_iv_info[REGNO (dest_reg)] = v;
6088   count = VARRAY_INT (n_times_set, REGNO (dest_reg)) - 1;
6093       code = GET_CODE (p);
6095       /* If libcall, skip to end of call sequence.  */
6096       if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6100           && (set = single_set (p))
6101           && GET_CODE (SET_DEST (set)) == REG
6102           && SET_DEST (set) == dest_reg
6103           && (general_induction_var (SET_SRC (set), &src_reg,
6104                                      add_val, mult_val, 0, &benefit)
6105               /* Giv created by equivalent expression.  */
6106               || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6107                   && general_induction_var (XEXP (temp, 0), &src_reg,
6108                                             add_val, mult_val, 0, &benefit)))
6109           && src_reg == v->src_reg)
6111           if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6112             benefit += libcall_benefit (p);
6115           v->mult_val = *mult_val;
6116           v->add_val = *add_val;
6117           v->benefit = benefit;
6119       else if (code != NOTE)
6121           /* Allow insns that set something other than this giv to a
6122              constant.  Such insns are needed on machines which cannot
6123              include long constants and should not disqualify a giv.  */
6125               && (set = single_set (p))
6126               && SET_DEST (set) != dest_reg
6127               && CONSTANT_P (SET_SRC (set)))
6130           reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
6138 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6139    represented by G1.  If no such expression can be found, or it is clear that
6140    it cannot possibly be a valid address, 0 is returned.
6142    To perform the computation, we note that
6143         G1 = x * v + a          and
6144         G2 = y * v + b
6145    where `v' is the biv.
6147    So G2 = (y/x) * G1 + (b - a*y/x).
6149    Note that MULT = y/x.
6151    Update: A and B are now allowed to be additive expressions such that
6152    B contains all variables in A.  That is, computing B-A will not require
6153    subtracting variables.  */
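/* A concrete instance of the formula above (illustrative coefficients):
   if G1 = 2*v + 3 and G2 = 6*v + 11, then MULT = 6/2 = 3, the additive
   part is 11 - 3*3 = 2, and G2 is expressed as 3*G1 + 2.  */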
6156 express_from_1 (a, b, mult)
6159   /* If MULT is zero, then A*MULT is zero, and our expression is B.  */
6161   if (mult == const0_rtx)
6164   /* If MULT is not 1, we cannot handle A with non-constants, since we
6165      would then be required to subtract multiples of the registers in A.
6166      This is theoretically possible, and may even apply to some Fortran
6167      constructs, but it is a lot of work and we do not attempt it here.  */
6169   if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
6172   /* In general these structures are sorted top to bottom (down the PLUS
6173      chain), but not left to right across the PLUS.  If B is a higher
6174      order giv than A, we can strip one level and recurse.  If A is higher
6175      order, we'll eventually bail out, but won't know that until the end.
6176      If they are the same, we'll strip one level around this loop.  */
6178   while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
6180       rtx ra, rb, oa, ob, tmp;
6182       ra = XEXP (a, 0), oa = XEXP (a, 1);
6183       if (GET_CODE (ra) == PLUS)
6184         tmp = ra, ra = oa, oa = tmp;
6186       rb = XEXP (b, 0), ob = XEXP (b, 1);
6187       if (GET_CODE (rb) == PLUS)
6188         tmp = rb, rb = ob, ob = tmp;
6190       if (rtx_equal_p (ra, rb))
6191         /* We matched: remove one reg completely.  */
6193       else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
6194         /* An alternate match.  */
6196       else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
6197         /* An alternate match.  */
6201           /* Indicates an extra register in B.  Strip one level from B and
6202              recurse, hoping B was the higher order expression.  */
6203           ob = express_from_1 (a, ob, mult);
6206           return gen_rtx_PLUS (GET_MODE (b), rb, ob);
6210   /* Here we are at the last level of A, go through the cases hoping to
6211      get rid of everything but a constant.  */
6213   if (GET_CODE (a) == PLUS)
6217       ra = XEXP (a, 0), oa = XEXP (a, 1);
6218       if (rtx_equal_p (oa, b))
6220       else if (!rtx_equal_p (ra, b))
6223       if (GET_CODE (oa) != CONST_INT)
6226       return GEN_INT (-INTVAL (oa) * INTVAL (mult));
6228   else if (GET_CODE (a) == CONST_INT)
6230       return plus_constant (b, -INTVAL (a) * INTVAL (mult));
6232   else if (GET_CODE (b) == PLUS)
6234       if (rtx_equal_p (a, XEXP (b, 0)))
6236       else if (rtx_equal_p (a, XEXP (b, 1)))
6241   else if (rtx_equal_p (a, b))
6248 express_from (g1, g2)
6249      struct induction *g1, *g2;
6253   /* The value that G1 will be multiplied by must be a constant integer.  Also,
6254      the only chance we have of getting a valid address is if b*c/a (see above
6255      for notation) is also an integer.  */
6256   if (GET_CODE (g1->mult_val) == CONST_INT
6257       && GET_CODE (g2->mult_val) == CONST_INT)
6259       if (g1->mult_val == const0_rtx
6260           || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
6262       mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
6264   else if (rtx_equal_p (g1->mult_val, g2->mult_val))
6268       /* ??? Find out if the one is a multiple of the other?  */
6272   add = express_from_1 (g1->add_val, g2->add_val, mult);
6273   if (add == NULL_RTX)
6276   /* Form simplified final result.  */
6277   if (mult == const0_rtx)
6279   else if (mult == const1_rtx)
6280     mult = g1->dest_reg;
6282     mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
6284   if (add == const0_rtx)
6287     return gen_rtx_PLUS (g2->mode, mult, add);
6290 /* Return 1 if giv G2 can be combined with G1.  This means that G2 can use
6291    (either directly or via an address expression) a register used to represent
6292    G1.  Set g2->new_reg to a representation of G1 (normally just
6296 combine_givs_p (g1, g2)
6297      struct induction *g1, *g2;
6299   rtx tem = express_from (g1, g2);
6301   /* If these givs are identical, they can be combined.  We use the results
6302      of express_from because the addends are not in a canonical form, so
6303      rtx_equal_p is a weaker test.  */
6304   if (tem == const0_rtx)
6306       return g1->dest_reg;
6309   /* If G2 can be expressed as a function of G1 and that function is valid
6310      as an address and no more expensive than using a register for G2,
6311      the expression of G2 in terms of G1 can be used.  */
6313       && g2->giv_type == DEST_ADDR
6314       && memory_address_p (g2->mem_mode, tem)
6315       /* ??? Loses, especially with -fforce-addr, where *g2->location
6316          will always be a register, and so anything more complicated
6320       && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
6322       && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
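/* The typical profitable case here is a pair of address givs (register
   names hypothetical): if G1's reduced register holds base + 4*i and a
   memory reference uses the address base + 4*i + 8 as G2, express_from
   yields (plus G1 (const_int 8)); it is used only if it is a valid address
   that is no more expensive than *g2->location.  */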
6333 struct combine_givs_stats
6340 cmp_combine_givs_stats (x, y)
6341      struct combine_givs_stats *x, *y;
6344   d = y->total_benefit - x->total_benefit;
6345   /* Stabilize the sort.  */
6347     d = x->giv_number - y->giv_number;
6351 /* If one of these givs is a DEST_REG that was only used once, by the
6352    other giv, this is actually a single use.  Return 0 if this is not
6353    the case, -1 if g1 is the DEST_REG involved, and 1 if it was g2.  */
6356 combine_givs_used_once (g1, g2)
6357      struct induction *g1, *g2;
6359   if (g1->giv_type == DEST_REG
6360       && VARRAY_INT (n_times_used, REGNO (g1->dest_reg)) == 1
6361       && reg_mentioned_p (g1->dest_reg, PATTERN (g2->insn)))
6364   if (g2->giv_type == DEST_REG
6365       && VARRAY_INT (n_times_used, REGNO (g2->dest_reg)) == 1
6366       && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
6373 combine_givs_benefit_from (g1, g2)
6374      struct induction *g1, *g2;
6376   int tmp = combine_givs_used_once (g1, g2);
6380     return g2->benefit - g1->benefit;
6385 /* Check all pairs of givs for iv_class BL and see if any can be combined with
6386    any other.  If so, point SAME to the giv combined with and set NEW_REG to
6387    be an expression (in terms of the other giv's DEST_REG) equivalent to the
6388    giv.  Also, update BENEFIT and related fields for cost/benefit analysis.  */
6392      struct iv_class *bl;
6394   struct induction *g1, *g2, **giv_array;
6395   int i, j, k, giv_count;
6396   struct combine_givs_stats *stats;
6399   /* Count givs, because bl->giv_count is incorrect here.  */
6401   for (g1 = bl->giv; g1; g1 = g1->next_iv)
6406     = (struct induction **) alloca (giv_count * sizeof (struct induction *));
6408   for (g1 = bl->giv; g1; g1 = g1->next_iv)
6410       giv_array[i++] = g1;
6412   stats = (struct combine_givs_stats *) alloca (giv_count * sizeof (*stats));
6413   bzero ((char *) stats, giv_count * sizeof (*stats));
6415   can_combine = (rtx *) alloca (giv_count * giv_count * sizeof (rtx));
6416   bzero ((char *) can_combine, giv_count * giv_count * sizeof (rtx));
6418   for (i = 0; i < giv_count; i++)
6424       this_benefit = g1->benefit;
6425       /* Add an additional weight for zero addends.  */
6426       if (g1->no_const_addval)
6428       for (j = 0; j < giv_count; j++)
6434               && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
6436               can_combine[i*giv_count + j] = this_combine;
6437               this_benefit += combine_givs_benefit_from (g1, g2);
6438               /* Add an additional weight for being reused more times.  */
6442       stats[i].giv_number = i;
6443       stats[i].total_benefit = this_benefit;
6446   /* Iterate, combining until we can't.  */
6448   qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
6450   if (loop_dump_stream)
6452       fprintf (loop_dump_stream, "Sorted combine statistics:\n");
6453       for (k = 0; k < giv_count; k++)
6455           g1 = giv_array[stats[k].giv_number];
6456           if (!g1->combined_with && !g1->same)
6457             fprintf (loop_dump_stream, " {%d, %d}",
6458                      INSN_UID (giv_array[stats[k].giv_number]->insn),
6459                      stats[k].total_benefit);
6461       putc ('\n', loop_dump_stream);
6464   for (k = 0; k < giv_count; k++)
6466       int g1_add_benefit = 0;
6468       i = stats[k].giv_number;
6471       /* If it has already been combined, skip.  */
6472       if (g1->combined_with || g1->same)
6475       for (j = 0; j < giv_count; j++)
6478           if (g1 != g2 && can_combine[i*giv_count + j]
6479               /* If it has already been combined, skip.  */
6480               && ! g2->same && ! g2->combined_with)
6484               g2->new_reg = can_combine[i*giv_count + j];
6486               g1->combined_with = 1;
6487               if (!combine_givs_used_once (g1, g2))
6488                 g1->times_used += 1;
6489               g1->lifetime += g2->lifetime;
6491               g1_add_benefit += combine_givs_benefit_from (g1, g2);
6493               /* ??? The new final_[bg]iv_value code does a much better job
6494                  of finding replaceable giv's, and hence this code may no
6495                  longer be necessary.  */
6496               if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
6497                 g1_add_benefit -= copy_cost;
6499               /* To help optimize the next set of combinations, remove
6500                  this giv from the benefits of other potential mates.  */
6501               for (l = 0; l < giv_count; ++l)
6503                   int m = stats[l].giv_number;
6504                   if (can_combine[m*giv_count + j])
6506                       /* Remove additional weight for being reused.  */
6507                       stats[l].total_benefit -= 3 +
6508                         combine_givs_benefit_from (giv_array[m], g2);
6512               if (loop_dump_stream)
6513                 fprintf (loop_dump_stream,
6514                          "giv at %d combined with giv at %d\n",
6515                          INSN_UID (g2->insn), INSN_UID (g1->insn));
6519       /* To help optimize the next set of combinations, remove
6520          this giv from the benefits of other potential mates.  */
6521       if (g1->combined_with)
6523           for (j = 0; j < giv_count; ++j)
6525               int m = stats[j].giv_number;
6526               if (can_combine[m*giv_count + j])
6528                   /* Remove additional weight for being reused.  */
6529                   stats[j].total_benefit -= 3 +
6530                     combine_givs_benefit_from (giv_array[m], g1);
6534           g1->benefit += g1_add_benefit;
6536           /* We've finished with this giv, and everything it touched.
6537              Restart the combination so that proper weights for the
6538              rest of the givs are properly taken into account.  */
6539           /* ??? Ideally we would compact the arrays at this point, so
6540              as to not cover old ground.  But sanely compacting
6541              can_combine is tricky.  */
6547 /* EMIT code before INSERT_BEFORE to set REG = B * M + A.  */
6550 emit_iv_add_mult (b, m, a, reg, insert_before)
6551      rtx b;          /* initial value of basic induction variable */
6552      rtx m;          /* multiplicative constant */
6553      rtx a;          /* additive constant */
6554      rtx reg;        /* destination register */
6560   /* Prevent unexpected sharing of these rtx.  */
6564   /* Increase the lifetime of any invariants moved further in code.  */
6565   update_reg_last_use (a, insert_before);
6566   update_reg_last_use (b, insert_before);
6567   update_reg_last_use (m, insert_before);
6570   result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
6572   emit_move_insn (reg, result);
6573   seq = gen_sequence ();
6576   emit_insn_before (seq, insert_before);
6578   /* It is entirely possible that the expansion created lots of new
6579      registers.  Iterate over the sequence we just created and
6582   if (GET_CODE (seq) == SEQUENCE)
6585       for (i = 0; i < XVECLEN (seq, 0); ++i)
6587           rtx set = single_set (XVECEXP (seq, 0, i));
6588           if (set && GET_CODE (SET_DEST (set)) == REG)
6589             record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
6592   else if (GET_CODE (seq) == SET
6593            && GET_CODE (SET_DEST (seq)) == REG)
6594     record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
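/* Schematic use (as in the strength-reduction code elsewhere in this file):

       emit_iv_add_mult (bl->initial_value, v->mult_val, v->add_val,
                         v->new_reg, loop_start);

   emits, just before LOOP_START, insns that compute
   new_reg = initial_value * mult_val + add_val.  */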
6597 /* Test whether A * B can be computed without
6598    an actual multiply insn.  Value is 1 if so.  */
6601 product_cheap_p (a, b)
6607   struct obstack *old_rtl_obstack = rtl_obstack;
6608   char *storage = (char *) obstack_alloc (&temp_obstack, 0);
6611   /* If only one is constant, make it B.  */
6612   if (GET_CODE (a) == CONST_INT)
6613     tmp = a, a = b, b = tmp;
6615   /* If first constant, both constant, so don't need multiply.  */
6616   if (GET_CODE (a) == CONST_INT)
6619   /* If second not constant, neither is constant, so would need multiply.  */
6620   if (GET_CODE (b) != CONST_INT)
6623   /* One operand is constant, so might not need multiply insn.  Generate the
6624      code for the multiply and see if a call or multiply, or long sequence
6625      of insns is generated.  */
6627   rtl_obstack = &temp_obstack;
6629   expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
6630   tmp = gen_sequence ();
6633   if (GET_CODE (tmp) == SEQUENCE)
6635       if (XVEC (tmp, 0) == 0)
6637       else if (XVECLEN (tmp, 0) > 3)
6640         for (i = 0; i < XVECLEN (tmp, 0); i++)
6642             rtx insn = XVECEXP (tmp, 0, i);
6644             if (GET_CODE (insn) != INSN
6645                 || (GET_CODE (PATTERN (insn)) == SET
6646                     && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
6647                 || (GET_CODE (PATTERN (insn)) == PARALLEL
6648                     && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
6649                     && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
6656   else if (GET_CODE (tmp) == SET
6657            && GET_CODE (SET_SRC (tmp)) == MULT)
6659   else if (GET_CODE (tmp) == PARALLEL
6660            && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
6661            && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
6664   /* Free any storage we obtained in generating this multiply and restore rtl
6665      allocation to its normal obstack.  */
6666   obstack_free (&temp_obstack, storage);
6667   rtl_obstack = old_rtl_obstack;
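/* For example, on many targets a multiply by a small constant such as 5
   expands to a shift and an add, so the answer is 1; a multiply by an
   arbitrary large constant usually needs a real mult insn (or a libcall)
   and the answer is 0.  The outcome is target-dependent.  */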
6672 /* Check to see if loop can be terminated by a "decrement and branch until
6673    zero" instruction.  If so, add a REG_NONNEG note to the branch insn.
6674    Also try reversing an increment loop to a decrement loop
6675    to see if the optimization can be performed.
6676    Value is nonzero if optimization was performed.  */
6678 /* This is useful even if the architecture doesn't have such an insn,
6679    because it might change a loop which increments from 0 to n to a loop
6680    which decrements from n to 0.  A loop that decrements to zero is usually
6681    faster than one that increments from zero.  */
6683 /* ??? This could be rewritten to use some of the loop unrolling procedures,
6684    such as approx_final_value, biv_total_increment, loop_iterations, and
6685    final_[bg]iv_value.  */
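/* At the source level the reversal corresponds to turning

       for (i = 0; i < n; i++)  body;     (i used only as a counter)
   into
       for (i = n; i > 0; i--)  body;

   so that the loop can close with a single decrement-and-branch
   instruction.  */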
6688 check_dbra_loop (loop_end, insn_count, loop_start)
6693   struct iv_class *bl;
6700   rtx before_comparison;
6704   int compare_and_branch;
6706   /* If last insn is a conditional branch, and the insn before tests a
6707      register value, try to optimize it.  Otherwise, we can't do anything.  */
6709   jump = PREV_INSN (loop_end);
6710   comparison = get_condition_for_loop (jump);
6711   if (comparison == 0)
6714   /* Try to compute whether the compare/branch at the loop end is one or
6715      two instructions.  */
6716   get_condition (jump, &first_compare);
6717   if (first_compare == jump)
6718     compare_and_branch = 1;
6719   else if (first_compare == prev_nonnote_insn (jump))
6720     compare_and_branch = 2;
6724   /* Check all of the bivs to see if the compare uses one of them.
6725      Skip biv's set more than once because we can't guarantee that
6726      it will be zero on the last iteration.  Also skip if the biv is
6727      used between its update and the test insn.  */
6729   for (bl = loop_iv_list; bl; bl = bl->next)
6731       if (bl->biv_count == 1
6732           && bl->biv->dest_reg == XEXP (comparison, 0)
6733           && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
6741   /* Look for the case where the basic induction variable is always
6742      nonnegative, and equals zero on the last iteration.
6743      In this case, add a reg_note REG_NONNEG, which allows the
6744      m68k DBRA instruction to be used.  */
6746   if (((GET_CODE (comparison) == GT
6747         && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6748         && INTVAL (XEXP (comparison, 1)) == -1)
6749        || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
6750       && GET_CODE (bl->biv->add_val) == CONST_INT
6751       && INTVAL (bl->biv->add_val) < 0)
6753       /* Initial value must be greater than 0,
6754          init_val % -dec_value == 0 to ensure that it equals zero on
6755          the last iteration */
6757       if (GET_CODE (bl->initial_value) == CONST_INT
6758           && INTVAL (bl->initial_value) > 0
6759           && (INTVAL (bl->initial_value)
6760               % (-INTVAL (bl->biv->add_val))) == 0)
6762           /* register always nonnegative, add REG_NOTE to branch */
6763           REG_NOTES (PREV_INSN (loop_end))
6764             = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6765                                  REG_NOTES (PREV_INSN (loop_end)));
6771       /* If the decrement is 1 and the value was tested as >= 0 before
6772          the loop, then we can safely optimize.  */
6773       for (p = loop_start; p; p = PREV_INSN (p))
6775           if (GET_CODE (p) == CODE_LABEL)
6777           if (GET_CODE (p) != JUMP_INSN)
6780           before_comparison = get_condition_for_loop (p);
6781           if (before_comparison
6782               && XEXP (before_comparison, 0) == bl->biv->dest_reg
6783               && GET_CODE (before_comparison) == LT
6784               && XEXP (before_comparison, 1) == const0_rtx
6785               && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
6786               && INTVAL (bl->biv->add_val) == -1)
6788               REG_NOTES (PREV_INSN (loop_end))
6789                 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6790                                      REG_NOTES (PREV_INSN (loop_end)));
6797   else if (INTVAL (bl->biv->add_val) > 0)
6799       /* Try to change inc to dec, so can apply above optimization.  */
6801          all registers modified are induction variables or invariant,
6802          all memory references have non-overlapping addresses
6803          (obviously true if only one write)
6804          allow 2 insns for the compare/jump at the end of the loop.  */
6805       /* Also, we must avoid any instructions which use both the reversed
6806          biv and another biv.  Such instructions will fail if the loop is
6807          reversed.  We meet this condition by requiring that either
6808          no_use_except_counting is true, or else that there is only
6810       int num_nonfixed_reads = 0;
6811       /* 1 if the iteration var is used only to count iterations.  */
6812       int no_use_except_counting = 0;
6813       /* 1 if the loop has no memory store, or it has a single memory store
6814          which is reversible.  */
6815       int reversible_mem_store = 1;
6817       if (bl->giv_count == 0
6818           && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
6820           rtx bivreg = regno_reg_rtx[bl->regno];
6822           /* If there are no givs for this biv, and the only exit is the
6823              fall through at the end of the loop, then
6824              see if perhaps there are no uses except to count.  */
6825           no_use_except_counting = 1;
6826           for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6827             if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6829                 rtx set = single_set (p);
6831                 if (set && GET_CODE (SET_DEST (set)) == REG
6832                     && REGNO (SET_DEST (set)) == bl->regno)
6833                   /* An insn that sets the biv is okay.  */
6835                 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
6836                          || p == prev_nonnote_insn (loop_end))
6837                   /* Don't bother about the end test.  */
6839                 else if (reg_mentioned_p (bivreg, PATTERN (p)))
6841                     no_use_except_counting = 0;
6847       if (no_use_except_counting)
6848         ; /* no need to worry about MEMs.  */
6849       else if (num_mem_sets <= 1)
6851           for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6852             if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6853               num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
6855           /* If the loop has a single store, and the destination address is
6856              invariant, then we can't reverse the loop, because this address
6857              might then have the wrong value at loop exit.
6858              This would work if the source was invariant also, however, in that
6859              case, the insn should have been moved out of the loop.  */
6861           if (num_mem_sets == 1)
6862             reversible_mem_store
6863               = (! unknown_address_altered
6864                  && ! invariant_p (XEXP (loop_store_mems[0], 0)));
6869       /* This code only acts for innermost loops.  Also it simplifies
6870          the memory address check by only reversing loops with
6871          zero or one memory access.
6872          Two memory accesses could involve parts of the same array,
6873          and that can't be reversed.
6874          If the biv is used only for counting, then we don't need to worry
6875          about all these things.  */
6877       if ((num_nonfixed_reads <= 1
6879            && !loop_has_volatile
6880            && reversible_mem_store
6881            && (bl->giv_count + bl->biv_count + num_mem_sets
6882                + num_movables + compare_and_branch == insn_count)
6883            && (bl == loop_iv_list && bl->next == 0))
6884           || no_use_except_counting)
6888           /* Loop can be reversed.  */
6889           if (loop_dump_stream)
6890             fprintf (loop_dump_stream, "Can reverse loop\n");
6892           /* Now check other conditions:
6894              The increment must be a constant, as must the initial value,
6895              and the comparison code must be LT.
6897              This test can probably be improved since +/- 1 in the constant
6898              can be obtained by changing LT to LE and vice versa; this is
6902               /* for constants, LE gets turned into LT */
6903               && (GET_CODE (comparison) == LT
6904                   || (GET_CODE (comparison) == LE
6905                       && no_use_except_counting)))
6907               HOST_WIDE_INT add_val, add_adjust, comparison_val;
6908               rtx initial_value, comparison_value;
6910               enum rtx_code cmp_code;
6911               int comparison_const_width;
6912               unsigned HOST_WIDE_INT comparison_sign_mask;
6915               add_val = INTVAL (bl->biv->add_val);
6916               comparison_value = XEXP (comparison, 1);
6917               comparison_const_width
6918                 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 1)));
6919               if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
6920                 comparison_const_width = HOST_BITS_PER_WIDE_INT;
6921               comparison_sign_mask
6922                 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
6924               /* If the comparison value is not a loop invariant, then we
6925                  can not reverse this loop.
6927                  ??? If the insns which initialize the comparison value as
6928                  a whole compute an invariant result, then we could move
6929                  them out of the loop and proceed with loop reversal.  */
6930               if (!invariant_p (comparison_value))
6933               if (GET_CODE (comparison_value) == CONST_INT)
6934                 comparison_val = INTVAL (comparison_value);
6935               initial_value = bl->initial_value;
6937               /* Normalize the initial value if it is an integer and
6938                  has no other use except as a counter.  This will allow
6939                  a few more loops to be reversed.  */
6940               if (no_use_except_counting
6941                   && GET_CODE (comparison_value) == CONST_INT
6942                   && GET_CODE (initial_value) == CONST_INT)
6944                   comparison_val = comparison_val - INTVAL (bl->initial_value);
6945                   /* The code below requires comparison_val to be a multiple
6946                      of add_val in order to do the loop reversal, so
6947                      round up comparison_val to a multiple of add_val.
6948                      Since comparison_value is constant, we know that the
6949                      current comparison code is LT.  */
6950                   comparison_val = comparison_val + add_val - 1;
6952                     -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
6953                   /* We postpone overflow checks for COMPARISON_VAL here;
6954                      even if there is an overflow, we might still be able to
6955                      reverse the loop, if converting the loop exit test to
6957                   initial_value = const0_rtx;
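/* Worked example of the normalization above (illustrative numbers): for a
   loop with initial value 3, limit 14 and increment 4, comparison_val
   becomes 14 - 3 = 11 and is then rounded up to 12, the next multiple of
   add_val; initial_value becomes 0, so the reversed loop can count down
   from a multiple of the increment to zero.  */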
6960               /* Check if there is a NOTE_INSN_LOOP_VTOP note.  If there is,
6961                  that means that this is a for or while style loop, with
6962                  a loop exit test at the start.  Thus, we can assume that
6963                  the loop condition was true when the loop was entered.
6964                  This allows us to change the loop exit condition to an
6966                  We start at the end and search backwards for the previous
6967                  NOTE.  If there is no NOTE_INSN_LOOP_VTOP for this loop,
6968                  the search will stop at the NOTE_INSN_LOOP_CONT.  */
6971                 vtop = PREV_INSN (vtop);
6972               while (GET_CODE (vtop) != NOTE
6973                      || NOTE_LINE_NUMBER (vtop) > 0
6974                      || NOTE_LINE_NUMBER (vtop) == NOTE_REPEATED_LINE_NUMBER
6975                      || NOTE_LINE_NUMBER (vtop) == NOTE_INSN_DELETED);
6976               if (NOTE_LINE_NUMBER (vtop) != NOTE_INSN_LOOP_VTOP)
6979               /* First check if we can do a vanilla loop reversal.  */
6980               if (initial_value == const0_rtx
6981                   /* If we have a decrement_and_branch_on_count, prefer
6982                      the NE test, since this will allow that instruction to
6984 #if ! defined (HAVE_decrement_and_branch_until_zero) && defined (HAVE_decrement_and_branch_on_count)
6985                   && (add_val != 1 || ! vtop)
6987                   && GET_CODE (comparison_value) == CONST_INT
6988                   /* Now do postponed overflow checks on COMPARISON_VAL.  */
6989                   && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
6990                         & comparison_sign_mask))
6992                   /* Register will always be nonnegative, with value
6993                      0 on last iteration */
6994                   add_adjust = add_val;
6998               else if (add_val == 1 && vtop)
7006               if (GET_CODE (comparison) == LE)
7007                 add_adjust -= add_val;
7009               /* If the initial value is not zero, or if the comparison
7010                  value is not an exact multiple of the increment, then we
7011                  can not reverse this loop.  */
7012               if (initial_value == const0_rtx
7013                   && GET_CODE (comparison_value) == CONST_INT)
7015                   if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
7020                   if (! no_use_except_counting || add_val != 1)
7024               final_value = comparison_value;
7026               /* Reset these in case we normalized the initial value
7027                  and comparison value above.  */
7028               if (GET_CODE (comparison_value) == CONST_INT
7029                   && GET_CODE (initial_value) == CONST_INT)
7031                   comparison_value = GEN_INT (comparison_val);
7033                     = GEN_INT (comparison_val + INTVAL (bl->initial_value));
7035               bl->initial_value = initial_value;
7037               /* Save some info needed to produce the new insns.  */
7038               reg = bl->biv->dest_reg;
7039               jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
7040               if (jump_label == pc_rtx)
7041                 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
7042               new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
7044               /* Set start_value; if this is not a CONST_INT, we need
7046                  Initialize biv to start_value before loop start.
7047                  The old initializing insn will be deleted as a
7048                  dead store by flow.c.  */
7049               if (initial_value == const0_rtx
7050                   && GET_CODE (comparison_value) == CONST_INT)
7052                   start_value = GEN_INT (comparison_val - add_adjust);
7053                   emit_insn_before (gen_move_insn (reg, start_value),
7056               else if (GET_CODE (initial_value) == CONST_INT)
7058                   rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
7059                   enum machine_mode mode = GET_MODE (reg);
7060                   enum insn_code icode
7061                     = add_optab->handlers[(int) mode].insn_code;
7062                   if (! (*insn_operand_predicate[icode][0]) (reg, mode)
7063                       || ! ((*insn_operand_predicate[icode][1])
7064                             (comparison_value, mode))
7065                       || ! (*insn_operand_predicate[icode][2]) (offset, mode))
7068                     = gen_rtx_PLUS (mode, comparison_value, offset);
7069                   emit_insn_before ((GEN_FCN (icode)
7070                                      (reg, comparison_value, offset)),
7072                   if (GET_CODE (comparison) == LE)
7073                     final_value = gen_rtx_PLUS (mode, comparison_value,
7076               else if (! add_adjust)
7078                   enum machine_mode mode = GET_MODE (reg);
7079                   enum insn_code icode
7080                     = sub_optab->handlers[(int) mode].insn_code;
7081                   if (! (*insn_operand_predicate[icode][0]) (reg, mode)
7082                       || ! ((*insn_operand_predicate[icode][1])
7083                             (comparison_value, mode))
7084                       || ! ((*insn_operand_predicate[icode][2])
7085                             (initial_value, mode)))
7088                     = gen_rtx_MINUS (mode, comparison_value, initial_value);
7089                   emit_insn_before ((GEN_FCN (icode)
7090                                      (reg, comparison_value, initial_value)),
7094                 /* We could handle the other cases too, but it'll be
7095                    better to have a testcase first.  */
7098               /* Add insn to decrement register, and delete insn
7099                  that incremented the register.  */
7100               p = emit_insn_before (gen_add2_insn (reg, new_add_val),
7102               delete_insn (bl->biv->insn);
7104               /* Update biv info to reflect its new status.  */
7106               bl->initial_value = start_value;
7107               bl->biv->add_val = new_add_val;
7109               /* Inc LABEL_NUSES so that delete_insn will
7110                  not delete the label.  */
7111               LABEL_NUSES (XEXP (jump_label, 0)) ++;
7113               /* Emit an insn after the end of the loop to set the biv's
7114                  proper exit value if it is used anywhere outside the loop.  */
7115               if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
7117                   || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
7118                 emit_insn_after (gen_move_insn (reg, final_value),
7121               /* Delete compare/branch at end of loop.  */
7122               delete_insn (PREV_INSN (loop_end));
7123               if (compare_and_branch == 2)
7124                 delete_insn (first_compare);
7126               /* Add new compare/branch insn at end of loop.  */
7128               emit_cmp_insn (reg, const0_rtx, cmp_code, NULL_RTX,
7129                              GET_MODE (reg), 0, 0);
7130               emit_jump_insn ((*bcc_gen_fctn[(int) cmp_code])
7131                               (XEXP (jump_label, 0)));
7132               tem = gen_sequence ();
7134               emit_jump_insn_before (tem, loop_end);
7138               for (tem = PREV_INSN (loop_end);
7139                    tem && GET_CODE (tem) != JUMP_INSN;
7140                    tem = PREV_INSN (tem))
7144                 JUMP_LABEL (tem) = XEXP (jump_label, 0);
7146               /* Increment of LABEL_NUSES done above.  */
7147               /* Register is now always nonnegative,
7148                  so add REG_NONNEG note to the branch.  */
7149               REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
7155               /* Mark that this biv has been reversed.  Each giv which depends
7156                  on this biv, and which is also live past the end of the loop
7157                  will have to be fixed up.  */
7161               if (loop_dump_stream)
7162                 fprintf (loop_dump_stream,
7163                          "Reversed loop and added reg_nonneg\n");
7173 /* Verify whether the biv BL appears to be eliminable,
7174    based on the insns in the loop that refer to it.
7175    LOOP_START is the first insn of the loop, and END is the end insn.
7177    If ELIMINATE_P is non-zero, actually do the elimination.
7179    THRESHOLD and INSN_COUNT are from loop_optimize and are used to
7180    determine whether invariant insns should be placed inside or at the
7181    start of the loop.  */
7184 maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
7185      struct iv_class *bl;
7189      int threshold, insn_count;
7191   rtx reg = bl->biv->dest_reg;
7194   /* Scan all insns in the loop, stopping if we find one that uses the
7195      biv in a way that we cannot eliminate.  */
7197   for (p = loop_start; p != end; p = NEXT_INSN (p))
7199       enum rtx_code code = GET_CODE (p);
7200       rtx where = threshold >= insn_count ? loop_start : p;
7202       if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
7203           && reg_mentioned_p (reg, PATTERN (p))
7204           && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
7206           if (loop_dump_stream)
7207             fprintf (loop_dump_stream,
7208                      "Cannot eliminate biv %d: biv used in insn %d.\n",
7209                      bl->regno, INSN_UID (p));
7216   if (loop_dump_stream)
7217     fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
7218              bl->regno, eliminate_p ? "was" : "can be");
7225 /* If BL appears in X (part of the pattern of INSN), see if we can
7226    eliminate its use.  If so, return 1.  If not, return 0.
7228    If BIV does not appear in X, return 1.
7230    If ELIMINATE_P is non-zero, actually do the elimination.  WHERE indicates
7231    where extra insns should be added.  Depending on how many items have been
7232    moved out of the loop, it will either be before INSN or at the start of
7236 maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
7238      struct iv_class *bl;
7242   enum rtx_code code = GET_CODE (x);
7243   rtx reg = bl->biv->dest_reg;
7244   enum machine_mode mode = GET_MODE (reg);
7245   struct induction *v;
7257       /* If we haven't already been able to do something with this BIV,
7258          we can't eliminate it.  */
7264       /* If this sets the BIV, it is not a problem.  */
7265       if (SET_DEST (x) == reg)
7268       /* If this is an insn that defines a giv, it is also ok because
7269          it will go away when the giv is reduced.  */
7270       for (v = bl->giv; v; v = v->next_iv)
7271         if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
7275       if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
7277           /* Can replace with any giv that was reduced and
7278              that has (MULT_VAL != 0) and (ADD_VAL == 0).
7279              Require a constant for MULT_VAL, so we know it's nonzero.
7280              ??? We disable this optimization to avoid potential
7283           for (v = bl->giv; v; v = v->next_iv)
7284             if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
7285                 && v->add_val == const0_rtx
7286                 && ! v->ignore && ! v->maybe_dead && v->always_computable
7290                 /* If the giv V had the auto-inc address optimization applied
7291                    to it, and INSN occurs between the giv insn and the biv
7292                    insn, then we must adjust the value used here.
7293                    This is rare, so we don't bother to do so.  */
7295                 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7296                      && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7297                     || (INSN_LUID (v->insn) > INSN_LUID (insn)
7298                         && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7304                 /* If the giv has the opposite direction of change,
7305                    then reverse the comparison.  */
7306                 if (INTVAL (v->mult_val) < 0)
7307                   new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
7308                                          const0_rtx, v->new_reg);
7312                 /* We can probably test that giv's reduced reg.  */
7313                 if (validate_change (insn, &SET_SRC (x), new, 0))
7317           /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
7318              replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
7319              Require a constant for MULT_VAL, so we know it's nonzero.
7320              ??? Do this only if ADD_VAL is a pointer to avoid a potential
7321              overflow problem.  */
7323           for (v = bl->giv; v; v = v->next_iv)
7324             if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
7325                 && ! v->ignore && ! v->maybe_dead && v->always_computable
7327                 && (GET_CODE (v->add_val) == SYMBOL_REF
7328                     || GET_CODE (v->add_val) == LABEL_REF
7329                     || GET_CODE (v->add_val) == CONST
7330                     || (GET_CODE (v->add_val) == REG
7331                         && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
7333                 /* If the giv V had the auto-inc address optimization applied
7334                    to it, and INSN occurs between the giv insn and the biv
7335                    insn, then we must adjust the value used here.
7336                    This is rare, so we don't bother to do so.  */
7338                 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7339                      && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7340                     || (INSN_LUID (v->insn) > INSN_LUID (insn)
7341                         && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7347                 /* If the giv has the opposite direction of change,
7348                    then reverse the comparison.  */
7349                 if (INTVAL (v->mult_val) < 0)
7350                   new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
7353                   new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
7354                                          copy_rtx (v->add_val));
7356                 /* Replace biv with the giv's reduced register.  */
7357                 update_reg_last_use (v->add_val, insn);
7358                 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
7361                 /* Insn doesn't support that constant or invariant.  Copy it
7362                    into a register (it will be a loop invariant.)  */
7363                 tem = gen_reg_rtx (GET_MODE (v->new_reg));
7365                 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
7368                 /* Substitute the new register for its invariant value in
7369                    the compare expression.  */
7370                 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
7371                 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
7380     case GT:  case GE:  case GTU:  case GEU:
7381     case LT:  case LE:  case LTU:  case LEU:
7382       /* See if either argument is the biv.  */
7383       if (XEXP (x, 0) == reg)
7384         arg = XEXP (x, 1), arg_operand = 1;
7385       else if (XEXP (x, 1) == reg)
7386         arg = XEXP (x, 0), arg_operand = 0;
7390       if (CONSTANT_P (arg))
7392           /* First try to replace with any giv that has constant positive
7393              mult_val and constant add_val.  We might be able to support
7394              negative mult_val, but it seems complex to do it in general.  */
7396           for (v = bl->giv; v; v = v->next_iv)
7397             if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
7398                 && (GET_CODE (v->add_val) == SYMBOL_REF
7399                     || GET_CODE (v->add_val) == LABEL_REF
7400                     || GET_CODE (v->add_val) == CONST
7401                     || (GET_CODE (v->add_val) == REG
7402                         && REGNO_POINTER_FLAG (REGNO (v->add_val))))
7403                 && ! v->ignore && ! v->maybe_dead && v->always_computable
7406                 /* If the giv V had the auto-inc address optimization applied
7407                    to it, and INSN occurs between the giv insn and the biv
7408                    insn, then we must adjust the value used here.
7409                    This is rare, so we don't bother to do so.  */
7411                 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7412                      && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7413                     || (INSN_LUID (v->insn) > INSN_LUID (insn)
7414                         && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7420                 /* Replace biv with the giv's reduced reg.  */
7421                 XEXP (x, 1-arg_operand) = v->new_reg;
7423                 /* If all constants are actually constant integers and
7424                    the derived constant can be directly placed in the COMPARE,
7426                 if (GET_CODE (arg) == CONST_INT
7427                     && GET_CODE (v->mult_val) == CONST_INT
7428                     && GET_CODE (v->add_val) == CONST_INT
7429                     && validate_change (insn, &XEXP (x, arg_operand),
7430                                         GEN_INT (INTVAL (arg)
7431                                                  * INTVAL (v->mult_val)
7432                                                  + INTVAL (v->add_val)), 0))
7435                 /* Otherwise, load it into a register.  */
7436                 tem = gen_reg_rtx (mode);
7437                 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
7438                 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
7441                 /* If that failed, put back the change we made above.  */
7442                 XEXP (x, 1-arg_operand) = reg;
7445           /* Look for giv with positive constant mult_val and nonconst add_val.
7446              Insert insns to calculate new compare value.
7447              ??? Turn this off due to possible overflow.  */
7449           for (v = bl->giv; v; v = v->next_iv)
7450             if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
7451                 && ! v->ignore && ! v->maybe_dead && v->always_computable
7457                 /* If the giv V had the auto-inc address optimization applied
7458                    to it, and INSN occurs between the giv insn and the biv
7459                    insn, then we must adjust the value used here.
7460                    This is rare, so we don't bother to do so.  */
7462                 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7463                      && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7464                     || (INSN_LUID (v->insn) > INSN_LUID (insn)
7465                         && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7471                 tem = gen_reg_rtx (mode);
7473                 /* Replace biv with giv's reduced register.  */
7474                 validate_change (insn, &XEXP (x, 1 - arg_operand),
7477                 /* Compute value to compare against.  */
7478                 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
7479                 /* Use it in this insn.  */
7480                 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
7481                 if (apply_change_group ())
7485       else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
7487           if (invariant_p (arg) == 1)
7489               /* Look for giv with constant positive mult_val and nonconst
7490                  add_val.  Insert insns to compute new compare value.
7491                  ??? Turn this off due to possible overflow.  */
7493               for (v = bl->giv; v; v = v->next_iv)
7494                 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
7495                     && ! v->ignore && ! v->maybe_dead && v->always_computable
7501                     /* If the giv V had the auto-inc address optimization applied
7502                        to it, and INSN occurs between the giv insn and the biv
7503                        insn, then we must adjust the value used here.
7504                        This is rare, so we don't bother to do so.  */
7506                     && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7507                          && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7508                         || (INSN_LUID (v->insn) > INSN_LUID (insn)
7509                             && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7515                     tem = gen_reg_rtx (mode);
7517                     /* Replace biv with giv's reduced register.  */
7518                     validate_change (insn, &XEXP (x, 1 - arg_operand),
7521                     /* Compute value to compare against.  */
7522                     emit_iv_add_mult (arg, v->mult_val, v->add_val,
7524                     validate_change (insn, &XEXP (x, arg_operand), tem, 1);
7525                     if (apply_change_group ())
7530           /* This code has problems.  Basically, you can't know when
7531              seeing if we will eliminate BL, whether a particular giv
7532              of ARG will be reduced.  If it isn't going to be reduced,
7533              we can't eliminate BL.  We can try forcing it to be reduced,
7534              but that can generate poor code.
7536              The problem is that the benefit of reducing TV, below, should
7537              be increased if BL can actually be eliminated, but this means
7538              we might have to do a topological sort of the order in which
7539              we try to process biv.  It doesn't seem worthwhile to do
7540              this sort of thing now.  */
7543       /* Otherwise the reg compared with had better be a biv.  */
7544       if (GET_CODE (arg) != REG
7545           || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
7548       /* Look for a pair of givs, one for each biv,
7549          with identical coefficients.  */
7550       for (v = bl->giv; v; v = v->next_iv)
7552           struct induction *tv;
7554           if (v->ignore || v->maybe_dead || v->mode != mode)
7557           for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
7558             if (! tv->ignore && ! tv->maybe_dead
7559                 && rtx_equal_p (tv->mult_val, v->mult_val)
7560                 && rtx_equal_p (tv->add_val, v->add_val)
7561                 && tv->mode == mode)
7563                 /* If the giv V had the auto-inc address optimization applied
7564                    to it, and INSN occurs between the giv insn and the biv
7565                    insn, then we must adjust the value used here.
7566                    This is rare, so we don't bother to do so.  */
7568                 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7569                      && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7570                     || (INSN_LUID (v->insn) > INSN_LUID (insn)
7571                         && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7577                 /* Replace biv with its giv's reduced reg.  */
7578                 XEXP (x, 1-arg_operand) = v->new_reg;
7579                 /* Replace other operand with the other giv's
7581                 XEXP (x, arg_operand) = tv->new_reg;
7588       /* If we get here, the biv can't be eliminated.  */
7592       /* If this address is a DEST_ADDR giv, it doesn't matter if the
7593          biv is used in it, since it will be replaced.  */
7594       for (v = bl->giv; v; v = v->next_iv)
7595         if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
7603   /* See if any subexpression fails elimination.  */
7604   fmt = GET_RTX_FORMAT (code);
7605   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7610           if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
7611                                        eliminate_p, where))
7616           for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7617             if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
7618                                          eliminate_p, where))
7627 /* Return nonzero if the last use of REG
7628    is in an insn following INSN in the same basic block.  */
7631 last_use_this_basic_block (reg, insn)
7637        n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
7640       if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
7646 /* Called via `note_stores' to record the initial value of a biv.  Here we
7647    just record the location of the set and process it later.  */
7650 record_initial (dest, set)
7654   struct iv_class *bl;
7656   if (GET_CODE (dest) != REG
7657       || REGNO (dest) >= max_reg_before_loop
7658       || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
7661   bl = reg_biv_class[REGNO (dest)];
7663   /* If this is the first set found, record it.  */
7664   if (bl->init_insn == 0)
7666       bl->init_insn = note_insn;
7671 /* If any of the registers in X are "old" and currently have a last use earlier
7672    than INSN, update them to have a last use of INSN.  Their actual last use
7673    will be the previous insn but it will not have a valid uid_luid so we can't
7677 update_reg_last_use (x, insn)
7681   /* Check for the case where INSN does not have a valid luid.  In this case,
7682      there is no need to modify the regno_last_uid, as this can only happen
7683      when code is inserted after the loop_end to set a pseudo's final value,
7684      and hence this insn will never be the last use of x.  */
7685   if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
7686       && INSN_UID (insn) < max_uid_for_loop
7687       && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
7688     REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
7692       register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
7693       for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7696             update_reg_last_use (XEXP (x, i), insn);
7697           else if (fmt[i] == 'E')
7698             for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7699               update_reg_last_use (XVECEXP (x, i, j), insn);
7704 /* Given a jump insn JUMP, return the condition that will cause it to branch
7705    to its JUMP_LABEL.  If the condition cannot be understood, or is an
7706    inequality floating-point comparison which needs to be reversed, 0 will
7709    If EARLIEST is non-zero, it is a pointer to a place where the earliest
7710    insn used in locating the condition was found.  If a replacement test
7711    of the condition is desired, it should be placed in front of that
7712    insn and we will be sure that the inputs are still valid.
7714    The condition will be returned in a canonical form to simplify testing by
7715    callers.  Specifically:
7717    (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
7718    (2) Both operands will be machine operands; (cc0) will have been replaced.
7719    (3) If an operand is a constant, it will be the second operand.
7720    (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
7721        for GE, GEU, and LEU.  */
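/* Examples of rule (4): (LE (reg 3) (const_int 4)) is returned as
   (LT (reg 3) (const_int 5)), and (GEU (reg 3) (const_int 1)) as
   (GTU (reg 3) (const_int 0)), provided the adjusted constant still fits
   the mode.  */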
7724 get_condition (jump, earliest)
7733   int reverse_code = 0;
7734   int did_reverse_condition = 0;
7735   enum machine_mode mode;
7737   /* If this is not a standard conditional jump, we can't parse it.  */
7738   if (GET_CODE (jump) != JUMP_INSN
7739       || ! condjump_p (jump) || simplejump_p (jump))
7742   code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
7743   mode = GET_MODE (XEXP (SET_SRC (PATTERN (jump)), 0));
7744   op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
7745   op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
7750   /* If this branches to JUMP_LABEL when the condition is false, reverse
7752   if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
7753       && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
7754     code = reverse_condition (code), did_reverse_condition ^= 1;
7756   /* If we are comparing a register with zero, see if the register is set
7757      in the previous insn to a COMPARE or a comparison operation.  Perform
7758      the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
7761   while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
7763       /* Set non-zero when we find something of interest.  */
7767       /* If comparison with cc0, import actual comparison from compare
7771           if ((prev = prev_nonnote_insn (prev)) == 0
7772               || GET_CODE (prev) != INSN
7773               || (set = single_set (prev)) == 0
7774               || SET_DEST (set) != cc0_rtx)
7777           op0 = SET_SRC (set);
7778           op1 = CONST0_RTX (GET_MODE (op0));
7784       /* If this is a COMPARE, pick up the two things being compared.  */
7785       if (GET_CODE (op0) == COMPARE)
7787           op1 = XEXP (op0, 1);
7788           op0 = XEXP (op0, 0);
7791       else if (GET_CODE (op0) != REG)
7794       /* Go back to the previous insn.  Stop if it is not an INSN.  We also
7795          stop if it isn't a single set or if it has a REG_INC note because
7796          we don't want to bother dealing with it.  */
7798       if ((prev = prev_nonnote_insn (prev)) == 0
7799           || GET_CODE (prev) != INSN
7800           || FIND_REG_INC_NOTE (prev, 0)
7801           || (set = single_set (prev)) == 0)
7804       /* If this is setting OP0, get what it sets it to if it looks
7806       if (rtx_equal_p (SET_DEST (set), op0))
7808           enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
7810           /* ??? We may not combine comparisons done in a CCmode with
7811              comparisons not done in a CCmode.  This is to aid targets
7812              like Alpha that have an IEEE compliant EQ instruction, and
7813              a non-IEEE compliant BEQ instruction.  The use of CCmode is
7814              actually artificial, simply to prevent the combination, but
7815              should not affect other platforms.  */
7817           if ((GET_CODE (SET_SRC (set)) == COMPARE
7820                     && GET_MODE_CLASS (inner_mode) == MODE_INT
7821                     && (GET_MODE_BITSIZE (inner_mode)
7822                         <= HOST_BITS_PER_WIDE_INT)
7823                     && (STORE_FLAG_VALUE
7824                         & ((HOST_WIDE_INT) 1
7825                            << (GET_MODE_BITSIZE (inner_mode) - 1))))
7826 #ifdef FLOAT_STORE_FLAG_VALUE
7828                        && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7829                        && FLOAT_STORE_FLAG_VALUE < 0)
7832                    && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
7833               && ((GET_MODE_CLASS (mode) == MODE_CC)
7834                   == (GET_MODE_CLASS (inner_mode) == MODE_CC)))
7836           else if (((code == EQ
7838                           && (GET_MODE_BITSIZE (inner_mode)
7839                               <= HOST_BITS_PER_WIDE_INT)
7840                           && GET_MODE_CLASS (inner_mode) == MODE_INT
7841                           && (STORE_FLAG_VALUE
7842                               & ((HOST_WIDE_INT) 1
7843                                  << (GET_MODE_BITSIZE (inner_mode) - 1))))
7844 #ifdef FLOAT_STORE_FLAG_VALUE
7846                              && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7847                              && FLOAT_STORE_FLAG_VALUE < 0)
7850                    && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
7851                    && ((GET_MODE_CLASS (mode) == MODE_CC)
7852                        == (GET_MODE_CLASS (inner_mode) == MODE_CC)))
7854               /* We might have reversed a LT to get a GE here.  But this wasn't
7855                  actually the comparison of data, so we don't flag that we
7856                  have had to reverse the condition.  */
7857               did_reverse_condition ^= 1;
7865       else if (reg_set_p (op0, prev))
7866         /* If this sets OP0, but not directly, we have to give up.  */
7871           if (GET_RTX_CLASS (GET_CODE (x)) == '<')
7872             code = GET_CODE (x);
7875               code = reverse_condition (code);
7876               did_reverse_condition ^= 1;
7880           op0 = XEXP (x, 0), op1 = XEXP (x, 1);
7886   /* If constant is first, put it last.  */
7887   if (CONSTANT_P (op0))
7888     code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
7890   /* If OP0 is the result of a comparison, we weren't able to find what
7891      was really being compared, so fail.  */
7892   if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
7895   /* Canonicalize any ordered comparison with integers involving equality
7896      if we can do computations in the relevant mode and we do not
7899   if (GET_CODE (op1) == CONST_INT
7900       && GET_MODE (op0) != VOIDmode
7901       && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
7903       HOST_WIDE_INT const_val = INTVAL (op1);
7904       unsigned HOST_WIDE_INT uconst_val = const_val;
7905       unsigned HOST_WIDE_INT max_val
7906         = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
7911           if (const_val != max_val >> 1)
7912             code = LT, op1 = GEN_INT (const_val + 1);
7915           /* When cross-compiling, const_val might be sign-extended from
7916              BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
7918           if ((const_val & max_val)
7919               != (((HOST_WIDE_INT) 1
7920                    << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
7921             code = GT, op1 = GEN_INT (const_val - 1);
7925           if (uconst_val < max_val)
7926             code = LTU, op1 = GEN_INT (uconst_val + 1);
7930           if (uconst_val != 0)
7931             code = GTU, op1 = GEN_INT (uconst_val - 1);
7939   /* If this was floating-point and we reversed anything other than an
7940      EQ or NE, return zero.  */
7941   if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
7942       && did_reverse_condition && code != NE && code != EQ
7944       && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
7948   /* Never return CC0; return zero instead.  */
7953   return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
7956 /* Similar to above routine, except that we also put an invariant last
7957    unless both operands are invariants.  */
7960 get_condition_for_loop (x)
7963   rtx comparison = get_condition (x, NULL_PTR);
7966       || ! invariant_p (XEXP (comparison, 0))
7967       || invariant_p (XEXP (comparison, 1)))
7970   return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
7971                          XEXP (comparison, 1), XEXP (comparison, 0));
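/* For instance, if get_condition returns (GT (reg n) (reg i)) where N is
   loop-invariant and I is not, the swap below yields (LT (reg i) (reg n)),
   so an invariant operand always ends up last.  Register names are
   illustrative.  */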
7975 /* Analyze a loop in order to instrument it with the use of a count register.
7976    loop_start and loop_end are the first and last insns of the loop.
7977    This function works in cooperation with insert_bct ().
7978    loop_can_insert_bct[loop_num] is set according to whether the optimization
7979    is applicable to the loop.  When it is applicable, the following variables
7981       loop_start_value[loop_num]
7982       loop_comparison_value[loop_num]
7983       loop_increment[loop_num]
7984       loop_comparison_code[loop_num]  */
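/* For a simple counting loop such as  for (i = 0; i < 100; i += 2)  the
   values recorded would be loop_start_value = 0, loop_comparison_value = 100,
   loop_increment = 2 and loop_comparison_code = LT (illustrative; the exact
   rtx forms depend on the target).  */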
7986 #ifdef HAVE_decrement_and_branch_on_count
7988 analyze_loop_iterations (loop_start
, loop_end
)
7989 rtx loop_start
, loop_end
;
7991 rtx comparison
, comparison_value
;
7992 rtx iteration_var
, initial_value
, increment
;
7993 enum rtx_code comparison_code
;
7999 /* loop_variable mode */
8000 enum machine_mode original_mode
;
8002 /* find the number of the loop */
8003 int loop_num
= uid_loop_num
[INSN_UID (loop_start
)];
8005 /* we change our mind only when we are sure that loop will be instrumented */
8006 loop_can_insert_bct
[loop_num
] = 0;
8008 /* is the optimization suppressed. */
8009 if ( !flag_branch_on_count_reg
)
8012 /* make sure that count-reg is not in use */
8013 if (loop_used_count_register
[loop_num
]){
8014 if (loop_dump_stream
)
8015 fprintf (loop_dump_stream
,
8016 "analyze_loop_iterations %d: BCT instrumentation failed: count register already in use\n",
8021 /* make sure that the function has no indirect jumps. */
8022 if (indirect_jump_in_function
){
8023 if (loop_dump_stream
)
8024 fprintf (loop_dump_stream
,
8025 "analyze_loop_iterations %d: BCT instrumentation failed: indirect jump in function\n",
8030 /* make sure that the last loop insn is a conditional jump */
8031 last_loop_insn
= PREV_INSN (loop_end
);
8032 if (GET_CODE (last_loop_insn
) != JUMP_INSN
|| !condjump_p (last_loop_insn
)) {
8033 if (loop_dump_stream
)
8034 fprintf (loop_dump_stream
,
8035 "analyze_loop_iterations %d: BCT instrumentation failed: invalid jump at loop end\n",
8040 /* First find the iteration variable. If the last insn is a conditional
8041 branch, and the insn preceding it tests a register value, make that
8042 register the iteration variable. */
8044 /* We used to use prev_nonnote_insn here, but that fails because it might
8045 accidentally get the branch for a contained loop if the branch for this
8046 loop was deleted. We can only trust branches immediately before the
8049 comparison
= get_condition_for_loop (last_loop_insn
);
8050 /* ??? Get_condition may switch position of induction variable and
8051 invariant register when it canonicalizes the comparison. */
8053 if (comparison
== 0) {
8054 if (loop_dump_stream
)
8055 fprintf (loop_dump_stream
,
8056 "analyze_loop_iterations %d: BCT instrumentation failed: comparison not found\n",
8061 comparison_code
= GET_CODE (comparison
);
8062 iteration_var
= XEXP (comparison
, 0);
8063 comparison_value
= XEXP (comparison
, 1);
8065 original_mode
= GET_MODE (iteration_var
);
8066 if (GET_MODE_CLASS (original_mode
) != MODE_INT
8067 || GET_MODE_SIZE (original_mode
) != UNITS_PER_WORD
) {
8068 if (loop_dump_stream
)
8069 fprintf (loop_dump_stream
,
8070 "analyze_loop_iterations %d: BCT Instrumentation failed: loop variable not integer\n",
  /* Get info about loop bounds and increment.  */
  iteration_info (iteration_var, &initial_value, &increment,
                  loop_start, loop_end);

  /* Make sure that all required loop data were found.  */
  if (!(initial_value && increment && comparison_value
        && invariant_p (comparison_value) && invariant_p (increment)
        && ! indirect_jump_in_function))
    {
      if (loop_dump_stream) {
        fprintf (loop_dump_stream,
                 "analyze_loop_iterations %d: BCT instrumentation failed because of wrong loop: ", loop_num);
        if (!(initial_value && increment && comparison_value)) {
          fprintf (loop_dump_stream, "\tbounds not available: ");
          if ( ! initial_value )
            fprintf (loop_dump_stream, "initial ");
          if ( ! increment )
            fprintf (loop_dump_stream, "increment ");
          if ( ! comparison_value )
            fprintf (loop_dump_stream, "comparison ");
          fprintf (loop_dump_stream, "\n");
        }
        if (!invariant_p (comparison_value) || !invariant_p (increment))
          fprintf (loop_dump_stream, "\tloop bounds not invariant\n");
      }
      return;
    }
  /* Make sure that the increment is constant.  */
  if (GET_CODE (increment) != CONST_INT) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
               "analyze_loop_iterations %d: instrumentation failed: not arithmetic loop\n",
               loop_num);
    return;
  }
  /* Make sure that the loop contains neither a function call nor a jump on
     table (the count register might be altered by the called function, and
     might be used for a branch on table).  */
  for (insn = loop_start; insn && insn != loop_end; insn = NEXT_INSN (insn)) {
    if (GET_CODE (insn) == CALL_INSN){
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "analyze_loop_iterations %d: BCT instrumentation failed: function call in the loop\n",
                 loop_num);
      return;
    }

    if (GET_CODE (insn) == JUMP_INSN
        && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
            || GET_CODE (PATTERN (insn)) == ADDR_VEC)){
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "analyze_loop_iterations %d: BCT instrumentation failed: computed branch in the loop\n",
                 loop_num);
      return;
    }
  }
  /* At this point, we are sure that the loop can be instrumented with BCT.
     Some of the loops, however, will not be instrumented - the final decision
     is taken by insert_bct ().  */
  if (loop_dump_stream)
    fprintf (loop_dump_stream,
             "analyze_loop_iterations: loop (luid =%d) can be BCT instrumented.\n",
             loop_num);

  /* Mark all enclosing loops as unable to use the count register.  */
  /* ???: In fact, since insert_bct may decide not to instrument this loop,
     marking here may prevent instrumenting an enclosing loop that could
     actually be instrumented.  But since this is rare, it is safer to mark
     here in case the order of calling (analyze/insert)_bct would be changed.  */
  for (i = loop_num; i != -1; i = loop_outer_loop[i])
    loop_used_count_register[i] = 1;
  /* Set the data structures that will be used by the instrumentation phase.  */
  loop_start_value[loop_num] = initial_value;
  loop_comparison_value[loop_num] = comparison_value;
  loop_increment[loop_num] = increment;
  loop_comparison_code[loop_num] = comparison_code;
  loop_can_insert_bct[loop_num] = 1;
}
/* Instrument a loop for insertion of a bct instruction.  We distinguish
   between loops with compile-time bounds and those with run-time bounds.
   The loop behaviour is analyzed according to the following
   characteristics/variables:

   ; comparison-value: the value to which the iteration counter is compared.
   ; initial-value: iteration-counter initial value.
   ; increment: iteration-counter increment.
   ; Computed variables:
   ; increment-direction: the sign of the increment.
   ; compare-direction: '1' for LT, LE, '-1' for GT, GE, '0' for NE.
   ; range-direction: sign (comparison-value - initial-value)

   We give up on the following cases:
   ; loop variable overflow.
   ; run-time loop bounds with comparison code NE.  */
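
/* A small worked example (illustrative only, not taken from the sources):
   for a loop equivalent to  for (i = 0; i < n; i += 2)  the continuation
   condition tested at the loop end is  i < n,  so we get
   increment-direction = 1 (the increment is +2), compare-direction = 1 (LT),
   and, once the bounds are known, range-direction = sign (n - 0).
   When compare-direction and increment-direction disagree the loop is
   rejected, since such a loop can only terminate through overflow of the
   loop variable.  */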
static void
insert_bct (loop_start, loop_end)
     rtx loop_start, loop_end;
{
  rtx initial_value, comparison_value, increment;
  enum rtx_code comparison_code;

  int increment_direction, compare_direction;

  /* If the loop condition is <= or >=, the number of iterations
     is 1 more than the range of the bounds of the loop.  */
  int add_iteration = 0;

  /* The only machine mode we work with is the integer of the size that the
     machine has.  */
  enum machine_mode loop_var_mode = word_mode;

  int loop_num = uid_loop_num [INSN_UID (loop_start)];
  /* Get the loop-variables.  No need to check that these are valid - already
     checked in analyze_loop_iterations ().  */
  comparison_code = loop_comparison_code[loop_num];
  initial_value = loop_start_value[loop_num];
  comparison_value = loop_comparison_value[loop_num];
  increment = loop_increment[loop_num];

  /* Check analyze_loop_iterations ()'s decision for this loop.  */
  if (! loop_can_insert_bct[loop_num]){
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
               "insert_bct: [%d] - was decided not to instrument by analyze_loop_iterations ()\n",
               loop_num);
    return;
  }

  /* It's impossible to instrument a completely unrolled loop.  */
  if (loop_unroll_factor [loop_num] == -1)
    return;
  /* Make sure that the last loop insn is a conditional jump.
     This check is repeated from analyze_loop_iterations (),
     because unrolling might have changed that.  */
  if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
      || !condjump_p (PREV_INSN (loop_end))) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
               "insert_bct: not instrumenting BCT because of invalid branch\n");
    return;
  }

  /* Fix the increment in case the loop was unrolled.  */
  if (loop_unroll_factor [loop_num] > 1)
    increment = GEN_INT (INTVAL (increment) * loop_unroll_factor [loop_num]);
  /* Determine the properties and directions of the loop.  */
  increment_direction = (INTVAL (increment) > 0) ? 1 : -1;
  switch ( comparison_code ) {
  case LEU:
  case LE:
    compare_direction = 1;
    add_iteration = 1;
    break;
  case GEU:
  case GE:
    compare_direction = -1;
    add_iteration = 1;
    break;
  case EQ:
    /* In this case we cannot know the number of iterations.  */
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
               "insert_bct: %d: loop cannot be instrumented: == in condition\n",
               loop_num);
    return;
  case LTU:
  case LT:
    compare_direction = 1;
    break;
  case GTU:
  case GT:
    compare_direction = -1;
    break;
  case NE:
    compare_direction = 0;
    break;
  default:
    abort ();
  }
  /* Make sure that the loop does not end by an overflow.  */
  if (compare_direction != increment_direction) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
               "insert_bct: %d: loop cannot be instrumented: terminated by overflow\n",
               loop_num);
    return;
  }
  /* Try to instrument the loop.  */

  /* Handle the simpler case, where the bounds are known at compile time.  */
  if (GET_CODE (initial_value) == CONST_INT
      && GET_CODE (comparison_value) == CONST_INT)
    {
      int n_iterations;
      int increment_value_abs = INTVAL (increment) * increment_direction;

      /* Check the relation between compare-val and initial-val.  */
      int difference = INTVAL (comparison_value) - INTVAL (initial_value);
      int range_direction = (difference > 0) ? 1 : -1;
      /* Make sure the loop executes enough iterations to gain from BCT.  */
      if (difference > -3 && difference < 3) {
        if (loop_dump_stream)
          fprintf (loop_dump_stream,
                   "insert_bct: loop %d not BCT instrumented: too small iteration count.\n",
                   loop_num);
        return;
      }

      /* Make sure that the loop executes at least once.  */
      if ((range_direction ==  1 && compare_direction == -1)
          || (range_direction == -1 && compare_direction ==  1))
        {
          if (loop_dump_stream)
            fprintf (loop_dump_stream,
                     "insert_bct: loop %d: does not iterate even once. Not instrumenting.\n",
                     loop_num);
          return;
        }
      /* Make sure that the loop does not end by an overflow (with compile-time
         bounds we must have an additional check for overflow, because here
         we also support the compare code of 'NE').  */
      if (comparison_code == NE
          && increment_direction != range_direction) {
        if (loop_dump_stream)
          fprintf (loop_dump_stream,
                   "insert_bct (compile time bounds): %d: loop not instrumented: terminated by overflow\n",
                   loop_num);
        return;
      }
      /* Determine the number of iterations by:
         ;                  compare-val - initial-val + (increment - 1) + additional-iteration
         ; num_iterations = -----------------------------------------------------------------
         ;                                           increment
      */
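
      /* A worked example (illustrative only): for a loop equivalent to
             for (i = 0; i < 10; i += 3)
         compare-val = 10, initial-val = 0, increment = 3 and
         additional-iteration = 0 (the comparison is '<', not '<='), so
         num_iterations = (10 - 0 + (3 - 1) + 0) / 3 = 4, which matches the
         executed iterations i = 0, 3, 6, 9.  */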
      difference = (range_direction > 0) ? difference : -difference;

      /* Debugging output; normally disabled.  */
#if 0
      fprintf (stderr, "difference is: %d\n", difference); /* @*/
      fprintf (stderr, "increment_value_abs is: %d\n", increment_value_abs); /* @*/
      fprintf (stderr, "add_iteration is: %d\n", add_iteration); /* @*/
      fprintf (stderr, "INTVAL (comparison_value) is: %d\n", INTVAL (comparison_value)); /* @*/
      fprintf (stderr, "INTVAL (initial_value) is: %d\n", INTVAL (initial_value)); /* @*/
#endif
      if (increment_value_abs == 0) {
        fprintf (stderr, "insert_bct: error: increment == 0 !!!\n");
        abort ();
      }

      /* Calculate the number of iterations.  */
      n_iterations = (difference + increment_value_abs - 1 + add_iteration)
                     / increment_value_abs;

#if 0
      fprintf (stderr, "number of iterations is: %d\n", n_iterations); /* @*/
#endif

      instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));

      /* Done with this loop.  */
      return;
    }
  /* Handle the more complex case, where the bounds are NOT known at compile
     time.  In this case we generate a run-time calculation of the number of
     iterations.  */

  /* With run-time bounds, if the compare is of the form '!=' we give up.  */
  if (comparison_code == NE) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
               "insert_bct: fail for loop %d: runtime bounds with != comparison\n",
               loop_num);
    return;
  }
8374 loop executes at least once. */
8376 rtx iterations_num_reg
;
8378 int increment_value_abs
= INTVAL (increment
) * increment_direction
;
8380 /* make sure that the increment is a power of two, otherwise (an
8381 expensive) divide is needed. */
8382 if (exact_log2 (increment_value_abs
) == -1)
8384 if (loop_dump_stream
)
8385 fprintf (loop_dump_stream
,
8386 "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
    /* Compute the number of iterations.  */
    start_sequence ();

    /* Again, the number of iterations is calculated by:
       ;                  compare-val - initial-val + (increment - 1) + additional-iteration
       ; num_iterations = -----------------------------------------------------------------
       ;                                           increment
    */
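
    /* Illustrative note (not part of the original comments): since the
       increment was verified above to be a power of two, this division
       could in principle be emitted as a shift; e.g. with an increment of 4
       the computation is  num_iterations = (difference + 3) >> 2  for the
       non-negative differences produced here, rather than a full divide.  */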
    /* ??? Do we have to call copy_rtx here before passing the rtx to
       expand_binop?  */
    if (compare_direction > 0) {
      /* <, <= : the loop variable is increasing.  */
      temp_reg = expand_binop (loop_var_mode, sub_optab, comparison_value,
                               initial_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
    } else {
      temp_reg = expand_binop (loop_var_mode, sub_optab, initial_value,
                               comparison_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
    }

    if (increment_value_abs - 1 + add_iteration != 0)
      temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
                               GEN_INT (increment_value_abs - 1 + add_iteration),
                               NULL_RTX, 0, OPTAB_LIB_WIDEN);
    if (increment_value_abs != 1)
      {
        /* ??? This will generate an expensive divide instruction for
           most targets.  The original authors apparently expected this
           to be a shift, since they test for power-of-2 divisors above,
           but just naively generating a divide instruction will not give
           a shift.  It happens to work for the PowerPC target because
           the rs6000.md file has a divide pattern that emits shifts.
           It will probably not work for any other target.  */
        iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
                                           temp_reg,
                                           GEN_INT (increment_value_abs),
                                           NULL_RTX, 0, OPTAB_LIB_WIDEN);
      }
    else
      iterations_num_reg = temp_reg;

    sequence = gen_sequence ();
    end_sequence ();
    emit_insn_before (sequence, loop_start);
    instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
  }
}
/* Instrument the loop by inserting a bct in it.  This is done in the
   following way:
   1. A new register is created and assigned the hard register number of the
      count register.
   2. In the head of the loop the new variable is initialized to the value
      passed in the loop_num_iterations parameter.
   3. At the end of the loop, a comparison of the register with 0 is
      generated.  The created comparison follows the pattern defined for the
      decrement_and_branch_on_count insn, so this insn will be recognized
      when assembly is generated.
   4. The compare&branch on the old variable is deleted.  So, if the loop
      variable was not used elsewhere, it will be eliminated by data-flow
      analysis.  */
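
/* Conceptually (an illustrative sketch, not the exact RTL that is emitted):
   a loop that originated from
       for (i = 0; i < n; i++)  body;
   ends up behaving like
       ctr = <number of iterations computed by insert_bct>;
       do { body; } while (--ctr != 0);
   where the decrement-and-test is the decrement_and_branch_on_count insn and
   the original compare-and-branch on `i' has been deleted.  */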
static void
instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
     rtx loop_start, loop_end;
     rtx loop_num_iterations;
{
  rtx temp_reg1, temp_reg2;
  rtx start_label;
  rtx sequence;

  enum machine_mode loop_var_mode = word_mode;

  if (HAVE_decrement_and_branch_on_count)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "Loop: Inserting BCT\n");

      /* Discard the original jump to continue the loop.  The original compare
         result may still be live, so it cannot be discarded explicitly.  */
      delete_insn (PREV_INSN (loop_end));

      /* Insert the label which will delimit the start of the loop.  */
      start_label = gen_label_rtx ();
      emit_label_after (start_label, loop_start);

      /* Insert initialization of the count register into the loop header.  */
      start_sequence ();

      temp_reg1 = gen_reg_rtx (loop_var_mode);
      emit_insn (gen_move_insn (temp_reg1, loop_num_iterations));

      /* This will be the count register.  */
      temp_reg2 = gen_rtx_REG (loop_var_mode, COUNT_REGISTER_REGNUM);
      /* We have to move the value to the count register from a GPR
         because the rtx pointed to by loop_num_iterations could contain
         an expression which cannot be moved into the count register.  */
      emit_insn (gen_move_insn (temp_reg2, temp_reg1));

      sequence = gen_sequence ();
      end_sequence ();
      emit_insn_before (sequence, loop_start);

      /* Insert a new comparison on the count register instead of the
         old one, generating the needed BCT pattern (that will be
         later recognized by the assembly generation phase).  */
      emit_jump_insn_before (gen_decrement_and_branch_on_count (temp_reg2,
                                                                start_label),
                             loop_end);
      LABEL_NUSES (start_label)++;
    }
}
#endif /* HAVE_decrement_and_branch_on_count */

/* Scan the function and determine whether it has indirect (computed) jumps.

   This is taken mostly from flow.c; similar code exists elsewhere
   in the compiler.  It may be useful to put this into rtlanal.c.  */

static int
indirect_jump_in_function_p (start)
     rtx start;
{
  rtx insn;

  for (insn = start; insn; insn = NEXT_INSN (insn))
    if (computed_jump_p (insn))
      return 1;

  return 0;
}
/* Add MEM to the LOOP_MEMS array, if appropriate.  See the
   documentation for LOOP_MEMS for the definition of `appropriate'.
   This function is called from prescan_loop via for_each_rtx.  */

static int
insert_loop_mem (mem, data)
     rtx *mem;
     void *data;
{
  int i;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
         CONST_DOUBLE, so there's no need to traverse into this.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  /* See if we've already seen this MEM.  */
  for (i = 0; i < loop_mems_idx; ++i)
    if (rtx_equal_p (m, loop_mems[i].mem))
      {
        if (GET_MODE (m) != GET_MODE (loop_mems[i].mem))
          /* The modes of the two memory accesses are different.  If
             this happens, something tricky is going on, and we just
             don't optimize accesses to this MEM.  */
          loop_mems[i].optimize = 0;

        return 0;
      }

  /* Resize the array, if necessary.  */
  if (loop_mems_idx == loop_mems_allocated)
    {
      if (loop_mems_allocated != 0)
        loop_mems_allocated *= 2;
      else
        loop_mems_allocated = 32;

      loop_mems = (loop_mem_info *)
        xrealloc (loop_mems,
                  loop_mems_allocated * sizeof (loop_mem_info));
    }

  /* Actually insert the MEM.  */
  loop_mems[loop_mems_idx].mem = m;
  /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
     because we can't put it in a register.  We still store it in the
     table, though, so that if we see the same address later, but in a
     non-BLK mode, we'll not think we can optimize it at that point.  */
  loop_mems[loop_mems_idx].optimize = (GET_MODE (m) != BLKmode);
  loop_mems[loop_mems_idx].reg = NULL_RTX;
  ++loop_mems_idx;

  return 0;
}
/* Like load_mems, but also ensures that N_TIMES_SET,
   MAY_NOT_OPTIMIZE, REG_SINGLE_USAGE, and INSN_COUNT have the correct
   values after load_mems.  */

static void
load_mems_and_recount_loop_regs_set (scan_start, end, loop_top, start,
                                     reg_single_usage, insn_count)
     rtx scan_start;
     rtx end;
     rtx loop_top;
     rtx start;
     varray_type reg_single_usage;
     int *insn_count;
{
  int nregs = max_reg_num ();

  load_mems (scan_start, end, loop_top, start);

  /* Recalculate n_times_set and friends since load_mems may have
     created new registers.  */
  if (max_reg_num () > nregs)
    {
      int old_nregs;
      int i;

      old_nregs = nregs;
      nregs = max_reg_num ();

      if (nregs > n_times_set->num_elements)
        {
          /* Grow all the arrays.  */
          VARRAY_GROW (n_times_set, nregs);
          VARRAY_GROW (n_times_used, nregs);
          VARRAY_GROW (may_not_optimize, nregs);
          if (reg_single_usage)
            VARRAY_GROW (reg_single_usage, nregs);
        }

      /* Clear the arrays.  */
      bzero ((char *) &n_times_set->data, nregs * sizeof (int));
      bzero ((char *) &may_not_optimize->data, nregs * sizeof (char));
      if (reg_single_usage)
        bzero ((char *) &reg_single_usage->data, nregs * sizeof (rtx));

      count_loop_regs_set (loop_top ? loop_top : start, end,
                           may_not_optimize, reg_single_usage,
                           insn_count, nregs);

      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        {
          VARRAY_CHAR (may_not_optimize, i) = 1;
          VARRAY_INT (n_times_set, i) = 1;
        }

#ifdef AVOID_CCMODE_COPIES
      /* Don't try to move insns which set CC registers if we should not
         create CCmode register copies.  */
      for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
        if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
          VARRAY_CHAR (may_not_optimize, i) = 1;
#endif

      /* Set n_times_used for the new registers.  */
      bcopy ((char *) (&n_times_set->data.i[0] + old_nregs),
             (char *) (&n_times_used->data.i[0] + old_nregs),
             (nregs - old_nregs) * sizeof (int));
    }
}
/* Move MEMs into registers for the duration of the loop.  SCAN_START
   is the first instruction in the loop (as it is executed).  The
   other parameters are as for next_insn_in_loop.  */
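
/* For instance (an illustrative sketch, not from any particular test case):
   if the loop only reads *p, and the address p is loop-invariant and not
   aliased by any store in the loop, the effect is roughly

       reg = *p;              -- load emitted just before the loop's start note
       loop:
         ... uses reg ...     -- each reference to *p inside the loop replaced

   If the MEM is also written in the loop, a copy-back `*p = reg' is emitted
   after the loop, behind a freshly created label that becomes the loop's new
   exit label.  */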
static void
load_mems (scan_start, end, loop_top, start)
     rtx scan_start;
     rtx end;
     rtx loop_top;
     rtx start;
{
  int maybe_never = 0;
  int i;
  rtx p;
  rtx label = NULL_RTX;
  rtx end_label;

  if (loop_mems_idx > 0)
    {
      /* Nonzero if the next instruction may never be executed.  */
      int next_maybe_never = 0;

      /* Check to see if it's possible that some instructions in the
         loop are never executed.  */
      for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
           p != NULL_RTX && !maybe_never;
           p = next_insn_in_loop (p, scan_start, end, loop_top))
        {
          if (GET_CODE (p) == CODE_LABEL)
            maybe_never = 1;
          else if (GET_CODE (p) == JUMP_INSN
                   /* If we enter the loop in the middle, and scan
                      around to the beginning, don't set maybe_never
                      for that.  This must be an unconditional jump,
                      otherwise the code at the top of the loop might
                      never be executed.  Unconditional jumps are
                      followed by a barrier then the loop end.  */
                   && ! (GET_CODE (p) == JUMP_INSN
                         && JUMP_LABEL (p) == loop_top
                         && NEXT_INSN (NEXT_INSN (p)) == end
                         && simplejump_p (p)))
            {
              if (!condjump_p (p))
                /* Something complicated.  */
                maybe_never = 1;
              else
                /* If there are any more instructions in the loop, they
                   might not be reached.  */
                next_maybe_never = 1;
            }
          else if (next_maybe_never)
            maybe_never = 1;
        }
      /* Actually move the MEMs.  */
      for (i = 0; i < loop_mems_idx; ++i)
        {
          int j;
          int written = 0;
          rtx reg;
          rtx mem = loop_mems[i].mem;

          if (MEM_VOLATILE_P (mem)
              || invariant_p (XEXP (mem, 0)) != 1)
            /* There's no telling whether or not MEM is modified.  */
            loop_mems[i].optimize = 0;

          /* Go through the MEMs written to in the loop to see if this
             one is aliased by one of them.  */
          for (j = 0; j < loop_store_mems_idx; ++j)
            {
              if (rtx_equal_p (mem, loop_store_mems[j]))
                written = 1;
              else if (true_dependence (loop_store_mems[j], VOIDmode,
                                        mem, rtx_varies_p))
                {
                  /* MEM is indeed aliased by this store.  */
                  loop_mems[i].optimize = 0;
                  break;
                }
            }

          /* If this MEM is written to, we must be sure that there
             are no reads from another MEM that aliases this one.  */
          if (loop_mems[i].optimize && written)
            {
              for (j = 0; j < loop_mems_idx; ++j)
                {
                  if (j == i)
                    continue;
                  else if (true_dependence (mem,
                                            VOIDmode,
                                            loop_mems[j].mem,
                                            rtx_varies_p))
                    {
                      /* It's not safe to hoist loop_mems[i] out of
                         the loop because writes to it might not be
                         seen by reads from loop_mems[j].  */
                      loop_mems[i].optimize = 0;
                      break;
                    }
                }
            }

          if (maybe_never && may_trap_p (mem))
            /* We can't access the MEM outside the loop; it might
               cause a trap that wouldn't have happened otherwise.  */
            loop_mems[i].optimize = 0;

          if (!loop_mems[i].optimize)
            /* We thought we were going to lift this MEM out of the
               loop, but later discovered that we could not.  */
            continue;

          /* Allocate a pseudo for this MEM.  We set REG_USERVAR_P in
             order to keep scan_loop from moving stores to this MEM
             out of the loop just because this REG is neither a
             user-variable nor used in the loop test.  */
          reg = gen_reg_rtx (GET_MODE (mem));
          REG_USERVAR_P (reg) = 1;
          loop_mems[i].reg = reg;

          /* Now, replace all references to the MEM with the
             corresponding pseudos.  */
          for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
               p != NULL_RTX;
               p = next_insn_in_loop (p, scan_start, end, loop_top))
            {
              rtx_and_int ri;
              ri.r = p;
              ri.i = i;
              for_each_rtx (&p, replace_loop_mem, &ri);
            }

          if (!apply_change_group ())
            /* We couldn't replace all occurrences of the MEM.  */
            loop_mems[i].optimize = 0;
          else
            {
              rtx set;

              /* Load the memory immediately before START, which is
                 the NOTE_LOOP_BEG.  */
              set = gen_rtx_SET (GET_MODE (reg), reg, mem);
              emit_insn_before (set, start);

              if (written)
                {
                  if (label == NULL_RTX)
                    {
                      /* We must compute the former
                         right-after-the-end label before we insert
                         the new one.  */
                      end_label = next_label (end);
                      label = gen_label_rtx ();
                      emit_label_after (label, end);
                    }

                  /* Store the memory immediately after END, which is
                     the NOTE_LOOP_END.  */
                  set = gen_rtx_SET (GET_MODE (reg), copy_rtx (mem), reg);
                  emit_insn_after (set, label);
                }

              if (loop_dump_stream)
                {
                  fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
                           REGNO (reg), (written ? "r/w" : "r/o"));
                  print_rtl (loop_dump_stream, mem);
                  fputc ('\n', loop_dump_stream);
                }
            }
        }
    }

  if (label != NULL_RTX)
    {
      /* Now, we need to replace all references to the previous exit
         label with the new one.  */
      rtx_pair rr;
      rr.r1 = end_label;
      rr.r2 = label;

      for (p = start; p != end; p = NEXT_INSN (p))
        {
          for_each_rtx (&p, replace_label, &rr);

          /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
             field.  This is not handled by for_each_rtx because it doesn't
             handle unprinted ('0') fields.  We need to update JUMP_LABEL
             because the immediately following unroll pass will use it.
             replace_label would not work anyway, because it only handles
             LABEL_REFs.  */
          if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
            JUMP_LABEL (p) = label;
        }
    }
}
/* Replace MEM with its associated pseudo register.  This function is
   called from load_mems via for_each_rtx.  DATA is actually an
   rtx_and_int * describing the instruction currently being scanned
   and the MEM we are currently replacing.  */

static int
replace_loop_mem (mem, data)
     rtx *mem;
     void *data;
{
  rtx_and_int *ri;
  rtx insn;
  int i;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
         CONST_DOUBLE, so there's no need to traverse into one.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  ri = (rtx_and_int *) data;
  i = ri->i;
  insn = ri->r;

  if (!rtx_equal_p (loop_mems[i].mem, m))
    /* This is not the MEM we are currently replacing.  */
    return 0;

  /* Actually replace the MEM.  */
  validate_change (insn, mem, loop_mems[i].reg, 1);

  return 0;
}
/* Replace occurrences of the old exit label for the loop with the new
   one.  DATA is an rtx_pair containing the old and new labels,
   respectively.  */

static int
replace_label (x, data)
     rtx *x;
     void *data;
{
  rtx l = *x;
  rtx old_label = ((rtx_pair *) data)->r1;
  rtx new_label = ((rtx_pair *) data)->r2;

  if (l == NULL_RTX)
    return 0;

  if (GET_CODE (l) != LABEL_REF)
    return 0;

  if (XEXP (l, 0) != old_label)
    return 0;

  XEXP (l, 0) = new_label;
  ++LABEL_NUSES (new_label);
  --LABEL_NUSES (old_label);

  return 0;
}