/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 88, 89, 91-6, 1997 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value, and changes these to zero the
   entire register once before the loop and merely copy the low part
   within the loop.

   Most of the complexity is in heuristics to decide when it is
   worthwhile to do these things.  */
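
/* Illustrative sketch (editorial addition, not from the original source):
   in a loop such as

	for (i = 0; i < n; i++)
	  a[i] = x * y;

   the product x * y is loop-invariant and is hoisted so it runs once
   before the loop, while the address computation a + i*4 (on a machine
   with 4-byte ints) is a general induction variable that strength
   reduction turns into a pointer incremented by 4 each iteration.  */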
#include "config.h"
#include "rtl.h"
#include "insn-config.h"
#include "insn-flags.h"
#include "regs.h"
#include "hard-reg-set.h"
/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

int *uid_loop_num;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;
/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;

/* For each loop, gives the containing loop number, -1 if none.  */

static int *loop_outer_loop;

/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;
/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include
   return instructions on this list.  */

rtx *loop_number_exit_labels;

/* Indexed by loop number, counts the number of LABEL_REFs on
   loop_number_exit_labels for this loop and all loops nested inside it.  */

int *loop_number_exit_count;
/* Holds the number of loop iterations.  It is zero if the number could not
   be calculated.  Must be unsigned since the number of iterations can
   be as high as 2^wordsize-1.  For loops with a wider iterator, this number
   will be zero if the number of loop iterations is too large for an
   unsigned integer to hold.  */

unsigned HOST_WIDE_INT loop_n_iterations;

/* Nonzero if there is a subroutine call in the current loop.
   (unknown_address_altered is also nonzero in this case.)  */

static int loop_has_call;

/* Nonzero if there is a volatile memory reference in the current
   loop.  */

static int loop_has_volatile;

/* The NOTE_INSN_LOOP_CONT of the current loop.  A continue statement
   will generate a branch to NEXT_INSN (loop_continue).  */

static rtx loop_continue;
/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static int *n_times_set;
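
/* Illustrative sketch (editorial addition): if (reg 101) holds a value
   loaded once per iteration, n_times_set[101] is 1 after counting; it
   becomes -1 when the insn is recorded as a movable candidate (-2 if
   the value is known equal to a constant), and drops to 0 if the insn
   is actually moved, so the reg then tests as invariant.  */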
/* Original value of n_times_set; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static int *n_times_used;

/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */

static char *may_not_optimize;

/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;
/* Array of MEMs that are stored in this loop.  If there are too many to fit
   here, we just turn on unknown_address_altered.  */

#define NUM_STORES 20
static rtx loop_store_mems[NUM_STORES];

/* Index of first available slot in above array.  */
static int loop_store_mems_idx;

/* Nonzero if we don't know what MEMs were changed in the current loop.
   This happens if the loop contains a call (in which case `loop_has_call'
   will also be set) or if we store into more than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;
/* Number of loops contained within the current one, including itself.  */
static int loops_enclosed;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;

/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

extern char *oballoc ();
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  int regno;			/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */
  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;	/* Nonzero means it is a mode for a low part
				   that we should avoid changing when clearing
				   the rest of the reg.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};
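
/* Illustrative sketch (editorial addition): scan_loop builds the chain
   front to back, roughly

	struct movable *m = (struct movable *) alloca (sizeof (struct movable));
	m->insn = p;  m->set_dest = SET_DEST (set);  m->next = 0;
	if (movables == 0) movables = m; else last_movable->next = m;
	last_movable = m;

   and ignore_some_movables, force_movables, combine_movables and
   move_movables below all walk the chain through m->next.  */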
FILE *loop_dump_stream;

/* Forward declarations.  */

static void find_and_verify_loops ();
static void mark_loop_jump ();
static void prescan_loop ();
static int reg_in_basic_block_p ();
static int consec_sets_invariant_p ();
static rtx libcall_other_reg ();
static int labels_in_range_p ();
static void count_loop_regs_set ();
static void note_addr_stored ();
static int loop_reg_used_before_p ();
static void scan_loop ();
static void replace_call_address ();
static rtx skip_consec_insns ();
static int libcall_benefit ();
static void ignore_some_movables ();
static void force_movables ();
static void combine_movables ();
static int rtx_equal_for_loop_p ();
static void move_movables ();
static void strength_reduce ();
static int valid_initial_value_p ();
static void find_mem_givs ();
static void record_biv ();
static void check_final_value ();
static void record_giv ();
static void update_giv_derive ();
static int basic_induction_var ();
static rtx simplify_giv_expr ();
static int general_induction_var ();
static int consec_sets_giv ();
static int check_dbra_loop ();
static rtx express_from ();
static int combine_givs_p ();
static void combine_givs ();
static int product_cheap_p ();
static int maybe_eliminate_biv ();
static int maybe_eliminate_biv_1 ();
static int last_use_this_basic_block ();
static void record_initial ();
static void update_reg_last_use ();
/* Relative gain of eliminating various kinds of operations.  */
static int add_cost;

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

void
init_loop ()
{
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx (REG, word_mode, LAST_VIRTUAL_REGISTER + 1);

  add_cost = rtx_cost (gen_rtx (PLUS, word_mode, reg, reg), SET);

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  copy_cost = 2 * 2;

  /* Free the objects we just allocated.  */
  obfree (free_point);

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
}
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
{
  register rtx insn;
  register int i;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();
  init_alias_analysis ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);
  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;
  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
  bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));
  /* Allocate tables for recording each loop.  We set each entry, so they
     need not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));
  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* See if we went too far.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Compute the mapping from uids to luids.
     LUIDs are numbers assigned to insns, like uids,
     except that luids increase monotonically through the code.
     Don't assign luids to line-number NOTEs, so that the distance in luids
     between two insns is not affected by -g.  */

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }

  max_luid = i + 1;
  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];
  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (flag_unroll_loops && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
		 max_reg_num ());

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original one
     to one mapping will remain.  */
  if (flag_unroll_loops && write_symbols != NO_DEBUG)
    unroll_block_trees ();
}
/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */
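
/* Illustrative sketch (editorial addition) of the unimplemented case the
   ??? note describes: in

	for (i = 0; i < n; i++) { *p = x; ... }

   with P and X invariant, the store not volatile, and no possibly
   aliasing read reaching *p first, the write could be moved out of the
   loop; a read of *p after the write could then be treated as
   invariant as well.  */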
static void
scan_loop (loop_start, end, nregs)
     rtx loop_start, end;
     int nregs;
{
  register int i;
  register rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  rtx loop_top = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  rtx scan_start;
  /* Number of insns in the loop.  */
  int insn_count;
  int in_libcall = 0;
  int tem;
  rtx temp;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* If we have calls, contains the insn in which a register was used
     if it was used exactly once; contains const0_rtx if it was used more
     than once.  */
  rtx *reg_single_usage = 0;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;
  n_times_set = (int *) alloca (nregs * sizeof (int));
  n_times_used = (int *) alloca (nregs * sizeof (int));
  may_not_optimize = (char *) alloca (nregs);

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
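
/* Illustrative sketch (editorial addition): the rotated shape being
   detected is the one emitted for a while-loop whose test is too
   complex to duplicate:

	NOTE_INSN_LOOP_BEG
	  (jump to the exit test at the bottom)
	top:
	  ... loop body ...
	  (exit test; conditional jump back to top)
	NOTE_INSN_LOOP_END

   so the true entry point is the jump's target, not the first insn.  */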
  for (p = NEXT_INSN (loop_start);
       p != end
	 && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
	 && (GET_CODE (p) != NOTE
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  scan_start = p;

  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end);
  threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN)  */
      if (simplejump_p (p)
	  && JUMP_LABEL (p) != 0
	  /* Check to see whether the jump actually
	     jumps out of the loop (meaning it's no loop).
	     This case can happen for things like
	     do {..} while (0).  If this label was generated previously
	     by loop, we can't tell anything about it and have to reject
	     the loop.  */
	  && INSN_UID (JUMP_LABEL (p)) < max_uid_for_loop
	  && INSN_LUID (JUMP_LABEL (p)) >= INSN_LUID (loop_start)
	  && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (end))
	{
	  loop_top = next_label (scan_start);
	  scan_start = JUMP_LABEL (p);
	}
    }
  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (end));
      return;
    }
  /* Count number of times each reg is set during this loop.
     Set may_not_optimize[I] if it is not safe to move out
     the setting of register I.  If this loop has calls, set
     reg_single_usage[I].  */

  bzero ((char *) n_times_set, nregs * sizeof (int));
  bzero (may_not_optimize, nregs);

  if (loop_has_call)
    {
      reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
      bzero ((char *) reg_single_usage, nregs * sizeof (rtx));
    }
  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
		       may_not_optimize, reg_single_usage, &insn_count, nregs);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    may_not_optimize[i] = 1, n_times_set[i] = 1;
  bcopy ((char *) n_times_set, (char *) n_times_used, nregs * sizeof (int));
  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	       INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_continue)
	fprintf (loop_dump_stream, "Continue at insn %d.\n",
		 INSN_UID (loop_continue));
    }
  /* Scan through the loop finding insns that are safe to move.
     Set n_times_set negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */
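
  /* Illustrative sketch (editorial addition): in

	for (...) { if (c) t = a / b; ... }

     the division sits after a conditional jump, so MAYBE_NEVER is set by
     the time it is scanned; since may_trap_p is true for a division, the
     insn is not hoisted to run unconditionally before the loop.  */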
  p = scan_start;
  while (1)
    {
      p = NEXT_INSN (p);
      /* At end of a straight-in loop, we are done.
	 At end of a loop entered at the bottom, scan the top.  */
      if (p == end)
	{
	  if (loop_top != 0)
	    p = loop_top;
	  else
	    break;
	  if (p == end)
	    break;
	}

      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
	  && find_reg_note (p, REG_LIBCALL, NULL_RTX))
	in_libcall = 1;
      else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
	       && find_reg_note (p, REG_RETVAL, NULL_RTX))
	in_libcall = 0;
      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && ! may_not_optimize[REGNO (SET_DEST (set))])
	{
	  int tem1 = 0;
	  int tem2 = 0;
	  int move_insn = 0;
	  rtx src = SET_SRC (set);
	  rtx dependencies = 0;
	  /* Figure out what to use as a source of this insn.  If a REG_EQUIV
	     note is given or if a REG_EQUAL note with a constant operand is
	     specified, use it as the source and mark that we should move
	     this insn by calling emit_move_insn rather than duplicating the
	     insn.

	     Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
	     is present.  */
	  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	  if (temp)
	    src = XEXP (temp, 0), move_insn = 1;
	  else
	    {
	      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
	      if (temp && CONSTANT_P (XEXP (temp, 0)))
		src = XEXP (temp, 0), move_insn = 1;
	      if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		{
		  src = XEXP (temp, 0);
		  /* A libcall block can use regs that don't appear in
		     the equivalent expression.  To move the libcall,
		     we must move those regs too.  */
		  dependencies = libcall_other_reg (p, src);
		}
	    }
	  /* Don't try to optimize a register that was made
	     by loop-optimization for an inner loop.
	     We don't know its life-span, so we can't compute the benefit.  */
	  if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
	    ;
	  /* In order to move a register, we need to have one of three cases:
	     (1) it is used only in the same basic block as the set
	     (2) it is not a user variable and it is not used in the
		 exit test (this can cause the variable to be used
		 before it is set just like a user-variable).
	     (3) the set is guaranteed to be executed once the loop starts,
		 and the reg is not used until after that.  */
	  else if (! ((! maybe_never
		       && ! loop_reg_used_before_p (set, p, loop_start,
						    scan_start, end))
		      || (! REG_USERVAR_P (SET_DEST (set))
			  && ! REG_LOOP_TEST_P (SET_DEST (set)))
		      || reg_in_basic_block_p (p, SET_DEST (set))))
	    ;
	  else if ((tem = invariant_p (src))
		   && (dependencies == 0
		       || (tem2 = invariant_p (dependencies)) != 0)
		   && (n_times_set[REGNO (SET_DEST (set))] == 1
		       || (tem1
			   = consec_sets_invariant_p (SET_DEST (set),
						      n_times_set[REGNO (SET_DEST (set))],
						      p)))
		   /* If the insn can cause a trap (such as divide by zero),
		      can't move it unless it's guaranteed to be executed
		      once loop is entered.  Even a function call might
		      prevent the trap insn from being reached
		      (since it might exit!)  */
		   && ! ((maybe_never || call_passed)
			 && may_trap_p (src)))
	    {
	      register struct movable *m;
	      register int regno = REGNO (SET_DEST (set));
	      /* A potential lossage is where we have a case where two insns
		 can be combined as long as they are both in the loop, but
		 we move one of them outside the loop.  For large loops,
		 this can lose.  The most common case of this is the address
		 of a function being called.

		 Therefore, if this register is marked as being used exactly
		 once if we are in a loop with calls (a "large loop"), see if
		 we can replace the usage of this register with the source
		 of this SET.  If we can, delete this insn.

		 Don't do this if P has a REG_RETVAL note or if we have
		 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */
	      if (reg_single_usage && reg_single_usage[regno] != 0
		  && reg_single_usage[regno] != const0_rtx
		  && REGNO_FIRST_UID (regno) == INSN_UID (p)
		  && (REGNO_LAST_UID (regno)
		      == INSN_UID (reg_single_usage[regno]))
		  && n_times_set[REGNO (SET_DEST (set))] == 1
		  && ! side_effects_p (SET_SRC (set))
		  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
#ifdef SMALL_REGISTER_CLASSES
		  && ! (SMALL_REGISTER_CLASSES
			&& GET_CODE (SET_SRC (set)) == REG
			&& REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)
#endif
		  /* This test is not redundant; SET_SRC (set) might be
		     a call-clobbered register and the life of REGNO
		     might span a call.  */
		  && ! modified_between_p (SET_SRC (set), p,
					   reg_single_usage[regno])
		  && no_labels_between_p (p, reg_single_usage[regno])
		  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					   reg_single_usage[regno]))
		{
		  /* Replace any usage in a REG_EQUAL note.  Must copy the
		     new source, so that we don't get rtx sharing between the
		     SET_SOURCE and REG_NOTES of insn p.  */
		  REG_NOTES (reg_single_usage[regno])
		    = replace_rtx (REG_NOTES (reg_single_usage[regno]),
				   SET_DEST (set), copy_rtx (SET_SRC (set)));

		  PUT_CODE (p, NOTE);
		  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
		  NOTE_SOURCE_FILE (p) = 0;
		  n_times_set[regno] = 0;
		  continue;
		}
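
	      /* Illustrative sketch (editorial addition): the classic
		 instance of the code above is a function address,

			(set (reg 100) (symbol_ref "f"))
			(call_insn ... (mem (reg 100)) ...)

		 where (reg 100) is used exactly once; the symbol_ref is
		 substituted into the call and the set is turned into a
		 deleted note rather than moved.  */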
	      m = (struct movable *) alloca (sizeof (struct movable));
	      m->next = 0;
	      m->insn = p;
	      m->set_src = src;
	      m->dependencies = dependencies;
	      m->set_dest = SET_DEST (set);
	      m->force = 0;
	      m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
	      m->done = 0;
	      m->forces = 0;
	      m->partial = 0;
	      m->move_insn = move_insn;
	      m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
	      m->savemode = VOIDmode;
	      m->regno = regno;
	      /* Set M->cond if either invariant_p or consec_sets_invariant_p
		 returned 2 (only conditionally invariant).  */
	      m->cond = ((tem | tem1 | tem2) > 1);
	      m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
			   || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
	      m->match = 0;
	      m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
			     - uid_luid[REGNO_FIRST_UID (regno)]);
	      m->savings = n_times_used[regno];
	      if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		m->savings += libcall_benefit (p);
	      n_times_set[regno] = move_insn ? -2 : -1;
	      /* Add M to the end of the chain MOVABLES.  */
	      if (movables == 0)
		movables = m;
	      else
		last_movable->next = m;
	      last_movable = m;
	      if (m->consec > 0)
		{
		  /* Skip this insn, not checking REG_LIBCALL notes.  */
		  p = next_nonnote_insn (p);
		  /* Skip the consecutive insns, if there are any.  */
		  p = skip_consec_insns (p, m->consec);
		  /* Back up to the last insn of the consecutive group.  */
		  p = prev_nonnote_insn (p);

		  /* We must now reset m->move_insn, m->is_equiv, and possibly
		     m->set_src to correspond to the effects of all the
		     insns.  */
		  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		  if (temp)
		    m->set_src = XEXP (temp, 0), m->move_insn = 1;
		  else
		    {
		      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		      if (temp && CONSTANT_P (XEXP (temp, 0)))
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			m->move_insn = 0;
		    }
		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		}
	    }
	  /* If this register is always set within a STRICT_LOW_PART
	     or set to zero, then its high bytes are constant.
	     So clear them outside the loop and within the loop
	     just load the low bytes.
	     We must check that the machine has an instruction to do so.
	     Also, if the value loaded into the register
	     depends on the same register, this cannot be done.  */
	  else if (SET_SRC (set) == const0_rtx
		   && GET_CODE (NEXT_INSN (p)) == INSN
		   && (set1 = single_set (NEXT_INSN (p)))
		   && GET_CODE (set1) == SET
		   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
		       == SET_DEST (set))
		   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
	    {
	      register int regno = REGNO (SET_DEST (set));
	      if (n_times_set[regno] == 2)
		{
		  register struct movable *m;
		  m = (struct movable *) alloca (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_dest = SET_DEST (set);
		  m->dependencies = 0;
		  m->force = 0;
		  m->consec = 0;
		  m->done = 0;
		  m->forces = 0;
		  m->move_insn = 0;
		  m->partial = 1;
		  /* If the insn may not be executed on some cycles,
		     we can't clear the whole reg; clear just high part.
		     Not even if the reg is used only within this loop.
		     Consider this:
		     while (1)
		       while (s != e)
			 {
			   r = *s;
			   if (cond)
			     x = r;
			 }
		     Clearing x before the inner loop could clobber a value
		     being saved from the last time around the outer loop.
		     However, if the reg is not used outside this loop
		     and all uses of the register are in the same
		     basic block as the store, there is no problem.

		     If this insn was made by loop, we don't know its
		     INSN_LUID and hence must make a conservative
		     assumption.  */
		  m->global = (INSN_UID (p) >= max_uid_for_loop
			       || (uid_luid[REGNO_LAST_UID (regno)]
				   > INSN_LUID (end))
			       || (uid_luid[REGNO_FIRST_UID (regno)]
				   < INSN_LUID (p))
			       || (labels_in_range_p
				   (p, uid_luid[REGNO_FIRST_UID (regno)])));
		  if (maybe_never && m->global)
		    m->savemode = GET_MODE (SET_SRC (set1));
		  else
		    m->savemode = VOIDmode;
		  m->regno = regno;
		  m->cond = 0;
		  m->match = 0;
		  m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
				 - uid_luid[REGNO_FIRST_UID (regno)]);
		  m->savings = 1;
		  n_times_set[regno] = -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  if (movables == 0)
		    movables = m;
		  else
		    last_movable->next = m;
		  last_movable = m;
		}
	    }
	}
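
      /* Illustrative sketch (editorial addition): the pattern
	 recognized above is a zero-extension split into two insns,

		(set (reg:SI 105) (const_int 0))
		(set (strict_low_part (subreg:HI (reg:SI 105) 0)) ...)

	 and when the conditions hold, the clear of the full register
	 is what gets hoisted out of the loop.  */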
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
	 so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier then the loop end.  */
	       && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
		     && NEXT_INSN (NEXT_INSN (p)) == end
		     && simplejump_p (p)))
	maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed: logically, the loop begins here even though the exit
	     code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    maybe_never = call_passed = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}
    }
  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads
     leads when it dies right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in n_times_set for each reg that is moved.  */

  move_movables (movables, threshold,
		 insn_count, loop_start, end, nregs);

  /* Now candidates that still are negative are those not moved.
     Change n_times_set to indicate that those are not actually invariant.  */
  for (i = 0; i < nregs; i++)
    if (n_times_set[i] < 0)
      n_times_set[i] = n_times_used[i];

  if (flag_strength_reduce)
    strength_reduce (scan_start, end, loop_top,
		     insn_count, loop_start, end);
}
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

static void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
	  && ! reg_mentioned_p (in_this, not_in_this))
	*output = gen_rtx (EXPR_LIST, VOIDmode, in_this, *output);
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
	{
	case 'E':
	  for (j = 0; j < XVECLEN (in_this, i); j++)
	    record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
	  break;

	case 'e':
	  record_excess_regs (XEXP (in_this, i), not_in_this, output);
	  break;
	}
    }
}
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

static rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
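
/* Illustrative sketch (editorial addition): a libcall block has the form

	(insn A ...)		; carries REG_LIBCALL pointing to B
	...
	(insn B ...)		; carries REG_RETVAL pointing to A,
				; plus a REG_EQUAL equivalent value

   and any register used inside A..B but absent from the REG_EQUAL
   expression is exactly what this function collects.  */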
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
	{
	case NOTE:
	  break;

	case INSN:
	case CALL_INSN:
	  /* Ordinary insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  break;

	case JUMP_INSN:
	  /* Jump insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  /* Otherwise, it's the end of the basic block, so we lose.  */
	  return 0;

	case CODE_LABEL:
	case BARRIER:
	  /* It's the end of the basic block, so we lose.  */
	  return 0;
	}
    }

  /* The "last use" doesn't follow the "first use"??  */
  return 0;
}
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
	benefit += 10;		/* Assume at least this many insns in a library
				   routine.  */
      else if (GET_CODE (insn) == INSN
	       && GET_CODE (PATTERN (insn)) != USE
	       && GET_CODE (PATTERN (insn)) != CLOBBER)
	benefit++;
    }

  return benefit;
}
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
	 be an insn here.  */
      if (GET_CODE (insn) != NOTE
	  && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
	insn = XEXP (temp, 0);

      do insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}
/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
	{
	  rtx insn;
	  /* Check for earlier movables inside that range,
	     and mark them invalid.  We cannot use LUIDs here because
	     insns created by loop.c for prior loops don't have LUIDs.
	     Rather than reject all such insns from movables, we just
	     explicitly check each insn in the libcall (since invariant
	     libcalls aren't that common).  */
	  for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
	    for (m1 = movables; m1 != m; m1 = m1->next)
	      if (m1->insn == insn)
		m1->done = 1;
	}
    }
}
/* For each movable insn, see if the reg that it loads
   leads when it dies right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

static void
force_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m1 = movables; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
	int regno = m1->regno;
	for (m = m1->next; m; m = m->next)
	  /* ??? Could this be a bug?  What if CSE caused the
	     register of M1 to be used after this insn?
	     Since CSE does not update regno_last_uid,
	     this insn M->insn might not be where it dies.
	     But very likely this doesn't matter; what matters is
	     that M's reg is computed from M1's reg.  */
	  if (INSN_UID (m->insn) == REGNO_LAST_UID (regno))
	    break;

	if (m != 0 && m->set_src == m1->set_dest
	    /* If m->consec, m->set_src isn't valid.  */
	    && m->consec == 0)
	  m->forces = m1;
	else
	  m = 0;

	/* Increase the priority of moving the first insn
	   since it permits the second to be moved as well.  */
	if (m != 0)
	  {
	    m1->lifetime += m->lifetime;
	    m1->savings += m->savings;
	  }
      }
}
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, nregs)
     struct movable *movables;
     int nregs;
{
  register struct movable *m;
  char *matched_regs = (char *) alloca (nregs);
  enum machine_mode mode;
  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables; m; m = m->next)
    if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
      {
	register struct movable *m1;
	int regno = m->regno;

	bzero (matched_regs, nregs);
	matched_regs[regno] = 1;

	for (m1 = movables; m1; m1 = m1->next)
	  if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
	      /* A reg used outside the loop mustn't be eliminated.  */
	      && !m1->global
	      /* A reg used for zero-extending mustn't be eliminated.  */
	      && !m1->partial
	      && (matched_regs[m1->regno]
		  ||
		  (
		   /* Can combine regs with different modes loaded from the
		      same constant only if the modes are the same or
		      if both are integer modes with M wider or the same
		      width as M1.  The check for integer is redundant, but
		      safe, since the only case of differing destination
		      modes with equal sources is when both sources are
		      VOIDmode, i.e., CONST_INT.  */
		   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
		    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
			&& GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
			&& (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
			    >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
		   /* See if the source of M1 says it matches M.  */
		   && ((GET_CODE (m1->set_src) == REG
			&& matched_regs[REGNO (m1->set_src)])
		       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
						movables))))
	      && ((m->dependencies == m1->dependencies)
		  || rtx_equal_p (m->dependencies, m1->dependencies)))
	    {
	      m->lifetime += m1->lifetime;
	      m->savings += m1->savings;
	      m1->match = m;
	      matched_regs[m1->regno] = 1;
	    }
      }
  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
	 Don't combine any that are used outside this loop.  */
      for (m = movables; m; m = m->next)
	if (m->partial && ! m->global
	    && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
	  {
	    register struct movable *m1;
	    int first = uid_luid[REGNO_FIRST_UID (m->regno)];
	    int last = uid_luid[REGNO_LAST_UID (m->regno)];

	    if (m0 == 0)
	      {
		/* First one: don't check for overlap, just record it.  */
		m0 = m;
		continue;
	      }

	    /* Make sure they extend to the same mode.
	       (Almost always true.)  */
	    if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
	      continue;

	    /* We already have one: check for overlap with those
	       already combined together.  */
	    for (m1 = movables; m1 != m; m1 = m1->next)
	      if (m1 == m0 || (m1->partial && m1->match == m0))
		if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
		       || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
		  goto overlap;

	    /* No overlap: we can combine this with the others.  */
	    m0->lifetime += m->lifetime;
	    m0->savings += m->savings;
	    m->match = m0;

	  overlap: ;
	  }
    }
}
/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  int xn = REGNO (x);
  int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
	  && ((mx->match == my->match && mx->match != 0)
	      || mx->match == my
	      || mx == my->match));
}
/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */

static int
rtx_equal_for_loop_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  register int i;
  register int j;
  register struct movable *m;
  register enum rtx_code code;
  register char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
      && CONSTANT_P (y))
    {
      for (m = movables; m; m = m->next)
	if (m->move_insn && m->regno == REGNO (x)
	    && rtx_equal_p (m->set_src, y))
	  return 1;
    }
  else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
	   && CONSTANT_P (x))
    {
      for (m = movables; m; m = m->next)
	if (m->move_insn && m->regno == REGNO (y)
	    && rtx_equal_p (m->set_src, x))
	  return 1;
    }

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole things.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_loop_p (XVECEXP (x, i, j),
				      XVECEXP (y, i, j), movables) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
	    return 0;
	  break;

	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }
  return 1;
}
/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use the reference.  */

static void
add_label_notes (x, insns)
     rtx x;
     rtx insns;
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  char *fmt;
  rtx insn;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      rtx next = next_real_insn (XEXP (x, 0));

      /* Don't record labels that refer to dispatch tables.
	 This is not necessary, since the tablejump references the same label.
	 And if we did record them, flow.c would make worse code.  */
      if (next == 0
	  || ! (GET_CODE (next) == JUMP_INSN
		&& (GET_CODE (PATTERN (next)) == ADDR_VEC
		    || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)))
	{
	  for (insn = insns; insn; insn = NEXT_INSN (insn))
	    if (reg_mentioned_p (XEXP (x, 0), insn))
	      REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL, XEXP (x, 0),
					  REG_NOTES (insn));
	}
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	add_label_notes (XEXP (x, i), insns);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  add_label_notes (XVECEXP (x, i, j), insns);
    }
}
/* Scan MOVABLES, and move the insns that deserve to be moved.
   If two matching movables are combined, replace one reg with the
   other throughout.  */

static void
move_movables (movables, threshold, insn_count, loop_start, end, nregs)
     struct movable *movables;
     int threshold;
     int insn_count;
     rtx loop_start;
     rtx end;
     int nregs;
{
  rtx new_start = 0;
  register struct movable *m;
  register rtx p;
  /* Map of pseudo-register replacements to handle combining
     when we move several insns that load the same value
     into different pseudo-registers.  */
  rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
  char *already_moved = (char *) alloca (nregs);

  bzero (already_moved, nregs);
  bzero ((char *) reg_map, nregs * sizeof (rtx));

  num_movables = 0;
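
  /* Illustrative sketch (editorial addition): if two movables loaded the
     same constant into (reg 101) and (reg 102) and were matched by
     combine_movables, only one insn is emitted before the loop and
     reg_map[102] is set to (reg 101); the loop at the bottom of this
     function then rewrites every use of reg 102.  */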
  for (m = movables; m; m = m->next)
    {
      /* Describe this movable insn.  */

      if (loop_dump_stream)
	{
	  fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
		   INSN_UID (m->insn), m->regno, m->lifetime);
	  if (m->consec > 0)
	    fprintf (loop_dump_stream, "consec %d, ", m->consec);
	  if (m->cond)
	    fprintf (loop_dump_stream, "cond ");
	  if (m->force)
	    fprintf (loop_dump_stream, "force ");
	  if (m->global)
	    fprintf (loop_dump_stream, "global ");
	  if (m->done)
	    fprintf (loop_dump_stream, "done ");
	  if (m->move_insn)
	    fprintf (loop_dump_stream, "move-insn ");
	  if (m->match)
	    fprintf (loop_dump_stream, "matches %d ",
		     INSN_UID (m->match->insn));
	  if (m->forces)
	    fprintf (loop_dump_stream, "forces %d ",
		     INSN_UID (m->forces->insn));
	}
      /* Count movables.  Value used in heuristics in strength_reduce.  */
      num_movables++;

      /* Ignore the insn if it's already done (it matched something else).
	 Otherwise, see if it is now safe to move.  */

      if (!m->done
	  && (! m->cond
	      || (1 == invariant_p (m->set_src)
		  && (m->dependencies == 0
		      || 1 == invariant_p (m->dependencies))
		  && (m->consec == 0
		      || 1 == consec_sets_invariant_p (m->set_dest,
						       m->consec + 1,
						       m->insn))))
	  && (! m->forces || m->forces->done))
	{
	  register int regno;
	  register rtx p;
	  int savings = m->savings;
	  /* We have an insn that is safe to move.
	     Compute its desirability.  */

	  p = m->insn;
	  regno = m->regno;

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "savings %d ", savings);

	  if (moved_once[regno])
	    {
	      insn_count *= 2;

	      if (loop_dump_stream)
		fprintf (loop_dump_stream, "halved since already moved ");
	    }
	  /* An insn MUST be moved if we already moved something else
	     which is safe only if this one is moved too: that is,
	     if already_moved[REGNO] is nonzero.  */

	  /* An insn is desirable to move if the new lifetime of the
	     register is no more than THRESHOLD times the old lifetime.
	     If it's not desirable, it means the loop is so big
	     that moving won't speed things up much,
	     and it is liable to make register usage worse.  */

	  /* It is also desirable to move if it can be moved at no
	     extra cost because something else was already moved.  */

	  if (already_moved[regno]
	      || (threshold * savings * m->lifetime) >= insn_count
	      || (m->forces && m->forces->done
		  && n_times_used[m->forces->regno] == 1))
	    {
	      int count;
	      register struct movable *m1;
	      rtx first;

	      /* Now move the insns that set the reg.  */

	      if (m->partial && m->match)
		{
		  rtx newpat, i1;
		  rtx r1, r2;
		  /* Find the end of this chain of matching regs.
		     Thus, we load each reg in the chain from that one reg.
		     And that reg is loaded with 0 directly,
		     since it has ->match == 0.  */
		  for (m1 = m; m1->match; m1 = m1->match);
		  newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
					  SET_DEST (PATTERN (m1->insn)));
		  i1 = emit_insn_before (newpat, loop_start);

		  /* Mark the moved, invariant reg as being allowed to
		     share a hard reg with the other matching invariant.  */
		  REG_NOTES (i1) = REG_NOTES (m->insn);
		  r1 = SET_DEST (PATTERN (m->insn));
		  r2 = SET_DEST (PATTERN (m1->insn));
		  regs_may_share
		    = gen_rtx (EXPR_LIST, VOIDmode, r1,
			       gen_rtx (EXPR_LIST, VOIDmode, r2,
					regs_may_share));
		  delete_insn (m->insn);

		  if (new_start == 0)
		    new_start = i1;

		  if (loop_dump_stream)
		    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
		}
	      /* If we are to re-generate the item being moved with a
		 new move insn, first delete what we have and then emit
		 the move insn before the loop.  */
	      else if (m->move_insn)
		{
		  rtx i1, temp;

		  for (count = m->consec; count >= 0; count--)
		    {
		      /* If this is the first insn of a library call sequence,
			 skip to the end.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
			p = XEXP (temp, 0);

		      /* If this is the last insn of a libcall sequence, then
			 delete every insn in the sequence except the last.
			 The last insn is handled in the normal manner.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
			{
			  temp = XEXP (temp, 0);
			  while (temp != p)
			    temp = delete_insn (temp);
			}

		      p = delete_insn (p);
		      while (p && GET_CODE (p) == NOTE)
			p = NEXT_INSN (p);
		    }

		  start_sequence ();
		  emit_move_insn (m->set_dest, m->set_src);
		  temp = get_insns ();
		  end_sequence ();

		  add_label_notes (m->set_src, temp);

		  i1 = emit_insns_before (temp, loop_start);
		  if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
		    REG_NOTES (i1)
		      = gen_rtx (EXPR_LIST,
				 m->is_equiv ? REG_EQUIV : REG_EQUAL,
				 m->set_src, REG_NOTES (i1));

		  if (loop_dump_stream)
		    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));

		  /* The more regs we move, the less we like moving them.  */
		  threshold -= 3;
		}
	      else
		{
		  for (count = m->consec; count >= 0; count--)
		    {
		      rtx i1, temp;

		      /* If first insn of libcall sequence, skip to end.  */
		      /* Do this at start of loop, since p is guaranteed to
			 be an insn here.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
			p = XEXP (temp, 0);

		      /* If last insn of libcall sequence, move all
			 insns except the last before the loop.  The last
			 insn is handled in the normal manner.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
			{
			  rtx fn_address = 0;
			  rtx fn_reg = 0;
			  rtx fn_address_insn = 0;

			  first = 0;
			  for (temp = XEXP (temp, 0); temp != p;
			       temp = NEXT_INSN (temp))
			    {
			      rtx body;
			      rtx n;
			      rtx next;

			      if (GET_CODE (temp) == NOTE)
				continue;

			      body = PATTERN (temp);

			      /* Find the next insn after TEMP,
				 not counting USE or NOTE insns.  */
			      for (next = NEXT_INSN (temp); next != p;
				   next = NEXT_INSN (next))
				if (! (GET_CODE (next) == INSN
				       && GET_CODE (PATTERN (next)) == USE)
				    && GET_CODE (next) != NOTE)
				  break;

			      /* If that is the call, this may be the insn
				 that loads the function address.

				 Extract the function address from the insn
				 that loads it into a register.
				 If this insn was cse'd, we get incorrect code.

				 So emit a new move insn that copies the
				 function address into the register that the
				 call insn will use.  flow.c will delete any
				 redundant stores that we have created.  */
			      if (GET_CODE (next) == CALL_INSN
				  && GET_CODE (body) == SET
				  && GET_CODE (SET_DEST (body)) == REG
				  && (n = find_reg_note (temp, REG_EQUAL,
							 NULL_RTX)))
				{
				  fn_reg = SET_SRC (body);
				  if (GET_CODE (fn_reg) != REG)
				    fn_reg = SET_DEST (body);
				  fn_address = XEXP (n, 0);
				  fn_address_insn = temp;
				}

			      /* We have the call insn.
				 If it uses the register we suspect it might,
				 load it with the correct address directly.  */
			      if (GET_CODE (temp) == CALL_INSN
				  && fn_address != 0
				  && reg_referenced_p (fn_reg, body))
				emit_insn_after (gen_move_insn (fn_reg,
								fn_address),
						 fn_address_insn);

			      if (GET_CODE (temp) == CALL_INSN)
				{
				  i1 = emit_call_insn_before (body, loop_start);
				  /* Because the USAGE information potentially
				     contains objects other than hard registers
				     we need to copy it.  */
				  if (CALL_INSN_FUNCTION_USAGE (temp))
				    CALL_INSN_FUNCTION_USAGE (i1)
				      = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
				}
			      else
				i1 = emit_insn_before (body, loop_start);

			      if (first == 0)
				first = i1;
			      if (temp == fn_address_insn)
				fn_address_insn = i1;
			      REG_NOTES (i1) = REG_NOTES (temp);
			      delete_insn (temp);
			    }
			}
		      if (m->savemode != VOIDmode)
			{
			  /* P sets REG to zero; but we should clear only
			     the bits that are not covered by the mode
			     m->savemode.  */
			  rtx reg = m->set_dest;
			  rtx sequence;
			  rtx tem;

			  start_sequence ();
			  tem = expand_binop
			    (GET_MODE (reg), and_optab, reg,
			     GEN_INT ((((HOST_WIDE_INT) 1
					<< GET_MODE_BITSIZE (m->savemode)))
				      - 1),
			     reg, 1, OPTAB_LIB_WIDEN);
			  if (tem == 0)
			    abort ();
			  if (tem != reg)
			    emit_move_insn (reg, tem);
			  sequence = gen_sequence ();
			  end_sequence ();
			  i1 = emit_insn_before (sequence, loop_start);
			}
		      else if (GET_CODE (p) == CALL_INSN)
			{
			  i1 = emit_call_insn_before (PATTERN (p), loop_start);
			  /* Because the USAGE information potentially
			     contains objects other than hard registers
			     we need to copy it.  */
			  if (CALL_INSN_FUNCTION_USAGE (p))
			    CALL_INSN_FUNCTION_USAGE (i1)
			      = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
			}
		      else
			i1 = emit_insn_before (PATTERN (p), loop_start);

		      REG_NOTES (i1) = REG_NOTES (p);

		      /* If there is a REG_EQUAL note present whose value is
			 not loop invariant, then delete it, since it may
			 cause problems with later optimization passes.
			 It is possible for cse to create such notes
			 like this as a result of record_jump_cond.  */

		      if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
			  && ! invariant_p (XEXP (temp, 0)))
			remove_note (i1, temp);
		      if (new_start == 0)
			new_start = i1;

		      if (loop_dump_stream)
			fprintf (loop_dump_stream, " moved to %d",
				 INSN_UID (i1));

#if 0
		      /* This isn't needed because REG_NOTES is copied
			 below and is wrong since P might be a PARALLEL.  */
		      if (REG_NOTES (i1) == 0
			  && ! m->partial /* But not if it's a zero-extend clr.  */
			  && ! m->global /* and not if used outside the loop
					    (since it might get set outside).  */
			  && CONSTANT_P (SET_SRC (PATTERN (p))))
			REG_NOTES (i1)
			  = gen_rtx (EXPR_LIST, REG_EQUAL,
				     SET_SRC (PATTERN (p)), REG_NOTES (i1));
#endif
		      /* If library call, now fix the REG_NOTES that contain
			 insn pointers, namely REG_LIBCALL on FIRST
			 and REG_RETVAL on I1.  */
		      if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
			{
			  XEXP (temp, 0) = first;
			  temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
			  XEXP (temp, 0) = i1;
			}

		      delete_insn (p);
		      do p = NEXT_INSN (p);
		      while (p && GET_CODE (p) == NOTE);
		    }
		  /* The more regs we move, the less we like moving them.  */
		  threshold -= 3;
		}

	      /* Any other movable that loads the same register
		 MUST be moved.  */
	      already_moved[regno] = 1;

	      /* This reg has been moved out of one loop.  */
	      moved_once[regno] = 1;

	      /* The reg set here is now invariant.  */
	      if (! m->partial)
		n_times_set[regno] = 0;

	      m->done = 1;

	      /* Change the length-of-life info for the register
		 to say it lives at least the full length of this loop.
		 This will help guide optimizations in outer loops.  */

	      if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
		/* This is the old insn before all the moved insns.
		   We can't use the moved insn because it is out of range
		   in uid_luid.  Only the old insns have luids.  */
		REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
	      if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
		REGNO_LAST_UID (regno) = INSN_UID (end);
1933 /* Combine with this moved insn any other matching movables. */
1936 for (m1
= movables
; m1
; m1
= m1
->next
)
1941 /* Schedule the reg loaded by M1
1942 for replacement so that shares the reg of M.
1943 If the modes differ (only possible in restricted
1944 circumstances, make a SUBREG. */
1945 if (GET_MODE (m
->set_dest
) == GET_MODE (m1
->set_dest
))
1946 reg_map
[m1
->regno
] = m
->set_dest
;
1949 = gen_lowpart_common (GET_MODE (m1
->set_dest
),
		      /* Get rid of the matching insn
			 and prevent further processing of it.  */
		      m1->done = 1;

		      /* If library call, delete all insns except the last,
			 which is deleted below.  */
		      if (temp = find_reg_note (m1->insn, REG_RETVAL,
						NULL_RTX))
			{
			  for (temp = XEXP (temp, 0); temp != m1->insn;
			       temp = NEXT_INSN (temp))
			    delete_insn (temp);
			}
		      delete_insn (m1->insn);
		      /* Any other movable that loads the same register
			 MUST be moved.  */
		      already_moved[m1->regno] = 1;

		      /* The reg merged here is now invariant,
			 if the reg it matches is invariant.  */
		      if (! m->partial)
			n_times_set[m1->regno] = 0;
		    }
	    }
	  else if (loop_dump_stream)
	    fprintf (loop_dump_stream, "not desirable");
	}
      else if (loop_dump_stream && !m->match)
	fprintf (loop_dump_stream, "not safe");

      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\n");
    }
  if (new_start == 0)
    new_start = loop_start;

  /* Go through all the instructions in the loop, making
     all the register substitutions scheduled in REG_MAP.  */
  for (p = new_start; p != end; p = NEXT_INSN (p))
    if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	|| GET_CODE (p) == CALL_INSN)
      {
	replace_regs (PATTERN (p), reg_map, nregs, 0);
	replace_regs (REG_NOTES (p), reg_map, nregs, 0);
	INSN_CODE (p) = -1;
      }
}
/* Scan X and replace the address of any MEM in it with ADDR.
   REG is the address that MEM should have before the replacement.  */

static void
replace_call_address (x, reg, addr)
     rtx x, reg, addr;
{
  register enum rtx_code code;
  register int i;
  register char *fmt;

  if (x == 0)
    return;
  code = GET_CODE (x);
  switch (code)
    {
    case PC:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
    case REG:
      return;

    case SET:
      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 1), reg, addr);
      return;

    case CALL:
      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 0), reg, addr);
      return;

    case MEM:
      /* If this MEM uses a reg other than the one we expected,
	 something is wrong.  */
      if (XEXP (x, 0) != reg)
	abort ();
      XEXP (x, 0) = addr;
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	replace_call_address (XEXP (x, i), reg, addr);
      if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    replace_call_address (XVECEXP (x, i, j), reg, addr);
	}
    }
}
/* Return the number of memory refs to addresses that vary
   in the rtx X.  */

static int
count_nonfixed_reads (x)
     rtx x;
{
  register enum rtx_code code;
  register int i;
  register char *fmt;
  int value;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case PC:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
    case REG:
      return 0;

    case MEM:
      return ((invariant_p (XEXP (x, 0)) != 1)
	      + count_nonfixed_reads (XEXP (x, 0)));
    }

  value = 0;
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	value += count_nonfixed_reads (XEXP (x, i));
      if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    value += count_nonfixed_reads (XVECEXP (x, i, j));
	}
    }
  return value;
}
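/* For example, a load like (mem (plus (reg 64) (const_int 4))) counts 1
   if invariant_p says the address may vary within the loop, and 0 if the
   address is loop invariant; MEMs nested inside addresses are counted
   recursively.  (The register number is chosen only for illustration.)  */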
/* P is an instruction that sets a register to the result of a ZERO_EXTEND.
   Replace it with an instruction to load just the low bytes
   if the machine supports such an instruction,
   and insert above LOOP_START an instruction to clear the register.  */

static void
constant_high_bytes (p, loop_start)
     rtx p, loop_start;
{
  register rtx new;
  register int insn_code_number;

  /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
     to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...).  */

  new = gen_rtx (SET, VOIDmode,
		 gen_rtx (STRICT_LOW_PART, VOIDmode,
			  gen_rtx (SUBREG,
				   GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
				   SET_DEST (PATTERN (p)),
				   0)),
		 XEXP (SET_SRC (PATTERN (p)), 0));
  insn_code_number = recog (new, p);

  if (insn_code_number)
    {
      /* Clear destination register before the loop.  */
      emit_insn_before (gen_rtx (SET, VOIDmode,
				 SET_DEST (PATTERN (p)),
				 const0_rtx),
			loop_start);

      /* Inside the loop, just load the low part.  */
      PATTERN (p) = new;
    }
}
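/* As an illustration at the source level: for a loop body doing
     c = (unsigned char) x;
   repeatedly, this transformation emits one "c = 0;" before the loop and
   turns the body into a load of just the low byte of C, on machines that
   have a suitable strict-low-part load.  */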
/* Scan a loop setting the variables `unknown_address_altered',
   `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
   and `loop_has_volatile'.
   Also, fill in the array `loop_store_mems'.  */

static void
prescan_loop (start, end)
     rtx start, end;
{
  register int level = 1;
  register rtx insn;

  unknown_address_altered = 0;
  loop_has_call = 0;
  loop_has_volatile = 0;
  loop_store_mems_idx = 0;

  num_mem_sets = 0;
  loops_enclosed = 1;
  loop_continue = 0;

  for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
       insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE)
	{
	  if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	    {
	      ++level;
	      /* Count number of loops contained in this one.  */
	      loops_enclosed++;
	    }
	  else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
	    --level;
	  else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
	    {
	      if (level == 1)
		loop_continue = insn;
	    }
	}
      else if (GET_CODE (insn) == CALL_INSN)
	{
	  unknown_address_altered = 1;
	  loop_has_call = 1;
	}
      else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
	{
	  if (volatile_refs_p (PATTERN (insn)))
	    loop_has_volatile = 1;

	  note_stores (PATTERN (insn), note_addr_stored);
	}
    }
}
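/* For illustration (hypothetical counts): for a loop containing one
   nested loop, one call, and two distinct stores, prescan_loop leaves
   loops_enclosed == 2, loop_has_call == 1, unknown_address_altered == 1
   (because of the call), and the two store addresses recorded in
   loop_store_mems.  */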
/* Scan the function looking for loops.  Record the start and end of each loop.
   Also mark as invalid loops any loops that contain a setjmp or are branched
   to from outside the loop.  */

static void
find_and_verify_loops (f)
     rtx f;
{
  rtx insn, label;
  int current_loop = -1;
  int next_loop = -1;
  int loop;

  /* If there are jumps to undefined labels,
     treat them as jumps out of any/all loops.
     This also avoids writing past end of tables when there are no loops.  */
  uid_loop_num[0] = -1;

  /* Find boundaries of loops, mark which loops are contained within
     loops, and invalidate loops that have setjmp.  */

  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE)
	switch (NOTE_LINE_NUMBER (insn))
	  {
	  case NOTE_INSN_LOOP_BEG:
	    loop_number_loop_starts[++next_loop] = insn;
	    loop_number_loop_ends[next_loop] = 0;
	    loop_outer_loop[next_loop] = current_loop;
	    loop_invalid[next_loop] = 0;
	    loop_number_exit_labels[next_loop] = 0;
	    loop_number_exit_count[next_loop] = 0;
	    current_loop = next_loop;
	    break;
	  case NOTE_INSN_SETJMP:
	    /* In this case, we must invalidate our current loop and any
	       enclosing loops.  */
	    for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
	      {
		loop_invalid[loop] = 1;
		if (loop_dump_stream)
		  fprintf (loop_dump_stream,
			   "\nLoop at %d ignored due to setjmp.\n",
			   INSN_UID (loop_number_loop_starts[loop]));
	      }
	    break;

	  case NOTE_INSN_LOOP_END:
	    if (current_loop == -1)
	      abort ();

	    loop_number_loop_ends[current_loop] = insn;
	    current_loop = loop_outer_loop[current_loop];
	    break;
	  }

      /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
	 enclosing loop, but this doesn't matter.  */
      uid_loop_num[INSN_UID (insn)] = current_loop;
    }
  /* Any loop containing a label used in an initializer must be invalidated,
     because it can be jumped into from anywhere.  */

  for (label = forced_labels; label; label = XEXP (label, 1))
    {
      int loop_num;

      for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
	   loop_num != -1;
	   loop_num = loop_outer_loop[loop_num])
	loop_invalid[loop_num] = 1;
    }

  /* Any loop containing a label used for an exception handler must be
     invalidated, because it can be jumped into from anywhere.  */

  for (label = exception_handler_labels; label; label = XEXP (label, 1))
    {
      int loop_num;

      for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
	   loop_num != -1;
	   loop_num = loop_outer_loop[loop_num])
	loop_invalid[loop_num] = 1;
    }
  /* Now scan all insn's in the function.  If any JUMP_INSN branches into a
     loop that it is not contained within, that loop is marked invalid.
     If any INSN or CALL_INSN uses a label's address, then the loop containing
     that label is marked invalid, because it could be jumped into from
     anywhere.

     Also look for blocks of code ending in an unconditional branch that
     exits the loop.  If such a block is surrounded by a conditional
     branch around the block, move the block elsewhere (see below) and
     invert the jump to point to the code block.  This may eliminate a
     label in our loop and will simplify processing by both us and a
     possible second cse pass.  */

  for (insn = f; insn; insn = NEXT_INSN (insn))
    if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
      {
	int this_loop_num = uid_loop_num[INSN_UID (insn)];

	if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
	  {
	    rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
	    if (note)
	      {
		int loop_num;

		for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
		     loop_num != -1;
		     loop_num = loop_outer_loop[loop_num])
		  loop_invalid[loop_num] = 1;
	      }
	  }

	if (GET_CODE (insn) != JUMP_INSN)
	  continue;

	mark_loop_jump (PATTERN (insn), this_loop_num);
	/* See if this is an unconditional branch outside the loop.  */
	if (this_loop_num != -1
	    && (GET_CODE (PATTERN (insn)) == RETURN
		|| (simplejump_p (insn)
		    && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
			!= this_loop_num)))
	    && get_max_uid () < max_uid_for_loop)
	  {
	    rtx p;
	    rtx our_next = next_real_insn (insn);
	    int dest_loop;
	    int outer_loop = -1;

	    /* Go backwards until we reach the start of the loop, a label,
	       or a JUMP_INSN.  */
	    for (p = PREV_INSN (insn);
		 GET_CODE (p) != CODE_LABEL
		 && ! (GET_CODE (p) == NOTE
		       && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
		 && GET_CODE (p) != JUMP_INSN;
		 p = PREV_INSN (p))
	      ;

	    /* Check for the case where we have a jump to an inner nested
	       loop, and do not perform the optimization in that case.  */

	    if (JUMP_LABEL (insn))
	      {
		dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
		if (dest_loop != -1)
		  {
		    for (outer_loop = dest_loop; outer_loop != -1;
			 outer_loop = loop_outer_loop[outer_loop])
		      if (outer_loop == this_loop_num)
			break;
		  }
	      }
	    /* Make sure that the target of P is within the current loop.  */

	    if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
		&& uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
	      outer_loop = this_loop_num;

	    /* If we stopped on a JUMP_INSN to the next insn after INSN,
	       we have a block of code to try to move.

	       We look backward and then forward from the target of INSN
	       to find a BARRIER at the same loop depth as the target.
	       If we find such a BARRIER, we make a new label for the start
	       of the block, invert the jump in P and point it to that label,
	       and move the block of code to the spot we found.  */

	    if (outer_loop == -1
		&& GET_CODE (p) == JUMP_INSN
		&& JUMP_LABEL (p) != 0
		/* Just ignore jumps to labels that were never emitted.
		   These always indicate compilation errors.  */
		&& INSN_UID (JUMP_LABEL (p)) != 0
		&& condjump_p (p)
		&& ! simplejump_p (p)
		&& next_real_insn (JUMP_LABEL (p)) == our_next)
	      {
		rtx target
		  = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
		int target_loop_num = uid_loop_num[INSN_UID (target)];
		rtx loc;

		for (loc = target; loc; loc = PREV_INSN (loc))
		  if (GET_CODE (loc) == BARRIER
		      && uid_loop_num[INSN_UID (loc)] == target_loop_num)
		    break;

		if (loc == 0)
		  for (loc = target; loc; loc = NEXT_INSN (loc))
		    if (GET_CODE (loc) == BARRIER
			&& uid_loop_num[INSN_UID (loc)] == target_loop_num)
		      break;

		if (loc)
		  {
		    rtx cond_label = JUMP_LABEL (p);
		    rtx new_label = get_label_after (p);

		    /* Ensure our label doesn't go away.  */
		    LABEL_NUSES (cond_label)++;

		    /* Verify that uid_loop_num is large enough and that
		       we can invert P.  */
		    if (invert_jump (p, new_label))
		      {
			rtx q, r;

			/* Include the BARRIER after INSN and copy the
			   block after LOC.  */
			new_label = squeeze_notes (new_label, NEXT_INSN (insn));
			reorder_insns (new_label, NEXT_INSN (insn), loc);

			/* All those insns are now in TARGET_LOOP_NUM.  */
			for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
			     q = NEXT_INSN (q))
			  uid_loop_num[INSN_UID (q)] = target_loop_num;
			/* The label jumped to by INSN is no longer a loop exit.
			   Unless INSN does not have a label (e.g., it is a
			   RETURN insn), search loop_number_exit_labels to find
			   its label_ref, and remove it.  Also turn off
			   LABEL_OUTSIDE_LOOP_P bit.  */
			if (JUMP_LABEL (insn))
			  {
			    int loop_num;

			    for (q = 0,
				 r = loop_number_exit_labels[this_loop_num];
				 r; q = r, r = LABEL_NEXTREF (r))
			      if (XEXP (r, 0) == JUMP_LABEL (insn))
				{
				  LABEL_OUTSIDE_LOOP_P (r) = 0;
				  if (q)
				    LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
				  else
				    loop_number_exit_labels[this_loop_num]
				      = LABEL_NEXTREF (r);
				  break;
				}

			    for (loop_num = this_loop_num;
				 loop_num != -1 && loop_num != target_loop_num;
				 loop_num = loop_outer_loop[loop_num])
			      loop_number_exit_count[loop_num]--;

			    /* If we didn't find it, then something is wrong.  */
			    if (! r)
			      abort ();
			  }

			/* P is now a jump outside the loop, so it must be put
			   in loop_number_exit_labels, and marked as such.
			   The easiest way to do this is to just call
			   mark_loop_jump again for P.  */
			mark_loop_jump (PATTERN (p), this_loop_num);
			/* If INSN now jumps to the insn after it,
			   delete INSN.  */
			if (JUMP_LABEL (insn) != 0
			    && (next_real_insn (JUMP_LABEL (insn))
				== next_real_insn (insn)))
			  delete_insn (insn);
		      }

		    /* Continue the loop after where the conditional
		       branch used to jump, since the only branch insn
		       in the block (if it still remains) is an inter-loop
		       branch and hence needs no processing.  */
		    insn = NEXT_INSN (cond_label);

		    if (--LABEL_NUSES (cond_label) == 0)
		      delete_insn (cond_label);

		    /* This loop will be continued with NEXT_INSN (insn).  */
		    insn = PREV_INSN (insn);
		  }
	      }
	  }
      }
}
/* If any label in X jumps to a loop different from LOOP_NUM and any of the
   loops it is contained in, mark the target loop invalid.

   For speed, we assume that X is part of a pattern of a JUMP_INSN.  */

static void
mark_loop_jump (x, loop_num)
     rtx x;
     int loop_num;
{
  int dest_loop;
  int outer_loop;
  int i;

  switch (GET_CODE (x))
    {
    case PC:
    case USE:
    case CLOBBER:
    case REG:
    case MEM:
    case CONST_INT:
    case CONST_DOUBLE:
    case RETURN:
      return;

    case CONST:
      /* There could be a label reference in here.  */
      mark_loop_jump (XEXP (x, 0), loop_num);
      return;

    case PLUS:
    case MINUS:
    case MULT:
      mark_loop_jump (XEXP (x, 0), loop_num);
      mark_loop_jump (XEXP (x, 1), loop_num);
      return;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      mark_loop_jump (XEXP (x, 0), loop_num);
      return;

    case LABEL_REF:
      dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];

      /* Link together all labels that branch outside the loop.  This
	 is used by final_[bg]iv_value and the loop unrolling code.  Also
	 mark this LABEL_REF so we know that this branch should predict
	 false.  */

      /* A check to make sure the label is not in an inner nested loop,
	 since this does not count as a loop exit.  */
      if (dest_loop != -1)
	{
	  for (outer_loop = dest_loop; outer_loop != -1;
	       outer_loop = loop_outer_loop[outer_loop])
	    if (outer_loop == loop_num)
	      break;
	}
      else
	outer_loop = -1;

      if (loop_num != -1 && outer_loop == -1)
	{
	  LABEL_OUTSIDE_LOOP_P (x) = 1;
	  LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
	  loop_number_exit_labels[loop_num] = x;

	  for (outer_loop = loop_num;
	       outer_loop != -1 && outer_loop != dest_loop;
	       outer_loop = loop_outer_loop[outer_loop])
	    loop_number_exit_count[outer_loop]++;
	}
      /* If this is inside a loop, but not in the current loop or one enclosed
	 by it, it invalidates at least one loop.  */

      if (dest_loop == -1)
	return;

      /* We must invalidate every nested loop containing the target of this
	 label, except those that also contain the jump insn.  */

      for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
	{
	  /* Stop when we reach a loop that also contains the jump insn.  */
	  for (outer_loop = loop_num; outer_loop != -1;
	       outer_loop = loop_outer_loop[outer_loop])
	    if (dest_loop == outer_loop)
	      return;

	  /* If we get here, we know we need to invalidate a loop.  */
	  if (loop_dump_stream && ! loop_invalid[dest_loop])
	    fprintf (loop_dump_stream,
		     "\nLoop at %d ignored due to multiple entry points.\n",
		     INSN_UID (loop_number_loop_starts[dest_loop]));

	  loop_invalid[dest_loop] = 1;
	}
      return;
    case SET:
      /* If this is not setting pc, ignore.  */
      if (SET_DEST (x) == pc_rtx)
	mark_loop_jump (SET_SRC (x), loop_num);
      return;

    case IF_THEN_ELSE:
      mark_loop_jump (XEXP (x, 1), loop_num);
      mark_loop_jump (XEXP (x, 2), loop_num);
      return;

    case PARALLEL:
    case ADDR_VEC:
      for (i = 0; i < XVECLEN (x, 0); i++)
	mark_loop_jump (XVECEXP (x, 0, i), loop_num);
      return;

    case ADDR_DIFF_VEC:
      for (i = 0; i < XVECLEN (x, 1); i++)
	mark_loop_jump (XVECEXP (x, 1, i), loop_num);
      return;

    default:
      /* Treat anything else (such as a symbol_ref)
	 as a branch out of this loop, but not into any loop.  */

      if (loop_num != -1)
	{
	  loop_number_exit_labels[loop_num] = x;

	  for (outer_loop = loop_num; outer_loop != -1;
	       outer_loop = loop_outer_loop[outer_loop])
	    loop_number_exit_count[outer_loop]++;
	}
      return;
    }
}
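/* For illustration: a conditional jump expressed as
     (set (pc) (if_then_else (ne ...) (label_ref L1) (pc)))
   recurses through the SET and IF_THEN_ELSE cases above and finally
   handles (label_ref L1) in the LABEL_REF case.  L1 is a hypothetical
   label.  */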
/* Return nonzero if there is a label in the range from
   insn INSN to and including the insn whose luid is END.
   INSN must have an assigned luid (i.e., it must not have
   been previously created by loop.c).  */

static int
labels_in_range_p (insn, end)
     rtx insn;
     int end;
{
  while (insn && INSN_LUID (insn) <= end)
    {
      if (GET_CODE (insn) == CODE_LABEL)
	return 1;
      insn = NEXT_INSN (insn);
    }

  return 0;
}
/* Record that a memory reference X is being set.  */

static void
note_addr_stored (x)
     rtx x;
{
  register int i;

  if (x == 0 || GET_CODE (x) != MEM)
    return;

  /* Count number of memory writes.
     This affects heuristics in strength_reduce.  */
  num_mem_sets++;

  /* BLKmode MEM means all memory is clobbered.  */
  if (GET_MODE (x) == BLKmode)
    unknown_address_altered = 1;

  if (unknown_address_altered)
    return;

  for (i = 0; i < loop_store_mems_idx; i++)
    if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
	&& MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
      {
	/* We are storing at the same address as previously noted.  Save the
	   wider reference.  */
	if (GET_MODE_SIZE (GET_MODE (x))
	    > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
	  loop_store_mems[i] = x;
	break;
      }

  if (i == NUM_STORES)
    unknown_address_altered = 1;

  else if (i == loop_store_mems_idx)
    loop_store_mems[loop_store_mems_idx++] = x;
}
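/* For illustration: two stores to (mem:SI (reg 70)) in the same loop are
   collapsed into one loop_store_mems entry, keeping the wider of the two
   references; once all NUM_STORES entries are in use, we give up and
   treat every address as altered.  (The register number is
   hypothetical.)  */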
/* Return nonzero if the rtx X is invariant over the current loop.

   The value is 2 if we refer to something only conditionally invariant.

   If `unknown_address_altered' is nonzero, no memory ref is invariant.
   Otherwise, a memory ref is invariant if it does not conflict with
   anything stored in `loop_store_mems'.  */

int
invariant_p (x)
     register rtx x;
{
  register int i;
  register enum rtx_code code;
  register char *fmt;
  int conditional = 0;

  if (x == 0)
    return 1;
  code = GET_CODE (x);
  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
      return 1;

    case LABEL_REF:
      /* A LABEL_REF is normally invariant, however, if we are unrolling
	 loops, and this label is inside the loop, then it isn't invariant.
	 This is because each unrolled copy of the loop body will have
	 a copy of this label.  If this was invariant, then an insn loading
	 the address of this label into a register might get moved outside
	 the loop, and then each loop body would end up using the same label.

	 We don't know the loop bounds here though, so just fail for all
	 labels.  */
      if (flag_unroll_loops)
	return 0;
      else
	return 1;

    case PC:
    case CC0:
    case UNSPEC_VOLATILE:
      return 0;

    case REG:
      /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
	 since the reg might be set by initialization within the loop.  */

      if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	   || x == arg_pointer_rtx)
	  && ! current_function_has_nonlocal_goto)
	return 1;

      if (loop_has_call
	  && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
	return 0;

      if (n_times_set[REGNO (x)] < 0)
	return 2;

      return n_times_set[REGNO (x)] == 0;

    case MEM:
      /* Volatile memory references must be rejected.  Do this before
	 checking for read-only items, so that volatile read-only items
	 will be rejected also.  */
      if (MEM_VOLATILE_P (x))
	return 0;

      /* Read-only items (such as constants in a constant pool) are
	 invariant if their address is.  */
      if (RTX_UNCHANGING_P (x))
	break;

      /* If we filled the table (or had a subroutine call), any location
	 in memory could have been clobbered.  */
      if (unknown_address_altered)
	return 0;

      /* See if there is any dependence between a store and this load.  */
      for (i = loop_store_mems_idx - 1; i >= 0; i--)
	if (true_dependence (loop_store_mems[i], x))
	  return 0;

      /* It's not invalidated by a store in memory
	 but we must still verify the address is invariant.  */
      break;

    case ASM_OPERANDS:
      /* Don't mess with insns declared volatile.  */
      if (MEM_VOLATILE_P (x))
	return 0;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  int tem = invariant_p (XEXP (x, i));
	  if (tem == 0)
	    return 0;
	  if (tem == 2)
	    conditional = 1;
	}
      else if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    {
	      int tem = invariant_p (XVECEXP (x, i, j));
	      if (tem == 0)
		return 0;
	      if (tem == 2)
		conditional = 1;
	    }
	}
    }

  return 1 + conditional;
}
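/* For illustration: (plus (reg 60) (const_int 4)) is invariant when
   n_times_set[60] == 0, and invariant_p returns 2 ("conditionally
   invariant") when reg 60 was set only by conditionally executed but
   otherwise invariant sets (n_times_set[60] < 0).  The register number
   is hypothetical.  */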
/* Return nonzero if all the insns in the loop that set REG
   are INSN and the immediately following insns,
   and if each of those insns sets REG in an invariant way
   (not counting uses of REG in them).

   The value is 2 if some of these insns are only conditionally invariant.

   We assume that INSN itself is the first set of REG
   and that its source is invariant.  */

static int
consec_sets_invariant_p (reg, n_sets, insn)
     int n_sets;
     rtx reg, insn;
{
  register rtx p = insn;
  register int regno = REGNO (reg);
  rtx temp;
  /* Number of sets we have to insist on finding after INSN.  */
  int count = n_sets - 1;
  int old = n_times_set[regno];
  int value = 0;
  int this;

  /* If N_SETS hit the limit, we can't rely on its value.  */
  if (n_sets == 127)
    return 0;

  n_times_set[regno] = 0;

  while (count > 0)
    {
      register enum rtx_code code;
      rtx set;

      p = NEXT_INSN (p);
      code = GET_CODE (p);

      /* If library call, skip to end of it.  */
      if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
	p = XEXP (temp, 0);

      this = 0;
      if (code == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && REGNO (SET_DEST (set)) == regno)
	{
	  this = invariant_p (SET_SRC (set));
	  if (this != 0)
	    value |= this;
	  else if (temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
	    {
	      /* If this is a libcall, then any invariant REG_EQUAL note is OK.
		 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
		 notes are OK.  */
	      this = (CONSTANT_P (XEXP (temp, 0))
		      || (find_reg_note (p, REG_RETVAL, NULL_RTX)
			  && invariant_p (XEXP (temp, 0))));
	      value |= this;
	    }
	}
      if (this != 0)
	count--;
      else if (code != NOTE)
	{
	  n_times_set[regno] = old;
	  return 0;
	}
    }

  n_times_set[regno] = old;
  /* If invariant_p ever returned 2, we return 2.  */
  return 1 + (value & 2);
}
#if 0
/* I don't think this condition is sufficient to allow INSN
   to be moved, so we no longer test it.  */

/* Return 1 if all insns in the basic block of INSN and following INSN
   that set REG are invariant according to TABLE.  */

static int
all_sets_invariant_p (reg, insn, table)
     rtx reg, insn;
     short *table;
{
  register rtx p = insn;
  register int regno = REGNO (reg);

  while (1)
    {
      register enum rtx_code code;
      p = NEXT_INSN (p);
      code = GET_CODE (p);
      if (code == CODE_LABEL || code == JUMP_INSN)
	return 1;
      if (code == INSN && GET_CODE (PATTERN (p)) == SET
	  && GET_CODE (SET_DEST (PATTERN (p))) == REG
	  && REGNO (SET_DEST (PATTERN (p))) == regno)
	{
	  if (!invariant_p (SET_SRC (PATTERN (p)), table))
	    return 0;
	}
    }
}
#endif /* 0 */
/* Look at all uses (not sets) of registers in X.  For each, if it is
   the single use, set USAGE[REGNO] to INSN; if there was a previous use in
   a different insn, set USAGE[REGNO] to const0_rtx.  */

static void
find_single_use_in_loop (insn, x, usage)
     rtx insn;
     rtx x;
     rtx *usage;
{
  enum rtx_code code = GET_CODE (x);
  char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    usage[REGNO (x)]
      = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
	? const0_rtx : insn;

  else if (code == SET)
    {
      /* Don't count SET_DEST if it is a REG; otherwise count things
	 in SET_DEST because if a register is partially modified, it won't
	 show up as a potential movable so we don't care how USAGE is set
	 for it.  */
      if (GET_CODE (SET_DEST (x)) != REG)
	find_single_use_in_loop (insn, SET_DEST (x), usage);
      find_single_use_in_loop (insn, SET_SRC (x), usage);
    }
  else
    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e' && XEXP (x, i) != 0)
	  find_single_use_in_loop (insn, XEXP (x, i), usage);
	else if (fmt[i] == 'E')
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
      }
}
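/* For illustration: if (reg 80) is read by exactly one insn in the loop,
   usage[80] ends up pointing at that insn; a second reading insn demotes
   the entry to const0_rtx, and a register never read leaves it 0.  The
   register number is hypothetical.  */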
/* Increment N_TIMES_SET at the index of each register
   that is modified by an insn between FROM and TO.
   If the value of an element of N_TIMES_SET becomes 127 or more,
   stop incrementing it, to avoid overflow.

   Store in SINGLE_USAGE[I] the single insn in which register I is
   used, if it is only used once.  Otherwise, it is set to 0 (for no
   uses) or const0_rtx for more than one use.  This parameter may be zero,
   in which case this processing is not done.

   Store in *COUNT_PTR the number of actual instructions
   in the loop.  We use this to decide what is worth moving out.  */

/* last_set[n] is nonzero iff reg n has been set in the current basic block.
   In that case, it is the insn that last set reg n.  */

static void
count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
     register rtx from, to;
     char *may_not_move;
     rtx *single_usage;
     int *count_ptr;
     int nregs;
{
  register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
  register rtx insn;
  register rtx dest;
  register int count = 0;

  bzero ((char *) last_set, nregs * sizeof (rtx));
  for (insn = from; insn != to; insn = NEXT_INSN (insn))
    {
      if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
	{
	  ++count;

	  /* If requested, record registers that have exactly one use.  */
	  if (single_usage)
	    {
	      find_single_use_in_loop (insn, PATTERN (insn), single_usage);

	      /* Include uses in REG_EQUAL notes.  */
	      if (REG_NOTES (insn))
		find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
	    }

	  if (GET_CODE (PATTERN (insn)) == CLOBBER
	      && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
	    /* Don't move a reg that has an explicit clobber.
	       We might do so sometimes, but it's not worth the pain.  */
	    may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;

	  if (GET_CODE (PATTERN (insn)) == SET
	      || GET_CODE (PATTERN (insn)) == CLOBBER)
	    {
	      dest = SET_DEST (PATTERN (insn));
	      while (GET_CODE (dest) == SUBREG
		     || GET_CODE (dest) == ZERO_EXTRACT
		     || GET_CODE (dest) == SIGN_EXTRACT
		     || GET_CODE (dest) == STRICT_LOW_PART)
		dest = XEXP (dest, 0);
	      if (GET_CODE (dest) == REG)
		{
		  register int regno = REGNO (dest);
		  /* If this is the first setting of this reg
		     in current basic block, and it was set before,
		     it must be set in two basic blocks, so it cannot
		     be moved out of the loop.  */
		  if (n_times_set[regno] > 0 && last_set[regno] == 0)
		    may_not_move[regno] = 1;
		  /* If this is not first setting in current basic block,
		     see if reg was used in between previous one and this.
		     If so, neither one can be moved.  */
		  if (last_set[regno] != 0
		      && reg_used_between_p (dest, last_set[regno], insn))
		    may_not_move[regno] = 1;
		  if (n_times_set[regno] < 127)
		    ++n_times_set[regno];
		  last_set[regno] = insn;
		}
	    }
	  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
	    {
	      register int i;
	      for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
		{
		  register rtx x = XVECEXP (PATTERN (insn), 0, i);
		  if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
		    /* Don't move a reg that has an explicit clobber.
		       It's not worth the pain to try to do it correctly.  */
		    may_not_move[REGNO (XEXP (x, 0))] = 1;

		  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
		    {
		      dest = SET_DEST (x);
		      while (GET_CODE (dest) == SUBREG
			     || GET_CODE (dest) == ZERO_EXTRACT
			     || GET_CODE (dest) == SIGN_EXTRACT
			     || GET_CODE (dest) == STRICT_LOW_PART)
			dest = XEXP (dest, 0);
		      if (GET_CODE (dest) == REG)
			{
			  register int regno = REGNO (dest);
			  if (n_times_set[regno] > 0 && last_set[regno] == 0)
			    may_not_move[regno] = 1;
			  if (last_set[regno] != 0
			      && reg_used_between_p (dest, last_set[regno], insn))
			    may_not_move[regno] = 1;
			  if (n_times_set[regno] < 127)
			    ++n_times_set[regno];
			  last_set[regno] = insn;
			}
		    }
		}
	    }
	}

      if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
	bzero ((char *) last_set, nregs * sizeof (rtx));
    }
  *count_ptr = count;
}
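/* For illustration: a register set once per iteration inside a single
   basic block gets n_times_set[regno] == 1 and stays a movable
   candidate; a register set in two different basic blocks of the loop
   body trips the last_set check above and is marked in may_not_move.  */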
/* Given a loop that is bounded by LOOP_START and LOOP_END
   and that is entered at SCAN_START,
   return 1 if the register set in SET contained in insn INSN is used by
   any insn that precedes INSN in cyclic order starting
   from the loop entry point.

   We don't want to use INSN_LUID here because if we restrict INSN to those
   that have a valid INSN_LUID, it means we cannot move an invariant out
   from an inner loop past two loops.  */

static int
loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
     rtx set, insn, loop_start, scan_start, loop_end;
{
  rtx reg = SET_DEST (set);
  rtx p;

  /* Scan forward checking for register usage.  If we hit INSN, we
     are done.  Otherwise, if we hit LOOP_END, wrap around to LOOP_START.  */
  for (p = scan_start; p != insn; p = NEXT_INSN (p))
    {
      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
	  && reg_overlap_mentioned_p (reg, PATTERN (p)))
	return 1;

      if (p == loop_end)
	p = loop_start;
    }

  return 0;
}
3146 /* A "basic induction variable" or biv is a pseudo reg that is set
3147 (within this loop) only by incrementing or decrementing it. */
3148 /* A "general induction variable" or giv is a pseudo reg whose
3149 value is a linear function of a biv. */
3151 /* Bivs are recognized by `basic_induction_var';
3152 Givs by `general_induct_var'. */
3154 /* Indexed by register number, indicates whether or not register is an
3155 induction variable, and if so what type. */
3157 enum iv_mode
*reg_iv_type
;
3159 /* Indexed by register number, contains pointer to `struct induction'
3160 if register is an induction variable. This holds general info for
3161 all induction variables. */
3163 struct induction
**reg_iv_info
;
3165 /* Indexed by register number, contains pointer to `struct iv_class'
3166 if register is a basic induction variable. This holds info describing
3167 the class (a related group) of induction variables that the biv belongs
3170 struct iv_class
**reg_biv_class
;
3172 /* The head of a list which links together (via the next field)
3173 every iv class for the current loop. */
3175 struct iv_class
*loop_iv_list
;
3177 /* Communication with routines called via `note_stores'. */
3179 static rtx note_insn
;
3181 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3183 static rtx addr_placeholder
;
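/* For illustration, in source terms (an example, not taken from the code
   below): in
       for (i = 0; i < n; i++)
	 a[i] = 0;
   the counter I, updated only by "i++", is a biv, while the address
   computation "&a[i]", a linear function of I (4*i plus a constant, on a
   32-bit target), is a giv that strength reduction can maintain with a
   simple addition per iteration instead of recomputing the multiply.  */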
/* ??? Unfinished optimizations, and possible future optimizations,
   for the strength reduction code.  */

/* ??? There is one more optimization you might be interested in doing: to
   allocate pseudo registers for frequently-accessed memory locations.
   If the same memory location is referenced each time around, it might
   be possible to copy it into a register before and out after.
   This is especially useful when the memory location is a variable which
   is in a stack slot because somewhere its address is taken.  If the
   loop doesn't contain a function call and the variable isn't volatile,
   it is safe to keep the value in a register for the duration of the
   loop.  One tricky thing is that the copying of the value back from the
   register has to be done on all exits from the loop.  You need to check that
   all the exits from the loop go to the same place.  */

/* ??? The interaction of biv elimination, and recognition of 'constant'
   bivs, may cause problems.  */

/* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
   performance problems.

   Perhaps don't eliminate things that can be combined with an addressing
   mode.  Find all givs that have the same biv, mult_val, and add_val;
   then for each giv, check to see if its only use dies in a following
   memory address.  If so, generate a new memory address and check to see
   if it is valid.  If it is valid, then store the modified memory address,
   otherwise, mark the giv as not done so that it will get its own iv.  */

/* ??? Could try to optimize branches when it is known that a biv is always
   positive.  */

/* ??? When replacing a biv in a compare insn, we should replace with the
   closest giv so that an optimized branch can still be recognized by the
   combiner, e.g. the VAX acb insn.  */

/* ??? Many of the checks involving uid_luid could be simplified if regscan
   was rerun in loop_optimize whenever a register was added or moved.
   Also, some of the optimizations could be a little less conservative.  */
/* Perform strength reduction and induction variable elimination.  */

/* Pseudo registers created during this function will be beyond the last
   valid index in several tables including n_times_set and regno_last_uid.
   This does not cause a problem here, because the added registers cannot be
   givs outside of their loop, and hence will never be reconsidered.
   But scan_loop must check regnos to make sure they are in bounds.  */

static void
strength_reduce (scan_start, end, loop_top, insn_count,
		 loop_start, loop_end)
     rtx scan_start;
     rtx end;
     rtx loop_top;
     int insn_count;
     rtx loop_start;
     rtx loop_end;
{
  rtx p;
  rtx set;
  rtx inc_val;
  rtx mult_val;
  rtx dest_reg;
  /* This is 1 if current insn is not executed at least once for every loop
     iteration.  */
  int not_every_iteration = 0;
  /* This is 1 if current insn may be executed more than once for every
     loop iteration.  */
  int maybe_multiple = 0;
  /* Temporary list pointers for traversing loop_iv_list.  */
  struct iv_class *bl, **backbl;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  /* ??? could set this to last value of threshold in move_movables */
  int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
  /* Map of pseudo-register replacements.  */
  rtx *reg_map;
  int call_seen;
  rtx test;
  rtx end_insert_before;
  int loop_depth = 0;

  reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
					 * sizeof (enum iv_mode *));
  bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode *));
  reg_iv_info = (struct induction **)
    alloca (max_reg_before_loop * sizeof (struct induction *));
  bzero ((char *) reg_iv_info, (max_reg_before_loop
				* sizeof (struct induction *)));
  reg_biv_class = (struct iv_class **)
    alloca (max_reg_before_loop * sizeof (struct iv_class *));
  bzero ((char *) reg_biv_class, (max_reg_before_loop
				  * sizeof (struct iv_class *)));

  loop_iv_list = 0;
  addr_placeholder = gen_reg_rtx (Pmode);

  /* Save insn immediately after the loop_end.  Insns inserted after loop_end
     must be put before this insn, so that they will appear in the right
     order (i.e. loop order).

     If loop_end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set end_insert_before to the
     NOTE_INSN_DELETED.  */
  if (NEXT_INSN (loop_end) != 0)
    end_insert_before = NEXT_INSN (loop_end);
  else
    end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
  /* Scan through loop to find all possible bivs.  */

  p = scan_start;
  while (1)
    {
      p = NEXT_INSN (p);
      /* At end of a straight-in loop, we are done.
	 At end of a loop entered at the bottom, scan the top.  */
      if (p == scan_start)
	break;
      if (p == end)
	{
	  if (loop_top != 0)
	    p = loop_top;
	  else
	    break;
	  if (p == scan_start)
	    break;
	}

      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG)
	{
	  dest_reg = SET_DEST (set);
	  if (REGNO (dest_reg) < max_reg_before_loop
	      && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
	      && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
	    {
	      if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
				       dest_reg, p, &inc_val, &mult_val))
		{
		  /* It is a possible basic induction variable.
		     Create and initialize an induction structure for it.  */

		  struct induction *v
		    = (struct induction *) alloca (sizeof (struct induction));

		  record_biv (v, p, dest_reg, inc_val, mult_val,
			      not_every_iteration, maybe_multiple);
		  reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
		}
	      else if (REGNO (dest_reg) < max_reg_before_loop)
		reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
	    }
	}
      /* Past CODE_LABEL, we get to insns that may be executed multiple
	 times.  The only way we can be sure that they can't is if every
	 jump insn between here and the end of the loop either
	 returns, exits the loop, is a forward jump, or is a jump
	 to the loop start.  */

      if (GET_CODE (p) == CODE_LABEL)
	{
	  rtx insn = p;

	  maybe_multiple = 0;

	  while (1)
	    {
	      insn = NEXT_INSN (insn);
	      if (insn == scan_start)
		break;
	      if (insn == end)
		{
		  if (loop_top != 0)
		    insn = loop_top;
		  else
		    break;
		  if (insn == scan_start)
		    break;
		}

	      if (GET_CODE (insn) == JUMP_INSN
		  && GET_CODE (PATTERN (insn)) != RETURN
		  && (! condjump_p (insn)
		      || (JUMP_LABEL (insn) != 0
			  && JUMP_LABEL (insn) != scan_start
			  && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
			      || INSN_UID (insn) >= max_uid_for_loop
			      || (INSN_LUID (JUMP_LABEL (insn))
				  < INSN_LUID (insn))))))
		{
		  maybe_multiple = 1;
		  break;
		}
	    }
	}
      /* Past a jump, we get to insns for which we can't count
	 on whether they will be executed during each iteration.  */
      /* This code appears twice in strength_reduce.  There is also similar
	 code in scan_loop.  */
      if (GET_CODE (p) == JUMP_INSN
	  /* If we enter the loop in the middle, and scan around to the
	     beginning, don't set not_every_iteration for that.
	     This can be any kind of jump, since we want to know if insns
	     will be executed if the loop is executed.  */
	  && ! (JUMP_LABEL (p) == loop_top
		&& ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
		    || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
	{
	  rtx label = 0;

	  /* If this is a jump outside the loop, then it also doesn't
	     matter.  Check to see if the target of this branch is on the
	     loop_number_exits_labels list.  */

	  for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
	       label;
	       label = LABEL_NEXTREF (label))
	    if (XEXP (label, 0) == JUMP_LABEL (p))
	      break;

	  if (! label)
	    not_every_iteration = 1;
	}

      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed each iteration: logically, the loop begins here
	     even though the exit code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    not_every_iteration = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}

      /* Unlike in the code motion pass where MAYBE_NEVER indicates that
	 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
	 or not an insn is known to be executed each iteration of the
	 loop, whether or not any iterations are known to occur.

	 Therefore, if we have just passed a label and have no more labels
	 between here and the test insn of the loop, we know these insns
	 will be executed each iteration.  */

      if (not_every_iteration && GET_CODE (p) == CODE_LABEL
	  && no_labels_between_p (p, loop_end))
	not_every_iteration = 0;
    }
  /* Scan loop_iv_list to remove all regs that proved not to be bivs.
     Make a sanity check against n_times_set.  */
  for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
    {
      if (reg_iv_type[bl->regno] != BASIC_INDUCT
	  /* Above happens if register modified by subreg, etc.  */
	  /* Make sure it is not recognized as a basic induction var: */
	  || n_times_set[bl->regno] != bl->biv_count
	  /* If never incremented, it is invariant that we decided not to
	     move.  So leave it alone.  */
	  || ! bl->incremented)
	{
	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
		     bl->regno,
		     (reg_iv_type[bl->regno] != BASIC_INDUCT
		      ? "not induction variable"
		      : (! bl->incremented ? "never incremented"
			 : "count error")));

	  reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
	  *backbl = bl->next;
	}
      else
	{
	  backbl = &bl->next;

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
	}
    }

  /* Exit if there are no bivs.  */
  if (! loop_iv_list)
    {
      /* Can still unroll the loop anyways, but indicate that there is no
	 strength reduction info available.  */
      if (flag_unroll_loops)
	unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);

      return;
    }
  /* Find initial value for each biv by searching backwards from loop_start,
     halting at first label.  Also record any test condition.  */

  call_seen = 0;
  for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
    {
      note_insn = p;

      if (GET_CODE (p) == CALL_INSN)
	call_seen = 1;

      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	note_stores (PATTERN (p), record_initial);

      /* Record any test of a biv that branches around the loop if no store
	 between it and the start of loop.  We only care about tests with
	 constants and registers and only certain of those.  */
      if (GET_CODE (p) == JUMP_INSN
	  && JUMP_LABEL (p) != 0
	  && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
	  && (test = get_condition_for_loop (p)) != 0
	  && GET_CODE (XEXP (test, 0)) == REG
	  && REGNO (XEXP (test, 0)) < max_reg_before_loop
	  && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
	  && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
	  && bl->init_insn == 0)
	{
	  /* If an NE test, we have an initial value!  */
	  if (GET_CODE (test) == NE)
	    {
	      bl->init_insn = p;
	      bl->init_set = gen_rtx (SET, VOIDmode,
				      XEXP (test, 0), XEXP (test, 1));
	    }
	  else
	    bl->initial_test = test;
	}
    }
  /* Look at each biv and see if we can say anything better about its
     initial value from any initializing insns set up above.  (This is done
     in two passes to avoid missing SETs in a PARALLEL.)  */
  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      rtx src;

      if (! bl->init_insn)
	continue;

      src = SET_SRC (bl->init_set);

      if (loop_dump_stream)
	fprintf (loop_dump_stream,
		 "Biv %d initialized at insn %d: initial value ",
		 bl->regno, INSN_UID (bl->init_insn));

      if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
	   || GET_MODE (src) == VOIDmode)
	  && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
	{
	  bl->initial_value = src;

	  if (loop_dump_stream)
	    {
	      if (GET_CODE (src) == CONST_INT)
		fprintf (loop_dump_stream, "%d\n", INTVAL (src));
	      else
		{
		  print_rtl (loop_dump_stream, src);
		  fprintf (loop_dump_stream, "\n");
		}
	    }
	}
      else
	{
	  /* Biv initial value is not simple move,
	     so let it keep initial value of "itself".  */

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "is complex\n");
	}
    }
  /* Search the loop for general induction variables.  */

  /* A register is a giv if: it is only set once, it is a function of a
     biv and a constant (or invariant), and it is not a biv.  */

  not_every_iteration = 0;
  loop_depth = 0;
  p = scan_start;
  while (1)
    {
      p = NEXT_INSN (p);
      /* At end of a straight-in loop, we are done.
	 At end of a loop entered at the bottom, scan the top.  */
      if (p == scan_start)
	break;
      if (p == end)
	{
	  if (loop_top != 0)
	    p = loop_top;
	  else
	    break;
	  if (p == scan_start)
	    break;
	}

      /* Look for a general induction variable in a register.  */
      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && ! may_not_optimize[REGNO (SET_DEST (set))])
	{
	  rtx src_reg;
	  rtx add_val;
	  rtx mult_val;
	  int benefit;
	  rtx regnote = 0;

	  dest_reg = SET_DEST (set);
	  if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
	    continue;

	  if (/* SET_SRC is a giv.  */
	      ((benefit = general_induction_var (SET_SRC (set),
						 &src_reg, &add_val,
						 &mult_val))
	       /* Equivalent expression is a giv.  */
	       || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
		   && (benefit = general_induction_var (XEXP (regnote, 0),
							&src_reg,
							&add_val, &mult_val))))
	      /* Don't try to handle any regs made by loop optimization.
		 We have nothing on them in regno_first_uid, etc.  */
	      && REGNO (dest_reg) < max_reg_before_loop
	      /* Don't recognize a BASIC_INDUCT_VAR here.  */
	      && dest_reg != src_reg
	      /* This must be the only place where the register is set.  */
	      && (n_times_set[REGNO (dest_reg)] == 1
		  /* or all sets must be consecutive and make a giv.  */
		  || (benefit = consec_sets_giv (benefit, p,
						 src_reg, dest_reg,
						 &add_val, &mult_val))))
	    {
	      int count;
	      struct induction *v
		= (struct induction *) alloca (sizeof (struct induction));
	      rtx temp;

	      /* If this is a library call, increase benefit.  */
	      if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		benefit += libcall_benefit (p);

	      /* Skip the consecutive insns, if there are any.  */
	      for (count = n_times_set[REGNO (dest_reg)] - 1;
		   count > 0; count--)
		{
		  /* If first insn of libcall sequence, skip to end.
		     Do this at start of loop, since INSN is guaranteed to
		     be an insn here.  */
		  if (GET_CODE (p) != NOTE
		      && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
		    p = XEXP (temp, 0);

		  do p = NEXT_INSN (p);
		  while (GET_CODE (p) == NOTE);
		}

	      record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
			  DEST_REG, not_every_iteration, NULL_PTR, loop_start,
			  loop_end);
	    }
	}

#ifndef DONT_REDUCE_ADDR
      /* Look for givs which are memory addresses.  */
      /* This resulted in worse code on a VAX 8600.  I wonder if it
	 still does.  */
      if (GET_CODE (p) == INSN)
	find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
		       loop_end);
#endif

      /* Update the status of whether giv can derive other givs.  This can
	 change when we pass a label or an insn that updates a biv.  */
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CODE_LABEL)
	update_giv_derive (p);
      /* Past a jump, we get to insns for which we can't count
	 on whether they will be executed during each iteration.  */
      /* This code appears twice in strength_reduce.  There is also similar
	 code in scan_loop.  */
      if (GET_CODE (p) == JUMP_INSN
	  /* If we enter the loop in the middle, and scan around to the
	     beginning, don't set not_every_iteration for that.
	     This can be any kind of jump, since we want to know if insns
	     will be executed if the loop is executed.  */
	  && ! (JUMP_LABEL (p) == loop_top
		&& ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
		    || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
	{
	  rtx label = 0;

	  /* If this is a jump outside the loop, then it also doesn't
	     matter.  Check to see if the target of this branch is on the
	     loop_number_exits_labels list.  */

	  for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
	       label;
	       label = LABEL_NEXTREF (label))
	    if (XEXP (label, 0) == JUMP_LABEL (p))
	      break;

	  if (! label)
	    not_every_iteration = 1;
	}

      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed each iteration: logically, the loop begins here
	     even though the exit code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    not_every_iteration = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}

      /* Unlike in the code motion pass where MAYBE_NEVER indicates that
	 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
	 or not an insn is known to be executed each iteration of the
	 loop, whether or not any iterations are known to occur.

	 Therefore, if we have just passed a label and have no more labels
	 between here and the test insn of the loop, we know these insns
	 will be executed each iteration.  */

      if (not_every_iteration && GET_CODE (p) == CODE_LABEL
	  && no_labels_between_p (p, loop_end))
	not_every_iteration = 0;
    }
  /* Try to calculate and save the number of loop iterations.  This is
     set to zero if the actual number can not be calculated.  This must
     be called after all giv's have been identified, since otherwise it may
     fail if the iteration variable is a giv.  */

  loop_n_iterations = loop_iterations (loop_start, loop_end);

  /* Now for each giv for which we still don't know whether or not it is
     replaceable, check to see if it is replaceable because its final value
     can be calculated.  This must be done after loop_iterations is called,
     so that final_giv_value will work correctly.  */

  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      struct induction *v;

      for (v = bl->giv; v; v = v->next_iv)
	if (! v->replaceable && ! v->not_replaceable)
	  check_final_value (v, loop_start, loop_end);
    }

  /* Try to prove that the loop counter variable (if any) is always
     nonnegative; if so, record that fact with a REG_NONNEG note
     so that "decrement and branch until zero" insn can be used.  */
  check_dbra_loop (loop_end, insn_count, loop_start);

  /* Create reg_map to hold substitutions for replaceable giv regs.  */
  reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
  bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
  /* Examine each iv class for feasibility of strength reduction/induction
     variable elimination.  */

  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      struct induction *v;
      int benefit;
      int all_reduced;
      rtx final_value = 0;

      /* Test whether it will be possible to eliminate this biv
	 provided all givs are reduced.  This is possible if either
	 the reg is not used outside the loop, or we can compute
	 what its final value will be.

	 For architectures with a decrement_and_branch_until_zero insn,
	 don't do this if we put a REG_NONNEG note on the endtest for
	 this biv.  */

      /* Compare against bl->init_insn rather than loop_start.
	 We aren't concerned with any uses of the biv between
	 init_insn and loop_start since these won't be affected
	 by the value of the biv elsewhere in the function, so
	 long as init_insn doesn't use the biv itself.
	 March 14, 1989 -- self@bayes.arc.nasa.gov */

      if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
	   && bl->init_insn
	   && INSN_UID (bl->init_insn) < max_uid_for_loop
	   && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
#ifdef HAVE_decrement_and_branch_until_zero
	   && ! bl->nonneg
#endif
	   && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
	  || ((final_value = final_biv_value (bl, loop_start, loop_end))
#ifdef HAVE_decrement_and_branch_until_zero
	      && ! bl->nonneg
#endif
	      ))
	bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
					      threshold, insn_count);
      else
	{
	  if (loop_dump_stream)
	    {
	      fprintf (loop_dump_stream,
		       "Cannot eliminate biv %d.\n",
		       bl->regno);
	      fprintf (loop_dump_stream,
		       "First use: insn %d, last use: insn %d.\n",
		       REGNO_FIRST_UID (bl->regno),
		       REGNO_LAST_UID (bl->regno));
	    }
	}
      /* Combine all giv's for this iv_class.  */
      combine_givs (bl);

      /* This will be true at the end, if all givs which depend on this
	 biv have been strength reduced.
	 We can't (currently) eliminate the biv unless this is so.  */
      all_reduced = 1;

      /* Check each giv in this class to see if we will benefit by reducing
	 it.  Skip giv's combined with others.  */
      for (v = bl->giv; v; v = v->next_iv)
	{
	  struct induction *tv;

	  if (v->ignore || v->same)
	    continue;

	  benefit = v->benefit;

	  /* Reduce benefit if not replaceable, since we will insert
	     a move-insn to replace the insn that calculates this giv.
	     Don't do this unless the giv is a user variable, since it
	     will often be marked non-replaceable because of the duplication
	     of the exit code outside the loop.  In such a case, the copies
	     we insert are dead and will be deleted.  So they don't have
	     a cost.  Similar situations exist.  */
	  /* ??? The new final_[bg]iv_value code does a much better job
	     of finding replaceable giv's, and hence this code may no longer
	     be necessary.  */
	  if (! v->replaceable && ! bl->eliminable
	      && REG_USERVAR_P (v->dest_reg))
	    benefit -= copy_cost;

	  /* Decrease the benefit to count the add-insns that we will
	     insert to increment the reduced reg for the giv.  */
	  benefit -= add_cost * bl->biv_count;

	  /* Decide whether to strength-reduce this giv or to leave the code
	     unchanged (recompute it from the biv each time it is used).
	     This decision can be made independently for each giv.  */

	  /* Attempt to guess whether autoincrement will handle some of the
	     new add insns; if so, increase BENEFIT (undo the subtraction of
	     add_cost that was done above).  */
	  if (v->giv_type == DEST_ADDR
	      && GET_CODE (v->mult_val) == CONST_INT)
	    {
#if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
	      if (INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
		benefit += add_cost * bl->biv_count;
#endif
#if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
	      if (-INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
		benefit += add_cost * bl->biv_count;
#endif
	    }

	  /* If an insn is not to be strength reduced, then set its ignore
	     flag, and clear all_reduced.  */

	  /* A giv that depends on a reversed biv must be reduced if it is
	     used after the loop exit, otherwise, it would have the wrong
	     value after the loop exit.  To make it simple, just reduce all
	     of such giv's whether or not we know they are used after the loop
	     exit.  */

	  if (v->lifetime * threshold * benefit < insn_count
	      && ! bl->reversed)
	    {
	      if (loop_dump_stream)
		fprintf (loop_dump_stream,
			 "giv of insn %d not worth while, %d vs %d.\n",
			 INSN_UID (v->insn),
			 v->lifetime * threshold * benefit, insn_count);
	      v->ignore = 1;
	      all_reduced = 0;
	    }
	  else
	    {
	      /* Check that we can increment the reduced giv without a
		 multiply insn.  If not, reject it.  */

	      for (tv = bl->biv; tv; tv = tv->next_iv)
		if (tv->mult_val == const1_rtx
		    && ! product_cheap_p (tv->add_val, v->mult_val))
		  {
		    if (loop_dump_stream)
		      fprintf (loop_dump_stream,
			       "giv of insn %d: would need a multiply.\n",
			       INSN_UID (v->insn));
		    v->ignore = 1;
		    all_reduced = 0;
		    break;
		  }
	    }
	}
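      /* A worked example with hypothetical numbers: a loop with no calls
	 and n_non_fixed_regs == 1 gives threshold == 2 * (3 + 1) == 8; a
	 giv with lifetime == 4 and benefit == 6 after the adjustments
	 above yields 4 * 8 * 6 == 192, so that giv is rejected as not
	 worth while only if the loop contains more than 192 insns.  */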
      /* Reduce each giv that we decided to reduce.  */

      for (v = bl->giv; v; v = v->next_iv)
	{
	  struct induction *tv;
	  if (! v->ignore && v->same == 0)
	    {
	      int auto_inc_opt = 0;

	      v->new_reg = gen_reg_rtx (v->mode);

#ifdef AUTO_INC_DEC
	      /* If the target has auto-increment addressing modes, and
		 this is an address giv, then try to put the increment
		 immediately after its use, so that flow can create an
		 auto-increment addressing mode.  */
	      if (v->giv_type == DEST_ADDR && bl->biv_count == 1
		  && bl->biv->always_executed && ! bl->biv->maybe_multiple
		  /* We don't handle reversed biv's because bl->biv->insn
		     does not have a valid INSN_LUID.  */
		  && ! bl->reversed
		  && v->always_executed && ! v->maybe_multiple)
		{
		  /* If other giv's have been combined with this one, then
		     this will work only if all uses of the other giv's occur
		     before this giv's insn.  This is difficult to check.

		     We simplify this by looking for the common case where
		     there is one DEST_REG giv, and this giv's insn is the
		     last use of the dest_reg of that DEST_REG giv.  If the
		     increment occurs after the address giv, then we can
		     perform the optimization.  (Otherwise, the increment
		     would have to go before other_giv, and we would not be
		     able to combine it with the address giv to get an
		     auto-inc address.)  */
		  if (v->combined_with)
		    {
		      struct induction *other_giv = 0;

		      for (tv = bl->giv; tv; tv = tv->next_iv)
			if (tv->same == v)
			  {
			    if (other_giv)
			      break;
			    else
			      other_giv = tv;
			  }
		      if (! tv && other_giv
			  && REGNO (other_giv->dest_reg) <= max_reg_before_loop
			  && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
			      == INSN_UID (v->insn))
			  && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
			auto_inc_opt = 1;
		    }
		  /* Check for case where increment is before the address
		     giv.  */
		  else if (INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn))
		    auto_inc_opt = -1;
		  else
		    auto_inc_opt = 1;

#ifdef HAVE_cc0
		  {
		    rtx prev;

		    /* We can't put an insn immediately after one setting
		       cc0, or immediately before one using cc0.  */
		    if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
			|| (auto_inc_opt == -1
			    && (prev = prev_nonnote_insn (v->insn)) != 0
			    && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
			    && sets_cc0_p (PATTERN (prev))))
		      auto_inc_opt = 0;
		  }
#endif

		  if (auto_inc_opt)
		    v->auto_inc_opt = 1;
		}
#endif

	      /* For each place where the biv is incremented, add an insn
		 to increment the new, reduced reg for the giv.  */
	      for (tv = bl->biv; tv; tv = tv->next_iv)
		{
		  rtx insert_before;

		  if (! auto_inc_opt)
		    insert_before = tv->insn;
		  else if (auto_inc_opt == 1)
		    insert_before = NEXT_INSN (v->insn);
		  else
		    insert_before = v->insn;

		  if (tv->mult_val == const1_rtx)
		    emit_iv_add_mult (tv->add_val, v->mult_val,
				      v->new_reg, v->new_reg, insert_before);
		  else /* tv->mult_val == const0_rtx */
		    /* A multiply is acceptable here
		       since this is presumed to be seldom executed.  */
		    emit_iv_add_mult (tv->add_val, v->mult_val,
				      v->add_val, v->new_reg, insert_before);
		}

	      /* Add code at loop start to initialize giv's reduced reg.  */

	      emit_iv_add_mult (bl->initial_value, v->mult_val,
				v->add_val, v->new_reg, loop_start);
	    }
	}
      /* Rescan all givs.  If a giv is the same as a giv not reduced, mark it
	 as not reduced.

	 For each giv register that can be reduced now: if replaceable,
	 substitute reduced reg wherever the old giv occurs;
	 else add new move insn "giv_reg = reduced_reg".

	 Also check for givs whose first use is their definition and whose
	 last use is the definition of another giv.  If so, it is likely
	 dead and should not be used to eliminate a biv.  */
      for (v = bl->giv; v; v = v->next_iv)
	{
	  if (v->same && v->same->ignore)
	    v->ignore = 1;

	  if (v->ignore)
	    continue;

	  if (v->giv_type == DEST_REG
	      && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
	    {
	      struct induction *v1;

	      for (v1 = bl->giv; v1; v1 = v1->next_iv)
		if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
		  v->maybe_dead = 1;
	    }

	  /* Update expression if this was combined, in case other giv was
	     replaced.  */
	  if (v->same)
	    v->new_reg = replace_rtx (v->new_reg,
				      v->same->dest_reg, v->same->new_reg);

	  if (v->giv_type == DEST_ADDR)
	    /* Store reduced reg as the address in the memref where we found
	       this giv.  */
	    validate_change (v->insn, v->location, v->new_reg, 0);
	  else if (v->replaceable)
	    {
	      reg_map[REGNO (v->dest_reg)] = v->new_reg;

#if 0
	      /* I can no longer duplicate the original problem.  Perhaps
		 this is unnecessary now?  */

	      /* Replaceable; it isn't strictly necessary to delete the old
		 insn and emit a new one, because v->dest_reg is now dead.

		 However, especially when unrolling loops, the special
		 handling for (set REG0 REG1) in the second cse pass may
		 make v->dest_reg live again.  To avoid this problem, emit
		 an insn to set the original giv reg from the reduced giv.
		 We can not delete the original insn, since it may be part
		 of a LIBCALL, and the code in flow that eliminates dead
		 libcalls will fail if it is deleted.  */
	      emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
			       v->insn);
#endif
	    }
	  else
	    {
	      /* Not replaceable; emit an insn to set the original giv reg from
		 the reduced giv, same as above.  */
	      emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
			       v->insn);
	    }

	  /* When a loop is reversed, givs which depend on the reversed
	     biv, and which are live outside the loop, must be set to their
	     correct final value.  This insn is only needed if the giv is
	     not replaceable.  The correct final value is the same as the
	     value that the giv starts the reversed loop with.  */
	  if (bl->reversed && ! v->replaceable)
	    emit_iv_add_mult (bl->initial_value, v->mult_val,
			      v->add_val, v->dest_reg, end_insert_before);
	  else if (v->final_value)
	    {
	      rtx insert_before;

	      /* If the loop has multiple exits, emit the insn before the
		 loop to ensure that it will always be executed no matter
		 how the loop exits.  Otherwise, emit the insn after the loop,
		 since this is slightly more efficient.  */
	      if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
		insert_before = loop_start;
	      else
		insert_before = end_insert_before;
	      emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
				insert_before);

#if 0
	      /* If the insn to set the final value of the giv was emitted
		 before the loop, then we must delete the insn inside the loop
		 that sets it.  If this is a LIBCALL, then we must delete
		 every insn in the libcall.  Note, however, that
		 final_giv_value will only succeed when there are multiple
		 exits if the giv is dead at each exit, hence it does not
		 matter that the original insn remains because it is dead
		 anyways.  */
	      /* Delete the insn inside the loop that sets the giv since
		 the giv is now set before (or after) the loop.  */
	      delete_insn (v->insn);
#endif
	    }

	  if (loop_dump_stream)
	    {
	      fprintf (loop_dump_stream, "giv at %d reduced to ",
		       INSN_UID (v->insn));
	      print_rtl (loop_dump_stream, v->new_reg);
	      fprintf (loop_dump_stream, "\n");
	    }
	}
      /* All the givs based on the biv bl have been reduced if they
	 merit it.  */

      /* For each giv not marked as maybe dead that has been combined with a
	 second giv, clear any "maybe dead" mark on that second giv.
	 v->new_reg will either be or refer to the register of the giv it
	 combined with.

	 Doing this clearing avoids problems in biv elimination where a
	 giv's new_reg is a complex value that can't be put in the insn but
	 the giv combined with (with a reg as new_reg) is marked maybe_dead.
	 Since the register will be used in either case, we'd prefer it be
	 used from the simpler giv.  */
      for (v = bl->giv; v; v = v->next_iv)
	if (! v->maybe_dead && v->same)
	  v->same->maybe_dead = 0;
      /* Try to eliminate the biv, if it is a candidate.
	 This won't work if ! all_reduced,
	 since the givs we planned to use might not have been reduced.

	 We have to be careful that we didn't initially think we could eliminate
	 this biv because of a giv that we now think may be dead and shouldn't
	 be used as a biv replacement.

	 Also, there is the possibility that we may have a giv that looks
	 like it can be used to eliminate a biv, but the resulting insn
	 isn't valid.  This can happen, for example, on the 88k, where a
	 JUMP_INSN can compare a register only with zero.  Attempts to
	 replace it with a compare with a constant will fail.

	 Note that in cases where this call fails, we may have replaced some
	 of the occurrences of the biv with a giv, but no harm was done in
	 doing so in the rare cases where it can occur.  */
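
      /* Schematically (an illustrative source-level view, not code from
	 this pass): if `i' is a biv used only in the exit test and
	 `p = base + 4*i' is a reduced giv, eliminating the biv rewrites

		for (i = 0; i < n; i++)        for (p = base; p < base + 4*n; )
		  *p++ = 0;              =>      *p++ = 0;

	 so the compare is performed against the giv and `i' disappears.  */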
      if (all_reduced == 1 && bl->eliminable
	  && maybe_eliminate_biv (bl, loop_start, end, 1,
				  threshold, insn_count))
	{
	  /* ?? If we created a new test to bypass the loop entirely,
	     or otherwise drop straight in, based on this test, then
	     we might want to rewrite it also.  This way some later
	     pass has more hope of removing the initialization of this
	     biv entirely.  */

	  /* If final_value != 0, then the biv may be used after loop end
	     and we must emit an insn to set it just in case.

	     Reversed bivs already have an insn after the loop setting their
	     value, so we don't need another one.  We can't calculate the
	     proper final value for such a biv here anyway.  */
	  if (final_value != 0 && ! bl->reversed)
	    {
	      rtx insert_before;

	      /* If the loop has multiple exits, emit the insn before the
		 loop to ensure that it will always be executed no matter
		 how the loop exits.  Otherwise, emit the insn after the
		 loop, since this is slightly more efficient.  */
	      if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
		insert_before = loop_start;
	      else
		insert_before = end_insert_before;

	      emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
				insert_before);
	    }
#if 0
	  /* Delete all of the instructions inside the loop which set
	     the biv, as they are all dead.  It is safe to delete them,
	     because an insn setting a biv will never be part of a libcall.  */
	  /* However, deleting them will invalidate the regno_last_uid info,
	     so keeping them around is more convenient.  Final_biv_value
	     will only succeed when there are multiple exits if the biv
	     is dead at each exit, hence it does not matter that the original
	     insn remains, because it is dead anyway.  */
	  for (v = bl->biv; v; v = v->next_iv)
	    delete_insn (v->insn);
#endif

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
		     bl->regno);
	}
    }
  /* Go through all the instructions in the loop, making all the
     register substitutions scheduled in REG_MAP.  */

  for (p = loop_start; p != end; p = NEXT_INSN (p))
    if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	|| GET_CODE (p) == CALL_INSN)
      {
	replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
	replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
	INSN_CODE (p) = -1;
      }

  /* Unroll loops from within strength reduction so that we can use the
     induction variable information that strength_reduce has already
     collected.  */

  if (flag_unroll_loops)
    unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\n");
}
/* Return 1 if X is a valid source for an initial value (or as value being
   compared against in an initial test).

   X must be either a register or constant and must not be clobbered between
   the current insn and the start of the loop.

   INSN is the insn containing X.  */

static int
valid_initial_value_p (x, insn, call_seen, loop_start)
     rtx x;
     rtx insn;
     int call_seen;
     rtx loop_start;
{
  if (CONSTANT_P (x))
    return 1;

  /* Only consider pseudos we know about initialized in insns whose luids
     we know.  */
  if (GET_CODE (x) != REG
      || REGNO (x) >= max_reg_before_loop)
    return 0;

  /* Don't use call-clobbered registers across a call which clobbers it.  On
     some machines, don't use any hard registers at all.  */
  if (REGNO (x) < FIRST_PSEUDO_REGISTER
      && (
#ifdef SMALL_REGISTER_CLASSES
	  SMALL_REGISTER_CLASSES
#else
	  0
#endif
	  || (call_used_regs[REGNO (x)] && call_seen)))
    return 0;

  /* Don't use registers that have been clobbered before the start of the
     loop.  */
  if (reg_set_between_p (x, insn, loop_start))
    return 0;

  return 1;
}
/* Scan X for memory refs and check each memory address
   as a possible giv.  INSN is the insn whose pattern X comes from.
   NOT_EVERY_ITERATION is 1 if the insn might not be executed during
   every loop iteration.  */

static void
find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
     rtx x;
     rtx insn;
     int not_every_iteration;
     rtx loop_start, loop_end;
{
  register int i, j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
    case CONST_INT:
    case CONST:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case LABEL_REF:
    case PC:
    case CC0:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case USE:
    case CLOBBER:
      return;

    case MEM:
      {
	rtx src_reg;
	rtx add_val;
	rtx mult_val;
	int benefit;

	benefit = general_induction_var (XEXP (x, 0),
					 &src_reg, &add_val, &mult_val);

	/* Don't make a DEST_ADDR giv with mult_val == 1 && add_val == 0.
	   Such a giv isn't useful.  */
	if (benefit > 0 && (mult_val != const1_rtx || add_val != const0_rtx))
	  {
	    /* Found one; record it.  */
	    struct induction *v
	      = (struct induction *) oballoc (sizeof (struct induction));

	    record_giv (v, insn, src_reg, addr_placeholder, mult_val,
			add_val, benefit, DEST_ADDR, not_every_iteration,
			&XEXP (x, 0), loop_start, loop_end);

	    v->mem_mode = GET_MODE (x);
	  }
	return;
      }
    }

  /* Recursively scan the subexpressions for other mem refs.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
		     loop_end);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
		       loop_start, loop_end);
}
/* Fill in the data about one biv update.
   V is the `struct induction' in which we record the biv.  (It is
   allocated by the caller, with alloca.)
   INSN is the insn that sets it.
   DEST_REG is the biv's reg.

   MULT_VAL is const1_rtx if the biv is being incremented here, in which case
   INC_VAL is the increment.  Otherwise, MULT_VAL is const0_rtx and the biv is
   being set to INC_VAL.

   NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
   executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
   can be executed more than once per iteration.  If MAYBE_MULTIPLE
   and NOT_EVERY_ITERATION are both zero, we know that the biv update is
   executed exactly once per iteration.  */
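
/* As an illustration of the two cases (schematic only): for an insn
   performing `i = i + 4' we are called with MULT_VAL == const1_rtx and
   INC_VAL == (const_int 4), whereas for `i = 10' (an invariant
   assignment) MULT_VAL == const0_rtx and INC_VAL == (const_int 10).  */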
static void
record_biv (v, insn, dest_reg, inc_val, mult_val,
	    not_every_iteration, maybe_multiple)
     struct induction *v;
     rtx insn;
     rtx dest_reg;
     rtx inc_val;
     rtx mult_val;
     int not_every_iteration;
     int maybe_multiple;
{
  struct iv_class *bl;

  v->insn = insn;
  v->src_reg = dest_reg;
  v->dest_reg = dest_reg;
  v->mult_val = mult_val;
  v->add_val = inc_val;
  v->mode = GET_MODE (dest_reg);
  v->always_computable = ! not_every_iteration;
  v->always_executed = ! not_every_iteration;
  v->maybe_multiple = maybe_multiple;

  /* Add this to the reg's iv_class, creating a class
     if this is the first incrementation of the reg.  */

  bl = reg_biv_class[REGNO (dest_reg)];
  if (bl == 0)
    {
      /* Create and initialize new iv_class.  */

      bl = (struct iv_class *) oballoc (sizeof (struct iv_class));

      bl->regno = REGNO (dest_reg);
      bl->biv = 0;
      bl->giv = 0;
      bl->biv_count = 0;
      bl->giv_count = 0;

      /* Set initial value to the reg itself.  */
      bl->initial_value = dest_reg;
      /* We haven't seen the initializing insn yet */
      bl->init_insn = 0;
      bl->init_set = 0;
      bl->initial_test = 0;
      bl->incremented = 0;
      bl->eliminable = 0;
      bl->nonneg = 0;
      bl->reversed = 0;
      bl->total_benefit = 0;

      /* Add this class to loop_iv_list.  */
      bl->next = loop_iv_list;
      loop_iv_list = bl;

      /* Put it in the array of biv register classes.  */
      reg_biv_class[REGNO (dest_reg)] = bl;
    }

  /* Update IV_CLASS entry for this biv.  */
  v->next_iv = bl->biv;
  bl->biv = v;
  bl->biv_count++;
  if (mult_val == const1_rtx)
    bl->incremented = 1;

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream,
	       "Insn %d: possible biv, reg %d,",
	       INSN_UID (insn), REGNO (dest_reg));
      if (GET_CODE (inc_val) == CONST_INT)
	fprintf (loop_dump_stream, " const = %d\n",
		 INTVAL (inc_val));
      else
	{
	  fprintf (loop_dump_stream, " const = ");
	  print_rtl (loop_dump_stream, inc_val);
	  fprintf (loop_dump_stream, "\n");
	}
    }
}
/* Fill in the data about one giv.
   V is the `struct induction' in which we record the giv.  (It is
   allocated by the caller, with alloca.)
   INSN is the insn that sets it.
   BENEFIT estimates the savings from deleting this insn.
   TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
   into a register or is used as a memory address.

   SRC_REG is the biv reg which the giv is computed from.
   DEST_REG is the giv's reg (if the giv is stored in a reg).
   MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
   LOCATION points to the place where this giv's value appears in INSN.  */
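
/* An illustrative example (schematic, not from any particular front
   end): in a loop containing `j = 4 * i + 8; ... = a[i];' with biv `i',
   the insn setting `j' records a DEST_REG giv (DEST_REG is the pseudo
   for `j', MULT_VAL 4, ADD_VAL 8), while the address of `a[i]' records
   a DEST_ADDR giv whose LOCATION points at the address inside the MEM.  */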
static void
record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
	    type, not_every_iteration, location, loop_start, loop_end)
     struct induction *v;
     rtx insn;
     rtx src_reg;
     rtx dest_reg;
     rtx mult_val, add_val;
     int benefit;
     enum g_types type;
     int not_every_iteration;
     rtx *location;
     rtx loop_start, loop_end;
{
  struct induction *b;
  struct iv_class *bl;
  rtx set = single_set (insn);

  v->insn = insn;
  v->src_reg = src_reg;
  v->giv_type = type;
  v->dest_reg = dest_reg;
  v->mult_val = mult_val;
  v->add_val = add_val;
  v->benefit = benefit;
  v->location = location;
  v->cant_derive = 0;
  v->combined_with = 0;
  v->maybe_multiple = 0;
  v->maybe_dead = 0;
  v->derive_adjustment = 0;
  v->same = 0;
  v->ignore = 0;
  v->new_reg = 0;
  v->final_value = 0;
  v->same_insn = 0;
  v->auto_inc_opt = 0;

  /* The v->always_computable field is used in update_giv_derive, to
     determine whether a giv can be used to derive another giv.  For a
     DEST_REG giv, INSN computes a new value for the giv, so its value
     isn't computable if INSN isn't executed every iteration.
     However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
     it does not compute a new value.  Hence the value is always computable
     regardless of whether INSN is executed each iteration.  */

  if (type == DEST_ADDR)
    v->always_computable = 1;
  else
    v->always_computable = ! not_every_iteration;

  v->always_executed = ! not_every_iteration;

  if (type == DEST_ADDR)
    {
      v->mode = GET_MODE (*location);
      v->lifetime = 1;
      v->times_used = 1;
    }
  else /* type == DEST_REG */
    {
      v->mode = GET_MODE (SET_DEST (set));

      v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
		     - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);

      v->times_used = n_times_used[REGNO (dest_reg)];

      /* If the lifetime is zero, it means that this register is
	 really a dead store.  So mark this as a giv that can be
	 ignored.  This will not prevent the biv from being eliminated.  */
      if (v->lifetime == 0)
	v->ignore = 1;

      reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
      reg_iv_info[REGNO (dest_reg)] = v;
    }

  /* Add the giv to the class of givs computed from one biv.  */

  bl = reg_biv_class[REGNO (src_reg)];
  if (bl)
    {
      v->next_iv = bl->giv;
      bl->giv = v;
      /* Don't count DEST_ADDR.  This is supposed to count the number of
	 insns that calculate givs.  */
      if (type == DEST_REG)
	bl->giv_count++;
      bl->total_benefit += benefit;
    }
  else
    /* Fatal error, biv missing for this giv?  */
    abort ();

  if (type == DEST_ADDR)
    v->replaceable = 1;
  else
    {
      /* The giv can be replaced outright by the reduced register only if all
	 of the following conditions are true:
	 - the insn that sets the giv is always executed on any iteration
	   on which the giv is used at all
	   (there are two ways to deduce this:
	    either the insn is executed on every iteration,
	    or all uses follow that insn in the same basic block),
	 - the giv is not used outside the loop
	 - no assignments to the biv occur during the giv's lifetime.  */

      if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
	  /* Previous line always fails if INSN was moved by loop opt.  */
	  && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
	  && (! not_every_iteration
	      || last_use_this_basic_block (dest_reg, insn)))
	{
	  /* Now check that there are no assignments to the biv within the
	     giv's lifetime.  This requires two separate checks.  */

	  /* Check each biv update, and fail if any are between the first
	     and last use of the giv.

	     If this loop contains an inner loop that was unrolled, then
	     the insn modifying the biv may have been emitted by the loop
	     unrolling code, and hence does not have a valid luid.  Just
	     mark the biv as not replaceable in this case.  It is not very
	     useful as a biv, because it is used in two different loops.
	     It is very unlikely that we would be able to optimize the giv
	     using this biv anyway.  */

	  v->replaceable = 1;
	  for (b = bl->biv; b; b = b->next_iv)
	    {
	      if (INSN_UID (b->insn) >= max_uid_for_loop
		  || ((uid_luid[INSN_UID (b->insn)]
		       >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
		      && (uid_luid[INSN_UID (b->insn)]
			  <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
		{
		  v->replaceable = 0;
		  v->not_replaceable = 1;
		  break;
		}
	    }

	  /* If there are any backwards branches that go from after the
	     biv update to before it, then this giv is not replaceable.  */
	  if (v->replaceable)
	    for (b = bl->biv; b; b = b->next_iv)
	      if (back_branch_in_range_p (b->insn, loop_start, loop_end))
		{
		  v->replaceable = 0;
		  v->not_replaceable = 1;
		  break;
		}
	}
      else
	{
	  /* May still be replaceable, we don't have enough info here to
	     decide.  */
	  v->replaceable = 0;
	  v->not_replaceable = 0;
	}
    }

  if (loop_dump_stream)
    {
      if (type == DEST_REG)
	fprintf (loop_dump_stream, "Insn %d: giv reg %d",
		 INSN_UID (insn), REGNO (dest_reg));
      else
	fprintf (loop_dump_stream, "Insn %d: dest address",
		 INSN_UID (insn));

      fprintf (loop_dump_stream, " src reg %d benefit %d",
	       REGNO (src_reg), v->benefit);
      fprintf (loop_dump_stream, " used %d lifetime %d",
	       v->times_used, v->lifetime);

      if (v->replaceable)
	fprintf (loop_dump_stream, " replaceable");

      if (GET_CODE (mult_val) == CONST_INT)
	fprintf (loop_dump_stream, " mult %d",
		 INTVAL (mult_val));
      else
	{
	  fprintf (loop_dump_stream, " mult ");
	  print_rtl (loop_dump_stream, mult_val);
	}

      if (GET_CODE (add_val) == CONST_INT)
	fprintf (loop_dump_stream, " add %d",
		 INTVAL (add_val));
      else
	{
	  fprintf (loop_dump_stream, " add ");
	  print_rtl (loop_dump_stream, add_val);
	}
    }

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\n");
}
/* All this does is determine whether a giv can be made replaceable because
   its final value can be calculated.  This code can not be part of record_giv
   above, because final_giv_value requires that the number of loop iterations
   be known, and that can not be accurately calculated until after all givs
   have been identified.  */

static void
check_final_value (v, loop_start, loop_end)
     struct induction *v;
     rtx loop_start, loop_end;
{
  struct iv_class *bl;
  rtx final_value = 0;

  bl = reg_biv_class[REGNO (v->src_reg)];

  /* DEST_ADDR givs will never reach here, because they are always marked
     replaceable above in record_giv.  */

  /* The giv can be replaced outright by the reduced register only if all
     of the following conditions are true:
     - the insn that sets the giv is always executed on any iteration
       on which the giv is used at all
       (there are two ways to deduce this:
	either the insn is executed on every iteration,
	or all uses follow that insn in the same basic block),
     - its final value can be calculated (this condition is different
       than the one above in record_giv)
     - no assignments to the biv occur during the giv's lifetime.  */

#if 0
  /* This is only called now when replaceable is known to be false.  */
  /* Clear replaceable, so that it won't confuse final_giv_value.  */
  v->replaceable = 0;
#endif

  if ((final_value = final_giv_value (v, loop_start, loop_end))
      && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
    {
      int biv_increment_seen = 0;
      rtx p = v->insn;
      rtx last_giv_use;

      v->replaceable = 1;

      /* When trying to determine whether or not a biv increment occurs
	 during the lifetime of the giv, we can ignore uses of the variable
	 outside the loop because final_value is true.  Hence we can not
	 use regno_last_uid and regno_first_uid as above in record_giv.  */

      /* Search the loop to determine whether any assignments to the
	 biv occur during the giv's lifetime.  Start with the insn
	 that sets the giv, and search around the loop until we come
	 back to that insn again.

	 Also fail if there is a jump within the giv's lifetime that jumps
	 to somewhere outside the lifetime but still within the loop.  This
	 catches spaghetti code where the execution order is not linear, and
	 hence the above test fails.  Here we assume that the giv lifetime
	 does not extend from one iteration of the loop to the next, so as
	 to make the test easier.  Since the lifetime isn't known yet,
	 this requires two loops.  See also record_giv above.  */

      last_giv_use = v->insn;

      while (1)
	{
	  p = NEXT_INSN (p);
	  if (p == loop_end)
	    p = NEXT_INSN (loop_start);
	  if (p == v->insn)
	    break;

	  if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	      || GET_CODE (p) == CALL_INSN)
	    {
	      if (biv_increment_seen)
		{
		  if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
		    {
		      v->replaceable = 0;
		      v->not_replaceable = 1;
		      break;
		    }
		}
	      else if (GET_CODE (PATTERN (p)) == SET
		       && SET_DEST (PATTERN (p)) == v->src_reg)
		biv_increment_seen = 1;
	      else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
		last_giv_use = p;
	    }
	}

      /* Now that the lifetime of the giv is known, check for branches
	 from within the lifetime to outside the lifetime if it is still
	 replaceable.  */

      if (v->replaceable)
	{
	  p = v->insn;
	  while (1)
	    {
	      p = NEXT_INSN (p);
	      if (p == loop_end)
		p = NEXT_INSN (loop_start);
	      if (p == last_giv_use)
		break;

	      if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
		  && LABEL_NAME (JUMP_LABEL (p))
		  && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
		      || (INSN_UID (v->insn) >= max_uid_for_loop)
		      || (INSN_UID (last_giv_use) >= max_uid_for_loop)
		      || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
			  && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
		      || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
			  && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
		{
		  v->replaceable = 0;
		  v->not_replaceable = 1;

		  if (loop_dump_stream)
		    fprintf (loop_dump_stream,
			     "Found branch outside giv lifetime.\n");

		  break;
		}
	    }
	}

      /* If it is replaceable, then save the final value.  */
      if (v->replaceable)
	v->final_value = final_value;
    }

  if (loop_dump_stream && v->replaceable)
    fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
	     INSN_UID (v->insn), REGNO (v->dest_reg));
}
/* Update the status of whether a giv can derive other givs.

   We need to do something special if there is or may be an update to the biv
   between the time the giv is defined and the time it is used to derive
   another giv.

   In addition, a giv that is only conditionally set is not allowed to
   derive another giv once a label has been passed.

   The cases we look at are when a label or an update to a biv is passed.  */

static void
update_giv_derive (p)
     rtx p;
{
  struct iv_class *bl;
  struct induction *biv, *giv;
  rtx tem;
  int dummy;

  /* Search all IV classes, then all bivs, and finally all givs.

     There are three cases we are concerned with.  First we have the situation
     of a giv that is only updated conditionally.  In that case, it may not
     derive any givs after a label is passed.

     The second case is when a biv update occurs, or may occur, after the
     definition of a giv.  For certain biv updates (see below) that are
     known to occur between the giv definition and use, we can adjust the
     giv definition.  For others, or when the biv update is conditional,
     we must prevent the giv from deriving any other givs.  There are two
     sub-cases within this case.

     If this is a label, we are concerned with any biv update that is done
     conditionally, since it may be done after the giv is defined followed by
     a branch here (actually, we need to pass both a jump and a label, but
     this extra tracking doesn't seem worth it).

     If this is a jump, we are concerned about any biv update that may be
     executed multiple times.  We are actually only concerned about
     backward jumps, but it is probably not worth performing the test
     on the jump again here.

     If this is a biv update, we must adjust the giv status to show that a
     subsequent biv update was performed.  If this adjustment cannot be done,
     the giv cannot derive further givs.  */

  for (bl = loop_iv_list; bl; bl = bl->next)
    for (biv = bl->biv; biv; biv = biv->next_iv)
      if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
	  || biv->insn == p)
	{
	  for (giv = bl->giv; giv; giv = giv->next_iv)
	    {
	      /* If cant_derive is already true, there is no point in
		 checking all of these conditions again.  */
	      if (giv->cant_derive)
		continue;

	      /* If this giv is conditionally set and we have passed a label,
		 it cannot derive anything.  */
	      if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
		giv->cant_derive = 1;

	      /* Skip givs that have mult_val == 0, since
		 they are really invariants.  Also skip those that are
		 replaceable, since we know their lifetime doesn't contain
		 a biv update.  */
	      else if (giv->mult_val == const0_rtx || giv->replaceable)
		continue;

	      /* The only way we can allow this giv to derive another
		 is if this is a biv increment and we can form the product
		 of biv->add_val and giv->mult_val.  In this case, we will
		 be able to compute a compensation.  */
	      else if (biv->insn == p)
		{
		  tem = 0;

		  if (biv->mult_val == const1_rtx)
		    tem = simplify_giv_expr (gen_rtx (MULT, giv->mode,
						      biv->add_val,
						      giv->mult_val),
					     &dummy);

		  if (tem && giv->derive_adjustment)
		    tem = simplify_giv_expr (gen_rtx (PLUS, giv->mode, tem,
						      giv->derive_adjustment),
					     &dummy);
		  if (tem)
		    giv->derive_adjustment = tem;
		  else
		    giv->cant_derive = 1;
		}
	      else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
		       || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
		giv->cant_derive = 1;
	    }
	}
}
/* Check whether an insn is an increment legitimate for a basic induction var.
   X is the source of insn P, or a part of it.
   MODE is the mode in which X should be interpreted.

   DEST_REG is the putative biv, also the destination of the insn.
   We accept patterns of these forms:
     REG = REG + INVARIANT (includes REG = REG - CONSTANT)
     REG = INVARIANT + REG

   If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
   and store the additive term into *INC_VAL.

   If X is an assignment of an invariant into DEST_REG, we set
   *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.

   We also want to detect a BIV when it corresponds to a variable
   whose mode was promoted via PROMOTED_MODE.  In that case, an increment
   of the variable may be a PLUS that adds a SUBREG of that variable to
   an invariant and then sign- or zero-extends the result of the PLUS
   into the variable.

   Most GIVs in such cases will be in the promoted mode, since that is
   probably the natural computation mode (and almost certainly the mode
   used for addresses) on the machine.  So we view the pseudo-reg containing
   the variable as the BIV, as if it were simply incremented.

   Note that treating the entire pseudo as a BIV will result in making
   simple increments to any GIVs based on it.  However, if the variable
   overflows in its declared mode but not its promoted mode, the result will
   be incorrect.  This is acceptable if the variable is signed, since
   overflows in such cases are undefined, but not if it is unsigned, since
   those overflows are defined.  So we only check for SIGN_EXTEND and
   not ZERO_EXTEND.

   If we cannot find a biv, we return 0.  */
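
/* Some schematic examples of increments this accepts (assuming `i' is
   DEST_REG and `n' is loop-invariant):

	(set i (plus i n))
	(set i (plus n i))

   and, for a variable promoted via PROMOTED_MODE, something like

	(set i (sign_extend (plus (subreg:HI i) n)))

   in which case the whole promoted pseudo `i' is treated as the biv.  */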
static int
basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
     register rtx x;
     enum machine_mode mode;
     rtx dest_reg;
     rtx p;
     rtx *inc_val;
     rtx *mult_val;
{
  register enum rtx_code code;
  rtx arg;
  rtx insn, set = 0;

  code = GET_CODE (x);
  switch (code)
    {
    case PLUS:
      if (XEXP (x, 0) == dest_reg
	  || (GET_CODE (XEXP (x, 0)) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
	      && SUBREG_REG (XEXP (x, 0)) == dest_reg))
	arg = XEXP (x, 1);
      else if (XEXP (x, 1) == dest_reg
	       || (GET_CODE (XEXP (x, 1)) == SUBREG
		   && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
		   && SUBREG_REG (XEXP (x, 1)) == dest_reg))
	arg = XEXP (x, 0);
      else
	return 0;

      if (invariant_p (arg) != 1)
	return 0;

      *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
      *mult_val = const1_rtx;
      return 1;

    case SUBREG:
      /* If this is a SUBREG for a promoted variable, check the inner
	 value.  */
      if (SUBREG_PROMOTED_VAR_P (x))
	return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
				    dest_reg, p, inc_val, mult_val);
      return 0;

    case REG:
      /* If this register is assigned in the previous insn, look at its
	 source, but don't go outside the loop or past a label.  */

      for (insn = PREV_INSN (p);
	   (insn && GET_CODE (insn) == NOTE
	    && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
	   insn = PREV_INSN (insn))
	;

      if (insn)
	set = single_set (insn);

      if (set != 0
	  && (SET_DEST (set) == x
	      || (GET_CODE (SET_DEST (set)) == SUBREG
		  && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
		      <= UNITS_PER_WORD)
		  && SUBREG_REG (SET_DEST (set)) == x)))
	return basic_induction_var (SET_SRC (set),
				    (GET_MODE (SET_SRC (set)) == VOIDmode
				     ? GET_MODE (x)
				     : GET_MODE (SET_SRC (set))),
				    dest_reg, insn, inc_val, mult_val);
      /* ... fall through ...  */

      /* Can accept constant setting of biv only when inside innermost loop.
	 Otherwise, a biv of an inner loop may be incorrectly recognized
	 as a biv of the outer loop,
	 causing code to be moved INTO the inner loop.  */
    case MEM:
      if (invariant_p (x) != 1)
	return 0;
    case CONST_INT:
    case SYMBOL_REF:
    case CONST:
      if (loops_enclosed == 1)
	{
	  /* Possible bug here?  Perhaps we don't know the mode of X.  */
	  *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
	  *mult_val = const0_rtx;
	  return 1;
	}
      else
	return 0;

    case SIGN_EXTEND:
      return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
				  dest_reg, p, inc_val, mult_val);

    case ASHIFTRT:
      /* Similar, since this can be a sign extension.  */
      for (insn = PREV_INSN (p);
	   (insn && GET_CODE (insn) == NOTE
	    && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
	   insn = PREV_INSN (insn))
	;

      if (insn)
	set = single_set (insn);

      if (set && SET_DEST (set) == XEXP (x, 0)
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) >= 0
	  && GET_CODE (SET_SRC (set)) == ASHIFT
	  && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
	return basic_induction_var (XEXP (SET_SRC (set), 0),
				    GET_MODE (XEXP (x, 0)),
				    dest_reg, insn, inc_val, mult_val);
      return 0;

    default:
      return 0;
    }
}
/* A general induction variable (giv) is any quantity that is a linear
   function of a basic induction variable,
   i.e. giv = biv * mult_val + add_val.
   The coefficients can be any loop invariant quantity.
   A giv need not be computed directly from the biv;
   it can be computed by way of other givs.  */
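
/* A schematic example (illustrative only): if `i' is a biv, then in

	j = 3 * i + 10;
	k = j + 2;

   both `j' (= i * 3 + 10) and `k' (= i * 3 + 12) are givs; `k' is
   computed by way of the giv `j' rather than directly from `i'.  */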
/* Determine whether X computes a giv.
   If it does, return a nonzero value
     which is the benefit from eliminating the computation of X;
   set *SRC_REG to the register of the biv that it is computed from;
   set *ADD_VAL and *MULT_VAL to the coefficients,
     such that the value of X is biv * mult + add;  */

static int
general_induction_var (x, src_reg, add_val, mult_val)
     rtx x;
     rtx *src_reg;
     rtx *add_val;
     rtx *mult_val;
{
  rtx orig_x = x;
  int benefit = 0;
  char *storage;

  /* If this is an invariant, forget it, it isn't a giv.  */
  if (invariant_p (x) == 1)
    return 0;

  /* See if the expression could be a giv and get its form.
     Mark our place on the obstack in case we don't find a giv.  */
  storage = (char *) oballoc (0);
  x = simplify_giv_expr (x, &benefit);
  if (x == 0)
    {
      obfree (storage);
      return 0;
    }

  switch (GET_CODE (x))
    {
    case USE:
    case CONST_INT:
      /* Since this is now an invariant and wasn't before, it must be a giv
	 with MULT_VAL == 0.  It doesn't matter which BIV we associate this
	 with.  */
      *src_reg = loop_iv_list->biv->dest_reg;
      *mult_val = const0_rtx;
      *add_val = x;
      break;

    case REG:
      /* This is equivalent to a BIV.  */
      *src_reg = x;
      *mult_val = const1_rtx;
      *add_val = const0_rtx;
      break;

    case PLUS:
      /* Either (plus (biv) (invar)) or
	 (plus (mult (biv) (invar_1)) (invar_2)).  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  *src_reg = XEXP (XEXP (x, 0), 0);
	  *mult_val = XEXP (XEXP (x, 0), 1);
	}
      else
	{
	  *src_reg = XEXP (x, 0);
	  *mult_val = const1_rtx;
	}
      *add_val = XEXP (x, 1);
      break;

    case MULT:
      /* ADD_VAL is zero.  */
      *src_reg = XEXP (x, 0);
      *mult_val = XEXP (x, 1);
      *add_val = const0_rtx;
      break;

    default:
      abort ();
    }

  /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
     unless they are CONST_INT).  */
  if (GET_CODE (*add_val) == USE)
    *add_val = XEXP (*add_val, 0);
  if (GET_CODE (*mult_val) == USE)
    *mult_val = XEXP (*mult_val, 0);

  benefit += rtx_cost (orig_x, SET);

  /* Always return some benefit if this is a giv so it will be detected
     as such.  This allows elimination of bivs that might otherwise
     not be eliminated.  */
  return benefit == 0 ? 1 : benefit;
}
/* Given an expression, X, try to form it as a linear function of a biv.
   We will canonicalize it to be of the form
	(plus (mult (BIV) (invar_1))
	      (invar_2))
   with possible degeneracies.

   The invariant expressions must each be of a form that can be used as a
   machine operand.  We surround them with a USE rtx (a hack, but localized
   and certainly unambiguous!) if not a CONST_INT for simplicity in this
   routine; it is the caller's responsibility to strip them.

   If no such canonicalization is possible (i.e., two biv's are used or an
   expression that is neither invariant nor a biv or giv), this routine
   returns 0.

   For a non-zero return, the result will have a code of CONST_INT, USE,
   REG (for a BIV), PLUS, or MULT.  No other codes will occur.

   *BENEFIT will be incremented by the benefit of any sub-giv encountered.  */
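
/* For example (schematic): given a biv `i', the expression
   (mult (plus i 3) 4) is canonicalized by the rules below into
   (plus (mult i 4) 12) -- the multiply is distributed over the sum and
   the constant part is folded -- which matches the canonical form above
   with invar_1 == 4 and invar_2 == 12.  */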
static rtx
simplify_giv_expr (x, benefit)
     rtx x;
     int *benefit;
{
  enum machine_mode mode = GET_MODE (x);
  rtx arg0, arg1;
  rtx tem;

  /* If this is not an integer mode, or if we cannot do arithmetic in this
     mode, this can't be a giv.  */
  if (mode != VOIDmode
      && (GET_MODE_CLASS (mode) != MODE_INT
	  || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
    return 0;

  switch (GET_CODE (x))
    {
    case PLUS:
      arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
      arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
      if (arg0 == 0 || arg1 == 0)
	return 0;

      /* Put constant last, CONST_INT last if both constant.  */
      if ((GET_CODE (arg0) == USE
	   || GET_CODE (arg0) == CONST_INT)
	  && GET_CODE (arg1) != CONST_INT)
	tem = arg0, arg0 = arg1, arg1 = tem;

      /* Handle addition of zero, then addition of an invariant.  */
      if (arg1 == const0_rtx)
	return arg0;
      else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
	switch (GET_CODE (arg0))
	  {
	  case CONST_INT:
	  case USE:
	    /* Both invariant.  Only valid if sum is machine operand.
	       First strip off possible USE on first operand.  */
	    if (GET_CODE (arg0) == USE)
	      arg0 = XEXP (arg0, 0);

	    tem = 0;
	    if (CONSTANT_P (arg0) && GET_CODE (arg1) == CONST_INT)
	      {
		tem = plus_constant (arg0, INTVAL (arg1));
		if (GET_CODE (tem) != CONST_INT)
		  tem = gen_rtx (USE, mode, tem);
	      }

	    return tem;

	  case REG:
	  case MULT:
	    /* biv + invar or mult + invar.  Return sum.  */
	    return gen_rtx (PLUS, mode, arg0, arg1);

	  case PLUS:
	    /* (a + invar_1) + invar_2.  Associate.  */
	    return simplify_giv_expr (gen_rtx (PLUS, mode,
					       XEXP (arg0, 0),
					       gen_rtx (PLUS, mode,
							XEXP (arg0, 1), arg1)),
				      benefit);

	  default:
	    abort ();
	  }

      /* Each argument must be either REG, PLUS, or MULT.  Convert REG to
	 MULT to reduce cases.  */
      if (GET_CODE (arg0) == REG)
	arg0 = gen_rtx (MULT, mode, arg0, const1_rtx);
      if (GET_CODE (arg1) == REG)
	arg1 = gen_rtx (MULT, mode, arg1, const1_rtx);

      /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
	 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
	 Recurse to associate the second PLUS.  */
      if (GET_CODE (arg1) == MULT)
	tem = arg0, arg0 = arg1, arg1 = tem;

      if (GET_CODE (arg1) == PLUS)
	return simplify_giv_expr (gen_rtx (PLUS, mode,
					   gen_rtx (PLUS, mode,
						    arg0, XEXP (arg1, 0)),
					   XEXP (arg1, 1)),
				  benefit);

      /* Now must have MULT + MULT.  Distribute if same biv, else not giv.  */
      if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
	return 0;

      if (XEXP (arg0, 0) != XEXP (arg1, 0))
	return 0;

      return simplify_giv_expr (gen_rtx (MULT, mode,
					 XEXP (arg0, 0),
					 gen_rtx (PLUS, mode,
						  XEXP (arg0, 1),
						  XEXP (arg1, 1))),
				benefit);

    case MINUS:
      /* Handle "a - b" as "a + b * (-1)".  */
      return simplify_giv_expr (gen_rtx (PLUS, mode,
					 XEXP (x, 0),
					 gen_rtx (MULT, mode,
						  XEXP (x, 1), constm1_rtx)),
				benefit);

    case MULT:
      arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
      arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
      if (arg0 == 0 || arg1 == 0)
	return 0;

      /* Put constant last, CONST_INT last if both constant.  */
      if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
	  && GET_CODE (arg1) != CONST_INT)
	tem = arg0, arg0 = arg1, arg1 = tem;

      /* If second argument is not now constant, not giv.  */
      if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
	return 0;

      /* Handle multiply by 0 or 1.  */
      if (arg1 == const0_rtx)
	return const0_rtx;

      else if (arg1 == const1_rtx)
	return arg0;

      switch (GET_CODE (arg0))
	{
	case REG:
	  /* biv * invar.  Done.  */
	  return gen_rtx (MULT, mode, arg0, arg1);

	case CONST_INT:
	  /* Product of two constants.  */
	  return GEN_INT (INTVAL (arg0) * INTVAL (arg1));

	case USE:
	  /* invar * invar.  Not giv.  */
	  return 0;

	case MULT:
	  /* (a * invar_1) * invar_2.  Associate.  */
	  return simplify_giv_expr (gen_rtx (MULT, mode,
					     XEXP (arg0, 0),
					     gen_rtx (MULT, mode,
						      XEXP (arg0, 1), arg1)),
				    benefit);

	case PLUS:
	  /* (a + invar_1) * invar_2.  Distribute.  */
	  return simplify_giv_expr (gen_rtx (PLUS, mode,
					     gen_rtx (MULT, mode,
						      XEXP (arg0, 0), arg1),
					     gen_rtx (MULT, mode,
						      XEXP (arg0, 1), arg1)),
				    benefit);

	default:
	  abort ();
	}

    case ASHIFT:
      /* Shift by constant is multiply by power of two.  */
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	return 0;

      return simplify_giv_expr (gen_rtx (MULT, mode,
					 XEXP (x, 0),
					 GEN_INT ((HOST_WIDE_INT) 1
						  << INTVAL (XEXP (x, 1)))),
				benefit);

    case NEG:
      /* "-a" is "a * (-1)" */
      return simplify_giv_expr (gen_rtx (MULT, mode, XEXP (x, 0), constm1_rtx),
				benefit);

    case NOT:
      /* "~a" is "-a - 1".  Silly, but easy.  */
      return simplify_giv_expr (gen_rtx (MINUS, mode,
					 gen_rtx (NEG, mode, XEXP (x, 0)),
					 const1_rtx),
				benefit);

    case USE:
      /* Already in proper form for invariant.  */
      return x;

    case REG:
      /* If this is a new register, we can't deal with it.  */
      if (REGNO (x) >= max_reg_before_loop)
	return 0;

      /* Check for biv or giv.  */
      switch (reg_iv_type[REGNO (x)])
	{
	case BASIC_INDUCT:
	  return x;
	case GENERAL_INDUCT:
	  {
	    struct induction *v = reg_iv_info[REGNO (x)];

	    /* Form expression from giv and add benefit.  Ensure this giv
	       can derive another and subtract any needed adjustment if so.  */
	    *benefit += v->benefit;
	    if (v->cant_derive)
	      return 0;

	    tem = gen_rtx (PLUS, mode, gen_rtx (MULT, mode,
						v->src_reg, v->mult_val),
			   v->add_val);
	    if (v->derive_adjustment)
	      tem = gen_rtx (MINUS, mode, tem, v->derive_adjustment);
	    return simplify_giv_expr (tem, benefit);
	  }
	}

      /* Fall through to general case.  */
    default:
      /* If invariant, return as USE (unless CONST_INT).
	 Otherwise, not giv.  */
      if (GET_CODE (x) == USE)
	x = XEXP (x, 0);

      if (invariant_p (x) == 1)
	{
	  if (GET_CODE (x) == CONST_INT)
	    return x;
	  else
	    return gen_rtx (USE, mode, x);
	}
      else
	return 0;
    }
}
/* Help detect a giv that is calculated by several consecutive insns;
   for example,
	giv = biv * M
	giv = giv + A
   The caller has already identified the first insn P as having a giv as dest;
   we check that all other insns that set the same register follow
   immediately after P, that they alter nothing else,
   and that the result of the last is still a giv.

   The value is 0 if the reg set in P is not really a giv.
   Otherwise, the value is the amount gained by eliminating
   all the consecutive insns that compute the value.

   FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
   SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.

   The coefficients of the ultimate giv value are stored in
   *MULT_VAL and *ADD_VAL.  */

static int
consec_sets_giv (first_benefit, p, src_reg, dest_reg,
		 add_val, mult_val)
     int first_benefit;
     rtx p;
     rtx src_reg;
     rtx dest_reg;
     rtx *add_val;
     rtx *mult_val;
{
  int count;
  enum rtx_code code;
  int benefit;
  rtx temp;
  rtx set;

  /* Indicate that this is a giv so that we can update the value produced in
     each insn of the multi-insn sequence.

     This induction structure will be used only by the call to
     general_induction_var below, so we can allocate it on our stack.
     If this is a giv, our caller will replace the induct var entry with
     a new induction structure.  */
  struct induction *v
    = (struct induction *) alloca (sizeof (struct induction));
  v->src_reg = src_reg;
  v->mult_val = *mult_val;
  v->add_val = *add_val;
  v->benefit = first_benefit;
  v->cant_derive = 0;
  v->derive_adjustment = 0;

  reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
  reg_iv_info[REGNO (dest_reg)] = v;

  count = n_times_set[REGNO (dest_reg)] - 1;

  while (count > 0)
    {
      p = NEXT_INSN (p);
      code = GET_CODE (p);

      /* If libcall, skip to end of call sequence.  */
      if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
	p = XEXP (temp, 0);

      if (code == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && SET_DEST (set) == dest_reg
	  && ((benefit = general_induction_var (SET_SRC (set), &src_reg,
						add_val, mult_val))
	      /* Giv created by equivalent expression.  */
	      || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
		  && (benefit = general_induction_var (XEXP (temp, 0), &src_reg,
						       add_val, mult_val))))
	  && src_reg == v->src_reg)
	{
	  if (find_reg_note (p, REG_RETVAL, NULL_RTX))
	    benefit += libcall_benefit (p);

	  count--;
	  v->mult_val = *mult_val;
	  v->add_val = *add_val;
	  v->benefit = benefit;
	}
      else if (code != NOTE)
	{
	  /* Allow insns that set something other than this giv to a
	     constant.  Such insns are needed on machines which cannot
	     include long constants and should not disqualify a giv.  */
	  if (code == INSN
	      && (set = single_set (p))
	      && SET_DEST (set) != dest_reg
	      && CONSTANT_P (SET_SRC (set)))
	    continue;

	  reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
	  return 0;
	}
    }

  return v->benefit;
}
/* Return an rtx, if any, that expresses giv G2 as a function of the register
   represented by G1.  If no such expression can be found, or it is clear that
   it cannot possibly be a valid address, 0 is returned.

   To perform the computation, we note that
	G1 = a * v + b		and
	G2 = c * v + d
   where `v' is the biv.

   So G2 = (c/a) * G1 + (d - b*c/a)  */
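
/* A worked instance of that formula (illustrative): with
   G1 = 2 * v + 1 (a = 2, b = 1) and G2 = 6 * v + 5 (c = 6, d = 5),
   c/a = 3 and d - b*c/a = 5 - 3 = 2, so G2 = 3 * G1 + 2.  This only
   works when the coefficients are known constants and a divides c
   evenly, which is exactly what the code below checks.  */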
static rtx
express_from (g1, g2)
     struct induction *g1, *g2;
{
  rtx mult, add;

  /* The value that G1 will be multiplied by must be a constant integer.  Also,
     the only chance we have of getting a valid address is if b*c/a (see above
     for notation) is also an integer.  */
  if (GET_CODE (g1->mult_val) != CONST_INT
      || GET_CODE (g2->mult_val) != CONST_INT
      || GET_CODE (g1->add_val) != CONST_INT
      || g1->mult_val == const0_rtx
      || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
    return 0;

  mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
  add = plus_constant (g2->add_val, - INTVAL (g1->add_val) * INTVAL (mult));

  /* Form simplified final result.  */
  if (mult == const0_rtx)
    return add;
  else if (mult == const1_rtx)
    mult = g1->dest_reg;
  else
    mult = gen_rtx (MULT, g2->mode, g1->dest_reg, mult);

  if (add == const0_rtx)
    return mult;
  else
    return gen_rtx (PLUS, g2->mode, mult, add);
}
/* Return 1 if giv G2 can be combined with G1.  This means that G2 can use
   (either directly or via an address expression) a register used to represent
   G1.  Set g2->new_reg to a representation of G1 (normally just
   g1->dest_reg).  */

static int
combine_givs_p (g1, g2)
     struct induction *g1, *g2;
{
  rtx tem;

  /* If these givs are identical, they can be combined.  */
  if (rtx_equal_p (g1->mult_val, g2->mult_val)
      && rtx_equal_p (g1->add_val, g2->add_val))
    {
      g2->new_reg = g1->dest_reg;
      return 1;
    }

#ifdef ADDRESS_COST
  /* If G2 can be expressed as a function of G1 and that function is valid
     as an address and no more expensive than using a register for G2,
     the expression of G2 in terms of G1 can be used.  */
  if (g2->giv_type == DEST_ADDR
      && (tem = express_from (g1, g2)) != 0
      && memory_address_p (g2->mem_mode, tem)
      && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location))
    {
      g2->new_reg = tem;
      return 1;
    }
#endif

  return 0;
}

#ifdef GIV_SORT_CRITERION
/* Compare two givs and sort the most desirable one for combinations first.
   This is used only in one qsort call below.  */

static int
giv_sort (x, y)
     struct induction **x, **y;
{
  GIV_SORT_CRITERION (*x, *y);

  return 0;
}
#endif
/* Check all pairs of givs for iv_class BL and see if any can be combined with
   any other.  If so, point SAME to the giv combined with and set NEW_REG to
   be an expression (in terms of the other giv's DEST_REG) equivalent to the
   giv.  Also, update BENEFIT and related fields for cost/benefit analysis.  */

static void
combine_givs (bl)
     struct iv_class *bl;
{
  struct induction *g1, *g2, **giv_array, *temp_iv;
  int i, j, giv_count, pass;

  /* Count givs, because bl->giv_count is incorrect here.  */
  giv_count = 0;
  for (g1 = bl->giv; g1; g1 = g1->next_iv)
    giv_count++;

  giv_array
    = (struct induction **) alloca (giv_count * sizeof (struct induction *));
  i = 0;
  for (g1 = bl->giv; g1; g1 = g1->next_iv)
    giv_array[i++] = g1;

#ifdef GIV_SORT_CRITERION
  /* Sort the givs if GIV_SORT_CRITERION is defined.
     This is usually defined for processors which lack
     negative register offsets so more givs may be combined.  */

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "%d givs counted, sorting...\n", giv_count);

  qsort (giv_array, giv_count, sizeof (struct induction *), giv_sort);
#endif

  for (i = 0; i < giv_count; i++)
    {
      g1 = giv_array[i];
      for (pass = 0; pass <= 1; pass++)
	for (j = 0; j < giv_count; j++)
	  {
	    g2 = giv_array[j];
	    if (g1 != g2
		/* First try to combine with replaceable givs, then all givs.  */
		&& (g1->replaceable || pass == 1)
		/* If either has already been combined or is to be ignored, can't
		   combine.  */
		&& ! g1->ignore && ! g2->ignore && ! g1->same && ! g2->same
		/* If something has been based on G2, G2 cannot itself be based
		   on something else.  */
		&& ! g2->combined_with
		&& combine_givs_p (g1, g2))
	      {
		/* g2->new_reg set by `combine_givs_p'  */
		g2->same = g1;
		g1->combined_with = 1;

		/* If one of these givs is a DEST_REG that was only used
		   once, by the other giv, this is actually a single use.
		   The DEST_REG has the correct cost, while the other giv
		   counts the REG use too often.  */
		if (g2->giv_type == DEST_REG
		    && n_times_used[REGNO (g2->dest_reg)] == 1
		    && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
		  g1->benefit = g2->benefit;
		else if (g1->giv_type != DEST_REG
			 || n_times_used[REGNO (g1->dest_reg)] != 1
			 || ! reg_mentioned_p (g1->dest_reg,
					       PATTERN (g2->insn)))
		  {
		    g1->benefit += g2->benefit;
		    g1->times_used += g2->times_used;
		  }
		/* ??? The new final_[bg]iv_value code does a much better job
		   of finding replaceable giv's, and hence this code may no
		   longer be necessary.  */
		if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
		  g1->benefit -= copy_cost;
		g1->lifetime += g2->lifetime;

		if (loop_dump_stream)
		  fprintf (loop_dump_stream, "giv at %d combined with giv at %d\n",
			   INSN_UID (g2->insn), INSN_UID (g1->insn));
	      }
	  }
    }
}
/* EMIT code before INSERT_BEFORE to set REG = B * M + A.  */

static void
emit_iv_add_mult (b, m, a, reg, insert_before)
     rtx b;			/* initial value of basic induction variable */
     rtx m;			/* multiplicative constant */
     rtx a;			/* additive constant */
     rtx reg;			/* destination register */
     rtx insert_before;
{
  rtx seq;
  rtx result;

  /* Prevent unexpected sharing of these rtx.  */
  a = copy_rtx (a);
  b = copy_rtx (b);

  /* Increase the lifetime of any invariants moved further in code.  */
  update_reg_last_use (a, insert_before);
  update_reg_last_use (b, insert_before);
  update_reg_last_use (m, insert_before);

  start_sequence ();
  result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
  if (reg != result)
    emit_move_insn (reg, result);
  seq = gen_sequence ();
  end_sequence ();

  emit_insn_before (seq, insert_before);
}
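
/* For example (schematic, assuming constant operands): the call made
   above when initializing a reduced giv,

	emit_iv_add_mult (bl->initial_value, v->mult_val,
			  v->add_val, v->new_reg, loop_start);

   with initial value 0, mult_val 4 and add_val `base' emits, before the
   loop, whatever insns expand_mult_add chooses for computing
   new_reg = 0 * 4 + base -- normally just a single move.  */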
/* Test whether A * B can be computed without
   an actual multiply insn.  Value is 1 if so.  */

static int
product_cheap_p (a, b)
     rtx a;
     rtx b;
{
  int i;
  rtx tmp;
  struct obstack *old_rtl_obstack = rtl_obstack;
  char *storage = (char *) obstack_alloc (&temp_obstack, 0);
  int win = 1;

  /* If only one is constant, make it B.  */
  if (GET_CODE (a) == CONST_INT)
    tmp = a, a = b, b = tmp;

  /* If first constant, both constant, so don't need multiply.  */
  if (GET_CODE (a) == CONST_INT)
    return 1;

  /* If second not constant, neither is constant, so would need multiply.  */
  if (GET_CODE (b) != CONST_INT)
    return 0;

  /* One operand is constant, so might not need multiply insn.  Generate the
     code for the multiply and see if a call or multiply, or long sequence
     of insns is generated.  */

  rtl_obstack = &temp_obstack;
  start_sequence ();
  expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
  tmp = gen_sequence ();
  end_sequence ();

  if (GET_CODE (tmp) == SEQUENCE)
    {
      if (XVEC (tmp, 0) == 0)
	win = 1;
      else if (XVECLEN (tmp, 0) > 3)
	win = 0;
      else
	for (i = 0; i < XVECLEN (tmp, 0); i++)
	  {
	    rtx insn = XVECEXP (tmp, 0, i);

	    if (GET_CODE (insn) != INSN
		|| (GET_CODE (PATTERN (insn)) == SET
		    && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
		|| (GET_CODE (PATTERN (insn)) == PARALLEL
		    && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
		    && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
	      {
		win = 0;
		break;
	      }
	  }
    }
  else if (GET_CODE (tmp) == SET
	   && GET_CODE (SET_SRC (tmp)) == MULT)
    win = 0;
  else if (GET_CODE (tmp) == PARALLEL
	   && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
	   && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
    win = 0;

  /* Free any storage we obtained in generating this multiply and restore rtl
     allocation to its normal obstack.  */
  obstack_free (&temp_obstack, storage);
  rtl_obstack = old_rtl_obstack;

  return win;
}
/* Check to see if loop can be terminated by a "decrement and branch until
   zero" instruction.  If so, add a REG_NONNEG note to the branch insn.
   Also try reversing an increment loop to a decrement loop
   to see if the optimization can be performed.
   Value is nonzero if optimization was performed.  */

/* This is useful even if the architecture doesn't have such an insn,
   because it might change a loop which increments from 0 to n to a loop
   which decrements from n to 0.  A loop that decrements to zero is usually
   faster than one that increments from zero.  */

/* ??? This could be rewritten to use some of the loop unrolling procedures,
   such as approx_final_value, biv_total_increment, loop_iterations, and
   final_[bg]iv_value.  */
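
/* Schematically, the reversal attempted below turns (source-level view,
   illustrative only)

	for (i = 0; i < n; i++)        for (i = n; --i >= 0; )
	  body;                  =>      body;

   provided `i' is used for nothing but counting, so that the exit test
   can become a compare against zero (or a decrement-and-branch insn
   where the machine has one).  */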
5867 check_dbra_loop (loop_end
, insn_count
, loop_start
)
5872 struct iv_class
*bl
;
5879 rtx before_comparison
;
5882 /* If last insn is a conditional branch, and the insn before tests a
5883 register value, try to optimize it. Otherwise, we can't do anything. */
5885 comparison
= get_condition_for_loop (PREV_INSN (loop_end
));
5886 if (comparison
== 0)
5889 /* Check all of the bivs to see if the compare uses one of them.
5890 Skip biv's set more than once because we can't guarantee that
5891 it will be zero on the last iteration. Also skip if the biv is
5892 used between its update and the test insn. */
5894 for (bl
= loop_iv_list
; bl
; bl
= bl
->next
)
5896 if (bl
->biv_count
== 1
5897 && bl
->biv
->dest_reg
== XEXP (comparison
, 0)
5898 && ! reg_used_between_p (regno_reg_rtx
[bl
->regno
], bl
->biv
->insn
,
5899 PREV_INSN (PREV_INSN (loop_end
))))
5906 /* Look for the case where the basic induction variable is always
5907 nonnegative, and equals zero on the last iteration.
5908 In this case, add a reg_note REG_NONNEG, which allows the
5909 m68k DBRA instruction to be used. */
5911 if (((GET_CODE (comparison
) == GT
5912 && GET_CODE (XEXP (comparison
, 1)) == CONST_INT
5913 && INTVAL (XEXP (comparison
, 1)) == -1)
5914 || (GET_CODE (comparison
) == NE
&& XEXP (comparison
, 1) == const0_rtx
))
5915 && GET_CODE (bl
->biv
->add_val
) == CONST_INT
5916 && INTVAL (bl
->biv
->add_val
) < 0)
5918 /* Initial value must be greater than 0,
5919 init_val % -dec_value == 0 to ensure that it equals zero on
5920 the last iteration */
5922 if (GET_CODE (bl
->initial_value
) == CONST_INT
5923 && INTVAL (bl
->initial_value
) > 0
5924 && (INTVAL (bl
->initial_value
)
5925 % (-INTVAL (bl
->biv
->add_val
))) == 0)
5927 /* register always nonnegative, add REG_NOTE to branch */
5928 REG_NOTES (PREV_INSN (loop_end
))
5929 = gen_rtx (EXPR_LIST
, REG_NONNEG
, NULL_RTX
,
5930 REG_NOTES (PREV_INSN (loop_end
)));
5936 /* If the decrement is 1 and the value was tested as >= 0 before
5937 the loop, then we can safely optimize. */
5938 for (p
= loop_start
; p
; p
= PREV_INSN (p
))
5940 if (GET_CODE (p
) == CODE_LABEL
)
5942 if (GET_CODE (p
) != JUMP_INSN
)
5945 before_comparison
= get_condition_for_loop (p
);
5946 if (before_comparison
5947 && XEXP (before_comparison
, 0) == bl
->biv
->dest_reg
5948 && GET_CODE (before_comparison
) == LT
5949 && XEXP (before_comparison
, 1) == const0_rtx
5950 && ! reg_set_between_p (bl
->biv
->dest_reg
, p
, loop_start
)
5951 && INTVAL (bl
->biv
->add_val
) == -1)
5953 REG_NOTES (PREV_INSN (loop_end
))
5954 = gen_rtx (EXPR_LIST
, REG_NONNEG
, NULL_RTX
,
5955 REG_NOTES (PREV_INSN (loop_end
)));
5962 else if (num_mem_sets
<= 1)
5964 /* Try to change inc to dec, so can apply above optimization. */
5966 all registers modified are induction variables or invariant,
5967 all memory references have non-overlapping addresses
5968 (obviously true if only one write)
5969 allow 2 insns for the compare/jump at the end of the loop. */
5970 /* Also, we must avoid any instructions which use both the reversed
5971 biv and another biv. Such instructions will fail if the loop is
5972 reversed. We meet this condition by requiring that either
5973 no_use_except_counting is true, or else that there is only
5975 int num_nonfixed_reads
= 0;
5976 /* 1 if the iteration var is used only to count iterations. */
5977 int no_use_except_counting
= 0;
5978 /* 1 if the loop has no memory store, or it has a single memory store
5979 which is reversible. */
5980 int reversible_mem_store
= 1;
5982 for (p
= loop_start
; p
!= loop_end
; p
= NEXT_INSN (p
))
5983 if (GET_RTX_CLASS (GET_CODE (p
)) == 'i')
5984 num_nonfixed_reads
+= count_nonfixed_reads (PATTERN (p
));
5986 if (bl
->giv_count
== 0
5987 && ! loop_number_exit_count
[uid_loop_num
[INSN_UID (loop_start
)]])
5989 rtx bivreg
= regno_reg_rtx
[bl
->regno
];
5991 /* If there are no givs for this biv, and the only exit is the
5992 fall through at the end of the the loop, then
5993 see if perhaps there are no uses except to count. */
5994 no_use_except_counting
= 1;
5995 for (p
= loop_start
; p
!= loop_end
; p
= NEXT_INSN (p
))
5996 if (GET_RTX_CLASS (GET_CODE (p
)) == 'i')
5998 rtx set
= single_set (p
);
6000 if (set
&& GET_CODE (SET_DEST (set
)) == REG
6001 && REGNO (SET_DEST (set
)) == bl
->regno
)
6002 /* An insn that sets the biv is okay. */
6004 else if (p
== prev_nonnote_insn (prev_nonnote_insn (loop_end
))
6005 || p
== prev_nonnote_insn (loop_end
))
6006 /* Don't bother about the end test. */
6008 else if (reg_mentioned_p (bivreg
, PATTERN (p
)))
6009 /* Any other use of the biv is no good. */
6011 no_use_except_counting
= 0;
6017 /* If the loop has a single store, and the destination address is
6018 invariant, then we can't reverse the loop, because this address
6019 might then have the wrong value at loop exit.
6020 This would work if the source was invariant also, however, in that
6021 case, the insn should have been moved out of the loop. */
6023 if (num_mem_sets
== 1)
6024 reversible_mem_store
6025 = (! unknown_address_altered
6026 && ! invariant_p (XEXP (loop_store_mems
[0], 0)));
6028 /* This code only acts for innermost loops. Also it simplifies
6029 the memory address check by only reversing loops with
6030 zero or one memory access.
6031 Two memory accesses could involve parts of the same array,
6032 and that can't be reversed. */
6034 if (num_nonfixed_reads
<= 1
6036 && !loop_has_volatile
6037 && reversible_mem_store
6038 && (no_use_except_counting
6039 || ((bl
->giv_count
+ bl
->biv_count
+ num_mem_sets
6040 + num_movables
+ 2 == insn_count
)
6041 && (bl
== loop_iv_list
&& bl
->next
== 0))))
6045 /* Loop can be reversed. */
6046 if (loop_dump_stream
)
6047 fprintf (loop_dump_stream
, "Can reverse loop\n");
6049 /* Now check other conditions:
6050 initial_value must be zero,
6051 final_value % add_val == 0, so that when reversed, the
6052 biv will be zero on the last iteration.
6054 This test can probably be improved since +/- 1 in the constant
6055 can be obtained by changing LT to LE and vice versa; this is
6058 if (comparison
&& bl
->initial_value
== const0_rtx
6059 && GET_CODE (XEXP (comparison
, 1)) == CONST_INT
6060 /* LE gets turned into LT */
6061 && GET_CODE (comparison
) == LT
6062 && (INTVAL (XEXP (comparison
, 1))
6063 % INTVAL (bl
->biv
->add_val
)) == 0)
6065 /* Register will always be nonnegative, with value
6066 0 on last iteration if loop reversed */
6068 /* Save some info needed to produce the new insns. */
6069 reg
= bl
->biv
->dest_reg
;
6070 jump_label
= XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end
))), 1);
6071 if (jump_label
== pc_rtx
)
6072 jump_label
= XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end
))), 2);
6073 new_add_val
= GEN_INT (- INTVAL (bl
->biv
->add_val
));
6075 final_value
= XEXP (comparison
, 1);
6076 start_value
= GEN_INT (INTVAL (XEXP (comparison
, 1))
6077 - INTVAL (bl
->biv
->add_val
));
6079 /* Initialize biv to start_value before loop start.
6080 The old initializing insn will be deleted as a
6081 dead store by flow.c. */
6082 emit_insn_before (gen_move_insn (reg
, start_value
), loop_start
);
6084 /* Add insn to decrement register, and delete insn
6085 that incremented the register. */
6086 p
= emit_insn_before (gen_add2_insn (reg
, new_add_val
),
6088 delete_insn (bl
->biv
->insn
);
6090 /* Update biv info to reflect its new status. */
6092 bl
->initial_value
= start_value
;
6093 bl
->biv
->add_val
= new_add_val
;
6095 /* Inc LABEL_NUSES so that delete_insn will
6096 not delete the label. */
6097 LABEL_NUSES (XEXP (jump_label
, 0)) ++;
	      /* Emit an insn after the end of the loop to set the biv's
		 proper exit value if it is used anywhere outside the loop.  */
	      if ((REGNO_LAST_UID (bl->regno)
		   != INSN_UID (PREV_INSN (PREV_INSN (loop_end))))
		  || ! bl->init_insn
		  || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
		emit_insn_after (gen_move_insn (reg, final_value),
				 loop_end);

	      /* Delete compare/branch at end of loop.  */
	      delete_insn (PREV_INSN (loop_end));
	      delete_insn (PREV_INSN (loop_end));
	      /* Add new compare/branch insn at end of loop.  */
	      start_sequence ();
	      emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
			     GET_MODE (reg), 0, 0);
	      emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
	      tem = gen_sequence ();
	      end_sequence ();
	      emit_jump_insn_before (tem, loop_end);
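	      /* Schematically (a sketch, not literal rtl), the loop ending

		     i = i + add_val;  if (i < final_value) goto top;

		 has now become

		     i = i - add_val;  if (i >= 0) goto top;

		 via the cmp/bge pair emitted above.  */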
	      for (tem = PREV_INSN (loop_end);
		   tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
		;
	      if (tem)
		{
		  JUMP_LABEL (tem) = XEXP (jump_label, 0);

		  /* Increment of LABEL_NUSES done above.  */
		  /* Register is now always nonnegative,
		     so add REG_NONNEG note to the branch.  */
		  REG_NOTES (tem) = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
					     REG_NOTES (tem));
		}
	      /* Mark that this biv has been reversed.  Each giv which depends
		 on this biv, and which is also live past the end of the loop
		 will have to be fixed up.  */

	      bl->reversed = 1;

	      if (loop_dump_stream)
		fprintf (loop_dump_stream,
			 "Reversed loop and added reg_nonneg\n");

	      return 1;
	    }
	}
    }

  return 0;
}
/* Verify whether the biv BL appears to be eliminable,
   based on the insns in the loop that refer to it.
   LOOP_START is the first insn of the loop, and END is the end insn.

   If ELIMINATE_P is non-zero, actually do the elimination.

   THRESHOLD and INSN_COUNT are from loop_optimize and are used to
   determine whether invariant insns should be placed inside or at the
   start of the loop.  */
static int
maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
     struct iv_class *bl;
     rtx loop_start;
     rtx end;
     int eliminate_p;
     int threshold, insn_count;
{
  rtx reg = bl->biv->dest_reg;
  register rtx p;
  /* Scan all insns in the loop, stopping if we find one that uses the
     biv in a way that we cannot eliminate.  */

  for (p = loop_start; p != end; p = NEXT_INSN (p))
    {
      enum rtx_code code = GET_CODE (p);
      rtx where = threshold >= insn_count ? loop_start : p;
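      /* When THRESHOLD >= INSN_COUNT the loop is considered small enough
	 that replacement insns may all be hoisted to the loop start;
	 otherwise they are emitted just before the use at P.  */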
      if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
	  && reg_mentioned_p (reg, PATTERN (p))
	  && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
	{
	  if (loop_dump_stream)
	    fprintf (loop_dump_stream,
		     "Cannot eliminate biv %d: biv used in insn %d.\n",
		     bl->regno, INSN_UID (p));
	  break;
	}
    }

  if (p == end)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
		 bl->regno, eliminate_p ? "was" : "can be");
      return 1;
    }

  return 0;
}
/* If BL appears in X (part of the pattern of INSN), see if we can
   eliminate its use.  If so, return 1.  If not, return 0.

   If BIV does not appear in X, return 1.

   If ELIMINATE_P is non-zero, actually do the elimination.  WHERE indicates
   where extra insns should be added.  Depending on how many items have been
   moved out of the loop, it will either be before INSN or at the start of
   the loop.  */
static int
maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
     rtx x, insn;
     struct iv_class *bl;
     int eliminate_p;
     rtx where;
{
  enum rtx_code code = GET_CODE (x);
  rtx reg = bl->biv->dest_reg;
  enum machine_mode mode = GET_MODE (reg);
  struct induction *v;
  rtx arg, new, tem;
  int arg_operand;
  char *fmt;
  int i, j;

  switch (code)
    {
    case REG:
      /* If we haven't already been able to do something with this BIV,
	 we can't eliminate it.  */
      if (x == reg)
	return 0;
      return 1;

    case SET:
      /* If this sets the BIV, it is not a problem.  */
      if (SET_DEST (x) == reg)
	return 1;

      /* If this is an insn that defines a giv, it is also ok because
	 it will go away when the giv is reduced.  */
      for (v = bl->giv; v; v = v->next_iv)
	if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
	  return 1;
#ifdef HAVE_cc0
      if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
	{
	  /* Can replace with any giv that was reduced and
	     that has (MULT_VAL != 0) and (ADD_VAL == 0).
	     Require a constant for MULT_VAL, so we know it's nonzero.
	     ??? We disable this optimization to avoid potential
	     overflows.  */

	  for (v = bl->giv; v; v = v->next_iv)
	    if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
		&& v->add_val == const0_rtx
		&& ! v->ignore && ! v->maybe_dead && v->always_computable
		&& v->mode == mode
		&& 0
		/* If the giv V had the auto-inc address optimization applied
		   to it, and INSN occurs between the giv insn and the biv
		   insn, then we must adjust the value used here.
		   This is rare, so we don't bother to do so.  */
		&& ! (v->auto_inc_opt
		      && ((INSN_LUID (v->insn) < INSN_LUID (insn)
			   && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
			  || (INSN_LUID (v->insn) > INSN_LUID (insn)
			      && INSN_LUID (insn) > INSN_LUID (bl->biv->insn)))))
	      {
		if (! eliminate_p)
		  return 1;

		/* If the giv has the opposite direction of change,
		   then reverse the comparison.  */
		if (INTVAL (v->mult_val) < 0)
		  new = gen_rtx (COMPARE, GET_MODE (v->new_reg),
				 const0_rtx, v->new_reg);
		else
		  new = v->new_reg;

		/* We can probably test that giv's reduced reg.  */
		if (validate_change (insn, &SET_SRC (x), new, 0))
		  return 1;
	      }
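	  /* Sketch of the preceding loop's idea (hypothetical giv): if g
	     was reduced from g = 3 * biv (mult_val 3, add_val 0), then
	     `biv == 0' holds exactly when `g == 0', so the cc0 test of
	     the biv can test g's reduced register instead; a negative
	     mult_val flips ordered comparisons, hence the operand-swapped
	     COMPARE above.  (The `&& 0' keeps this disabled, per the
	     overflow note.)  */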
	  /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
	     replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
	     Require a constant for MULT_VAL, so we know it's nonzero.
	     ??? Do this only if ADD_VAL is a pointer to avoid a potential
	     overflow problem.  */

	  for (v = bl->giv; v; v = v->next_iv)
	    if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
		&& ! v->ignore && ! v->maybe_dead && v->always_computable
		&& v->mode == mode
		&& (GET_CODE (v->add_val) == SYMBOL_REF
		    || GET_CODE (v->add_val) == LABEL_REF
		    || GET_CODE (v->add_val) == CONST
		    || (GET_CODE (v->add_val) == REG
			&& REGNO_POINTER_FLAG (REGNO (v->add_val))))
		/* If the giv V had the auto-inc address optimization applied
		   to it, and INSN occurs between the giv insn and the biv
		   insn, then we must adjust the value used here.
		   This is rare, so we don't bother to do so.  */
		&& ! (v->auto_inc_opt
		      && ((INSN_LUID (v->insn) < INSN_LUID (insn)
			   && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
			  || (INSN_LUID (v->insn) > INSN_LUID (insn)
			      && INSN_LUID (insn) > INSN_LUID (bl->biv->insn)))))
	      {
		if (! eliminate_p)
		  return 1;

		/* If the giv has the opposite direction of change,
		   then reverse the comparison.  */
		if (INTVAL (v->mult_val) < 0)
		  new = gen_rtx (COMPARE, VOIDmode, copy_rtx (v->add_val),
				 v->new_reg);
		else
		  new = gen_rtx (COMPARE, VOIDmode, v->new_reg,
				 copy_rtx (v->add_val));

		/* Replace biv with the giv's reduced register.  */
		update_reg_last_use (v->add_val, insn);
		if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
		  return 1;

		/* Insn doesn't support that constant or invariant.  Copy it
		   into a register (it will be a loop invariant.)  */
		tem = gen_reg_rtx (GET_MODE (v->new_reg));

		emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
				  where);

		/* Substitute the new register for its invariant value in
		   the compare expression.  */
		XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
		if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
		  return 1;
	      }
	}
#endif
      break;
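      /* Illustration of the block above (hypothetical giv): with
	 g = biv + &a[0] (mult_val 1, pointer add_val), `biv == 0' holds
	 exactly when `g == &a[0]', so the cc0 test becomes a compare of
	 g's reduced register against ADD_VAL.  Restricting ADD_VAL to
	 pointers is what justifies ignoring overflow there.  */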
    case COMPARE:
    case EQ:  case NE:
    case GT:  case GE:  case GTU:  case GEU:
    case LT:  case LE:  case LTU:  case LEU:
      /* See if either argument is the biv.  */
      if (XEXP (x, 0) == reg)
	arg = XEXP (x, 1), arg_operand = 1;
      else if (XEXP (x, 1) == reg)
	arg = XEXP (x, 0), arg_operand = 0;
      else
	break;
      if (CONSTANT_P (arg))
	{
	  /* First try to replace with any giv that has constant positive
	     mult_val and constant add_val.  We might be able to support
	     negative mult_val, but it seems complex to do it in general.  */

	  for (v = bl->giv; v; v = v->next_iv)
	    if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
		&& (GET_CODE (v->add_val) == SYMBOL_REF
		    || GET_CODE (v->add_val) == LABEL_REF
		    || GET_CODE (v->add_val) == CONST
		    || (GET_CODE (v->add_val) == REG
			&& REGNO_POINTER_FLAG (REGNO (v->add_val))))
		&& ! v->ignore && ! v->maybe_dead && v->always_computable
		&& v->mode == mode
		/* If the giv V had the auto-inc address optimization applied
		   to it, and INSN occurs between the giv insn and the biv
		   insn, then we must adjust the value used here.
		   This is rare, so we don't bother to do so.  */
		&& ! (v->auto_inc_opt
		      && ((INSN_LUID (v->insn) < INSN_LUID (insn)
			   && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
			  || (INSN_LUID (v->insn) > INSN_LUID (insn)
			      && INSN_LUID (insn) > INSN_LUID (bl->biv->insn)))))
	      {
		if (! eliminate_p)
		  return 1;

		/* Replace biv with the giv's reduced reg.  */
		XEXP (x, 1 - arg_operand) = v->new_reg;

		/* If all constants are actually constant integers and
		   the derived constant can be directly placed in the COMPARE,
		   do so.  */
		if (GET_CODE (arg) == CONST_INT
		    && GET_CODE (v->mult_val) == CONST_INT
		    && GET_CODE (v->add_val) == CONST_INT
		    && validate_change (insn, &XEXP (x, arg_operand),
					GEN_INT (INTVAL (arg)
						 * INTVAL (v->mult_val)
						 + INTVAL (v->add_val)), 0))
		  return 1;

		/* Otherwise, load it into a register.  */
		tem = gen_reg_rtx (mode);
		emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
		if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
		  return 1;

		/* If that failed, put back the change we made above.  */
		XEXP (x, 1 - arg_operand) = reg;
	      }
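	  /* Worked example (hypothetical numbers): for a test `biv < 100'
	     with a reduced giv g = 4 * biv + 8, mult_val 4 is positive,
	     so `biv < 100' holds exactly when `g < 4 * 100 + 8'; the biv
	     operand becomes g's reduced register and the constant becomes
	     GEN_INT (408), if the insn accepts it.  */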
	  /* Look for giv with positive constant mult_val and nonconst add_val.
	     Insert insns to calculate new compare value.
	     ??? Turn this off due to possible overflow.  */

	  for (v = bl->giv; v; v = v->next_iv)
	    if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
		&& ! v->ignore && ! v->maybe_dead && v->always_computable
		&& v->mode == mode
		&& 0
		/* If the giv V had the auto-inc address optimization applied
		   to it, and INSN occurs between the giv insn and the biv
		   insn, then we must adjust the value used here.
		   This is rare, so we don't bother to do so.  */
		&& ! (v->auto_inc_opt
		      && ((INSN_LUID (v->insn) < INSN_LUID (insn)
			   && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
			  || (INSN_LUID (v->insn) > INSN_LUID (insn)
			      && INSN_LUID (insn) > INSN_LUID (bl->biv->insn)))))
	      {
		if (! eliminate_p)
		  return 1;

		tem = gen_reg_rtx (mode);

		/* Replace biv with giv's reduced register.  */
		validate_change (insn, &XEXP (x, 1 - arg_operand),
				 v->new_reg, 1);

		/* Compute value to compare against.  */
		emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
		/* Use it in this insn.  */
		validate_change (insn, &XEXP (x, arg_operand), tem, 1);
		if (apply_change_group ())
		  return 1;
	      }
	}
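      /* Both changes above are queued with in_group == 1, so
	 apply_change_group either commits the pair atomically or, if the
	 rewritten insn fails to match, cancels both and leaves INSN
	 untouched.  */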
      else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
	{
	  if (invariant_p (arg) == 1)
	    {
	      /* Look for giv with constant positive mult_val and nonconst
		 add_val.  Insert insns to compute new compare value.
		 ??? Turn this off due to possible overflow.  */

	      for (v = bl->giv; v; v = v->next_iv)
		if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
		    && ! v->ignore && ! v->maybe_dead && v->always_computable
		    && v->mode == mode
		    && 0
		    /* If the giv V had the auto-inc address optimization applied
		       to it, and INSN occurs between the giv insn and the biv
		       insn, then we must adjust the value used here.
		       This is rare, so we don't bother to do so.  */
		    && ! (v->auto_inc_opt
			  && ((INSN_LUID (v->insn) < INSN_LUID (insn)
			       && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
			      || (INSN_LUID (v->insn) > INSN_LUID (insn)
				  && INSN_LUID (insn) > INSN_LUID (bl->biv->insn)))))
		  {
		    if (! eliminate_p)
		      return 1;

		    tem = gen_reg_rtx (mode);

		    /* Replace biv with giv's reduced register.  */
		    validate_change (insn, &XEXP (x, 1 - arg_operand),
				     v->new_reg, 1);

		    /* Compute value to compare against.  */
		    emit_iv_add_mult (arg, v->mult_val, v->add_val,
				      tem, where);
		    validate_change (insn, &XEXP (x, arg_operand), tem, 1);
		    if (apply_change_group ())
		      return 1;
		  }
	    }
	  /* This code has problems.  Basically, you can't know when
	     seeing if we will eliminate BL, whether a particular giv
	     of ARG will be reduced.  If it isn't going to be reduced,
	     we can't eliminate BL.  We can try forcing it to be reduced,
	     but that can generate poor code.

	     The problem is that the benefit of reducing TV, below, should
	     be increased if BL can actually be eliminated, but this means
	     we might have to do a topological sort of the order in which
	     we try to process bivs.  It doesn't seem worthwhile to do
	     this sort of thing now.  */

#if 0
	  /* Otherwise the reg compared with had better be a biv.  */
	  if (GET_CODE (arg) != REG
	      || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
	    return 0;
	  /* Look for a pair of givs, one for each biv,
	     with identical coefficients.  */
	  for (v = bl->giv; v; v = v->next_iv)
	    {
	      struct induction *tv;

	      if (v->ignore || v->maybe_dead || v->mode != mode)
		continue;

	      for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
		if (! tv->ignore && ! tv->maybe_dead
		    && rtx_equal_p (tv->mult_val, v->mult_val)
		    && rtx_equal_p (tv->add_val, v->add_val)
		    && tv->mode == mode
		    /* If the giv V had the auto-inc address optimization applied
		       to it, and INSN occurs between the giv insn and the biv
		       insn, then we must adjust the value used here.
		       This is rare, so we don't bother to do so.  */
		    && ! (v->auto_inc_opt
			  && ((INSN_LUID (v->insn) < INSN_LUID (insn)
			       && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
			      || (INSN_LUID (v->insn) > INSN_LUID (insn)
				  && INSN_LUID (insn) > INSN_LUID (bl->biv->insn)))))
		  {
		    if (! eliminate_p)
		      return 1;

		    /* Replace biv with its giv's reduced reg.  */
		    XEXP (x, 1 - arg_operand) = v->new_reg;
		    /* Replace other operand with the other giv's
		       reduced reg.  */
		    XEXP (x, arg_operand) = tv->new_reg;
		    return 1;
		  }
	    }
#endif
	}
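      /* The intent of the disabled block above (sketch): a direct biv-biv
	 comparison `i < j' can instead compare reduced givs with
	 identical coefficients, e.g. gi = 2*i + 1 against gj = 2*j + 1,
	 since for a positive common mult_val the two tests agree.  */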
      /* If we get here, the biv can't be eliminated.  */
      return 0;

    case MEM:
      /* If this address is a DEST_ADDR giv, it doesn't matter if the
	 biv is used in it, since it will be replaced.  */
      for (v = bl->giv; v; v = v->next_iv)
	if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
	  return 1;
      break;
    }

  /* See if any subexpression fails elimination.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'e':
	  if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
				       eliminate_p, where))
	    return 0;
	  break;

	case 'E':
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
					 eliminate_p, where))
	      return 0;
	  break;
	}
    }

  return 1;
}
/* Return nonzero if the last use of REG
   is in an insn following INSN in the same basic block.  */

static int
last_use_this_basic_block (reg, insn)
     rtx reg, insn;
{
  rtx n;
  for (n = insn;
       n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
       n = NEXT_INSN (n))
    {
      if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
	return 1;
    }
  return 0;
}
/* Called via `note_stores' to record the initial value of a biv.  Here we
   just record the location of the set and process it later.  */

static void
record_initial (dest, set)
     rtx dest;
     rtx set;
{
  struct iv_class *bl;

  if (GET_CODE (dest) != REG
      || REGNO (dest) >= max_reg_before_loop
      || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
    return;

  bl = reg_biv_class[REGNO (dest)];

  /* If this is the first set found, record it.  */
  if (bl->init_insn == 0)
    {
      bl->init_insn = note_insn;
      bl->init_set = set;
    }
}
/* If any of the registers in X are "old" and currently have a last use earlier
   than INSN, update them to have a last use of INSN.  Their actual last use
   will be the previous insn but it will not have a valid uid_luid so we can't
   use it.  */

static void
update_reg_last_use (x, insn)
     rtx x;
     rtx insn;
{
  /* Check for the case where INSN does not have a valid luid.  In this case,
     there is no need to modify the regno_last_uid, as this can only happen
     when code is inserted after the loop_end to set a pseudo's final value,
     and hence this insn will never be the last use of x.  */
  if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
      && INSN_UID (insn) < max_uid_for_loop
      && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
    REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
  else
    {
      register int i, j;
      register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    update_reg_last_use (XEXP (x, i), insn);
	  else if (fmt[i] == 'E')
	    for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	      update_reg_last_use (XVECEXP (x, i, j), insn);
	}
    }
}
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is non-zero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   The condition will be returned in a canonical form to simplify testing by
   callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.  */
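/* For instance (illustrative values): under rule (4) an integer test
   (LE x 4) comes back as (LT x 5) and (GEU x 1) as (GTU x 0); rule (3)
   would first turn (GT 4 x) into (LT x 4).  */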
rtx
get_condition (jump, earliest)
     rtx jump;
     rtx *earliest;
{
  enum rtx_code code;
  rtx prev = jump;
  rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  int did_reverse_condition = 0;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (GET_CODE (jump) != JUMP_INSN
      || ! condjump_p (jump) || simplejump_p (jump))
    return 0;

  code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
  op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
  op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);

  if (earliest)
    *earliest = jump;
  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
    code = reverse_condition (code), did_reverse_condition ^= 1;
  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
    {
      /* Set non-zero when we find something of interest.  */
      rtx x = 0;

#ifdef HAVE_cc0
      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || GET_CODE (prev) != INSN
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}
#endif
      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (GET_CODE (op0) != REG)
	break;
      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      if ((prev = prev_nonnote_insn (prev)) == 0
	  || GET_CODE (prev) != INSN
	  || FIND_REG_INC_NOTE (prev, 0)
	  || (set = single_set (prev)) == 0)
	break;
      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (rtx_equal_p (SET_DEST (set), op0))
	{
	  enum machine_mode inner_mode = GET_MODE (SET_SRC (set));

	  if ((GET_CODE (SET_SRC (set)) == COMPARE
	       || (((code == NE
		     || (code == LT
			 && GET_MODE_CLASS (inner_mode) == MODE_INT
			 && (GET_MODE_BITSIZE (inner_mode)
			     <= HOST_BITS_PER_WIDE_INT)
			 && (STORE_FLAG_VALUE
			     & ((HOST_WIDE_INT) 1
				<< (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == LT
			 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
			 && FLOAT_STORE_FLAG_VALUE < 0)
#endif
		     ))
		   && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && (GET_MODE_BITSIZE (inner_mode)
			     <= HOST_BITS_PER_WIDE_INT)
			 && GET_MODE_CLASS (inner_mode) == MODE_INT
			 && (STORE_FLAG_VALUE
			     & ((HOST_WIDE_INT) 1
				<< (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
			 && FLOAT_STORE_FLAG_VALUE < 0)
#endif
		     ))
		   && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')
	    {
	      /* We might have reversed a LT to get a GE here.  But this wasn't
		 actually the comparison of data, so we don't flag that we
		 have had to reverse the condition.  */
	      did_reverse_condition ^= 1;
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else
	    break;
	}
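      /* Rationale (informal): a store-flag insn sets its target to
	 STORE_FLAG_VALUE when the comparison is true and to zero
	 otherwise, so when STORE_FLAG_VALUE has the sign bit of
	 INNER_MODE set, a later `x < 0' test is really the original
	 comparison and `x >= 0' its reverse -- which is why the GE
	 branch above sets reverse_code.  */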
      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x)
	{
	  if (GET_RTX_CLASS (GET_CODE (x)) == '<')
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reverse_condition (code);
	      did_reverse_condition ^= 1;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }
  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;
  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
	{
	case LE:
	  if (const_val != max_val >> 1)
	    code = LT, op1 = GEN_INT (const_val + 1);
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if (const_val
	      != (((HOST_WIDE_INT) 1
		   << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
	    code = GT, op1 = GEN_INT (const_val - 1);
	  break;

	case LEU:
	  if (uconst_val != max_val)
	    code = LTU, op1 = GEN_INT (uconst_val + 1);
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = GEN_INT (uconst_val - 1);
	  break;
	}
    }
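  /* The guards above skip the boundary cases, e.g. (LE x MAXINT) is left
     alone since MAXINT + 1 would wrap; likewise GE at the minimum signed
     value, LEU at the unsigned maximum, and GEU at zero.  */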
  /* If this was floating-point and we reversed anything other than an
     EQ or NE, return zero.  */
  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
      && did_reverse_condition && code != NE && code != EQ
      && ! flag_fast_math
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    return 0;

#ifdef HAVE_cc0
  /* Never return CC0; return zero instead.  */
  if (op0 == cc0_rtx)
    return 0;
#endif

  return gen_rtx (code, VOIDmode, op0, op1);
}
/* Similar to above routine, except that we also put an invariant last
   unless both operands are invariants.  */

rtx
get_condition_for_loop (x)
     rtx x;
{
  rtx comparison = get_condition (x, NULL_PTR);

  if (comparison == 0
      || ! invariant_p (XEXP (comparison, 0))
      || invariant_p (XEXP (comparison, 1)))
    return comparison;

  return gen_rtx (swap_condition (GET_CODE (comparison)), VOIDmode,
		  XEXP (comparison, 1), XEXP (comparison, 0));
}
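/* Example of the swap (sketch): a condition (GT lim i) with LIM invariant
   and I not would be returned as (LT i lim), putting the invariant last
   as the iteration-analysis code expects.  */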