Fix various warnings:
gcc/loop.c
/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 88, 89, 91-7, 1998 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */


/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the general
   induction variables, and induction variable elimination is applied to
   the basic induction variables.

   It also finds cases where
   a register is set within the loop by zero-extending a narrower value
   and changes these to zero the entire register once before the loop
   and merely copy the low part within the loop.

   Most of the complexity is in heuristics to decide when it is worth
   while to do these things.  */

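/* An illustrative sketch of the two main transformations (an added
   example, not part of the original sources): given

     for (i = 0; i < n; i++)
       a[i] = x * y;

   the invariant product is hoisted,

     t = x * y;
     for (i = 0; i < n; i++)
       a[i] = t;

   and strength reduction turns the repeated address computation a + i*4
   into a pointer that is simply incremented by 4 on each iteration.  */
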
#include "config.h"
#include <stdio.h>
#include "rtl.h"
#include "obstack.h"
#include "expr.h"
#include "insn-config.h"
#include "insn-flags.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "except.h"

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but always increase monotonically.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

int *uid_loop_num;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;

/* For each loop, gives the containing loop number, -1 if none.  */

int *loop_outer_loop;

#ifdef HAIFA
/* The main output of analyze_loop_iterations is placed here.  */

int *loop_can_insert_bct;

/* For each loop, records whether any of its inner loops has used the
   count register.  */

int *loop_used_count_register;

/* Loop parameters for arithmetic loops.  These loops have a loop variable
   which is initialized to loop_start_value, incremented in each iteration
   by "loop_increment".  At the end of the iteration the loop variable is
   compared to the loop_comparison_value (using loop_comparison_code).  */

rtx *loop_increment;
rtx *loop_comparison_value;
rtx *loop_start_value;
enum rtx_code *loop_comparison_code;
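
/* (Illustrative note, an assumption rather than text from the original
   sources: for a C loop `for (i = 0; i < n; i++)', loop_start_value
   would record the rtx for 0, loop_increment the rtx for 1,
   loop_comparison_value the rtx for `n', and loop_comparison_code a
   comparison such as LT.)  */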
#endif /* HAIFA */

/* For each loop, keep track of its unrolling factor.
   Potential values:
      0: unrolled
      1: not unrolled.
     -1: completely unrolled
     >0: holds the exact unroll factor.  */
int *loop_unroll_factor;

/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;

/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include
   return instructions on this list.  */

rtx *loop_number_exit_labels;

/* Indexed by loop number, counts the number of LABEL_REFs on
   loop_number_exit_labels for this loop and all loops nested inside it.  */

int *loop_number_exit_count;

/* Holds the number of loop iterations.  It is zero if the number could not
   be calculated.  Must be unsigned since the number of iterations can
   be as high as 2^wordsize-1.  For loops with a wider iterator, this number
   will be zero if the number of loop iterations is too large for an
   unsigned integer to hold.  */

unsigned HOST_WIDE_INT loop_n_iterations;

/* Nonzero if there is a subroutine call in the current loop.  */

static int loop_has_call;

/* Nonzero if there is a volatile memory reference in the current
   loop.  */

static int loop_has_volatile;

/* Added loop_continue which is the NOTE_INSN_LOOP_CONT of the
   current loop.  A continue statement will generate a branch to
   NEXT_INSN (loop_continue).  */

static rtx loop_continue;

/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static int *n_times_set;

/* Original value of n_times_set; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static int *n_times_used;

/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */

static char *may_not_optimize;

/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;

/* Array of MEMs that are stored in this loop.  If there are too many to fit
   here, we just turn on unknown_address_altered.  */

#define NUM_STORES 30
static rtx loop_store_mems[NUM_STORES];

/* Index of first available slot in above array.  */
static int loop_store_mems_idx;

/* Nonzero if we don't know what MEMs were changed in the current loop.
   This happens if the loop contains a call (in which case `loop_has_call'
   will also be set) or if we store into more than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Number of loops contained within the current one, including itself.  */
static int loops_enclosed;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;

/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

extern char *oballoc ();
\f
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;                     /* A movable insn */
  rtx set_src;                  /* The expression this reg is set from.  */
  rtx set_dest;                 /* The destination of this SET.  */
  rtx dependencies;             /* When INSN is libcall, this is an EXPR_LIST
                                   of any registers used within the LIBCALL.  */
  int consec;                   /* Number of consecutive following insns
                                   that must be moved with this one.  */
  int regno;                    /* The register it sets */
  short lifetime;               /* lifetime of that register;
                                   may be adjusted when matching movables
                                   that load the same value are found.  */
  short savings;                /* Number of insns we can move for this reg,
                                   including other movables that force this
                                   or match this one.  */
  unsigned int cond : 1;        /* 1 if only conditionally movable */
  unsigned int force : 1;       /* 1 means MUST move this insn */
  unsigned int global : 1;      /* 1 means reg is live outside this loop */
                /* If PARTIAL is 1, GLOBAL means something different:
                   that the reg is live outside the range from where it is set
                   to the following label.  */
  unsigned int done : 1;        /* 1 inhibits further processing of this */

  unsigned int partial : 1;     /* 1 means this reg is used for zero-extending.
                                   In particular, moving it does not make it
                                   invariant.  */
  unsigned int move_insn : 1;   /* 1 means that we call emit_move_insn to
                                   load SRC, rather than copying INSN.  */
  unsigned int is_equiv : 1;    /* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;   /* Nonzero means it is a mode for a low part
                                   that we should avoid changing when clearing
                                   the rest of the reg.  */
  struct movable *match;        /* First entry for same value */
  struct movable *forces;       /* An insn that must be moved if this is */
  struct movable *next;
};
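
/* (Illustrative example, an assumption about typical RTL rather than text
   from the original sources: for a loop insn

     (set (reg:SI 105) (mult:SI (reg:SI 100) (const_int 10)))

   whose operands are loop-invariant, the movable records that insn in
   `insn', (reg:SI 105) in `set_dest', the MULT expression in `set_src',
   and sets `cond' only if invariant_p reported the insn conditionally
   invariant.)  */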

FILE *loop_dump_stream;

/* Forward declarations.  */

static void find_and_verify_loops ();
static void mark_loop_jump ();
static void prescan_loop ();
static int reg_in_basic_block_p ();
static int consec_sets_invariant_p ();
static rtx libcall_other_reg ();
static int labels_in_range_p ();
static void count_loop_regs_set ();
static void note_addr_stored ();
static int loop_reg_used_before_p ();
static void scan_loop ();
#if 0
static void replace_call_address ();
#endif
static rtx skip_consec_insns ();
static int libcall_benefit ();
static void ignore_some_movables ();
static void force_movables ();
static void combine_movables ();
static int rtx_equal_for_loop_p ();
static void move_movables ();
static void strength_reduce ();
static int valid_initial_value_p ();
static void find_mem_givs ();
static void record_biv ();
static void check_final_value ();
static void record_giv ();
static void update_giv_derive ();
static int basic_induction_var ();
static rtx simplify_giv_expr ();
static int general_induction_var ();
static int consec_sets_giv ();
static int check_dbra_loop ();
static rtx express_from ();
static int combine_givs_p ();
static void combine_givs ();
static int product_cheap_p ();
static int maybe_eliminate_biv ();
static int maybe_eliminate_biv_1 ();
static int last_use_this_basic_block ();
static void record_initial ();
static void update_reg_last_use ();

#ifdef HAIFA
/* This is extern from unroll.c */
void iteration_info ();

/* Two main functions for implementing bct:
   the first to be called before loop unrolling, and the second after.  */
#ifdef HAVE_decrement_and_branch_on_count
static void analyze_loop_iterations ();
static void insert_bct ();

/* Auxiliary function that inserts the bct pattern into the loop.  */
static void instrument_loop_bct ();
#endif /* HAVE_decrement_and_branch_on_count */
#endif /* HAIFA */

/* Indirect_jump_in_function is computed once per function.  */
int indirect_jump_in_function = 0;
static int indirect_jump_in_function_p ();

\f
/* Relative gain of eliminating various kinds of operations.  */
int add_cost;
#if 0
int shift_cost;
int mult_cost;
#endif

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
int copy_cost;

void
init_loop ()
{
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  copy_cost = 2 * 2;

  /* Free the objects we just allocated.  */
  obfree (free_point);

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
}
\f
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile, unroll_p)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int unroll_p;
{
  register rtx insn;
  register int i;
  rtx last_insn;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();
  init_alias_analysis ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
          && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
        max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
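  /* (Explanatory note: the 32-per-loop slack is a generous allowance for
     labels that find_and_verify_loops may create; the abort () after
     reg_scan below verifies that the allowance sufficed.)  */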

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
  bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));

  /* Allocate tables for recording each loop.  We set each entry, so they need
     not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));

  /* This is initialized by the unrolling code, so we go ahead
     and clear it just in case we are not performing loop
     unrolling.  */
  loop_unroll_factor = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_unroll_factor, max_loop_num * sizeof (int));

#ifdef HAIFA
  /* Allocate for BCT optimization.  */
  loop_can_insert_bct = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_can_insert_bct, max_loop_num * sizeof (int));

  loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));

  loop_increment = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_comparison_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_start_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  bzero ((char *) loop_increment, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_comparison_value, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_start_value, max_loop_num * sizeof (rtx));

  loop_comparison_code
    = (enum rtx_code *) alloca (max_loop_num * sizeof (enum rtx_code));
  bzero ((char *) loop_comparison_code, max_loop_num * sizeof (enum rtx_code));
#endif /* HAIFA */

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* See if we went too far.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();

  /* Compute the mapping from uids to luids.
     LUIDs are numbers assigned to insns, like uids,
     except that luids increase monotonically through the code.
     Don't assign luids to line-number NOTEs, so that the distance in luids
     between two insns is not affected by -g.  */

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      last_insn = insn;
      if (GET_CODE (insn) != NOTE
          || NOTE_LINE_NUMBER (insn) <= 0)
        uid_luid[INSN_UID (insn)] = ++i;
      else
        /* Give a line number note the same luid as the preceding insn.  */
        uid_luid[INSN_UID (insn)] = i;
    }

  max_luid = i + 1;

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
        break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];
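  /* (Explanatory note: the first loop above primes uid_luid[0] with the
     first nonzero luid, so the backfill loop never reads uid_luid[-1];
     the entry at index 0 is already nonzero when it is tested.)  */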

  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Determine if the function has an indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
                 max_reg_num (), unroll_p);

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original one
     to one mapping will remain.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    unroll_block_trees ();
}
\f
/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (loop_start, end, nregs, unroll_p)
     rtx loop_start, end;
     int nregs;
     int unroll_p;
{
  register int i;
  register rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  rtx loop_top = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  rtx scan_start;
  /* Number of insns in the loop.  */
  int insn_count;
  int in_libcall = 0;
  int tem;
  rtx temp;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* If we have calls, contains the insn in which a register was used
     if it was used exactly once; contains const0_rtx if it was used more
     than once.  */
  rtx *reg_single_usage = 0;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;

  n_times_set = (int *) alloca (nregs * sizeof (int));
  n_times_used = (int *) alloca (nregs * sizeof (int));
  may_not_optimize = (char *) alloca (nregs);

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */

  for (p = NEXT_INSN (loop_start);
       p != end
         && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
         && (GET_CODE (p) != NOTE
             || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
                 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  scan_start = p;

  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end);
  threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
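  /* (Illustrative arithmetic, not from the original sources: on a target
     with 8 non-fixed registers, THRESHOLD is 2 * (1 + 8) = 18 when the
     loop makes no calls, and 1 * (1 + 8) = 9 when it does.)  */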

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN)  */
      if (simplejump_p (p)
          && JUMP_LABEL (p) != 0
          /* Check to see whether the jump actually
             jumps out of the loop (meaning it's no loop).
             This case can happen for things like
             do {..} while (0).  If this label was generated previously
             by loop, we can't tell anything about it and have to reject
             the loop.  */
          && INSN_UID (JUMP_LABEL (p)) < max_uid_for_loop
          && INSN_LUID (JUMP_LABEL (p)) >= INSN_LUID (loop_start)
          && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (end))
        {
          loop_top = next_label (scan_start);
          scan_start = JUMP_LABEL (p);
        }
    }

  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
                 INSN_UID (loop_start), INSN_UID (end));
      return;
    }

  /* Count number of times each reg is set during this loop.
     Set may_not_optimize[I] if it is not safe to move out
     the setting of register I.  If this loop has calls, set
     reg_single_usage[I].  */

  bzero ((char *) n_times_set, nregs * sizeof (int));
  bzero (may_not_optimize, nregs);

  if (loop_has_call)
    {
      reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
      bzero ((char *) reg_single_usage, nregs * sizeof (rtx));
    }

  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
                       may_not_optimize, reg_single_usage, &insn_count, nregs);

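  /* Hard registers are never moved or strength-reduced; mark them as set
     and unoptimizable up front.  (Descriptive comment added here; the
     behavior is that of the loop below.)  */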
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    may_not_optimize[i] = 1, n_times_set[i] = 1;
  bcopy ((char *) n_times_set, (char *) n_times_used, nregs * sizeof (int));

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
               INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_continue)
        fprintf (loop_dump_stream, "Continue at insn %d.\n",
                 INSN_UID (loop_continue));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set n_times_set negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  p = scan_start;
  while (1)
    {
      p = NEXT_INSN (p);
      /* At end of a straight-in loop, we are done.
         At end of a loop entered at the bottom, scan the top.  */
      if (p == scan_start)
        break;
      if (p == end)
        {
          if (loop_top != 0)
            p = loop_top;
          else
            break;
          if (p == scan_start)
            break;
        }

      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
          && find_reg_note (p, REG_LIBCALL, NULL_RTX))
        in_libcall = 1;
      else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
               && find_reg_note (p, REG_RETVAL, NULL_RTX))
        in_libcall = 0;

      if (GET_CODE (p) == INSN
          && (set = single_set (p))
          && GET_CODE (SET_DEST (set)) == REG
          && ! may_not_optimize[REGNO (SET_DEST (set))])
        {
          int tem1 = 0;
          int tem2 = 0;
          int move_insn = 0;
          rtx src = SET_SRC (set);
          rtx dependencies = 0;

          /* Figure out what to use as a source of this insn.  If a REG_EQUIV
             note is given or if a REG_EQUAL note with a constant operand is
             specified, use it as the source and mark that we should move
             this insn by calling emit_move_insn rather than duplicating the
             insn.

             Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
             is present.  */
          temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
          if (temp)
            src = XEXP (temp, 0), move_insn = 1;
          else
            {
              temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
              if (temp && CONSTANT_P (XEXP (temp, 0)))
                src = XEXP (temp, 0), move_insn = 1;
              if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
                {
                  src = XEXP (temp, 0);
                  /* A libcall block can use regs that don't appear in
                     the equivalent expression.  To move the libcall,
                     we must move those regs too.  */
                  dependencies = libcall_other_reg (p, src);
                }
            }

          /* Don't try to optimize a register that was made
             by loop-optimization for an inner loop.
             We don't know its life-span, so we can't compute the benefit.  */
          if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
            ;
          /* In order to move a register, we need to have one of three cases:
             (1) it is used only in the same basic block as the set
             (2) it is not a user variable and it is not used in the
                 exit test (this can cause the variable to be used
                 before it is set just like a user-variable).
             (3) the set is guaranteed to be executed once the loop starts,
                 and the reg is not used until after that.  */
          else if (! ((! maybe_never
                       && ! loop_reg_used_before_p (set, p, loop_start,
                                                    scan_start, end))
                      || (! REG_USERVAR_P (SET_DEST (set))
                          && ! REG_LOOP_TEST_P (SET_DEST (set)))
                      || reg_in_basic_block_p (p, SET_DEST (set))))
            ;
          else if ((tem = invariant_p (src))
                   && (dependencies == 0
                       || (tem2 = invariant_p (dependencies)) != 0)
                   && (n_times_set[REGNO (SET_DEST (set))] == 1
                       || (tem1
                           = consec_sets_invariant_p (SET_DEST (set),
                                                      n_times_set[REGNO (SET_DEST (set))],
                                                      p)))
                   /* If the insn can cause a trap (such as divide by zero),
                      can't move it unless it's guaranteed to be executed
                      once loop is entered.  Even a function call might
                      prevent the trap insn from being reached
                      (since it might exit!)  */
                   && ! ((maybe_never || call_passed)
                         && may_trap_p (src)))
            {
              register struct movable *m;
              register int regno = REGNO (SET_DEST (set));

              /* A potential lossage is where we have a case where two insns
                 can be combined as long as they are both in the loop, but
                 we move one of them outside the loop.  For large loops,
                 this can lose.  The most common case of this is the address
                 of a function being called.

                 Therefore, if this register is marked as being used exactly
                 once if we are in a loop with calls (a "large loop"), see if
                 we can replace the usage of this register with the source
                 of this SET.  If we can, delete this insn.

                 Don't do this if P has a REG_RETVAL note or if we have
                 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

              if (reg_single_usage && reg_single_usage[regno] != 0
                  && reg_single_usage[regno] != const0_rtx
                  && REGNO_FIRST_UID (regno) == INSN_UID (p)
                  && (REGNO_LAST_UID (regno)
                      == INSN_UID (reg_single_usage[regno]))
                  && n_times_set[REGNO (SET_DEST (set))] == 1
                  && ! side_effects_p (SET_SRC (set))
                  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
                  && (! SMALL_REGISTER_CLASSES
                      || (! (GET_CODE (SET_SRC (set)) == REG
                             && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
                  /* This test is not redundant; SET_SRC (set) might be
                     a call-clobbered register and the life of REGNO
                     might span a call.  */
                  && ! modified_between_p (SET_SRC (set), p,
                                           reg_single_usage[regno])
                  && no_labels_between_p (p, reg_single_usage[regno])
                  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
                                           reg_single_usage[regno]))
                {
                  /* Replace any usage in a REG_EQUAL note.  Must copy the
                     new source, so that we don't get rtx sharing between the
                     SET_SOURCE and REG_NOTES of insn p.  */
                  REG_NOTES (reg_single_usage[regno])
                    = replace_rtx (REG_NOTES (reg_single_usage[regno]),
                                   SET_DEST (set), copy_rtx (SET_SRC (set)));

                  PUT_CODE (p, NOTE);
                  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
                  NOTE_SOURCE_FILE (p) = 0;
                  n_times_set[regno] = 0;
                  continue;
                }

              m = (struct movable *) alloca (sizeof (struct movable));
              m->next = 0;
              m->insn = p;
              m->set_src = src;
              m->dependencies = dependencies;
              m->set_dest = SET_DEST (set);
              m->force = 0;
              m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
              m->done = 0;
              m->forces = 0;
              m->partial = 0;
              m->move_insn = move_insn;
              m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
              m->savemode = VOIDmode;
              m->regno = regno;
              /* Set M->cond if either invariant_p or consec_sets_invariant_p
                 returned 2 (only conditionally invariant).  */
              m->cond = ((tem | tem1 | tem2) > 1);
              m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
                           || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
              m->match = 0;
              m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                             - uid_luid[REGNO_FIRST_UID (regno)]);
              m->savings = n_times_used[regno];
              if (find_reg_note (p, REG_RETVAL, NULL_RTX))
                m->savings += libcall_benefit (p);
              n_times_set[regno] = move_insn ? -2 : -1;
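              /* (-2: candidate known equal to a constant; -1: candidate
                 not known constant.  See the n_times_set commentary at the
                 top of this file.)  */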
              /* Add M to the end of the chain MOVABLES.  */
              if (movables == 0)
                movables = m;
              else
                last_movable->next = m;
              last_movable = m;

              if (m->consec > 0)
                {
                  /* Skip this insn, not checking REG_LIBCALL notes.  */
                  p = next_nonnote_insn (p);
                  /* Skip the consecutive insns, if there are any.  */
                  p = skip_consec_insns (p, m->consec);
                  /* Back up to the last insn of the consecutive group.  */
                  p = prev_nonnote_insn (p);

                  /* We must now reset m->move_insn, m->is_equiv, and possibly
                     m->set_src to correspond to the effects of all the
                     insns.  */
                  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
                  if (temp)
                    m->set_src = XEXP (temp, 0), m->move_insn = 1;
                  else
                    {
                      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
                      if (temp && CONSTANT_P (XEXP (temp, 0)))
                        m->set_src = XEXP (temp, 0), m->move_insn = 1;
                      else
                        m->move_insn = 0;
                    }
                  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
                }
            }
          /* If this register is always set within a STRICT_LOW_PART
             or set to zero, then its high bytes are constant.
             So clear them outside the loop and within the loop
             just load the low bytes.
             We must check that the machine has an instruction to do so.
             Also, if the value loaded into the register
             depends on the same register, this cannot be done.  */
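          /* (Illustrative RTL shape, an assumption about typical input:

               (set (reg:SI 100) (const_int 0))
               (set (strict_low_part (subreg:HI (reg:SI 100) 0)) ...)

             which is the pattern the tests below look for.)  */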
          else if (SET_SRC (set) == const0_rtx
                   && GET_CODE (NEXT_INSN (p)) == INSN
                   && (set1 = single_set (NEXT_INSN (p)))
                   && GET_CODE (set1) == SET
                   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
                   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
                   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
                       == SET_DEST (set))
                   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
            {
              register int regno = REGNO (SET_DEST (set));
              if (n_times_set[regno] == 2)
                {
                  register struct movable *m;
                  m = (struct movable *) alloca (sizeof (struct movable));
                  m->next = 0;
                  m->insn = p;
                  m->set_dest = SET_DEST (set);
                  m->dependencies = 0;
                  m->force = 0;
                  m->consec = 0;
                  m->done = 0;
                  m->forces = 0;
                  m->move_insn = 0;
                  m->partial = 1;
                  /* If the insn may not be executed on some cycles,
                     we can't clear the whole reg; clear just high part.
                     Not even if the reg is used only within this loop.
                     Consider this:
                     while (1)
                       while (s != t) {
                         if (foo ()) x = *s;
                         use (x);
                       }
                     Clearing x before the inner loop could clobber a value
                     being saved from the last time around the outer loop.
                     However, if the reg is not used outside this loop
                     and all uses of the register are in the same
                     basic block as the store, there is no problem.

                     If this insn was made by loop, we don't know its
                     INSN_LUID and hence must make a conservative
                     assumption.  */
                  m->global = (INSN_UID (p) >= max_uid_for_loop
                               || (uid_luid[REGNO_LAST_UID (regno)]
                                   > INSN_LUID (end))
                               || (uid_luid[REGNO_FIRST_UID (regno)]
                                   < INSN_LUID (p))
                               || (labels_in_range_p
                                   (p, uid_luid[REGNO_FIRST_UID (regno)])));
                  if (maybe_never && m->global)
                    m->savemode = GET_MODE (SET_SRC (set1));
                  else
                    m->savemode = VOIDmode;
                  m->regno = regno;
                  m->cond = 0;
                  m->match = 0;
                  m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                                 - uid_luid[REGNO_FIRST_UID (regno)]);
                  m->savings = 1;
                  n_times_set[regno] = -1;
                  /* Add M to the end of the chain MOVABLES.  */
                  if (movables == 0)
                    movables = m;
                  else
                    last_movable->next = m;
                  last_movable = m;
                }
            }
        }
      /* Past a call insn, we get to insns which might not be executed
         because the call might exit.  This matters for insns that trap.
         Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
         so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
        call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
         can't count on whether or how many times they will be
         executed during each iteration.  Therefore, we can
         only move out sets of trivial variables
         (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
               /* If we enter the loop in the middle, and scan around to the
                  beginning, don't set maybe_never for that.  This must be an
                  unconditional jump, otherwise the code at the top of the
                  loop might never be executed.  Unconditional jumps are
                  followed by a barrier and then the loop end.  */
               && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
                     && NEXT_INSN (NEXT_INSN (p)) == end
                     && simplejump_p (p)))
        maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
        {
          /* At the virtual top of a converted loop, insns are again known to
             be executed: logically, the loop begins here even though the exit
             code has been duplicated.  */
          if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
            maybe_never = call_passed = 0;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
            loop_depth++;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
            loop_depth--;
        }
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads
     feeds, when it dies, right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in n_times_set for each reg that is moved.  */

  move_movables (movables, threshold,
                 insn_count, loop_start, end, nregs);

  /* Now candidates that still are negative are those not moved.
     Change n_times_set to indicate that those are not actually invariant.  */
  for (i = 0; i < nregs; i++)
    if (n_times_set[i] < 0)
      n_times_set[i] = n_times_used[i];

  if (flag_strength_reduce)
    strength_reduce (scan_start, end, loop_top,
                     insn_count, loop_start, end, unroll_p);
}
\f
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
          && ! reg_mentioned_p (in_this, not_in_this))
        *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
        {
        case 'E':
          for (j = 0; j < XVECLEN (in_this, i); j++)
            record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
          break;

        case 'e':
          record_excess_regs (XEXP (in_this, i), not_in_this, output);
          break;
        }
    }
}
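
/* (Illustrative use, an assumption about typical arguments: with IN_THIS
   being (plus:SI (reg:SI 100) (reg:SI 101)) and NOT_IN_THIS (reg:SI 100),
   only (reg:SI 101) is prepended to *OUTPUT; hard registers and regs
   already mentioned in NOT_IN_THIS are skipped.)  */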
\f
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

static rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
          || GET_CODE (p) == CALL_INSN)
        record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
\f
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
        {
        case NOTE:
          break;

        case INSN:
        case CALL_INSN:
          /* Ordinary insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          break;

        case JUMP_INSN:
          /* Jump insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          /* Otherwise, it's the end of the basic block, so we lose.  */
          return 0;

        case CODE_LABEL:
        case BARRIER:
          /* It's the end of the basic block, so we lose.  */
          return 0;

        default:
          break;
        }
    }

  /* The "last use" doesn't follow the "first use"??  */
  abort ();
}
\f
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
        benefit += 10;          /* Assume at least this many insns in a library
                                   routine.  */
      else if (GET_CODE (insn) == INSN
               && GET_CODE (PATTERN (insn)) != USE
               && GET_CODE (PATTERN (insn)) != CLOBBER)
        benefit++;
    }

  return benefit;
}
\f
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
         be an insn here.  */
      if (GET_CODE (insn) != NOTE
          && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
        insn = XEXP (temp, 0);

      do insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}

/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
        {
          rtx insn;
          /* Check for earlier movables inside that range,
             and mark them invalid.  We cannot use LUIDs here because
             insns created by loop.c for prior loops don't have LUIDs.
             Rather than reject all such insns from movables, we just
             explicitly check each insn in the libcall (since invariant
             libcalls aren't that common).  */
          for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1->insn == insn)
                m1->done = 1;
        }
    }
}

/* For each movable insn, see if the reg that it loads
   feeds, when it dies, right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

static void
force_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;
  for (m1 = movables; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
        int regno = m1->regno;
        for (m = m1->next; m; m = m->next)
          /* ??? Could this be a bug?  What if CSE caused the
             register of M1 to be used after this insn?
             Since CSE does not update regno_last_uid,
             this insn M->insn might not be where it dies.
             But very likely this doesn't matter; what matters is
             that M's reg is computed from M1's reg.  */
          if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
              && !m->done)
            break;
        if (m != 0 && m->set_src == m1->set_dest
            /* If m->consec, m->set_src isn't valid.  */
            && m->consec == 0)
          m = 0;

        /* Increase the priority of moving the first insn
           since it permits the second to be moved as well.  */
        if (m != 0)
          {
            m->forces = m1;
            m1->lifetime += m->lifetime;
            m1->savings += m->savings;
          }
      }
}
\f
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, nregs)
     struct movable *movables;
     int nregs;
{
  register struct movable *m;
  char *matched_regs = (char *) alloca (nregs);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables; m; m = m->next)
    if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
      {
        register struct movable *m1;
        int regno = m->regno;

        bzero (matched_regs, nregs);
        matched_regs[regno] = 1;

        /* We want later insns to match the first one.  Don't make the first
           one match any later ones.  So start this loop at m->next.  */
        for (m1 = m->next; m1; m1 = m1->next)
          if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
              /* A reg used outside the loop mustn't be eliminated.  */
              && !m1->global
              /* A reg used for zero-extending mustn't be eliminated.  */
              && !m1->partial
              && (matched_regs[m1->regno]
                  ||
                  (
                   /* Can combine regs with different modes loaded from the
                      same constant only if the modes are the same or
                      if both are integer modes with M wider or the same
                      width as M1.  The check for integer is redundant, but
                      safe, since the only case of differing destination
                      modes with equal sources is when both sources are
                      VOIDmode, i.e., CONST_INT.  */
                   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
                    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
                        && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
                        && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
                            >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
                   /* See if the source of M1 says it matches M.  */
                   && ((GET_CODE (m1->set_src) == REG
                        && matched_regs[REGNO (m1->set_src)])
                       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
                                                movables))))
              && ((m->dependencies == m1->dependencies)
                  || rtx_equal_p (m->dependencies, m1->dependencies)))
            {
              m->lifetime += m1->lifetime;
              m->savings += m1->savings;
              m1->done = 1;
              m1->match = m;
              matched_regs[m1->regno] = 1;
            }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
         Don't combine any that are used outside this loop.  */
      for (m = movables; m; m = m->next)
        if (m->partial && ! m->global
            && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
          {
            register struct movable *m1;
            int first = uid_luid[REGNO_FIRST_UID (m->regno)];
            int last = uid_luid[REGNO_LAST_UID (m->regno)];

            if (m0 == 0)
              {
                /* First one: don't check for overlap, just record it.  */
                m0 = m;
                continue;
              }

            /* Make sure they extend to the same mode.
               (Almost always true.)  */
            if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
              continue;

            /* We already have one: check for overlap with those
               already combined together.  */
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1 == m0 || (m1->partial && m1->match == m0))
                if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
                       || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
                  goto overlap;

            /* No overlap: we can combine this with the others.  */
            m0->lifetime += m->lifetime;
            m0->savings += m->savings;
            m->done = 1;
            m->match = m0;

          overlap: ;
          }
    }
}
\f
/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  int xn = REGNO (x);
  int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
          && ((mx->match == my->match && mx->match != 0)
              || mx->match == my
              || mx == my->match));
}

/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */

static int
rtx_equal_for_loop_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  register int i;
  register int j;
  register struct movable *m;
  register enum rtx_code code;
  register char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
      && CONSTANT_P (y))
    {
      for (m = movables; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (x)
            && rtx_equal_p (m->set_src, y))
          return 1;
    }
  else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
           && CONSTANT_P (x))
    {
      for (m = movables; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (y)
            && rtx_equal_p (m->set_src, x))
          return 1;
    }

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fails to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'w':
          if (XWINT (x, i) != XWINT (y, i))
            return 0;
          break;

        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return 0;
          break;

        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return 0;

          /* And the corresponding elements must match.  */
          for (j = 0; j < XVECLEN (x, i); j++)
            if (rtx_equal_for_loop_p (XVECEXP (x, i, j),
                                      XVECEXP (y, i, j), movables) == 0)
              return 0;
          break;

        case 'e':
          if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
            return 0;
          break;

        case 's':
          if (strcmp (XSTR (x, i), XSTR (y, i)))
            return 0;
          break;

        case 'u':
          /* These are just backpointers, so they don't matter.  */
          break;

        case '0':
          break;

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's,
             except for within LABEL_REFs and SYMBOL_REFs.  */
        default:
          abort ();
        }
    }
  return 1;
}
\f
/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use that reference.  */

static void
add_label_notes (x, insns)
     rtx x;
     rtx insns;
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  char *fmt;
  rtx insn;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      rtx next = next_real_insn (XEXP (x, 0));

      /* Don't record labels that refer to dispatch tables.
         This is not necessary, since the tablejump references the same label.
         And if we did record them, flow.c would make worse code.  */
      if (next == 0
          || ! (GET_CODE (next) == JUMP_INSN
                && (GET_CODE (PATTERN (next)) == ADDR_VEC
                    || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)))
        {
          for (insn = insns; insn; insn = NEXT_INSN (insn))
            if (reg_mentioned_p (XEXP (x, 0), insn))
              REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
                                                    REG_NOTES (insn));
        }
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        add_label_notes (XEXP (x, i), insns);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          add_label_notes (XVECEXP (x, i, j), insns);
    }
}
\f
/* Scan MOVABLES, and move the insns that deserve to be moved.
   If two matching movables are combined, replace one reg with the
   other throughout.  */

static void
move_movables (movables, threshold, insn_count, loop_start, end, nregs)
     struct movable *movables;
     int threshold;
     int insn_count;
     rtx loop_start;
     rtx end;
     int nregs;
{
  rtx new_start = 0;
  register struct movable *m;
  register rtx p;
  /* Map of pseudo-register replacements to handle combining
     when we move several insns that load the same value
     into different pseudo-registers.  */
  rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
  char *already_moved = (char *) alloca (nregs);

  bzero (already_moved, nregs);
  bzero ((char *) reg_map, nregs * sizeof (rtx));

  num_movables = 0;

  for (m = movables; m; m = m->next)
    {
      /* Describe this movable insn.  */

      if (loop_dump_stream)
        {
          fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
                   INSN_UID (m->insn), m->regno, m->lifetime);
          if (m->consec > 0)
            fprintf (loop_dump_stream, "consec %d, ", m->consec);
          if (m->cond)
            fprintf (loop_dump_stream, "cond ");
          if (m->force)
            fprintf (loop_dump_stream, "force ");
          if (m->global)
            fprintf (loop_dump_stream, "global ");
          if (m->done)
            fprintf (loop_dump_stream, "done ");
          if (m->move_insn)
            fprintf (loop_dump_stream, "move-insn ");
          if (m->match)
            fprintf (loop_dump_stream, "matches %d ",
                     INSN_UID (m->match->insn));
          if (m->forces)
            fprintf (loop_dump_stream, "forces %d ",
                     INSN_UID (m->forces->insn));
        }

      /* Count movables.  Value used in heuristics in strength_reduce.  */
      num_movables++;
1673
1674 /* Ignore the insn if it's already done (it matched something else).
1675 Otherwise, see if it is now safe to move. */
1676
1677 if (!m->done
1678 && (! m->cond
1679 || (1 == invariant_p (m->set_src)
1680 && (m->dependencies == 0
1681 || 1 == invariant_p (m->dependencies))
1682 && (m->consec == 0
1683 || 1 == consec_sets_invariant_p (m->set_dest,
1684 m->consec + 1,
1685 m->insn))))
1686 && (! m->forces || m->forces->done))
1687 {
1688 register int regno;
1689 register rtx p;
1690 int savings = m->savings;
1691
1692 /* We have an insn that is safe to move.
1693 Compute its desirability. */
1694
1695 p = m->insn;
1696 regno = m->regno;
1697
1698 if (loop_dump_stream)
1699 fprintf (loop_dump_stream, "savings %d ", savings);
1700
1701 if (moved_once[regno])
1702 {
1703 insn_count *= 2;
1704
1705 if (loop_dump_stream)
1706 fprintf (loop_dump_stream, "halved since already moved ");
1707 }
1708
1709 /* An insn MUST be moved if we already moved something else
1710 which is safe only if this one is moved too: that is,
1711 if already_moved[REGNO] is nonzero. */
1712
1713 /* An insn is desirable to move if the new lifetime of the
1714 register is no more than THRESHOLD times the old lifetime.
1715 If it's not desirable, it means the loop is so big
1716 that moving won't speed things up much,
1717 and it is liable to make register usage worse. */
1718
1719 /* It is also desirable to move if it can be moved at no
1720 extra cost because something else was already moved. */
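/* A worked instance of the test below (numbers are hypothetical): with
   threshold 30, savings 2 and a register lifetime of 10 insns,
   30 * 2 * 10 == 600, so the move is judged desirable in any loop of
   at most 600 insns. */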
1721
1722 if (already_moved[regno]
1723 || flag_move_all_movables
1724 || (threshold * savings * m->lifetime) >= insn_count
1725 || (m->forces && m->forces->done
1726 && n_times_used[m->forces->regno] == 1))
1727 {
1728 int count;
1729 register struct movable *m1;
1730 rtx first;
1731
1732 /* Now move the insns that set the reg. */
1733
1734 if (m->partial && m->match)
1735 {
1736 rtx newpat, i1;
1737 rtx r1, r2;
1738 /* Find the end of this chain of matching regs.
1739 Thus, we load each reg in the chain from that one reg.
1740 And that reg is loaded with 0 directly,
1741 since it has ->match == 0. */
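/* E.g. (a hypothetical chain): if A->match == B, B->match == C and
   C->match == 0, the walk below stops at C, so A's reg is loaded
   from C's reg, and C itself is the movable that loads 0. */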
1742 for (m1 = m; m1->match; m1 = m1->match);
1743 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1744 SET_DEST (PATTERN (m1->insn)));
1745 i1 = emit_insn_before (newpat, loop_start);
1746
1747 /* Mark the moved, invariant reg as being allowed to
1748 share a hard reg with the other matching invariant. */
1749 REG_NOTES (i1) = REG_NOTES (m->insn);
1750 r1 = SET_DEST (PATTERN (m->insn));
1751 r2 = SET_DEST (PATTERN (m1->insn));
1752 regs_may_share
1753 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1754 gen_rtx_EXPR_LIST (VOIDmode, r2,
1755 regs_may_share));
1756 delete_insn (m->insn);
1757
1758 if (new_start == 0)
1759 new_start = i1;
1760
1761 if (loop_dump_stream)
1762 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1763 }
1764 /* If we are to re-generate the item being moved with a
1765 new move insn, first delete what we have and then emit
1766 the move insn before the loop. */
1767 else if (m->move_insn)
1768 {
1769 rtx i1, temp;
1770
1771 for (count = m->consec; count >= 0; count--)
1772 {
1773 /* If this is the first insn of a library call sequence,
1774 skip to the end. */
1775 if (GET_CODE (p) != NOTE
1776 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1777 p = XEXP (temp, 0);
1778
1779 /* If this is the last insn of a libcall sequence, then
1780 delete every insn in the sequence except the last.
1781 The last insn is handled in the normal manner. */
1782 if (GET_CODE (p) != NOTE
1783 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1784 {
1785 temp = XEXP (temp, 0);
1786 while (temp != p)
1787 temp = delete_insn (temp);
1788 }
1789
1790 p = delete_insn (p);
1791 while (p && GET_CODE (p) == NOTE)
1792 p = NEXT_INSN (p);
1793 }
1794
1795 start_sequence ();
1796 emit_move_insn (m->set_dest, m->set_src);
1797 temp = get_insns ();
1798 end_sequence ();
1799
1800 add_label_notes (m->set_src, temp);
1801
1802 i1 = emit_insns_before (temp, loop_start);
1803 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1804 REG_NOTES (i1)
1805 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1806 m->set_src, REG_NOTES (i1));
1807
1808 if (loop_dump_stream)
1809 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1810
1811 /* The more regs we move, the less we like moving them. */
1812 threshold -= 3;
1813 }
1814 else
1815 {
1816 for (count = m->consec; count >= 0; count--)
1817 {
1818 rtx i1, temp;
1819
1820 /* If first insn of libcall sequence, skip to end. */
1821 /* Do this at start of loop, since p is guaranteed to
1822 be an insn here. */
1823 if (GET_CODE (p) != NOTE
1824 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1825 p = XEXP (temp, 0);
1826
1827 /* If last insn of libcall sequence, move all
1828 insns except the last before the loop. The last
1829 insn is handled in the normal manner. */
1830 if (GET_CODE (p) != NOTE
1831 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1832 {
1833 rtx fn_address = 0;
1834 rtx fn_reg = 0;
1835 rtx fn_address_insn = 0;
1836
1837 first = 0;
1838 for (temp = XEXP (temp, 0); temp != p;
1839 temp = NEXT_INSN (temp))
1840 {
1841 rtx body;
1842 rtx n;
1843 rtx next;
1844
1845 if (GET_CODE (temp) == NOTE)
1846 continue;
1847
1848 body = PATTERN (temp);
1849
1850 /* Find the next insn after TEMP,
1851 not counting USE or NOTE insns. */
1852 for (next = NEXT_INSN (temp); next != p;
1853 next = NEXT_INSN (next))
1854 if (! (GET_CODE (next) == INSN
1855 && GET_CODE (PATTERN (next)) == USE)
1856 && GET_CODE (next) != NOTE)
1857 break;
1858
1859 /* If that is the call, this may be the insn
1860 that loads the function address.
1861
1862 Extract the function address from the insn
1863 that loads it into a register.
1864 If this insn was cse'd, we get incorrect code.
1865
1866 So emit a new move insn that copies the
1867 function address into the register that the
1868 call insn will use. flow.c will delete any
1869 redundant stores that we have created. */
1870 if (GET_CODE (next) == CALL_INSN
1871 && GET_CODE (body) == SET
1872 && GET_CODE (SET_DEST (body)) == REG
1873 && (n = find_reg_note (temp, REG_EQUAL,
1874 NULL_RTX)))
1875 {
1876 fn_reg = SET_SRC (body);
1877 if (GET_CODE (fn_reg) != REG)
1878 fn_reg = SET_DEST (body);
1879 fn_address = XEXP (n, 0);
1880 fn_address_insn = temp;
1881 }
1882 /* We have the call insn.
1883 If it uses the register we suspect it might,
1884 load it with the correct address directly. */
1885 if (GET_CODE (temp) == CALL_INSN
1886 && fn_address != 0
1887 && reg_referenced_p (fn_reg, body))
1888 emit_insn_after (gen_move_insn (fn_reg,
1889 fn_address),
1890 fn_address_insn);
1891
1892 if (GET_CODE (temp) == CALL_INSN)
1893 {
1894 i1 = emit_call_insn_before (body, loop_start);
1895 /* Because the USAGE information potentially
1896 contains objects other than hard registers
1897 we need to copy it. */
1898 if (CALL_INSN_FUNCTION_USAGE (temp))
1899 CALL_INSN_FUNCTION_USAGE (i1)
1900 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
1901 }
1902 else
1903 i1 = emit_insn_before (body, loop_start);
1904 if (first == 0)
1905 first = i1;
1906 if (temp == fn_address_insn)
1907 fn_address_insn = i1;
1908 REG_NOTES (i1) = REG_NOTES (temp);
1909 delete_insn (temp);
1910 }
1911 }
1912 if (m->savemode != VOIDmode)
1913 {
1914 /* P sets REG to zero; but we should clear only
1915 the bits that are not covered by the mode
1916 m->savemode. */
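/* E.g., if m->savemode is an 8-bit mode, the mask built below is
   (1 << 8) - 1 == 0xff; the AND keeps the low byte of REG and
   clears the rest. */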
1917 rtx reg = m->set_dest;
1918 rtx sequence;
1919 rtx tem;
1920
1921 start_sequence ();
1922 tem = expand_binop
1923 (GET_MODE (reg), and_optab, reg,
1924 GEN_INT ((((HOST_WIDE_INT) 1
1925 << GET_MODE_BITSIZE (m->savemode)))
1926 - 1),
1927 reg, 1, OPTAB_LIB_WIDEN);
1928 if (tem == 0)
1929 abort ();
1930 if (tem != reg)
1931 emit_move_insn (reg, tem);
1932 sequence = gen_sequence ();
1933 end_sequence ();
1934 i1 = emit_insn_before (sequence, loop_start);
1935 }
1936 else if (GET_CODE (p) == CALL_INSN)
1937 {
1938 i1 = emit_call_insn_before (PATTERN (p), loop_start);
1939 /* Because the USAGE information potentially
1940 contains objects other than hard registers
1941 we need to copy it. */
1942 if (CALL_INSN_FUNCTION_USAGE (p))
1943 CALL_INSN_FUNCTION_USAGE (i1)
1944 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
1945 }
1946 else
1947 i1 = emit_insn_before (PATTERN (p), loop_start);
1948
1949 REG_NOTES (i1) = REG_NOTES (p);
1950
1951 /* If there is a REG_EQUAL note present whose value is
1952 not loop invariant, then delete it, since it may
1953 cause problems with later optimization passes.
1954 It is possible for cse to create such notes
1955 like this as a result of record_jump_cond. */
1956
1957 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
1958 && ! invariant_p (XEXP (temp, 0)))
1959 remove_note (i1, temp);
1960
1961 if (new_start == 0)
1962 new_start = i1;
1963
1964 if (loop_dump_stream)
1965 fprintf (loop_dump_stream, " moved to %d",
1966 INSN_UID (i1));
1967
1968 #if 0
1969 /* This isn't needed because REG_NOTES is copied
1970 below and is wrong since P might be a PARALLEL. */
1971 if (REG_NOTES (i1) == 0
1972 && ! m->partial /* But not if it's a zero-extend clr. */
1973 && ! m->global /* and not if used outside the loop
1974 (since it might get set outside). */
1975 && CONSTANT_P (SET_SRC (PATTERN (p))))
1976 REG_NOTES (i1)
1977 = gen_rtx_EXPR_LIST (REG_EQUAL,
1978 SET_SRC (PATTERN (p)),
1979 REG_NOTES (i1));
1980 #endif
1981
1982 /* If library call, now fix the REG_NOTES that contain
1983 insn pointers, namely REG_LIBCALL on FIRST
1984 and REG_RETVAL on I1. */
1985 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
1986 {
1987 XEXP (temp, 0) = first;
1988 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
1989 XEXP (temp, 0) = i1;
1990 }
1991
1992 delete_insn (p);
1993 do p = NEXT_INSN (p);
1994 while (p && GET_CODE (p) == NOTE);
1995 }
1996
1997 /* The more regs we move, the less we like moving them. */
1998 threshold -= 3;
1999 }
2000
2001 /* Any other movable that loads the same register
2002 MUST be moved. */
2003 already_moved[regno] = 1;
2004
2005 /* This reg has been moved out of one loop. */
2006 moved_once[regno] = 1;
2007
2008 /* The reg set here is now invariant. */
2009 if (! m->partial)
2010 n_times_set[regno] = 0;
2011
2012 m->done = 1;
2013
2014 /* Change the length-of-life info for the register
2015 to say it lives at least the full length of this loop.
2016 This will help guide optimizations in outer loops. */
2017
2018 if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
2019 /* This is the old insn before all the moved insns.
2020 We can't use the moved insn because it is out of range
2021 in uid_luid. Only the old insns have luids. */
2022 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2023 if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
2024 REGNO_LAST_UID (regno) = INSN_UID (end);
2025
2026 /* Combine with this moved insn any other matching movables. */
2027
2028 if (! m->partial)
2029 for (m1 = movables; m1; m1 = m1->next)
2030 if (m1->match == m)
2031 {
2032 rtx temp;
2033
2034 /* Schedule the reg loaded by M1
2035 for replacement so that it shares the reg of M.
2036 If the modes differ (only possible in restricted
2037 circumstances), make a SUBREG. */
2038 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2039 reg_map[m1->regno] = m->set_dest;
2040 else
2041 reg_map[m1->regno]
2042 = gen_lowpart_common (GET_MODE (m1->set_dest),
2043 m->set_dest);
2044
2045 /* Get rid of the matching insn
2046 and prevent further processing of it. */
2047 m1->done = 1;
2048
2049 /* If library call, delete all insns except the last,
2050    which is deleted below. */
2051 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2052 NULL_RTX)))
2053 {
2054 for (temp = XEXP (temp, 0); temp != m1->insn;
2055 temp = NEXT_INSN (temp))
2056 delete_insn (temp);
2057 }
2058 delete_insn (m1->insn);
2059
2060 /* Any other movable that loads the same register
2061 MUST be moved. */
2062 already_moved[m1->regno] = 1;
2063
2064 /* The reg merged here is now invariant,
2065 if the reg it matches is invariant. */
2066 if (! m->partial)
2067 n_times_set[m1->regno] = 0;
2068 }
2069 }
2070 else if (loop_dump_stream)
2071 fprintf (loop_dump_stream, "not desirable");
2072 }
2073 else if (loop_dump_stream && !m->match)
2074 fprintf (loop_dump_stream, "not safe");
2075
2076 if (loop_dump_stream)
2077 fprintf (loop_dump_stream, "\n");
2078 }
2079
2080 if (new_start == 0)
2081 new_start = loop_start;
2082
2083 /* Go through all the instructions in the loop, making
2084 all the register substitutions scheduled in REG_MAP. */
2085 for (p = new_start; p != end; p = NEXT_INSN (p))
2086 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2087 || GET_CODE (p) == CALL_INSN)
2088 {
2089 replace_regs (PATTERN (p), reg_map, nregs, 0);
2090 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2091 INSN_CODE (p) = -1;
2092 }
2093 }
2094 \f
2095 #if 0
2096 /* Scan X and replace the address of any MEM in it with ADDR.
2097 REG is the address that MEM should have before the replacement. */
2098
2099 static void
2100 replace_call_address (x, reg, addr)
2101 rtx x, reg, addr;
2102 {
2103 register enum rtx_code code;
2104 register int i;
2105 register char *fmt;
2106
2107 if (x == 0)
2108 return;
2109 code = GET_CODE (x);
2110 switch (code)
2111 {
2112 case PC:
2113 case CC0:
2114 case CONST_INT:
2115 case CONST_DOUBLE:
2116 case CONST:
2117 case SYMBOL_REF:
2118 case LABEL_REF:
2119 case REG:
2120 return;
2121
2122 case SET:
2123 /* Short cut for very common case. */
2124 replace_call_address (XEXP (x, 1), reg, addr);
2125 return;
2126
2127 case CALL:
2128 /* Short cut for very common case. */
2129 replace_call_address (XEXP (x, 0), reg, addr);
2130 return;
2131
2132 case MEM:
2133 /* If this MEM uses a reg other than the one we expected,
2134 something is wrong. */
2135 if (XEXP (x, 0) != reg)
2136 abort ();
2137 XEXP (x, 0) = addr;
2138 return;
2139
2140 default:
2141 break;
2142 }
2143
2144 fmt = GET_RTX_FORMAT (code);
2145 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2146 {
2147 if (fmt[i] == 'e')
2148 replace_call_address (XEXP (x, i), reg, addr);
2149 if (fmt[i] == 'E')
2150 {
2151 register int j;
2152 for (j = 0; j < XVECLEN (x, i); j++)
2153 replace_call_address (XVECEXP (x, i, j), reg, addr);
2154 }
2155 }
2156 }
2157 #endif
2158 \f
2159 /* Return the number of memory refs to addresses that vary
2160 in the rtx X. */
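/* E.g. (sketch): (mem (reg 60)) where reg 60 is not invariant counts
   as 1; a doubly indirect (mem (mem (reg 60))) counts both the outer
   and the inner reference, giving 2. */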
2161
2162 static int
2163 count_nonfixed_reads (x)
2164 rtx x;
2165 {
2166 register enum rtx_code code;
2167 register int i;
2168 register char *fmt;
2169 int value;
2170
2171 if (x == 0)
2172 return 0;
2173
2174 code = GET_CODE (x);
2175 switch (code)
2176 {
2177 case PC:
2178 case CC0:
2179 case CONST_INT:
2180 case CONST_DOUBLE:
2181 case CONST:
2182 case SYMBOL_REF:
2183 case LABEL_REF:
2184 case REG:
2185 return 0;
2186
2187 case MEM:
2188 return ((invariant_p (XEXP (x, 0)) != 1)
2189 + count_nonfixed_reads (XEXP (x, 0)));
2190
2191 default:
2192 break;
2193 }
2194
2195 value = 0;
2196 fmt = GET_RTX_FORMAT (code);
2197 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2198 {
2199 if (fmt[i] == 'e')
2200 value += count_nonfixed_reads (XEXP (x, i));
2201 if (fmt[i] == 'E')
2202 {
2203 register int j;
2204 for (j = 0; j < XVECLEN (x, i); j++)
2205 value += count_nonfixed_reads (XVECEXP (x, i, j));
2206 }
2207 }
2208 return value;
2209 }
2210
2211 \f
2212 #if 0
2213 /* P is an instruction that sets a register to the result of a ZERO_EXTEND.
2214 Replace it with an instruction to load just the low bytes
2215 if the machine supports such an instruction,
2216 and insert above LOOP_START an instruction to clear the register. */
2217
2218 static void
2219 constant_high_bytes (p, loop_start)
2220 rtx p, loop_start;
2221 {
2222 register rtx new;
2223 register int insn_code_number;
2224
2225 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2226 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2227
2228 new = gen_rtx_SET (VOIDmode,
2229 gen_rtx_STRICT_LOW_PART (VOIDmode,
2230 gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2231 SET_DEST (PATTERN (p)),
2232 0)),
2233 XEXP (SET_SRC (PATTERN (p)), 0));
2234 insn_code_number = recog (new, p);
2235
2236 if (insn_code_number)
2237 {
2238 register int i;
2239
2240 /* Clear destination register before the loop. */
2241 emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
2242 const0_rtx),
2243 loop_start);
2244
2245 /* Inside the loop, just load the low part. */
2246 PATTERN (p) = new;
2247 }
2248 }
2249 #endif
2250 \f
2251 /* Scan a loop setting the variables `unknown_address_altered',
2252 `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
2253 and `loop_has_volatile'.
2254 Also, fill in the array `loop_store_mems'. */
2255
2256 static void
2257 prescan_loop (start, end)
2258 rtx start, end;
2259 {
2260 register int level = 1;
2261 register rtx insn;
2262
2263 unknown_address_altered = 0;
2264 loop_has_call = 0;
2265 loop_has_volatile = 0;
2266 loop_store_mems_idx = 0;
2267
2268 num_mem_sets = 0;
2269 loops_enclosed = 1;
2270 loop_continue = 0;
2271
2272 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2273 insn = NEXT_INSN (insn))
2274 {
2275 if (GET_CODE (insn) == NOTE)
2276 {
2277 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2278 {
2279 ++level;
2280 /* Count number of loops contained in this one. */
2281 loops_enclosed++;
2282 }
2283 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2284 {
2285 --level;
2286 if (level == 0)
2287 {
2288 end = insn;
2289 break;
2290 }
2291 }
2292 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2293 {
2294 if (level == 1)
2295 loop_continue = insn;
2296 }
2297 }
2298 else if (GET_CODE (insn) == CALL_INSN)
2299 {
2300 if (! CONST_CALL_P (insn))
2301 unknown_address_altered = 1;
2302 loop_has_call = 1;
2303 }
2304 else
2305 {
2306 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2307 {
2308 if (volatile_refs_p (PATTERN (insn)))
2309 loop_has_volatile = 1;
2310
2311 note_stores (PATTERN (insn), note_addr_stored);
2312 }
2313 }
2314 }
2315 }
2316 \f
2317 /* Scan the function looking for loops. Record the start and end of each loop.
2318 Also mark as invalid loops any loops that contain a setjmp or are branched
2319 to from outside the loop. */
2320
2321 static void
2322 find_and_verify_loops (f)
2323 rtx f;
2324 {
2325 rtx insn, label;
2326 int current_loop = -1;
2327 int next_loop = -1;
2328 int loop;
2329
2330 /* If there are jumps to undefined labels,
2331 treat them as jumps out of any/all loops.
2332 This also avoids writing past end of tables when there are no loops. */
2333 uid_loop_num[0] = -1;
2334
2335 /* Find boundaries of loops, mark which loops are contained within
2336 loops, and invalidate loops that have setjmp. */
2337
2338 for (insn = f; insn; insn = NEXT_INSN (insn))
2339 {
2340 if (GET_CODE (insn) == NOTE)
2341 switch (NOTE_LINE_NUMBER (insn))
2342 {
2343 case NOTE_INSN_LOOP_BEG:
2344 loop_number_loop_starts[++next_loop] = insn;
2345 loop_number_loop_ends[next_loop] = 0;
2346 loop_outer_loop[next_loop] = current_loop;
2347 loop_invalid[next_loop] = 0;
2348 loop_number_exit_labels[next_loop] = 0;
2349 loop_number_exit_count[next_loop] = 0;
2350 current_loop = next_loop;
2351 break;
2352
2353 case NOTE_INSN_SETJMP:
2354 /* In this case, we must invalidate our current loop and any
2355 enclosing loop. */
2356 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2357 {
2358 loop_invalid[loop] = 1;
2359 if (loop_dump_stream)
2360 fprintf (loop_dump_stream,
2361 "\nLoop at %d ignored due to setjmp.\n",
2362 INSN_UID (loop_number_loop_starts[loop]));
2363 }
2364 break;
2365
2366 case NOTE_INSN_LOOP_END:
2367 if (current_loop == -1)
2368 abort ();
2369
2370 loop_number_loop_ends[current_loop] = insn;
2371 current_loop = loop_outer_loop[current_loop];
2372 break;
2373
2374 default:
2375 break;
2376 }
2377
2378 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2379 enclosing loop, but this doesn't matter. */
2380 uid_loop_num[INSN_UID (insn)] = current_loop;
2381 }
2382
2383 /* Any loop containing a label used in an initializer must be invalidated,
2384 because it can be jumped into from anywhere. */
2385
2386 for (label = forced_labels; label; label = XEXP (label, 1))
2387 {
2388 int loop_num;
2389
2390 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2391 loop_num != -1;
2392 loop_num = loop_outer_loop[loop_num])
2393 loop_invalid[loop_num] = 1;
2394 }
2395
2396 /* Any loop containing a label used for an exception handler must be
2397 invalidated, because it can be jumped into from anywhere. */
2398
2399 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2400 {
2401 int loop_num;
2402
2403 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2404 loop_num != -1;
2405 loop_num = loop_outer_loop[loop_num])
2406 loop_invalid[loop_num] = 1;
2407 }
2408
2409 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2410 loop that it is not contained within, that loop is marked invalid.
2411 If any INSN or CALL_INSN uses a label's address, then the loop containing
2412 that label is marked invalid, because it could be jumped into from
2413 anywhere.
2414
2415 Also look for blocks of code ending in an unconditional branch that
2416 exits the loop. If such a block is surrounded by a conditional
2417 branch around the block, move the block elsewhere (see below) and
2418 invert the jump to point to the code block. This may eliminate a
2419 label in our loop and will simplify processing by both us and a
2420 possible second cse pass. */
2421
2422 for (insn = f; insn; insn = NEXT_INSN (insn))
2423 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2424 {
2425 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2426
2427 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2428 {
2429 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2430 if (note)
2431 {
2432 int loop_num;
2433
2434 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2435 loop_num != -1;
2436 loop_num = loop_outer_loop[loop_num])
2437 loop_invalid[loop_num] = 1;
2438 }
2439 }
2440
2441 if (GET_CODE (insn) != JUMP_INSN)
2442 continue;
2443
2444 mark_loop_jump (PATTERN (insn), this_loop_num);
2445
2446 /* See if this is an unconditional branch outside the loop. */
2447 if (this_loop_num != -1
2448 && (GET_CODE (PATTERN (insn)) == RETURN
2449 || (simplejump_p (insn)
2450 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2451 != this_loop_num)))
2452 && get_max_uid () < max_uid_for_loop)
2453 {
2454 rtx p;
2455 rtx our_next = next_real_insn (insn);
2456 int dest_loop;
2457 int outer_loop = -1;
2458
2459 /* Go backwards until we reach the start of the loop, a label,
2460 or a JUMP_INSN. */
2461 for (p = PREV_INSN (insn);
2462 GET_CODE (p) != CODE_LABEL
2463 && ! (GET_CODE (p) == NOTE
2464 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2465 && GET_CODE (p) != JUMP_INSN;
2466 p = PREV_INSN (p))
2467 ;
2468
2469 /* Check for the case where we have a jump to an inner nested
2470 loop, and do not perform the optimization in that case. */
2471
2472 if (JUMP_LABEL (insn))
2473 {
2474 dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
2475 if (dest_loop != -1)
2476 {
2477 for (outer_loop = dest_loop; outer_loop != -1;
2478 outer_loop = loop_outer_loop[outer_loop])
2479 if (outer_loop == this_loop_num)
2480 break;
2481 }
2482 }
2483
2484 /* Make sure that the target of P is within the current loop. */
2485
2486 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2487 && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
2488 outer_loop = this_loop_num;
2489
2490 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2491 we have a block of code to try to move.
2492
2493 We look backward and then forward from the target of INSN
2494 to find a BARRIER at the same loop depth as the target.
2495 If we find such a BARRIER, we make a new label for the start
2496 of the block, invert the jump in P and point it to that label,
2497 and move the block of code to the spot we found. */
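/* Before/after sketch (labels are hypothetical):

     p:     if (cond) goto L2     ;; conditional jump around the block
     Lnew:  ...block...
            jmp Lexit             ;; INSN, the unconditional loop exit
            (barrier)
     L2:    ...rest of loop...

   becomes, after invert_jump and reorder_insns below:

     p:     if (!cond) goto Lnew
     L2:    ...rest of loop...
     ...
            (barrier)             ;; the BARRIER found (or made) at LOC
     Lnew:  ...block...
            jmp Lexit                                                 */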
2498
2499 if (outer_loop == -1
2500 && GET_CODE (p) == JUMP_INSN
2501 && JUMP_LABEL (p) != 0
2502 /* Just ignore jumps to labels that were never emitted.
2503 These always indicate compilation errors. */
2504 && INSN_UID (JUMP_LABEL (p)) != 0
2505 && condjump_p (p)
2506 && ! simplejump_p (p)
2507 && next_real_insn (JUMP_LABEL (p)) == our_next)
2508 {
2509 rtx target
2510 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2511 int target_loop_num = uid_loop_num[INSN_UID (target)];
2512 rtx loc;
2513
2514 for (loc = target; loc; loc = PREV_INSN (loc))
2515 if (GET_CODE (loc) == BARRIER
2516 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2517 break;
2518
2519 if (loc == 0)
2520 for (loc = target; loc; loc = NEXT_INSN (loc))
2521 if (GET_CODE (loc) == BARRIER
2522 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2523 break;
2524
2525 if (loc)
2526 {
2527 rtx cond_label = JUMP_LABEL (p);
2528 rtx new_label = get_label_after (p);
2529
2530 /* Ensure our label doesn't go away. */
2531 LABEL_NUSES (cond_label)++;
2532
2533 /* Verify that uid_loop_num is large enough and that
2534 we can invert P. */
2535 if (invert_jump (p, new_label))
2536 {
2537 rtx q, r;
2538
2539 /* If no suitable BARRIER was found, create a suitable
2540 one before TARGET. Since TARGET is a fall through
2541 path, we'll need to insert a jump around our block
2542 and add a BARRIER before TARGET.
2543
2544 This creates an extra unconditional jump outside
2545 the loop. However, the benefits of removing rarely
2546 executed instructions from inside the loop usually
2547 outweigh the cost of the extra unconditional jump
2548 outside the loop. */
2549 if (loc == 0)
2550 {
2551 rtx temp;
2552
2553 temp = gen_jump (JUMP_LABEL (insn));
2554 temp = emit_jump_insn_before (temp, target);
2555 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2556 LABEL_NUSES (JUMP_LABEL (insn))++;
2557 loc = emit_barrier_before (target);
2558 }
2559
2560 /* Include the BARRIER after INSN and copy the
2561 block after LOC. */
2562 new_label = squeeze_notes (new_label, NEXT_INSN (insn));
2563 reorder_insns (new_label, NEXT_INSN (insn), loc);
2564
2565 /* All those insns are now in TARGET_LOOP_NUM. */
2566 for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
2567 q = NEXT_INSN (q))
2568 uid_loop_num[INSN_UID (q)] = target_loop_num;
2569
2570 /* The label jumped to by INSN is no longer a loop exit.
2571 Unless INSN does not have a label (e.g., it is a
2572 RETURN insn), search loop_number_exit_labels to find
2573 its label_ref, and remove it. Also turn off
2574 LABEL_OUTSIDE_LOOP_P bit. */
2575 if (JUMP_LABEL (insn))
2576 {
2577 int loop_num;
2578
2579 for (q = 0,
2580 r = loop_number_exit_labels[this_loop_num];
2581 r; q = r, r = LABEL_NEXTREF (r))
2582 if (XEXP (r, 0) == JUMP_LABEL (insn))
2583 {
2584 LABEL_OUTSIDE_LOOP_P (r) = 0;
2585 if (q)
2586 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2587 else
2588 loop_number_exit_labels[this_loop_num]
2589 = LABEL_NEXTREF (r);
2590 break;
2591 }
2592
2593 for (loop_num = this_loop_num;
2594 loop_num != -1 && loop_num != target_loop_num;
2595 loop_num = loop_outer_loop[loop_num])
2596 loop_number_exit_count[loop_num]--;
2597
2598 /* If we didn't find it, then something is wrong. */
2599 if (! r)
2600 abort ();
2601 }
2602
2603 /* P is now a jump outside the loop, so it must be put
2604 in loop_number_exit_labels, and marked as such.
2605 The easiest way to do this is to just call
2606 mark_loop_jump again for P. */
2607 mark_loop_jump (PATTERN (p), this_loop_num);
2608
2609 /* If INSN now jumps to the insn after it,
2610 delete INSN. */
2611 if (JUMP_LABEL (insn) != 0
2612 && (next_real_insn (JUMP_LABEL (insn))
2613 == next_real_insn (insn)))
2614 delete_insn (insn);
2615 }
2616
2617 /* Continue the loop after where the conditional
2618 branch used to jump, since the only branch insn
2619 in the block (if it still remains) is an inter-loop
2620 branch and hence needs no processing. */
2621 insn = NEXT_INSN (cond_label);
2622
2623 if (--LABEL_NUSES (cond_label) == 0)
2624 delete_insn (cond_label);
2625
2626 /* This loop will be continued with NEXT_INSN (insn). */
2627 insn = PREV_INSN (insn);
2628 }
2629 }
2630 }
2631 }
2632 }
2633
2634 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2635 loops it is contained in, mark the target loop invalid.
2636
2637 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2638
2639 static void
2640 mark_loop_jump (x, loop_num)
2641 rtx x;
2642 int loop_num;
2643 {
2644 int dest_loop;
2645 int outer_loop;
2646 int i;
2647
2648 switch (GET_CODE (x))
2649 {
2650 case PC:
2651 case USE:
2652 case CLOBBER:
2653 case REG:
2654 case MEM:
2655 case CONST_INT:
2656 case CONST_DOUBLE:
2657 case RETURN:
2658 return;
2659
2660 case CONST:
2661 /* There could be a label reference in here. */
2662 mark_loop_jump (XEXP (x, 0), loop_num);
2663 return;
2664
2665 case PLUS:
2666 case MINUS:
2667 case MULT:
2668 mark_loop_jump (XEXP (x, 0), loop_num);
2669 mark_loop_jump (XEXP (x, 1), loop_num);
2670 return;
2671
2672 case SIGN_EXTEND:
2673 case ZERO_EXTEND:
2674 mark_loop_jump (XEXP (x, 0), loop_num);
2675 return;
2676
2677 case LABEL_REF:
2678 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
2679
2680 /* Link together all labels that branch outside the loop. This
2681 is used by final_[bg]iv_value and the loop unrolling code. Also
2682 mark this LABEL_REF so we know that this branch should predict
2683 false. */
2684
2685 /* A check to make sure the label is not in an inner nested loop,
2686 since this does not count as a loop exit. */
2687 if (dest_loop != -1)
2688 {
2689 for (outer_loop = dest_loop; outer_loop != -1;
2690 outer_loop = loop_outer_loop[outer_loop])
2691 if (outer_loop == loop_num)
2692 break;
2693 }
2694 else
2695 outer_loop = -1;
2696
2697 if (loop_num != -1 && outer_loop == -1)
2698 {
2699 LABEL_OUTSIDE_LOOP_P (x) = 1;
2700 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2701 loop_number_exit_labels[loop_num] = x;
2702
2703 for (outer_loop = loop_num;
2704 outer_loop != -1 && outer_loop != dest_loop;
2705 outer_loop = loop_outer_loop[outer_loop])
2706 loop_number_exit_count[outer_loop]++;
2707 }
2708
2709 /* If this is inside a loop, but not in the current loop or one enclosed
2710 by it, it invalidates at least one loop. */
2711
2712 if (dest_loop == -1)
2713 return;
2714
2715 /* We must invalidate every nested loop containing the target of this
2716 label, except those that also contain the jump insn. */
2717
2718 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
2719 {
2720 /* Stop when we reach a loop that also contains the jump insn. */
2721 for (outer_loop = loop_num; outer_loop != -1;
2722 outer_loop = loop_outer_loop[outer_loop])
2723 if (dest_loop == outer_loop)
2724 return;
2725
2726 /* If we get here, we know we need to invalidate a loop. */
2727 if (loop_dump_stream && ! loop_invalid[dest_loop])
2728 fprintf (loop_dump_stream,
2729 "\nLoop at %d ignored due to multiple entry points.\n",
2730 INSN_UID (loop_number_loop_starts[dest_loop]));
2731
2732 loop_invalid[dest_loop] = 1;
2733 }
2734 return;
2735
2736 case SET:
2737 /* If this is not setting pc, ignore. */
2738 if (SET_DEST (x) == pc_rtx)
2739 mark_loop_jump (SET_SRC (x), loop_num);
2740 return;
2741
2742 case IF_THEN_ELSE:
2743 mark_loop_jump (XEXP (x, 1), loop_num);
2744 mark_loop_jump (XEXP (x, 2), loop_num);
2745 return;
2746
2747 case PARALLEL:
2748 case ADDR_VEC:
2749 for (i = 0; i < XVECLEN (x, 0); i++)
2750 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
2751 return;
2752
2753 case ADDR_DIFF_VEC:
2754 for (i = 0; i < XVECLEN (x, 1); i++)
2755 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
2756 return;
2757
2758 default:
2759 /* Treat anything else (such as a symbol_ref)
2760 as a branch out of this loop, but not into any loop. */
2761
2762 if (loop_num != -1)
2763 {
2764 #ifdef HAIFA
2765 LABEL_OUTSIDE_LOOP_P (x) = 1;
2766 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2767 #endif /* HAIFA */
2768
2769 loop_number_exit_labels[loop_num] = x;
2770
2771 for (outer_loop = loop_num; outer_loop != -1;
2772 outer_loop = loop_outer_loop[outer_loop])
2773 loop_number_exit_count[outer_loop]++;
2774 }
2775 return;
2776 }
2777 }
2778 \f
2779 /* Return nonzero if there is a label in the range from
2780 insn INSN to and including the insn whose luid is END.
2781 INSN must have an assigned luid (i.e., it must not have
2782 been previously created by loop.c). */
2783
2784 static int
2785 labels_in_range_p (insn, end)
2786 rtx insn;
2787 int end;
2788 {
2789 while (insn && INSN_LUID (insn) <= end)
2790 {
2791 if (GET_CODE (insn) == CODE_LABEL)
2792 return 1;
2793 insn = NEXT_INSN (insn);
2794 }
2795
2796 return 0;
2797 }
2798
2799 /* Record that a memory reference X is being set. */
2800
2801 static void
2802 note_addr_stored (x)
2803 rtx x;
2804 {
2805 register int i;
2806
2807 if (x == 0 || GET_CODE (x) != MEM)
2808 return;
2809
2810 /* Count number of memory writes.
2811 This affects heuristics in strength_reduce. */
2812 num_mem_sets++;
2813
2814 /* BLKmode MEM means all memory is clobbered. */
2815 if (GET_MODE (x) == BLKmode)
2816 unknown_address_altered = 1;
2817
2818 if (unknown_address_altered)
2819 return;
2820
2821 for (i = 0; i < loop_store_mems_idx; i++)
2822 if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
2823 && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
2824 {
2825 /* We are storing at the same address as previously noted. Save the
2826 wider reference. */
2827 if (GET_MODE_SIZE (GET_MODE (x))
2828 > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
2829 loop_store_mems[i] = x;
2830 break;
2831 }
2832
2833 if (i == NUM_STORES)
2834 unknown_address_altered = 1;
2835
2836 else if (i == loop_store_mems_idx)
2837 loop_store_mems[loop_store_mems_idx++] = x;
2838 }
2839 \f
2840 /* Return nonzero if the rtx X is invariant over the current loop.
2841
2842 The value is 2 if we refer to something only conditionally invariant.
2843
2844 If `unknown_address_altered' is nonzero, no memory ref is invariant.
2845 Otherwise, a memory ref is invariant if it does not conflict with
2846 anything stored in `loop_store_mems'. */
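/* Callers that need unconditional invariance test for the exact value 1
   (e.g. the `1 == invariant_p (m->set_src)' test in move_movables
   above); the value 2 is accepted only where conditional invariance is
   tolerable. */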
2847
2848 int
2849 invariant_p (x)
2850 register rtx x;
2851 {
2852 register int i;
2853 register enum rtx_code code;
2854 register char *fmt;
2855 int conditional = 0;
2856
2857 if (x == 0)
2858 return 1;
2859 code = GET_CODE (x);
2860 switch (code)
2861 {
2862 case CONST_INT:
2863 case CONST_DOUBLE:
2864 case SYMBOL_REF:
2865 case CONST:
2866 return 1;
2867
2868 case LABEL_REF:
2869 /* A LABEL_REF is normally invariant, however, if we are unrolling
2870 loops, and this label is inside the loop, then it isn't invariant.
2871 This is because each unrolled copy of the loop body will have
2872 a copy of this label. If this was invariant, then an insn loading
2873 the address of this label into a register might get moved outside
2874 the loop, and then each loop body would end up using the same label.
2875
2876 We don't know the loop bounds here though, so just fail for all
2877 labels. */
2878 if (flag_unroll_loops)
2879 return 0;
2880 else
2881 return 1;
2882
2883 case PC:
2884 case CC0:
2885 case UNSPEC_VOLATILE:
2886 return 0;
2887
2888 case REG:
2889 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
2890 since the reg might be set by initialization within the loop. */
2891
2892 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
2893 || x == arg_pointer_rtx)
2894 && ! current_function_has_nonlocal_goto)
2895 return 1;
2896
2897 if (loop_has_call
2898 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
2899 return 0;
2900
2901 if (n_times_set[REGNO (x)] < 0)
2902 return 2;
2903
2904 return n_times_set[REGNO (x)] == 0;
2905
2906 case MEM:
2907 /* Volatile memory references must be rejected. Do this before
2908 checking for read-only items, so that volatile read-only items
2909 will be rejected also. */
2910 if (MEM_VOLATILE_P (x))
2911 return 0;
2912
2913 /* Read-only items (such as constants in a constant pool) are
2914 invariant if their address is. */
2915 if (RTX_UNCHANGING_P (x))
2916 break;
2917
2918 /* If we filled the table (or had a subroutine call), any location
2919 in memory could have been clobbered. */
2920 if (unknown_address_altered)
2921 return 0;
2922
2923 /* See if there is any dependence between a store and this load. */
2924 for (i = loop_store_mems_idx - 1; i >= 0; i--)
2925 if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
2926 return 0;
2927
2928 /* It's not invalidated by a store in memory,
2929    but we must still verify the address is invariant. */
2930 break;
2931
2932 case ASM_OPERANDS:
2933 /* Don't mess with insns declared volatile. */
2934 if (MEM_VOLATILE_P (x))
2935 return 0;
2936 break;
2937
2938 default:
2939 break;
2940 }
2941
2942 fmt = GET_RTX_FORMAT (code);
2943 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2944 {
2945 if (fmt[i] == 'e')
2946 {
2947 int tem = invariant_p (XEXP (x, i));
2948 if (tem == 0)
2949 return 0;
2950 if (tem == 2)
2951 conditional = 1;
2952 }
2953 else if (fmt[i] == 'E')
2954 {
2955 register int j;
2956 for (j = 0; j < XVECLEN (x, i); j++)
2957 {
2958 int tem = invariant_p (XVECEXP (x, i, j));
2959 if (tem == 0)
2960 return 0;
2961 if (tem == 2)
2962 conditional = 1;
2963 }
2964
2965 }
2966 }
2967
2968 return 1 + conditional;
2969 }
2970
2971 \f
2972 /* Return nonzero if all the insns in the loop that set REG
2973 are INSN and the immediately following insns,
2974 and if each of those insns sets REG in an invariant way
2975 (not counting uses of REG in them).
2976
2977 The value is 2 if some of these insns are only conditionally invariant.
2978
2979 We assume that INSN itself is the first set of REG
2980 and that its source is invariant. */
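/* E.g. (sketch): a reg initialized by two consecutive sets, as when a
   symbolic address is built in two pieces,

     (set (reg 70) (high (symbol_ref "x")))
     (set (reg 70) (lo_sum (reg 70) (symbol_ref "x")))

   is invariant only if each source is; uses of reg 70 itself do not
   count against it, since n_times_set for it is cleared below. */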
2981
2982 static int
2983 consec_sets_invariant_p (reg, n_sets, insn)
2984 int n_sets;
2985 rtx reg, insn;
2986 {
2987 register rtx p = insn;
2988 register int regno = REGNO (reg);
2989 rtx temp;
2990 /* Number of sets we have to insist on finding after INSN. */
2991 int count = n_sets - 1;
2992 int old = n_times_set[regno];
2993 int value = 0;
2994 int this;
2995
2996 /* If N_SETS hit the limit, we can't rely on its value. */
2997 if (n_sets == 127)
2998 return 0;
2999
3000 n_times_set[regno] = 0;
3001
3002 while (count > 0)
3003 {
3004 register enum rtx_code code;
3005 rtx set;
3006
3007 p = NEXT_INSN (p);
3008 code = GET_CODE (p);
3009
3010 /* If library call, skip to end of it. */
3011 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3012 p = XEXP (temp, 0);
3013
3014 this = 0;
3015 if (code == INSN
3016 && (set = single_set (p))
3017 && GET_CODE (SET_DEST (set)) == REG
3018 && REGNO (SET_DEST (set)) == regno)
3019 {
3020 this = invariant_p (SET_SRC (set));
3021 if (this != 0)
3022 value |= this;
3023 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3024 {
3025 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3026 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3027 notes are OK. */
3028 this = (CONSTANT_P (XEXP (temp, 0))
3029 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3030 && invariant_p (XEXP (temp, 0))));
3031 if (this != 0)
3032 value |= this;
3033 }
3034 }
3035 if (this != 0)
3036 count--;
3037 else if (code != NOTE)
3038 {
3039 n_times_set[regno] = old;
3040 return 0;
3041 }
3042 }
3043
3044 n_times_set[regno] = old;
3045 /* If invariant_p ever returned 2, we return 2. */
3046 return 1 + (value & 2);
3047 }
3048
3049 #if 0
3050 /* I don't think this condition is sufficient to allow INSN
3051 to be moved, so we no longer test it. */
3052
3053 /* Return 1 if all insns in the basic block of INSN and following INSN
3054 that set REG are invariant according to TABLE. */
3055
3056 static int
3057 all_sets_invariant_p (reg, insn, table)
3058 rtx reg, insn;
3059 short *table;
3060 {
3061 register rtx p = insn;
3062 register int regno = REGNO (reg);
3063
3064 while (1)
3065 {
3066 register enum rtx_code code;
3067 p = NEXT_INSN (p);
3068 code = GET_CODE (p);
3069 if (code == CODE_LABEL || code == JUMP_INSN)
3070 return 1;
3071 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3072 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3073 && REGNO (SET_DEST (PATTERN (p))) == regno)
3074 {
3075 if (!invariant_p (SET_SRC (PATTERN (p)), table))
3076 return 0;
3077 }
3078 }
3079 }
3080 #endif /* 0 */
3081 \f
3082 /* Look at all uses (not sets) of registers in X. For each, if it is
3083 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3084 a different insn, set USAGE[REGNO] to const0_rtx. */
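/* E.g.: two uses of reg 70 within a single insn leave USAGE[70] equal
   to that insn; a use in a second insn demotes it to const0_rtx; a reg
   never used stays 0. */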
3085
3086 static void
3087 find_single_use_in_loop (insn, x, usage)
3088 rtx insn;
3089 rtx x;
3090 rtx *usage;
3091 {
3092 enum rtx_code code = GET_CODE (x);
3093 char *fmt = GET_RTX_FORMAT (code);
3094 int i, j;
3095
3096 if (code == REG)
3097 usage[REGNO (x)]
3098 = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
3099 ? const0_rtx : insn;
3100
3101 else if (code == SET)
3102 {
3103 /* Don't count SET_DEST if it is a REG; otherwise count things
3104 in SET_DEST because if a register is partially modified, it won't
3105 show up as a potential movable so we don't care how USAGE is set
3106 for it. */
3107 if (GET_CODE (SET_DEST (x)) != REG)
3108 find_single_use_in_loop (insn, SET_DEST (x), usage);
3109 find_single_use_in_loop (insn, SET_SRC (x), usage);
3110 }
3111 else
3112 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3113 {
3114 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3115 find_single_use_in_loop (insn, XEXP (x, i), usage);
3116 else if (fmt[i] == 'E')
3117 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3118 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
3119 }
3120 }
3121 \f
3122 /* Increment N_TIMES_SET at the index of each register
3123 that is modified by an insn between FROM and TO.
3124 If the value of an element of N_TIMES_SET becomes 127 or more,
3125 stop incrementing it, to avoid overflow.
3126
3127 Store in SINGLE_USAGE[I] the single insn in which register I is
3128 used, if it is only used once. Otherwise, it is set to 0 (for no
3129 uses) or const0_rtx for more than one use. This parameter may be zero,
3130 in which case this processing is not done.
3131
3132 Store in *COUNT_PTR the number of actual instructions
3133 in the loop. We use this to decide what is worth moving out. */
3134
3135 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
3136 In that case, it is the insn that last set reg n. */
3137
3138 static void
3139 count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
3140 register rtx from, to;
3141 char *may_not_move;
3142 rtx *single_usage;
3143 int *count_ptr;
3144 int nregs;
3145 {
3146 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
3147 register rtx insn;
3148 register int count = 0;
3149 register rtx dest;
3150
3151 bzero ((char *) last_set, nregs * sizeof (rtx));
3152 for (insn = from; insn != to; insn = NEXT_INSN (insn))
3153 {
3154 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3155 {
3156 ++count;
3157
3158 /* If requested, record registers that have exactly one use. */
3159 if (single_usage)
3160 {
3161 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
3162
3163 /* Include uses in REG_EQUAL notes. */
3164 if (REG_NOTES (insn))
3165 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
3166 }
3167
3168 if (GET_CODE (PATTERN (insn)) == CLOBBER
3169 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
3170 /* Don't move a reg that has an explicit clobber.
3171 We might do so sometimes, but it's not worth the pain. */
3172 may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;
3173
3174 if (GET_CODE (PATTERN (insn)) == SET
3175 || GET_CODE (PATTERN (insn)) == CLOBBER)
3176 {
3177 dest = SET_DEST (PATTERN (insn));
3178 while (GET_CODE (dest) == SUBREG
3179 || GET_CODE (dest) == ZERO_EXTRACT
3180 || GET_CODE (dest) == SIGN_EXTRACT
3181 || GET_CODE (dest) == STRICT_LOW_PART)
3182 dest = XEXP (dest, 0);
3183 if (GET_CODE (dest) == REG)
3184 {
3185 register int regno = REGNO (dest);
3186 /* If this is the first setting of this reg
3187 in current basic block, and it was set before,
3188 it must be set in two basic blocks, so it cannot
3189 be moved out of the loop. */
3190 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3191 may_not_move[regno] = 1;
3192 /* If this is not first setting in current basic block,
3193 see if reg was used in between previous one and this.
3194 If so, neither one can be moved. */
3195 if (last_set[regno] != 0
3196 && reg_used_between_p (dest, last_set[regno], insn))
3197 may_not_move[regno] = 1;
3198 if (n_times_set[regno] < 127)
3199 ++n_times_set[regno];
3200 last_set[regno] = insn;
3201 }
3202 }
3203 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3204 {
3205 register int i;
3206 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3207 {
3208 register rtx x = XVECEXP (PATTERN (insn), 0, i);
3209 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3210 /* Don't move a reg that has an explicit clobber.
3211 It's not worth the pain to try to do it correctly. */
3212 may_not_move[REGNO (XEXP (x, 0))] = 1;
3213
3214 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3215 {
3216 dest = SET_DEST (x);
3217 while (GET_CODE (dest) == SUBREG
3218 || GET_CODE (dest) == ZERO_EXTRACT
3219 || GET_CODE (dest) == SIGN_EXTRACT
3220 || GET_CODE (dest) == STRICT_LOW_PART)
3221 dest = XEXP (dest, 0);
3222 if (GET_CODE (dest) == REG)
3223 {
3224 register int regno = REGNO (dest);
3225 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3226 may_not_move[regno] = 1;
3227 if (last_set[regno] != 0
3228 && reg_used_between_p (dest, last_set[regno], insn))
3229 may_not_move[regno] = 1;
3230 if (n_times_set[regno] < 127)
3231 ++n_times_set[regno];
3232 last_set[regno] = insn;
3233 }
3234 }
3235 }
3236 }
3237 }
3238
3239 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3240 bzero ((char *) last_set, nregs * sizeof (rtx));
3241 }
3242 *count_ptr = count;
3243 }
3244 \f
3245 /* Given a loop that is bounded by LOOP_START and LOOP_END
3246 and that is entered at SCAN_START,
3247 return 1 if the register set in SET contained in insn INSN is used by
3248 any insn that precedes INSN in cyclic order starting
3249 from the loop entry point.
3250
3251 We don't want to use INSN_LUID here because if we restrict INSN to those
3252 that have a valid INSN_LUID, it means we cannot move an invariant out
3253 from an inner loop past two loops. */
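/* E.g.: for a loop entered at the bottom, SCAN_START lies after
   LOOP_START, so the scan below runs from SCAN_START to LOOP_END,
   wraps around to LOOP_START, and continues until it reaches INSN. */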
3254
3255 static int
3256 loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3257 rtx set, insn, loop_start, scan_start, loop_end;
3258 {
3259 rtx reg = SET_DEST (set);
3260 rtx p;
3261
3262 /* Scan forward checking for register usage. If we hit INSN, we
3263 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3264 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3265 {
3266 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3267 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3268 return 1;
3269
3270 if (p == loop_end)
3271 p = loop_start;
3272 }
3273
3274 return 0;
3275 }
3276 \f
3277 /* A "basic induction variable" or biv is a pseudo reg that is set
3278 (within this loop) only by incrementing or decrementing it. */
3279 /* A "general induction variable" or giv is a pseudo reg whose
3280 value is a linear function of a biv. */
3281
3282 /* Bivs are recognized by `basic_induction_var';
3283 Givs by `general_induction_var'. */
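/* For example, in a loop like

     for (i = 0; i < n; i++)
       a[i] = 0;

   the counter `i' is a biv (its only sets inside the loop have the
   form i = i + 1), while the address of a[i], computed as a + i * 4
   where `int' is 4 bytes wide, is a giv: a linear function of the
   biv. */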
3284
3285 /* Indexed by register number, indicates whether or not register is an
3286 induction variable, and if so what type. */
3287
3288 enum iv_mode *reg_iv_type;
3289
3290 /* Indexed by register number, contains pointer to `struct induction'
3291 if register is an induction variable. This holds general info for
3292 all induction variables. */
3293
3294 struct induction **reg_iv_info;
3295
3296 /* Indexed by register number, contains pointer to `struct iv_class'
3297 if register is a basic induction variable. This holds info describing
3298 the class (a related group) of induction variables that the biv belongs
3299 to. */
3300
3301 struct iv_class **reg_biv_class;
3302
3303 /* The head of a list which links together (via the next field)
3304 every iv class for the current loop. */
3305
3306 struct iv_class *loop_iv_list;
3307
3308 /* Communication with routines called via `note_stores'. */
3309
3310 static rtx note_insn;
3311
3312 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3313
3314 static rtx addr_placeholder;
3315
3316 /* ??? Unfinished optimizations, and possible future optimizations,
3317 for the strength reduction code. */
3318
3319 /* ??? There is one more optimization you might be interested in doing: to
3320 allocate pseudo registers for frequently-accessed memory locations.
3321 If the same memory location is referenced each time around, it might
3322 be possible to copy it into a register before and out after.
3323 This is especially useful when the memory location is a variable which
3324 is in a stack slot because somewhere its address is taken. If the
3325 loop doesn't contain a function call and the variable isn't volatile,
3326 it is safe to keep the value in a register for the duration of the
3327 loop. One tricky thing is that the copying of the value back from the
3328 register has to be done on all exits from the loop. You need to check that
3329 all the exits from the loop go to the same place. */
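/* E.g. (a sketch of the idea): if `x' lives in a stack slot because
   its address was taken somewhere, a call-free loop such as

     while (n-- > 0)
       x += n;

   could load x into a pseudo once before the loop, use the register
   inside, and store it back on every exit -- hence the requirement
   above that all exits reach the same place. */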
3330
3331 /* ??? The interaction of biv elimination, and recognition of 'constant'
3332 bivs, may cause problems. */
3333
3334 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3335 performance problems.
3336
3337 Perhaps don't eliminate things that can be combined with an addressing
3338 mode. Find all givs that have the same biv, mult_val, and add_val;
3339 then for each giv, check to see if its only use dies in a following
3340 memory address. If so, generate a new memory address and check to see
3341 if it is valid. If it is valid, then store the modified memory address,
3342 otherwise, mark the giv as not done so that it will get its own iv. */
3343
3344 /* ??? Could try to optimize branches when it is known that a biv is always
3345 positive. */
3346
3347 /* ??? When replacing a biv in a compare insn, we should replace it with
3348 the closest giv so that an optimized branch can still be recognized
3349 by the combiner, e.g. the VAX acb insn. */
3350
3351 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3352 was rerun in loop_optimize whenever a register was added or moved.
3353 Also, some of the optimizations could be a little less conservative. */
3354 \f
3355 /* Perform strength reduction and induction variable elimination. */
3356
3357 /* Pseudo registers created during this function will be beyond the last
3358 valid index in several tables including n_times_set and regno_last_uid.
3359 This does not cause a problem here, because the added registers cannot be
3360 givs outside of their loop, and hence will never be reconsidered.
3361 But scan_loop must check regnos to make sure they are in bounds. */
3362
3363 static void
3364 strength_reduce (scan_start, end, loop_top, insn_count,
3365 loop_start, loop_end, unroll_p)
3366 rtx scan_start;
3367 rtx end;
3368 rtx loop_top;
3369 int insn_count;
3370 rtx loop_start;
3371 rtx loop_end;
3372 int unroll_p;
3373 {
3374 rtx p;
3375 rtx set;
3376 rtx inc_val;
3377 rtx mult_val;
3378 rtx dest_reg;
3379 /* This is 1 if current insn is not executed at least once for every loop
3380 iteration. */
3381 int not_every_iteration = 0;
3382 /* This is 1 if current insn may be executed more than once for every
3383 loop iteration. */
3384 int maybe_multiple = 0;
3385 /* Temporary list pointers for traversing loop_iv_list. */
3386 struct iv_class *bl, **backbl;
3387 /* Ratio of extra register life span we can justify
3388 for saving an instruction. More if loop doesn't call subroutines
3389 since in that case saving an insn makes more difference
3390 and more registers are available. */
3391 /* ??? could set this to last value of threshold in move_movables */
3392 int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
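/* E.g. (hypothetical numbers): with no call in the loop and
   n_non_fixed_regs == 29, threshold starts at 2 * (3 + 29) == 64. */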
3393 /* Map of pseudo-register replacements. */
3394 rtx *reg_map;
3395 int call_seen;
3396 rtx test;
3397 rtx end_insert_before;
3398 int loop_depth = 0;
3399
3400 reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
3401 * sizeof (enum iv_mode));
3402 bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode));
3403 reg_iv_info = (struct induction **)
3404 alloca (max_reg_before_loop * sizeof (struct induction *));
3405 bzero ((char *) reg_iv_info, (max_reg_before_loop
3406 * sizeof (struct induction *)));
3407 reg_biv_class = (struct iv_class **)
3408 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3409 bzero ((char *) reg_biv_class, (max_reg_before_loop
3410 * sizeof (struct iv_class *)));
3411
3412 loop_iv_list = 0;
3413 addr_placeholder = gen_reg_rtx (Pmode);
3414
3415 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3416 must be put before this insn, so that they will appear in the right
3417 order (i.e. loop order).
3418
3419 If loop_end is the end of the current function, then emit a
3420 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3421 dummy note insn. */
3422 if (NEXT_INSN (loop_end) != 0)
3423 end_insert_before = NEXT_INSN (loop_end);
3424 else
3425 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3426
3427 /* Scan through loop to find all possible bivs. */
3428
3429 p = scan_start;
3430 while (1)
3431 {
3432 p = NEXT_INSN (p);
3433 /* At end of a straight-in loop, we are done.
3434 At end of a loop entered at the bottom, scan the top. */
3435 if (p == scan_start)
3436 break;
3437 if (p == end)
3438 {
3439 if (loop_top != 0)
3440 p = loop_top;
3441 else
3442 break;
3443 if (p == scan_start)
3444 break;
3445 }
3446
3447 if (GET_CODE (p) == INSN
3448 && (set = single_set (p))
3449 && GET_CODE (SET_DEST (set)) == REG)
3450 {
3451 dest_reg = SET_DEST (set);
3452 if (REGNO (dest_reg) < max_reg_before_loop
3453 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3454 && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
3455 {
3456 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3457 dest_reg, p, &inc_val, &mult_val))
3458 {
3459 /* It is a possible basic induction variable.
3460 Create and initialize an induction structure for it. */
3461
3462 struct induction *v
3463 = (struct induction *) alloca (sizeof (struct induction));
3464
3465 record_biv (v, p, dest_reg, inc_val, mult_val,
3466 not_every_iteration, maybe_multiple);
3467 reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
3468 }
3469 else if (REGNO (dest_reg) < max_reg_before_loop)
3470 reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
3471 }
3472 }
3473
3474 /* Past CODE_LABEL, we get to insns that may be executed multiple
3475 times. The only way we can be sure that they can't is if every
3476 jump insn between here and the end of the loop either
3477 returns, exits the loop, is a forward jump, or is a jump
3478 to the loop start. */
3479
3480 if (GET_CODE (p) == CODE_LABEL)
3481 {
3482 rtx insn = p;
3483
3484 maybe_multiple = 0;
3485
3486 while (1)
3487 {
3488 insn = NEXT_INSN (insn);
3489 if (insn == scan_start)
3490 break;
3491 if (insn == end)
3492 {
3493 if (loop_top != 0)
3494 insn = loop_top;
3495 else
3496 break;
3497 if (insn == scan_start)
3498 break;
3499 }
3500
3501 if (GET_CODE (insn) == JUMP_INSN
3502 && GET_CODE (PATTERN (insn)) != RETURN
3503 && (! condjump_p (insn)
3504 || (JUMP_LABEL (insn) != 0
3505 && JUMP_LABEL (insn) != scan_start
3506 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
3507 || INSN_UID (insn) >= max_uid_for_loop
3508 || (INSN_LUID (JUMP_LABEL (insn))
3509 < INSN_LUID (insn))))))
3510 {
3511 maybe_multiple = 1;
3512 break;
3513 }
3514 }
3515 }
3516
3517 /* Past a jump, we get to insns for which we can't count
3518 on whether they will be executed during each iteration. */
3519 /* This code appears twice in strength_reduce. There is also similar
3520 code in scan_loop. */
3521 if (GET_CODE (p) == JUMP_INSN
3522 /* If we enter the loop in the middle, and scan around to the
3523 beginning, don't set not_every_iteration for that.
3524 This can be any kind of jump, since we want to know if insns
3525 will be executed if the loop is executed. */
3526 && ! (JUMP_LABEL (p) == loop_top
3527 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3528 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3529 {
3530 rtx label = 0;
3531
3532 /* If this is a jump outside the loop, then it also doesn't
3533 matter. Check to see if the target of this branch is on the
3534 loop_number_exit_labels list. */
3535
3536 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3537 label;
3538 label = LABEL_NEXTREF (label))
3539 if (XEXP (label, 0) == JUMP_LABEL (p))
3540 break;
3541
3542 if (! label)
3543 not_every_iteration = 1;
3544 }
3545
3546 else if (GET_CODE (p) == NOTE)
3547 {
3548 /* At the virtual top of a converted loop, insns are again known to
3549 be executed each iteration: logically, the loop begins here
3550 even though the exit code has been duplicated. */
3551 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3552 not_every_iteration = 0;
3553 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3554 loop_depth++;
3555 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3556 loop_depth--;
3557 }
3558
3559 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3560 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3561 or not an insn is known to be executed each iteration of the
3562 loop, whether or not any iterations are known to occur.
3563
3564 Therefore, if we have just passed a label and have no more labels
3565 between here and the test insn of the loop, we know these insns
3566 will be executed each iteration. */
3567
3568 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3569 && no_labels_between_p (p, loop_end))
3570 not_every_iteration = 0;
3571 }
3572
3573 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3574 Make a sanity check against n_times_set. */
3575 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3576 {
3577 if (reg_iv_type[bl->regno] != BASIC_INDUCT
3578 /* Above happens if register modified by subreg, etc. */
3579 /* Make sure it is not recognized as a basic induction var: */
3580 || n_times_set[bl->regno] != bl->biv_count
3581 /* If never incremented, it is an invariant that we decided not to
3582 move. So leave it alone. */
3583 || ! bl->incremented)
3584 {
3585 if (loop_dump_stream)
3586 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3587 bl->regno,
3588 (reg_iv_type[bl->regno] != BASIC_INDUCT
3589 ? "not induction variable"
3590 : (! bl->incremented ? "never incremented"
3591 : "count error")));
3592
3593 reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
3594 *backbl = bl->next;
3595 }
3596 else
3597 {
3598 backbl = &bl->next;
3599
3600 if (loop_dump_stream)
3601 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3602 }
3603 }
3604
3605 /* Exit if there are no bivs. */
3606 if (! loop_iv_list)
3607 {
3608 /* We can still unroll the loop anyway, but indicate that there is no
3609 strength reduction info available. */
3610 if (unroll_p)
3611 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);
3612
3613 return;
3614 }
3615
3616 /* Find initial value for each biv by searching backwards from loop_start,
3617 halting at first label. Also record any test condition. */
3618
3619 call_seen = 0;
3620 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3621 {
3622 note_insn = p;
3623
3624 if (GET_CODE (p) == CALL_INSN)
3625 call_seen = 1;
3626
3627 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3628 || GET_CODE (p) == CALL_INSN)
3629 note_stores (PATTERN (p), record_initial);
3630
3631 /* Record any test of a biv that branches around the loop if there is no
3632 store between it and the start of the loop. We only care about tests with
3633 constants and registers and only certain of those. */
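/* As a hypothetical illustration: if the insn just before the loop is

       if (i != 10) goto past_loop;

   where `past_loop' is the first real insn after the loop, then
   whenever the loop is entered we know i == 10, and the NE case
   below records that as the biv's initial value in bl->init_set.  */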
3634 if (GET_CODE (p) == JUMP_INSN
3635 && JUMP_LABEL (p) != 0
3636 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
3637 && (test = get_condition_for_loop (p)) != 0
3638 && GET_CODE (XEXP (test, 0)) == REG
3639 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3640 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
3641 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
3642 && bl->init_insn == 0)
3643 {
3644 /* If an NE test, we have an initial value! */
3645 if (GET_CODE (test) == NE)
3646 {
3647 bl->init_insn = p;
3648 bl->init_set = gen_rtx_SET (VOIDmode,
3649 XEXP (test, 0), XEXP (test, 1));
3650 }
3651 else
3652 bl->initial_test = test;
3653 }
3654 }
3655
3656 /* Look at each biv and see if we can say anything better about its
3657 initial value from any initializing insns set up above. (This is done
3658 in two passes to avoid missing SETs in a PARALLEL.) */
3659 for (bl = loop_iv_list; bl; bl = bl->next)
3660 {
3661 rtx src;
3662 rtx note;
3663
3664 if (! bl->init_insn)
3665 continue;
3666
3667 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3668 is a constant, use that value. */
3669 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3670 && CONSTANT_P (XEXP (note, 0)))
3671 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3672 && CONSTANT_P (XEXP (note, 0))))
3673 src = XEXP (note, 0);
3674 else
3675 src = SET_SRC (bl->init_set);
3676
3677 if (loop_dump_stream)
3678 fprintf (loop_dump_stream,
3679 "Biv %d initialized at insn %d: initial value ",
3680 bl->regno, INSN_UID (bl->init_insn));
3681
3682 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3683 || GET_MODE (src) == VOIDmode)
3684 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
3685 {
3686 bl->initial_value = src;
3687
3688 if (loop_dump_stream)
3689 {
3690 if (GET_CODE (src) == CONST_INT)
3691 {
3692 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
3693 fputc ('\n', loop_dump_stream);
3694 }
3695 else
3696 {
3697 print_rtl (loop_dump_stream, src);
3698 fprintf (loop_dump_stream, "\n");
3699 }
3700 }
3701 }
3702 else
3703 {
3704 /* The biv's initial value is not a simple move,
3705 so let it keep its own initial value. */
3706
3707 if (loop_dump_stream)
3708 fprintf (loop_dump_stream, "is complex\n");
3709 }
3710 }
3711
3712 /* Search the loop for general induction variables. */
3713
3714 /* A register is a giv if: it is only set once, it is a function of a
3715 biv and a constant (or invariant), and it is not a biv. */
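/* A minimal example (assuming 4-byte array elements; the names are
   made up): in

       for (i = 0; i < n; i++)
         a[i] = 0;

   `i' is a biv, incremented by the constant 1 each iteration, while
   the address a + i*4 used by the store is a giv with src_reg == i,
   mult_val == (const_int 4) and add_val == the invariant address of
   `a'.  */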
3716
3717 not_every_iteration = 0;
3718 loop_depth = 0;
3719 p = scan_start;
3720 while (1)
3721 {
3722 p = NEXT_INSN (p);
3723 /* At end of a straight-in loop, we are done.
3724 At end of a loop entered at the bottom, scan the top. */
3725 if (p == scan_start)
3726 break;
3727 if (p == end)
3728 {
3729 if (loop_top != 0)
3730 p = loop_top;
3731 else
3732 break;
3733 if (p == scan_start)
3734 break;
3735 }
3736
3737 /* Look for a general induction variable in a register. */
3738 if (GET_CODE (p) == INSN
3739 && (set = single_set (p))
3740 && GET_CODE (SET_DEST (set)) == REG
3741 && ! may_not_optimize[REGNO (SET_DEST (set))])
3742 {
3743 rtx src_reg;
3744 rtx add_val;
3745 rtx mult_val;
3746 int benefit;
3747 rtx regnote = 0;
3748
3749 dest_reg = SET_DEST (set);
3750 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
3751 continue;
3752
3753 if (/* SET_SRC is a giv. */
3754 ((benefit = general_induction_var (SET_SRC (set),
3755 &src_reg, &add_val,
3756 &mult_val))
3757 /* Equivalent expression is a giv. */
3758 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
3759 && (benefit = general_induction_var (XEXP (regnote, 0),
3760 &src_reg,
3761 &add_val, &mult_val))))
3762 /* Don't try to handle any regs made by loop optimization.
3763 We have nothing on them in regno_first_uid, etc. */
3764 && REGNO (dest_reg) < max_reg_before_loop
3765 /* Don't recognize a BASIC_INDUCT_VAR here. */
3766 && dest_reg != src_reg
3767 /* This must be the only place where the register is set. */
3768 && (n_times_set[REGNO (dest_reg)] == 1
3769 /* or all sets must be consecutive and make a giv. */
3770 || (benefit = consec_sets_giv (benefit, p,
3771 src_reg, dest_reg,
3772 &add_val, &mult_val))))
3773 {
3774 int count;
3775 struct induction *v
3776 = (struct induction *) alloca (sizeof (struct induction));
3777 rtx temp;
3778
3779 /* If this is a library call, increase benefit. */
3780 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
3781 benefit += libcall_benefit (p);
3782
3783 /* Skip the consecutive insns, if there are any. */
3784 for (count = n_times_set[REGNO (dest_reg)] - 1;
3785 count > 0; count--)
3786 {
3787 /* If first insn of libcall sequence, skip to end.
3788 Do this at start of loop, since INSN is guaranteed to
3789 be an insn here. */
3790 if (GET_CODE (p) != NOTE
3791 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3792 p = XEXP (temp, 0);
3793
3794 do p = NEXT_INSN (p);
3795 while (GET_CODE (p) == NOTE);
3796 }
3797
3798 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
3799 DEST_REG, not_every_iteration, NULL_PTR, loop_start,
3800 loop_end);
3801
3802 }
3803 }
3804
3805 #ifndef DONT_REDUCE_ADDR
3806 /* Look for givs which are memory addresses. */
3807 /* This resulted in worse code on a VAX 8600. I wonder if it
3808 still does. */
3809 if (GET_CODE (p) == INSN)
3810 find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
3811 loop_end);
3812 #endif
3813
3814 /* Update the status of whether giv can derive other givs. This can
3815 change when we pass a label or an insn that updates a biv. */
3816 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3817 || GET_CODE (p) == CODE_LABEL)
3818 update_giv_derive (p);
3819
3820 /* Past a jump, we get to insns for which we can't count
3821 on whether they will be executed during each iteration. */
3822 /* This code appears twice in strength_reduce. There is also similar
3823 code in scan_loop. */
3824 if (GET_CODE (p) == JUMP_INSN
3825 /* If we enter the loop in the middle, and scan around to the
3826 beginning, don't set not_every_iteration for that.
3827 This can be any kind of jump, since we want to know if insns
3828 will be executed if the loop is executed. */
3829 && ! (JUMP_LABEL (p) == loop_top
3830 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3831 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3832 {
3833 rtx label = 0;
3834
3835 /* If this is a jump outside the loop, then it also doesn't
3836 matter. Check to see if the target of this branch is on the
3837 loop_number_exit_labels list. */
3838
3839 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3840 label;
3841 label = LABEL_NEXTREF (label))
3842 if (XEXP (label, 0) == JUMP_LABEL (p))
3843 break;
3844
3845 if (! label)
3846 not_every_iteration = 1;
3847 }
3848
3849 else if (GET_CODE (p) == NOTE)
3850 {
3851 /* At the virtual top of a converted loop, insns are again known to
3852 be executed each iteration: logically, the loop begins here
3853 even though the exit code has been duplicated. */
3854 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3855 not_every_iteration = 0;
3856 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3857 loop_depth++;
3858 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3859 loop_depth--;
3860 }
3861
3862 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3863 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3864 or not an insn is known to be executed each iteration of the
3865 loop, whether or not any iterations are known to occur.
3866
3867 Therefore, if we have just passed a label and have no more labels
3868 between here and the test insn of the loop, we know these insns
3869 will be executed each iteration. */
3870
3871 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3872 && no_labels_between_p (p, loop_end))
3873 not_every_iteration = 0;
3874 }
3875
3876 /* Try to calculate and save the number of loop iterations. This is
3877 set to zero if the actual number cannot be calculated. This must
3878 be called after all giv's have been identified, since otherwise it may
3879 fail if the iteration variable is a giv. */
3880
3881 loop_n_iterations = loop_iterations (loop_start, loop_end);
3882
3883 /* Now for each giv for which we still don't know whether or not it is
3884 replaceable, check to see if it is replaceable because its final value
3885 can be calculated. This must be done after loop_iterations is called,
3886 so that final_giv_value will work correctly. */
3887
3888 for (bl = loop_iv_list; bl; bl = bl->next)
3889 {
3890 struct induction *v;
3891
3892 for (v = bl->giv; v; v = v->next_iv)
3893 if (! v->replaceable && ! v->not_replaceable)
3894 check_final_value (v, loop_start, loop_end);
3895 }
3896
3897 /* Try to prove that the loop counter variable (if any) is always
3898 nonnegative; if so, record that fact with a REG_NONNEG note
3899 so that "decrement and branch until zero" insn can be used. */
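/* Roughly, this allows a counted loop such as

       for (i = 0; i < n; i++) body;

   to be reversed into

       for (i = n; i != 0; i--) body;

   so the exit test can become a single decrement-and-branch insn.
   This is only a sketch; check_dbra_loop performs many validity
   checks before reversing anything.  */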
3900 check_dbra_loop (loop_end, insn_count, loop_start);
3901
3902 #ifdef HAIFA
3903 /* Record loop variables relevant for BCT optimization before unrolling
3904 the loop. Unrolling may update part of this information, and the
3905 correct data will be used for generating the BCT. */
3906 #ifdef HAVE_decrement_and_branch_on_count
3907 if (HAVE_decrement_and_branch_on_count)
3908 analyze_loop_iterations (loop_start, loop_end);
3909 #endif
3910 #endif /* HAIFA */
3911
3912 /* Create reg_map to hold substitutions for replaceable giv regs. */
3913 reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
3914 bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
3915
3916 /* Examine each iv class for feasibility of strength reduction/induction
3917 variable elimination. */
3918
3919 for (bl = loop_iv_list; bl; bl = bl->next)
3920 {
3921 struct induction *v;
3922 int benefit;
3923 int all_reduced;
3924 rtx final_value = 0;
3925
3926 /* Test whether it will be possible to eliminate this biv
3927 provided all givs are reduced. This is possible if either
3928 the reg is not used outside the loop, or we can compute
3929 what its final value will be.
3930
3931 For architectures with a decrement_and_branch_until_zero insn,
3932 don't do this if we put a REG_NONNEG note on the endtest for
3933 this biv. */
3934
3935 /* Compare against bl->init_insn rather than loop_start.
3936 We aren't concerned with any uses of the biv between
3937 init_insn and loop_start since these won't be affected
3938 by the value of the biv elsewhere in the function, so
3939 long as init_insn doesn't use the biv itself.
3940 March 14, 1989 -- self@bayes.arc.nasa.gov */
3941
3942 if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
3943 && bl->init_insn
3944 && INSN_UID (bl->init_insn) < max_uid_for_loop
3945 && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
3946 #ifdef HAVE_decrement_and_branch_until_zero
3947 && ! bl->nonneg
3948 #endif
3949 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3950 || ((final_value = final_biv_value (bl, loop_start, loop_end))
3951 #ifdef HAVE_decrement_and_branch_until_zero
3952 && ! bl->nonneg
3953 #endif
3954 ))
3955 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
3956 threshold, insn_count);
3957 else
3958 {
3959 if (loop_dump_stream)
3960 {
3961 fprintf (loop_dump_stream,
3962 "Cannot eliminate biv %d.\n",
3963 bl->regno);
3964 fprintf (loop_dump_stream,
3965 "First use: insn %d, last use: insn %d.\n",
3966 REGNO_FIRST_UID (bl->regno),
3967 REGNO_LAST_UID (bl->regno));
3968 }
3969 }
3970
3971 /* Combine all giv's for this iv_class. */
3972 combine_givs (bl);
3973
3974 /* This will be true at the end, if all givs which depend on this
3975 biv have been strength reduced.
3976 We can't (currently) eliminate the biv unless this is so. */
3977 all_reduced = 1;
3978
3979 /* Check each giv in this class to see if we will benefit by reducing
3980 it. Skip giv's combined with others. */
3981 for (v = bl->giv; v; v = v->next_iv)
3982 {
3983 struct induction *tv;
3984
3985 if (v->ignore || v->same)
3986 continue;
3987
3988 benefit = v->benefit;
3989
3990 /* Reduce benefit if not replaceable, since we will insert
3991 a move-insn to replace the insn that calculates this giv.
3992 Don't do this unless the giv is a user variable, since it
3993 will often be marked non-replaceable because of the duplication
3994 of the exit code outside the loop. In such a case, the copies
3995 we insert are dead and will be deleted. So they don't have
3996 a cost. Similar situations exist. */
3997 /* ??? The new final_[bg]iv_value code does a much better job
3998 of finding replaceable giv's, and hence this code may no longer
3999 be necessary. */
4000 if (! v->replaceable && ! bl->eliminable
4001 && REG_USERVAR_P (v->dest_reg))
4002 benefit -= copy_cost;
4003
4004 /* Decrease the benefit to count the add-insns that we will
4005 insert to increment the reduced reg for the giv. */
4006 benefit -= add_cost * bl->biv_count;
4007
4008 /* Decide whether to strength-reduce this giv or to leave the code
4009 unchanged (recompute it from the biv each time it is used).
4010 This decision can be made independently for each giv. */
4011
4012 #ifdef AUTO_INC_DEC
4013 /* Attempt to guess whether autoincrement will handle some of the
4014 new add insns; if so, increase BENEFIT (undo the subtraction of
4015 add_cost that was done above). */
4016 if (v->giv_type == DEST_ADDR
4017 && GET_CODE (v->mult_val) == CONST_INT)
4018 {
4019 #if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
4020 if (INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4021 benefit += add_cost * bl->biv_count;
4022 #endif
4023 #if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
4024 if (-INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4025 benefit += add_cost * bl->biv_count;
4026 #endif
4027 }
4028 #endif
4029
4030 /* If an insn is not to be strength reduced, then set its ignore
4031 flag, and clear all_reduced. */
4032
4033 /* A giv that depends on a reversed biv must be reduced if it is
4034 used after the loop exit; otherwise, it would have the wrong
4035 value after the loop exit. To make it simple, just reduce all
4036 such givs whether or not we know they are used after the loop
4037 exit. */
4038
4039 if (! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
4040 && ! bl->reversed)
4041 {
4042 if (loop_dump_stream)
4043 fprintf (loop_dump_stream,
4044 "giv of insn %d not worth while, %d vs %d.\n",
4045 INSN_UID (v->insn),
4046 v->lifetime * threshold * benefit, insn_count);
4047 v->ignore = 1;
4048 all_reduced = 0;
4049 }
4050 else
4051 {
4052 /* Check that we can increment the reduced giv without a
4053 multiply insn. If not, reject it. */
4054
4055 for (tv = bl->biv; tv; tv = tv->next_iv)
4056 if (tv->mult_val == const1_rtx
4057 && ! product_cheap_p (tv->add_val, v->mult_val))
4058 {
4059 if (loop_dump_stream)
4060 fprintf (loop_dump_stream,
4061 "giv of insn %d: would need a multiply.\n",
4062 INSN_UID (v->insn));
4063 v->ignore = 1;
4064 all_reduced = 0;
4065 break;
4066 }
4067 }
4068 }
4069
4070 /* Reduce each giv that we decided to reduce. */
4071
4072 for (v = bl->giv; v; v = v->next_iv)
4073 {
4074 struct induction *tv;
4075 if (! v->ignore && v->same == 0)
4076 {
4077 int auto_inc_opt = 0;
4078
4079 v->new_reg = gen_reg_rtx (v->mode);
4080
4081 #ifdef AUTO_INC_DEC
4082 /* If the target has auto-increment addressing modes, and
4083 this is an address giv, then try to put the increment
4084 immediately after its use, so that flow can create an
4085 auto-increment addressing mode. */
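/* For instance (illustrative only): if the loop contains

       ... = *p;      <- the address giv uses p
       p = p + 4;     <- the biv increment, placed just after the use

   then flow can later merge the pair into a post-increment load,
   roughly `... = *p++', on targets with HAVE_POST_INCREMENT.  */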
4086 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4087 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4088 /* We don't handle reversed biv's because bl->biv->insn
4089 does not have a valid INSN_LUID. */
4090 && ! bl->reversed
4091 && v->always_executed && ! v->maybe_multiple)
4092 {
4093 /* If other giv's have been combined with this one, then
4094 this will work only if all uses of the other giv's occur
4095 before this giv's insn. This is difficult to check.
4096
4097 We simplify this by looking for the common case where
4098 there is one DEST_REG giv, and this giv's insn is the
4099 last use of the dest_reg of that DEST_REG giv. If
4100 the increment occurs after the address giv, then we can
4101 perform the optimization. (Otherwise, the increment
4102 would have to go before other_giv, and we would not be
4103 able to combine it with the address giv to get an
4104 auto-inc address.) */
4105 if (v->combined_with)
4106 {
4107 struct induction *other_giv = 0;
4108
4109 for (tv = bl->giv; tv; tv = tv->next_iv)
4110 if (tv->same == v)
4111 {
4112 if (other_giv)
4113 break;
4114 else
4115 other_giv = tv;
4116 }
4117 if (! tv && other_giv
4118 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4119 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4120 == INSN_UID (v->insn))
4121 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4122 auto_inc_opt = 1;
4123 }
4124 /* Check for the case where the increment is before the address
4125 giv. Do this test in "loop order". */
4126 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4127 && (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4128 || (INSN_LUID (bl->biv->insn)
4129 > INSN_LUID (scan_start))))
4130 || (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4131 && (INSN_LUID (scan_start)
4132 < INSN_LUID (bl->biv->insn))))
4133 auto_inc_opt = -1;
4134 else
4135 auto_inc_opt = 1;
4136
4137 #ifdef HAVE_cc0
4138 {
4139 rtx prev;
4140
4141 /* We can't put an insn immediately after one setting
4142 cc0, or immediately before one using cc0. */
4143 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4144 || (auto_inc_opt == -1
4145 && (prev = prev_nonnote_insn (v->insn)) != 0
4146 && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
4147 && sets_cc0_p (PATTERN (prev))))
4148 auto_inc_opt = 0;
4149 }
4150 #endif
4151
4152 if (auto_inc_opt)
4153 v->auto_inc_opt = 1;
4154 }
4155 #endif
4156
4157 /* For each place where the biv is incremented, add an insn
4158 to increment the new, reduced reg for the giv. */
4159 for (tv = bl->biv; tv; tv = tv->next_iv)
4160 {
4161 rtx insert_before;
4162
4163 if (! auto_inc_opt)
4164 insert_before = tv->insn;
4165 else if (auto_inc_opt == 1)
4166 insert_before = NEXT_INSN (v->insn);
4167 else
4168 insert_before = v->insn;
4169
4170 if (tv->mult_val == const1_rtx)
4171 emit_iv_add_mult (tv->add_val, v->mult_val,
4172 v->new_reg, v->new_reg, insert_before);
4173 else /* tv->mult_val == const0_rtx */
4174 /* A multiply is acceptable here
4175 since this is presumed to be seldom executed. */
4176 emit_iv_add_mult (tv->add_val, v->mult_val,
4177 v->add_val, v->new_reg, insert_before);
4178 }
4179
4180 /* Add code at loop start to initialize giv's reduced reg. */
4181
4182 emit_iv_add_mult (bl->initial_value, v->mult_val,
4183 v->add_val, v->new_reg, loop_start);
4184 }
4185 }
4186
4187 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4188 as not reduced.
4189
4190 For each giv register that can be reduced now: if replaceable,
4191 substitute reduced reg wherever the old giv occurs;
4192 else add new move insn "giv_reg = reduced_reg".
4193
4194 Also check for givs whose first use is their definition and whose
4195 last use is the definition of another giv. If so, it is likely
4196 dead and should not be used to eliminate a biv. */
4197 for (v = bl->giv; v; v = v->next_iv)
4198 {
4199 if (v->same && v->same->ignore)
4200 v->ignore = 1;
4201
4202 if (v->ignore)
4203 continue;
4204
4205 if (v->giv_type == DEST_REG
4206 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4207 {
4208 struct induction *v1;
4209
4210 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4211 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4212 v->maybe_dead = 1;
4213 }
4214
4215 /* Update expression if this was combined, in case other giv was
4216 replaced. */
4217 if (v->same)
4218 v->new_reg = replace_rtx (v->new_reg,
4219 v->same->dest_reg, v->same->new_reg);
4220
4221 if (v->giv_type == DEST_ADDR)
4222 /* Store reduced reg as the address in the memref where we found
4223 this giv. */
4224 validate_change (v->insn, v->location, v->new_reg, 0);
4225 else if (v->replaceable)
4226 {
4227 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4228
4229 #if 0
4230 /* I can no longer duplicate the original problem. Perhaps
4231 this is unnecessary now? */
4232
4233 /* Replaceable; it isn't strictly necessary to delete the old
4234 insn and emit a new one, because v->dest_reg is now dead.
4235
4236 However, especially when unrolling loops, the special
4237 handling for (set REG0 REG1) in the second cse pass may
4238 make v->dest_reg live again. To avoid this problem, emit
4239 an insn to set the original giv reg from the reduced giv.
4240 We can not delete the original insn, since it may be part
4241 of a LIBCALL, and the code in flow that eliminates dead
4242 libcalls will fail if it is deleted. */
4243 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4244 v->insn);
4245 #endif
4246 }
4247 else
4248 {
4249 /* Not replaceable; emit an insn to set the original giv reg from
4250 the reduced giv, same as above. */
4251 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4252 v->insn);
4253 }
4254
4255 /* When a loop is reversed, givs which depend on the reversed
4256 biv, and which are live outside the loop, must be set to their
4257 correct final value. This insn is only needed if the giv is
4258 not replaceable. The correct final value is the same as the
4259 value that the giv starts the reversed loop with. */
4260 if (bl->reversed && ! v->replaceable)
4261 emit_iv_add_mult (bl->initial_value, v->mult_val,
4262 v->add_val, v->dest_reg, end_insert_before);
4263 else if (v->final_value)
4264 {
4265 rtx insert_before;
4266
4267 /* If the loop has multiple exits, emit the insn before the
4268 loop to ensure that it will always be executed no matter
4269 how the loop exits. Otherwise, emit the insn after the loop,
4270 since this is slightly more efficient. */
4271 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4272 insert_before = loop_start;
4273 else
4274 insert_before = end_insert_before;
4275 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
4276 insert_before);
4277
4278 #if 0
4279 /* If the insn to set the final value of the giv was emitted
4280 before the loop, then we must delete the insn inside the loop
4281 that sets it. If this is a LIBCALL, then we must delete
4282 every insn in the libcall. Note, however, that
4283 final_giv_value will only succeed when there are multiple
4284 exits if the giv is dead at each exit, hence it does not
4285 matter that the original insn remains because it is dead
4286 anyway. */
4287 /* Delete the insn inside the loop that sets the giv since
4288 the giv is now set before (or after) the loop. */
4289 delete_insn (v->insn);
4290 #endif
4291 }
4292
4293 if (loop_dump_stream)
4294 {
4295 fprintf (loop_dump_stream, "giv at %d reduced to ",
4296 INSN_UID (v->insn));
4297 print_rtl (loop_dump_stream, v->new_reg);
4298 fprintf (loop_dump_stream, "\n");
4299 }
4300 }
4301
4302 /* All the givs based on the biv bl have been reduced if they
4303 merit it. */
4304
4305 /* For each giv not marked as maybe dead that has been combined with a
4306 second giv, clear any "maybe dead" mark on that second giv.
4307 v->new_reg will either be or refer to the register of the giv it
4308 combined with.
4309
4310 Doing this clearing avoids problems in biv elimination where a
4311 giv's new_reg is a complex value that can't be put in the insn but
4312 the giv combined with (with a reg as new_reg) is marked maybe_dead.
4313 Since the register will be used in either case, we'd prefer it be
4314 used from the simpler giv. */
4315
4316 for (v = bl->giv; v; v = v->next_iv)
4317 if (! v->maybe_dead && v->same)
4318 v->same->maybe_dead = 0;
4319
4320 /* Try to eliminate the biv, if it is a candidate.
4321 This won't work if ! all_reduced,
4322 since the givs we planned to use might not have been reduced.
4323
4324 We have to be careful that we didn't initially think we could eliminate
4325 this biv because of a giv that we now think may be dead and shouldn't
4326 be used as a biv replacement.
4327
4328 Also, there is the possibility that we may have a giv that looks
4329 like it can be used to eliminate a biv, but the resulting insn
4330 isn't valid. This can happen, for example, on the 88k, where a
4331 JUMP_INSN can compare a register only with zero. Attempts to
4332 replace it with a compare with a constant will fail.
4333
4334 Note that in cases where this call fails, we may have replaced some
4335 of the occurrences of the biv with a giv, but no harm was done in
4336 doing so in the rare cases where it can occur. */
4337
4338 if (all_reduced == 1 && bl->eliminable
4339 && maybe_eliminate_biv (bl, loop_start, end, 1,
4340 threshold, insn_count))
4341
4342 {
4343 /* ?? If we created a new test to bypass the loop entirely,
4344 or otherwise drop straight in, based on this test, then
4345 we might want to rewrite it also. This way some later
4346 pass has more hope of removing the initialization of this
4347 biv entirely. */
4348
4349 /* If final_value != 0, then the biv may be used after loop end
4350 and we must emit an insn to set it just in case.
4351
4352 Reversed bivs already have an insn after the loop setting their
4353 value, so we don't need another one. We can't calculate the
4354 proper final value for such a biv here anyway. */
4355 if (final_value != 0 && ! bl->reversed)
4356 {
4357 rtx insert_before;
4358
4359 /* If the loop has multiple exits, emit the insn before the
4360 loop to ensure that it will always be executed no matter
4361 how the loop exits. Otherwise, emit the insn after the
4362 loop, since this is slightly more efficient. */
4363 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4364 insert_before = loop_start;
4365 else
4366 insert_before = end_insert_before;
4367
4368 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
4369 insert_before);
4370 }
4371
4372 #if 0
4373 /* Delete all of the instructions inside the loop which set
4374 the biv, as they are all dead. It is safe to delete them,
4375 because an insn setting a biv will never be part of a libcall. */
4376 /* However, deleting them will invalidate the regno_last_uid info,
4377 so keeping them around is more convenient. Final_biv_value
4378 will only succeed when there are multiple exits if the biv
4379 is dead at each exit, hence it does not matter that the original
4380 insn remains, because it is dead anyway. */
4381 for (v = bl->biv; v; v = v->next_iv)
4382 delete_insn (v->insn);
4383 #endif
4384
4385 if (loop_dump_stream)
4386 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4387 bl->regno);
4388 }
4389 }
4390
4391 /* Go through all the instructions in the loop, making all the
4392 register substitutions scheduled in REG_MAP. */
4393
4394 for (p = loop_start; p != end; p = NEXT_INSN (p))
4395 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4396 || GET_CODE (p) == CALL_INSN)
4397 {
4398 replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
4399 replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
4400 INSN_CODE (p) = -1;
4401 }
4402
4403 /* Unroll loops from within strength reduction so that we can use the
4404 induction variable information that strength_reduce has already
4405 collected. */
4406
4407 if (unroll_p)
4408 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
4409
4410 #ifdef HAIFA
4411 /* Instrument the loop with a BCT insn. */
4412 #ifdef HAVE_decrement_and_branch_on_count
4413 if (HAVE_decrement_and_branch_on_count)
4414 insert_bct (loop_start, loop_end);
4415 #endif
4416 #endif /* HAIFA */
4417
4418 if (loop_dump_stream)
4419 fprintf (loop_dump_stream, "\n");
4420 }
4421 \f
4422 /* Return 1 if X is a valid source for an initial value (or as value being
4423 compared against in an initial test).
4424
4425 X must be either a register or constant and must not be clobbered between
4426 the current insn and the start of the loop.
4427
4428 INSN is the insn containing X. */
4429
4430 static int
4431 valid_initial_value_p (x, insn, call_seen, loop_start)
4432 rtx x;
4433 rtx insn;
4434 int call_seen;
4435 rtx loop_start;
4436 {
4437 if (CONSTANT_P (x))
4438 return 1;
4439
4440 /* Only consider pseudos we know about initialized in insns whose luids
4441 we know. */
4442 if (GET_CODE (x) != REG
4443 || REGNO (x) >= max_reg_before_loop)
4444 return 0;
4445
4446 /* Don't use a call-clobbered register across a call which clobbers it. On
4447 some machines, don't use any hard registers at all. */
4448 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4449 && (SMALL_REGISTER_CLASSES
4450 || (call_used_regs[REGNO (x)] && call_seen)))
4451 return 0;
4452
4453 /* Don't use registers that have been clobbered before the start of the
4454 loop. */
4455 if (reg_set_between_p (x, insn, loop_start))
4456 return 0;
4457
4458 return 1;
4459 }
4460 \f
4461 /* Scan X for memory refs and check each memory address
4462 as a possible giv. INSN is the insn whose pattern X comes from.
4463 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4464 every loop iteration. */
4465
4466 static void
4467 find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
4468 rtx x;
4469 rtx insn;
4470 int not_every_iteration;
4471 rtx loop_start, loop_end;
4472 {
4473 register int i, j;
4474 register enum rtx_code code;
4475 register char *fmt;
4476
4477 if (x == 0)
4478 return;
4479
4480 code = GET_CODE (x);
4481 switch (code)
4482 {
4483 case REG:
4484 case CONST_INT:
4485 case CONST:
4486 case CONST_DOUBLE:
4487 case SYMBOL_REF:
4488 case LABEL_REF:
4489 case PC:
4490 case CC0:
4491 case ADDR_VEC:
4492 case ADDR_DIFF_VEC:
4493 case USE:
4494 case CLOBBER:
4495 return;
4496
4497 case MEM:
4498 {
4499 rtx src_reg;
4500 rtx add_val;
4501 rtx mult_val;
4502 int benefit;
4503
4504 benefit = general_induction_var (XEXP (x, 0),
4505 &src_reg, &add_val, &mult_val);
4506
4507 /* Don't make a DEST_ADDR giv with mult_val == 1 && add_val == 0.
4508 Such a giv isn't useful. */
4509 if (benefit > 0 && (mult_val != const1_rtx || add_val != const0_rtx))
4510 {
4511 /* Found one; record it. */
4512 struct induction *v
4513 = (struct induction *) oballoc (sizeof (struct induction));
4514
4515 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
4516 add_val, benefit, DEST_ADDR, not_every_iteration,
4517 &XEXP (x, 0), loop_start, loop_end);
4518
4519 v->mem_mode = GET_MODE (x);
4520 }
4521 }
4522 return;
4523
4524 default:
4525 break;
4526 }
4527
4528 /* Recursively scan the subexpressions for other mem refs. */
4529
4530 fmt = GET_RTX_FORMAT (code);
4531 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4532 if (fmt[i] == 'e')
4533 find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
4534 loop_end);
4535 else if (fmt[i] == 'E')
4536 for (j = 0; j < XVECLEN (x, i); j++)
4537 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
4538 loop_start, loop_end);
4539 }
4540 \f
4541 /* Fill in the data about one biv update.
4542 V is the `struct induction' in which we record the biv. (It is
4543 allocated by the caller, with alloca.)
4544 INSN is the insn that sets it.
4545 DEST_REG is the biv's reg.
4546
4547 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4548 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4549 being set to INC_VAL.
4550
4551 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4552 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4553 can be executed more than once per iteration. If MAYBE_MULTIPLE
4554 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4555 executed exactly once per iteration. */
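/* A small example of this convention (hypothetical values): for the
   update `i = i + 2', MULT_VAL is const1_rtx and INC_VAL is
   (const_int 2); for the plain assignment `i = 10', MULT_VAL is
   const0_rtx and INC_VAL is (const_int 10).  */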
4556
4557 static void
4558 record_biv (v, insn, dest_reg, inc_val, mult_val,
4559 not_every_iteration, maybe_multiple)
4560 struct induction *v;
4561 rtx insn;
4562 rtx dest_reg;
4563 rtx inc_val;
4564 rtx mult_val;
4565 int not_every_iteration;
4566 int maybe_multiple;
4567 {
4568 struct iv_class *bl;
4569
4570 v->insn = insn;
4571 v->src_reg = dest_reg;
4572 v->dest_reg = dest_reg;
4573 v->mult_val = mult_val;
4574 v->add_val = inc_val;
4575 v->mode = GET_MODE (dest_reg);
4576 v->always_computable = ! not_every_iteration;
4577 v->always_executed = ! not_every_iteration;
4578 v->maybe_multiple = maybe_multiple;
4579
4580 /* Add this to the reg's iv_class, creating a class
4581 if this is the first incrementation of the reg. */
4582
4583 bl = reg_biv_class[REGNO (dest_reg)];
4584 if (bl == 0)
4585 {
4586 /* Create and initialize new iv_class. */
4587
4588 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
4589
4590 bl->regno = REGNO (dest_reg);
4591 bl->biv = 0;
4592 bl->giv = 0;
4593 bl->biv_count = 0;
4594 bl->giv_count = 0;
4595
4596 /* Set initial value to the reg itself. */
4597 bl->initial_value = dest_reg;
4598 /* We haven't seen the initializing insn yet. */
4599 bl->init_insn = 0;
4600 bl->init_set = 0;
4601 bl->initial_test = 0;
4602 bl->incremented = 0;
4603 bl->eliminable = 0;
4604 bl->nonneg = 0;
4605 bl->reversed = 0;
4606 bl->total_benefit = 0;
4607
4608 /* Add this class to loop_iv_list. */
4609 bl->next = loop_iv_list;
4610 loop_iv_list = bl;
4611
4612 /* Put it in the array of biv register classes. */
4613 reg_biv_class[REGNO (dest_reg)] = bl;
4614 }
4615
4616 /* Update IV_CLASS entry for this biv. */
4617 v->next_iv = bl->biv;
4618 bl->biv = v;
4619 bl->biv_count++;
4620 if (mult_val == const1_rtx)
4621 bl->incremented = 1;
4622
4623 if (loop_dump_stream)
4624 {
4625 fprintf (loop_dump_stream,
4626 "Insn %d: possible biv, reg %d,",
4627 INSN_UID (insn), REGNO (dest_reg));
4628 if (GET_CODE (inc_val) == CONST_INT)
4629 {
4630 fprintf (loop_dump_stream, " const =");
4631 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val));
4632 fputc ('\n', loop_dump_stream);
4633 }
4634 else
4635 {
4636 fprintf (loop_dump_stream, " const = ");
4637 print_rtl (loop_dump_stream, inc_val);
4638 fprintf (loop_dump_stream, "\n");
4639 }
4640 }
4641 }
4642 \f
4643 /* Fill in the data about one giv.
4644 V is the `struct induction' in which we record the giv. (It is
4645 allocated by the caller, with alloca.)
4646 INSN is the insn that sets it.
4647 BENEFIT estimates the savings from deleting this insn.
4648 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4649 into a register or is used as a memory address.
4650
4651 SRC_REG is the biv reg which the giv is computed from.
4652 DEST_REG is the giv's reg (if the giv is stored in a reg).
4653 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4654 LOCATION points to the place where this giv's value appears in INSN. */
4655
4656 static void
4657 record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
4658 type, not_every_iteration, location, loop_start, loop_end)
4659 struct induction *v;
4660 rtx insn;
4661 rtx src_reg;
4662 rtx dest_reg;
4663 rtx mult_val, add_val;
4664 int benefit;
4665 enum g_types type;
4666 int not_every_iteration;
4667 rtx *location;
4668 rtx loop_start, loop_end;
4669 {
4670 struct induction *b;
4671 struct iv_class *bl;
4672 rtx set = single_set (insn);
4673
4674 v->insn = insn;
4675 v->src_reg = src_reg;
4676 v->giv_type = type;
4677 v->dest_reg = dest_reg;
4678 v->mult_val = mult_val;
4679 v->add_val = add_val;
4680 v->benefit = benefit;
4681 v->location = location;
4682 v->cant_derive = 0;
4683 v->combined_with = 0;
4684 v->maybe_multiple = 0;
4685 v->maybe_dead = 0;
4686 v->derive_adjustment = 0;
4687 v->same = 0;
4688 v->ignore = 0;
4689 v->new_reg = 0;
4690 v->final_value = 0;
4691 v->same_insn = 0;
4692 v->auto_inc_opt = 0;
4693 v->unrolled = 0;
4694 v->shared = 0;
4695
4696 /* The v->always_computable field is used in update_giv_derive, to
4697 determine whether a giv can be used to derive another giv. For a
4698 DEST_REG giv, INSN computes a new value for the giv, so its value
4699 isn't computable if INSN isn't executed every iteration.
4700 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4701 it does not compute a new value. Hence the value is always computable
4702 regardless of whether INSN is executed each iteration. */
4703
4704 if (type == DEST_ADDR)
4705 v->always_computable = 1;
4706 else
4707 v->always_computable = ! not_every_iteration;
4708
4709 v->always_executed = ! not_every_iteration;
4710
4711 if (type == DEST_ADDR)
4712 {
4713 v->mode = GET_MODE (*location);
4714 v->lifetime = 1;
4715 v->times_used = 1;
4716 }
4717 else /* type == DEST_REG */
4718 {
4719 v->mode = GET_MODE (SET_DEST (set));
4720
4721 v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
4722 - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
4723
4724 v->times_used = n_times_used[REGNO (dest_reg)];
4725
4726 /* If the lifetime is zero, it means that this register is
4727 really a dead store. So mark this as a giv that can be
4728 ignored. This will not prevent the biv from being eliminated. */
4729 if (v->lifetime == 0)
4730 v->ignore = 1;
4731
4732 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
4733 reg_iv_info[REGNO (dest_reg)] = v;
4734 }
4735
4736 /* Add the giv to the class of givs computed from one biv. */
4737
4738 bl = reg_biv_class[REGNO (src_reg)];
4739 if (bl)
4740 {
4741 v->next_iv = bl->giv;
4742 bl->giv = v;
4743 /* Don't count DEST_ADDR. This is supposed to count the number of
4744 insns that calculate givs. */
4745 if (type == DEST_REG)
4746 bl->giv_count++;
4747 bl->total_benefit += benefit;
4748 }
4749 else
4750 /* Fatal error, biv missing for this giv? */
4751 abort ();
4752
4753 if (type == DEST_ADDR)
4754 v->replaceable = 1;
4755 else
4756 {
4757 /* The giv can be replaced outright by the reduced register only if all
4758 of the following conditions are true:
4759 - the insn that sets the giv is always executed on any iteration
4760 on which the giv is used at all
4761 (there are two ways to deduce this:
4762 either the insn is executed on every iteration,
4763 or all uses follow that insn in the same basic block),
4764 - the giv is not used outside the loop
4765 - no assignments to the biv occur during the giv's lifetime. */
4766
4767 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
4768 /* Previous line always fails if INSN was moved by loop opt. */
4769 && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
4770 && (! not_every_iteration
4771 || last_use_this_basic_block (dest_reg, insn)))
4772 {
4773 /* Now check that there are no assignments to the biv within the
4774 giv's lifetime. This requires two separate checks. */
4775
4776 /* Check each biv update, and fail if any are between the first
4777 and last use of the giv.
4778
4779 If this loop contains an inner loop that was unrolled, then
4780 the insn modifying the biv may have been emitted by the loop
4781 unrolling code, and hence does not have a valid luid. Just
4782 mark the biv as not replaceable in this case. It is not very
4783 useful as a biv, because it is used in two different loops.
4784 It is very unlikely that we would be able to optimize the giv
4785 using this biv anyway. */
4786
4787 v->replaceable = 1;
4788 for (b = bl->biv; b; b = b->next_iv)
4789 {
4790 if (INSN_UID (b->insn) >= max_uid_for_loop
4791 || ((uid_luid[INSN_UID (b->insn)]
4792 >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
4793 && (uid_luid[INSN_UID (b->insn)]
4794 <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
4795 {
4796 v->replaceable = 0;
4797 v->not_replaceable = 1;
4798 break;
4799 }
4800 }
4801
4802 /* If there are any backwards branches that go from after the
4803 biv update to before it, then this giv is not replaceable. */
4804 if (v->replaceable)
4805 for (b = bl->biv; b; b = b->next_iv)
4806 if (back_branch_in_range_p (b->insn, loop_start, loop_end))
4807 {
4808 v->replaceable = 0;
4809 v->not_replaceable = 1;
4810 break;
4811 }
4812 }
4813 else
4814 {
4815 /* May still be replaceable, we don't have enough info here to
4816 decide. */
4817 v->replaceable = 0;
4818 v->not_replaceable = 0;
4819 }
4820 }
4821
4822 if (loop_dump_stream)
4823 {
4824 if (type == DEST_REG)
4825 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
4826 INSN_UID (insn), REGNO (dest_reg));
4827 else
4828 fprintf (loop_dump_stream, "Insn %d: dest address",
4829 INSN_UID (insn));
4830
4831 fprintf (loop_dump_stream, " src reg %d benefit %d",
4832 REGNO (src_reg), v->benefit);
4833 fprintf (loop_dump_stream, " used %d lifetime %d",
4834 v->times_used, v->lifetime);
4835
4836 if (v->replaceable)
4837 fprintf (loop_dump_stream, " replaceable");
4838
4839 if (GET_CODE (mult_val) == CONST_INT)
4840 {
4841 fprintf (loop_dump_stream, " mult ");
4842 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val));
4843 }
4844 else
4845 {
4846 fprintf (loop_dump_stream, " mult ");
4847 print_rtl (loop_dump_stream, mult_val);
4848 }
4849
4850 if (GET_CODE (add_val) == CONST_INT)
4851 {
4852 fprintf (loop_dump_stream, " add ");
4853 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val));
4854 }
4855 else
4856 {
4857 fprintf (loop_dump_stream, " add ");
4858 print_rtl (loop_dump_stream, add_val);
4859 }
4860 }
4861
4862 if (loop_dump_stream)
4863 fprintf (loop_dump_stream, "\n");
4864
4865 }
4866
4867
4868 /* All this does is determine whether a giv can be made replaceable because
4869 its final value can be calculated. This code cannot be part of record_giv
4870 above, because final_giv_value requires that the number of loop iterations
4871 be known, and that cannot be accurately calculated until after all givs
4872 have been identified. */
4873
4874 static void
4875 check_final_value (v, loop_start, loop_end)
4876 struct induction *v;
4877 rtx loop_start, loop_end;
4878 {
4879 struct iv_class *bl;
4880 rtx final_value = 0;
4881
4882 bl = reg_biv_class[REGNO (v->src_reg)];
4883
4884 /* DEST_ADDR givs will never reach here, because they are always marked
4885 replaceable above in record_giv. */
4886
4887 /* The giv can be replaced outright by the reduced register only if all
4888 of the following conditions are true:
4889 - the insn that sets the giv is always executed on any iteration
4890 on which the giv is used at all
4891 (there are two ways to deduce this:
4892 either the insn is executed on every iteration,
4893 or all uses follow that insn in the same basic block),
4894 - its final value can be calculated (this condition is different
4895 than the one above in record_giv)
4896 - no assignments to the biv occur during the giv's lifetime. */
4897
4898 #if 0
4899 /* This is only called now when replaceable is known to be false. */
4900 /* Clear replaceable, so that it won't confuse final_giv_value. */
4901 v->replaceable = 0;
4902 #endif
4903
4904 if ((final_value = final_giv_value (v, loop_start, loop_end))
4905 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
4906 {
4907 int biv_increment_seen = 0;
4908 rtx p = v->insn;
4909 rtx last_giv_use;
4910
4911 v->replaceable = 1;
4912
4913 /* When trying to determine whether or not a biv increment occurs
4914 during the lifetime of the giv, we can ignore uses of the variable
4915 outside the loop because final_value is true. Hence we cannot
4916 use regno_last_uid and regno_first_uid as above in record_giv. */
4917
4918 /* Search the loop to determine whether any assignments to the
4919 biv occur during the giv's lifetime. Start with the insn
4920 that sets the giv, and search around the loop until we come
4921 back to that insn again.
4922
4923 Also fail if there is a jump within the giv's lifetime that jumps
4924 to somewhere outside the lifetime but still within the loop. This
4925 catches spaghetti code where the execution order is not linear, and
4926 hence the above test fails. Here we assume that the giv lifetime
4927 does not extend from one iteration of the loop to the next, so as
4928 to make the test easier. Since the lifetime isn't known yet,
4929 this requires two loops. See also record_giv above. */
4930
4931 last_giv_use = v->insn;
4932
4933 while (1)
4934 {
4935 p = NEXT_INSN (p);
4936 if (p == loop_end)
4937 p = NEXT_INSN (loop_start);
4938 if (p == v->insn)
4939 break;
4940
4941 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4942 || GET_CODE (p) == CALL_INSN)
4943 {
4944 if (biv_increment_seen)
4945 {
4946 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4947 {
4948 v->replaceable = 0;
4949 v->not_replaceable = 1;
4950 break;
4951 }
4952 }
4953 else if (reg_set_p (v->src_reg, PATTERN (p)))
4954 biv_increment_seen = 1;
4955 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4956 last_giv_use = p;
4957 }
4958 }
4959
4960 /* Now that the lifetime of the giv is known, check for branches
4961 from within the lifetime to outside the lifetime if it is still
4962 replaceable. */
4963
4964 if (v->replaceable)
4965 {
4966 p = v->insn;
4967 while (1)
4968 {
4969 p = NEXT_INSN (p);
4970 if (p == loop_end)
4971 p = NEXT_INSN (loop_start);
4972 if (p == last_giv_use)
4973 break;
4974
4975 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
4976 && LABEL_NAME (JUMP_LABEL (p))
4977 && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
4978 || (INSN_UID (v->insn) >= max_uid_for_loop)
4979 || (INSN_UID (last_giv_use) >= max_uid_for_loop)
4980 || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
4981 && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
4982 || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
4983 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
4984 {
4985 v->replaceable = 0;
4986 v->not_replaceable = 1;
4987
4988 if (loop_dump_stream)
4989 fprintf (loop_dump_stream,
4990 "Found branch outside giv lifetime.\n");
4991
4992 break;
4993 }
4994 }
4995 }
4996
4997 /* If it is replaceable, then save the final value. */
4998 if (v->replaceable)
4999 v->final_value = final_value;
5000 }
5001
5002 if (loop_dump_stream && v->replaceable)
5003 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5004 INSN_UID (v->insn), REGNO (v->dest_reg));
5005 }
5006 \f
5007 /* Update the status of whether a giv can derive other givs.
5008
5009 We need to do something special if there is or may be an update to the biv
5010 between the time the giv is defined and the time it is used to derive
5011 another giv.
5012
5013 In addition, a giv that is only conditionally set is not allowed to
5014 derive another giv once a label has been passed.
5015
5016 The cases we look at are when a label or an update to a biv is passed. */
5017
5018 static void
5019 update_giv_derive (p)
5020 rtx p;
5021 {
5022 struct iv_class *bl;
5023 struct induction *biv, *giv;
5024 rtx tem;
5025 int dummy;
5026
5027 /* Search all IV classes, then all bivs, and finally all givs.
5028
5029 There are three cases we are concerned with. First we have the situation
5030 of a giv that is only updated conditionally. In that case, it may not
5031 derive any givs after a label is passed.
5032
5033 The second case is when a biv update occurs, or may occur, after the
5034 definition of a giv. For certain biv updates (see below) that are
5035 known to occur between the giv definition and use, we can adjust the
5036 giv definition. For others, or when the biv update is conditional,
5037 we must prevent the giv from deriving any other givs. There are two
5038 sub-cases within this case.
5039
5040 If this is a label, we are concerned with any biv update that is done
5041 conditionally, since it may be done after the giv is defined followed by
5042 a branch here (actually, we need to pass both a jump and a label, but
5043 this extra tracking doesn't seem worth it).
5044
5045 If this is a jump, we are concerned about any biv update that may be
5046 executed multiple times. We are actually only concerned about
5047 backward jumps, but it is probably not worth performing the test
5048 on the jump again here.
5049
5050 If this is a biv update, we must adjust the giv status to show that a
5051 subsequent biv update was performed. If this adjustment cannot be done,
5052 the giv cannot derive further givs. */
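/* A sketch of the compensation with made-up numbers: if giv g = 4*i
   is defined, and then the biv update `i = i + 1' is passed, a giv
   derived from g afterwards must be computed as g + 4. We therefore
   record biv->add_val * giv->mult_val, here (const_int 4), in
   giv->derive_adjustment.  */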
5053
5054 for (bl = loop_iv_list; bl; bl = bl->next)
5055 for (biv = bl->biv; biv; biv = biv->next_iv)
5056 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5057 || biv->insn == p)
5058 {
5059 for (giv = bl->giv; giv; giv = giv->next_iv)
5060 {
5061 /* If cant_derive is already true, there is no point in
5062 checking all of these conditions again. */
5063 if (giv->cant_derive)
5064 continue;
5065
5066 /* If this giv is conditionally set and we have passed a label,
5067 it cannot derive anything. */
5068 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5069 giv->cant_derive = 1;
5070
5071 /* Skip givs that have mult_val == 0, since
5072 they are really invariants. Also skip those that are
5073 replaceable, since we know their lifetime doesn't contain
5074 any biv update. */
5075 else if (giv->mult_val == const0_rtx || giv->replaceable)
5076 continue;
5077
5078 /* The only way we can allow this giv to derive another
5079 is if this is a biv increment and we can form the product
5080 of biv->add_val and giv->mult_val. In this case, we will
5081 be able to compute a compensation. */
5082 else if (biv->insn == p)
5083 {
5084 tem = 0;
5085
5086 if (biv->mult_val == const1_rtx)
5087 tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
5088 biv->add_val,
5089 giv->mult_val),
5090 &dummy);
5091
5092 if (tem && giv->derive_adjustment)
5093 tem = simplify_giv_expr (gen_rtx_PLUS (giv->mode, tem,
5094 giv->derive_adjustment),
5095 &dummy);
5096 if (tem)
5097 giv->derive_adjustment = tem;
5098 else
5099 giv->cant_derive = 1;
5100 }
5101 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5102 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5103 giv->cant_derive = 1;
5104 }
5105 }
5106 }
5107 \f
5108 /* Check whether an insn is an increment legitimate for a basic induction var.
5109 X is the source of insn P, or a part of it.
5110 MODE is the mode in which X should be interpreted.
5111
5112 DEST_REG is the putative biv, also the destination of the insn.
5113 We accept patterns of these forms:
5114 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5115 REG = INVARIANT + REG
5116
5117 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5118 and store the additive term into *INC_VAL.
5119
5120 If X is an assignment of an invariant into DEST_REG, we set
5121 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5122
5123 We also want to detect a BIV when it corresponds to a variable
5124 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5125 of the variable may be a PLUS that adds a SUBREG of that variable to
5126 an invariant and then sign- or zero-extends the result of the PLUS
5127 into the variable.
5128
5129 Most GIVs in such cases will be in the promoted mode, since that is
5130 probably the natural computation mode (and almost certainly the mode
5131 used for addresses) on the machine. So we view the pseudo-reg containing
5132 the variable as the BIV, as if it were simply incremented.
5133
5134 Note that treating the entire pseudo as a BIV will result in making
5135 simple increments to any GIVs based on it. However, if the variable
5136 overflows in its declared mode but not its promoted mode, the result will
5137 be incorrect. This is acceptable if the variable is signed, since
5138 overflows in such cases are undefined, but not if it is unsigned, since
5139 those overflows are defined. So we only check for SIGN_EXTEND and
5140 not ZERO_EXTEND.
5141
5142 If we cannot find a biv, we return 0. */
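/* Illustrative RTL for the accepted forms (register numbers made up;
   the second form is accepted only in an innermost loop, see below):

       (set (reg 100) (plus (reg 100) (const_int 4)))
           => *inc_val = (const_int 4), *mult_val = const1_rtx
       (set (reg 100) (const_int 0))
           => *inc_val = (const_int 0), *mult_val = const0_rtx  */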
5143
5144 static int
5145 basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
5146 register rtx x;
5147 enum machine_mode mode;
5148 rtx dest_reg;
5149 rtx p;
5150 rtx *inc_val;
5151 rtx *mult_val;
5152 {
5153 register enum rtx_code code;
5154 rtx arg;
5155 rtx insn, set = 0;
5156
5157 code = GET_CODE (x);
5158 switch (code)
5159 {
5160 case PLUS:
5161 if (XEXP (x, 0) == dest_reg
5162 || (GET_CODE (XEXP (x, 0)) == SUBREG
5163 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5164 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5165 arg = XEXP (x, 1);
5166 else if (XEXP (x, 1) == dest_reg
5167 || (GET_CODE (XEXP (x, 1)) == SUBREG
5168 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5169 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5170 arg = XEXP (x, 0);
5171 else
5172 return 0;
5173
5174 if (invariant_p (arg) != 1)
5175 return 0;
5176
5177 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5178 *mult_val = const1_rtx;
5179 return 1;
5180
5181 case SUBREG:
5182 /* If this is a SUBREG for a promoted variable, check the inner
5183 value. */
5184 if (SUBREG_PROMOTED_VAR_P (x))
5185 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
5186 dest_reg, p, inc_val, mult_val);
5187 return 0;
5188
5189 case REG:
5190 /* If this register is assigned in the previous insn, look at its
5191 source, but don't go outside the loop or past a label. */
5192
5193 for (insn = PREV_INSN (p);
5194 (insn && GET_CODE (insn) == NOTE
5195 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5196 insn = PREV_INSN (insn))
5197 ;
5198
5199 if (insn)
5200 set = single_set (insn);
5201
5202 if (set != 0
5203 && (SET_DEST (set) == x
5204 || (GET_CODE (SET_DEST (set)) == SUBREG
5205 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
5206 <= UNITS_PER_WORD)
5207 && SUBREG_REG (SET_DEST (set)) == x)))
5208 return basic_induction_var (SET_SRC (set),
5209 (GET_MODE (SET_SRC (set)) == VOIDmode
5210 ? GET_MODE (x)
5211 : GET_MODE (SET_SRC (set))),
5212 dest_reg, insn,
5213 inc_val, mult_val);
5214 /* ... fall through ... */
5215
      /* Can accept constant setting of biv only when inside the innermost loop.
5217 Otherwise, a biv of an inner loop may be incorrectly recognized
5218 as a biv of the outer loop,
5219 causing code to be moved INTO the inner loop. */
5220 case MEM:
5221 if (invariant_p (x) != 1)
5222 return 0;
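      /* ... fall through ... */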
5223 case CONST_INT:
5224 case SYMBOL_REF:
5225 case CONST:
5226 if (loops_enclosed == 1)
5227 {
5228 /* Possible bug here? Perhaps we don't know the mode of X. */
5229 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
5230 *mult_val = const0_rtx;
5231 return 1;
5232 }
5233 else
5234 return 0;
5235
5236 case SIGN_EXTEND:
5237 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5238 dest_reg, p, inc_val, mult_val);
5239 case ASHIFTRT:
5240 /* Similar, since this can be a sign extension. */
5241 for (insn = PREV_INSN (p);
5242 (insn && GET_CODE (insn) == NOTE
5243 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5244 insn = PREV_INSN (insn))
5245 ;
5246
5247 if (insn)
5248 set = single_set (insn);
5249
5250 if (set && SET_DEST (set) == XEXP (x, 0)
5251 && GET_CODE (XEXP (x, 1)) == CONST_INT
5252 && INTVAL (XEXP (x, 1)) >= 0
5253 && GET_CODE (SET_SRC (set)) == ASHIFT
5254 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
5255 return basic_induction_var (XEXP (SET_SRC (set), 0),
5256 GET_MODE (XEXP (x, 0)),
5257 dest_reg, insn, inc_val, mult_val);
5258 return 0;
5259
5260 default:
5261 return 0;
5262 }
5263 }
5264 \f
5265 /* A general induction variable (giv) is any quantity that is a linear
5266 function of a basic induction variable,
5267 i.e. giv = biv * mult_val + add_val.
5268 The coefficients can be any loop invariant quantity.
5269 A giv need not be computed directly from the biv;
5270 it can be computed by way of other givs. */
5271
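/* For example (illustrative only): in a loop with biv `i', the address
   of `a[i+3]' for an array of 4-byte ints is

	a + 4*i + 12

   which is a giv with mult_val 4 and loop-invariant add_val a + 12.  */
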
5272 /* Determine whether X computes a giv.
5273 If it does, return a nonzero value
5274 which is the benefit from eliminating the computation of X;
5275 set *SRC_REG to the register of the biv that it is computed from;
5276 set *ADD_VAL and *MULT_VAL to the coefficients,
5277 such that the value of X is biv * mult + add; */
5278
5279 static int
5280 general_induction_var (x, src_reg, add_val, mult_val)
5281 rtx x;
5282 rtx *src_reg;
5283 rtx *add_val;
5284 rtx *mult_val;
5285 {
5286 rtx orig_x = x;
5287 int benefit = 0;
5288 char *storage;
5289
5290 /* If this is an invariant, forget it, it isn't a giv. */
5291 if (invariant_p (x) == 1)
5292 return 0;
5293
5294 /* See if the expression could be a giv and get its form.
5295 Mark our place on the obstack in case we don't find a giv. */
5296 storage = (char *) oballoc (0);
5297 x = simplify_giv_expr (x, &benefit);
5298 if (x == 0)
5299 {
5300 obfree (storage);
5301 return 0;
5302 }
5303
5304 switch (GET_CODE (x))
5305 {
5306 case USE:
5307 case CONST_INT:
5308 /* Since this is now an invariant and wasn't before, it must be a giv
5309 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5310 with. */
5311 *src_reg = loop_iv_list->biv->dest_reg;
5312 *mult_val = const0_rtx;
5313 *add_val = x;
5314 break;
5315
5316 case REG:
5317 /* This is equivalent to a BIV. */
5318 *src_reg = x;
5319 *mult_val = const1_rtx;
5320 *add_val = const0_rtx;
5321 break;
5322
5323 case PLUS:
5324 /* Either (plus (biv) (invar)) or
5325 (plus (mult (biv) (invar_1)) (invar_2)). */
5326 if (GET_CODE (XEXP (x, 0)) == MULT)
5327 {
5328 *src_reg = XEXP (XEXP (x, 0), 0);
5329 *mult_val = XEXP (XEXP (x, 0), 1);
5330 }
5331 else
5332 {
5333 *src_reg = XEXP (x, 0);
5334 *mult_val = const1_rtx;
5335 }
5336 *add_val = XEXP (x, 1);
5337 break;
5338
5339 case MULT:
5340 /* ADD_VAL is zero. */
5341 *src_reg = XEXP (x, 0);
5342 *mult_val = XEXP (x, 1);
5343 *add_val = const0_rtx;
5344 break;
5345
5346 default:
5347 abort ();
5348 }
5349
  /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
     unless they are CONST_INT).  */
5352 if (GET_CODE (*add_val) == USE)
5353 *add_val = XEXP (*add_val, 0);
5354 if (GET_CODE (*mult_val) == USE)
5355 *mult_val = XEXP (*mult_val, 0);
5356
5357 benefit += rtx_cost (orig_x, SET);
5358
5359 /* Always return some benefit if this is a giv so it will be detected
5360 as such. This allows elimination of bivs that might otherwise
5361 not be eliminated. */
5362 return benefit == 0 ? 1 : benefit;
5363 }
5364 \f
5365 /* Given an expression, X, try to form it as a linear function of a biv.
5366 We will canonicalize it to be of the form
5367 (plus (mult (BIV) (invar_1))
5368 (invar_2))
5369 with possible degeneracies.
5370
5371 The invariant expressions must each be of a form that can be used as a
   machine operand.  We surround them with a USE rtx (a hack, but localized
5373 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5374 routine; it is the caller's responsibility to strip them.
5375
5376 If no such canonicalization is possible (i.e., two biv's are used or an
5377 expression that is neither invariant nor a biv or giv), this routine
5378 returns 0.
5379
5380 For a non-zero return, the result will have a code of CONST_INT, USE,
5381 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5382
5383 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
5384
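/* A worked example (illustrative):  given X = (mult (plus I (const_int 3))
   (const_int 4)) with I a biv, the MULT case below distributes over the
   PLUS and the result is canonicalized to

	(plus (mult I (const_int 4)) (const_int 12))

   i.e. invar_1 = 4 and invar_2 = 12.  */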
5385 static rtx
5386 simplify_giv_expr (x, benefit)
5387 rtx x;
5388 int *benefit;
5389 {
5390 enum machine_mode mode = GET_MODE (x);
5391 rtx arg0, arg1;
5392 rtx tem;
5393
5394 /* If this is not an integer mode, or if we cannot do arithmetic in this
5395 mode, this can't be a giv. */
5396 if (mode != VOIDmode
5397 && (GET_MODE_CLASS (mode) != MODE_INT
5398 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5399 return 0;
5400
5401 switch (GET_CODE (x))
5402 {
5403 case PLUS:
5404 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5405 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5406 if (arg0 == 0 || arg1 == 0)
5407 return 0;
5408
5409 /* Put constant last, CONST_INT last if both constant. */
5410 if ((GET_CODE (arg0) == USE
5411 || GET_CODE (arg0) == CONST_INT)
5412 && GET_CODE (arg1) != CONST_INT)
5413 tem = arg0, arg0 = arg1, arg1 = tem;
5414
5415 /* Handle addition of zero, then addition of an invariant. */
5416 if (arg1 == const0_rtx)
5417 return arg0;
5418 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5419 switch (GET_CODE (arg0))
5420 {
5421 case CONST_INT:
5422 case USE:
5423 /* Both invariant. Only valid if sum is machine operand.
5424 First strip off possible USE on the operands. */
5425 if (GET_CODE (arg0) == USE)
5426 arg0 = XEXP (arg0, 0);
5427
5428 if (GET_CODE (arg1) == USE)
5429 arg1 = XEXP (arg1, 0);
5430
5431 tem = 0;
5432 if (CONSTANT_P (arg0) && GET_CODE (arg1) == CONST_INT)
5433 {
5434 tem = plus_constant (arg0, INTVAL (arg1));
5435 if (GET_CODE (tem) != CONST_INT)
5436 tem = gen_rtx_USE (mode, tem);
5437 }
5438 else
5439 {
5440 /* Adding two invariants must result in an invariant,
5441 so enclose addition operation inside a USE and
5442 return it. */
5443 tem = gen_rtx_USE (mode, gen_rtx_PLUS (mode, arg0, arg1));
5444 }
5445
5446 return tem;
5447
5448 case REG:
5449 case MULT:
5450 /* biv + invar or mult + invar. Return sum. */
5451 return gen_rtx_PLUS (mode, arg0, arg1);
5452
5453 case PLUS:
5454 /* (a + invar_1) + invar_2. Associate. */
5455 return simplify_giv_expr (gen_rtx_PLUS (mode,
5456 XEXP (arg0, 0),
5457 gen_rtx_PLUS (mode,
5458 XEXP (arg0, 1), arg1)),
5459 benefit);
5460
5461 default:
5462 abort ();
5463 }
5464
5465 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5466 MULT to reduce cases. */
5467 if (GET_CODE (arg0) == REG)
5468 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
5469 if (GET_CODE (arg1) == REG)
5470 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
5471
5472 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5473 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5474 Recurse to associate the second PLUS. */
5475 if (GET_CODE (arg1) == MULT)
5476 tem = arg0, arg0 = arg1, arg1 = tem;
5477
5478 if (GET_CODE (arg1) == PLUS)
5479 return simplify_giv_expr (gen_rtx_PLUS (mode,
5480 gen_rtx_PLUS (mode, arg0,
5481 XEXP (arg1, 0)),
5482 XEXP (arg1, 1)),
5483 benefit);
5484
5485 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5486 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5487 abort ();
5488
5489 if (XEXP (arg0, 0) != XEXP (arg1, 0))
5490 return 0;
5491
5492 return simplify_giv_expr (gen_rtx_MULT (mode,
5493 XEXP (arg0, 0),
5494 gen_rtx_PLUS (mode,
5495 XEXP (arg0, 1),
5496 XEXP (arg1, 1))),
5497 benefit);
5498
5499 case MINUS:
5500 /* Handle "a - b" as "a + b * (-1)". */
5501 return simplify_giv_expr (gen_rtx_PLUS (mode,
5502 XEXP (x, 0),
5503 gen_rtx_MULT (mode, XEXP (x, 1),
5504 constm1_rtx)),
5505 benefit);
5506
5507 case MULT:
5508 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5509 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5510 if (arg0 == 0 || arg1 == 0)
5511 return 0;
5512
5513 /* Put constant last, CONST_INT last if both constant. */
5514 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5515 && GET_CODE (arg1) != CONST_INT)
5516 tem = arg0, arg0 = arg1, arg1 = tem;
5517
5518 /* If second argument is not now constant, not giv. */
5519 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5520 return 0;
5521
5522 /* Handle multiply by 0 or 1. */
5523 if (arg1 == const0_rtx)
5524 return const0_rtx;
5525
5526 else if (arg1 == const1_rtx)
5527 return arg0;
5528
5529 switch (GET_CODE (arg0))
5530 {
5531 case REG:
5532 /* biv * invar. Done. */
5533 return gen_rtx_MULT (mode, arg0, arg1);
5534
5535 case CONST_INT:
5536 /* Product of two constants. */
5537 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5538
5539 case USE:
5540 /* invar * invar. Not giv. */
5541 return 0;
5542
5543 case MULT:
5544 /* (a * invar_1) * invar_2. Associate. */
5545 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (arg0, 0),
5546 gen_rtx_MULT (mode,
5547 XEXP (arg0, 1),
5548 arg1)),
5549 benefit);
5550
5551 case PLUS:
5552 /* (a + invar_1) * invar_2. Distribute. */
5553 return simplify_giv_expr (gen_rtx_PLUS (mode,
5554 gen_rtx_MULT (mode,
5555 XEXP (arg0, 0),
5556 arg1),
5557 gen_rtx_MULT (mode,
5558 XEXP (arg0, 1),
5559 arg1)),
5560 benefit);
5561
5562 default:
5563 abort ();
5564 }
5565
5566 case ASHIFT:
5567 /* Shift by constant is multiply by power of two. */
5568 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5569 return 0;
5570
5571 return simplify_giv_expr (gen_rtx_MULT (mode,
5572 XEXP (x, 0),
5573 GEN_INT ((HOST_WIDE_INT) 1
5574 << INTVAL (XEXP (x, 1)))),
5575 benefit);
5576
5577 case NEG:
5578 /* "-a" is "a * (-1)" */
5579 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
5580 benefit);
5581
5582 case NOT:
5583 /* "~a" is "-a - 1". Silly, but easy. */
5584 return simplify_giv_expr (gen_rtx_MINUS (mode,
5585 gen_rtx_NEG (mode, XEXP (x, 0)),
5586 const1_rtx),
5587 benefit);
5588
5589 case USE:
5590 /* Already in proper form for invariant. */
5591 return x;
5592
5593 case REG:
5594 /* If this is a new register, we can't deal with it. */
5595 if (REGNO (x) >= max_reg_before_loop)
5596 return 0;
5597
5598 /* Check for biv or giv. */
5599 switch (reg_iv_type[REGNO (x)])
5600 {
5601 case BASIC_INDUCT:
5602 return x;
5603 case GENERAL_INDUCT:
5604 {
5605 struct induction *v = reg_iv_info[REGNO (x)];
5606
5607 /* Form expression from giv and add benefit. Ensure this giv
5608 can derive another and subtract any needed adjustment if so. */
5609 *benefit += v->benefit;
5610 if (v->cant_derive)
5611 return 0;
5612
5613 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, v->src_reg,
5614 v->mult_val),
5615 v->add_val);
5616 if (v->derive_adjustment)
5617 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
5618 return simplify_giv_expr (tem, benefit);
5619 }
5620
5621 default:
5622 break;
5623 }
5624
5625 /* Fall through to general case. */
5626 default:
5627 /* If invariant, return as USE (unless CONST_INT).
5628 Otherwise, not giv. */
5629 if (GET_CODE (x) == USE)
5630 x = XEXP (x, 0);
5631
5632 if (invariant_p (x) == 1)
5633 {
5634 if (GET_CODE (x) == CONST_INT)
5635 return x;
5636 else
5637 return gen_rtx_USE (mode, x);
5638 }
5639 else
5640 return 0;
5641 }
5642 }
5643 \f
5644 /* Help detect a giv that is calculated by several consecutive insns;
5645 for example,
5646 giv = biv * M
5647 giv = giv + A
5648 The caller has already identified the first insn P as having a giv as dest;
5649 we check that all other insns that set the same register follow
5650 immediately after P, that they alter nothing else,
5651 and that the result of the last is still a giv.
5652
5653 The value is 0 if the reg set in P is not really a giv.
5654 Otherwise, the value is the amount gained by eliminating
5655 all the consecutive insns that compute the value.
5656
5657 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
5658 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
5659
5660 The coefficients of the ultimate giv value are stored in
5661 *MULT_VAL and *ADD_VAL. */
5662
5663 static int
5664 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
5665 add_val, mult_val)
5666 int first_benefit;
5667 rtx p;
5668 rtx src_reg;
5669 rtx dest_reg;
5670 rtx *add_val;
5671 rtx *mult_val;
5672 {
5673 int count;
5674 enum rtx_code code;
5675 int benefit;
5676 rtx temp;
5677 rtx set;
5678
5679 /* Indicate that this is a giv so that we can update the value produced in
5680 each insn of the multi-insn sequence.
5681
5682 This induction structure will be used only by the call to
5683 general_induction_var below, so we can allocate it on our stack.
5684 If this is a giv, our caller will replace the induct var entry with
5685 a new induction structure. */
5686 struct induction *v
5687 = (struct induction *) alloca (sizeof (struct induction));
5688 v->src_reg = src_reg;
5689 v->mult_val = *mult_val;
5690 v->add_val = *add_val;
5691 v->benefit = first_benefit;
5692 v->cant_derive = 0;
5693 v->derive_adjustment = 0;
5694
5695 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
5696 reg_iv_info[REGNO (dest_reg)] = v;
5697
5698 count = n_times_set[REGNO (dest_reg)] - 1;
5699
5700 while (count > 0)
5701 {
5702 p = NEXT_INSN (p);
5703 code = GET_CODE (p);
5704
5705 /* If libcall, skip to end of call sequence. */
5706 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
5707 p = XEXP (temp, 0);
5708
5709 if (code == INSN
5710 && (set = single_set (p))
5711 && GET_CODE (SET_DEST (set)) == REG
5712 && SET_DEST (set) == dest_reg
5713 && ((benefit = general_induction_var (SET_SRC (set), &src_reg,
5714 add_val, mult_val))
5715 /* Giv created by equivalent expression. */
5716 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
5717 && (benefit = general_induction_var (XEXP (temp, 0), &src_reg,
5718 add_val, mult_val))))
5719 && src_reg == v->src_reg)
5720 {
5721 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5722 benefit += libcall_benefit (p);
5723
5724 count--;
5725 v->mult_val = *mult_val;
5726 v->add_val = *add_val;
5727 v->benefit = benefit;
5728 }
5729 else if (code != NOTE)
5730 {
5731 /* Allow insns that set something other than this giv to a
5732 constant. Such insns are needed on machines which cannot
5733 include long constants and should not disqualify a giv. */
5734 if (code == INSN
5735 && (set = single_set (p))
5736 && SET_DEST (set) != dest_reg
5737 && CONSTANT_P (SET_SRC (set)))
5738 continue;
5739
5740 reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
5741 return 0;
5742 }
5743 }
5744
5745 return v->benefit;
5746 }
5747 \f
5748 /* Return an rtx, if any, that expresses giv G2 as a function of the register
5749 represented by G1. If no such expression can be found, or it is clear that
5750 it cannot possibly be a valid address, 0 is returned.
5751
5752 To perform the computation, we note that
5753 G1 = a * v + b and
5754 G2 = c * v + d
5755 where `v' is the biv.
5756
5757 So G2 = (c/a) * G1 + (d - b*c/a) */
5758
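/* Worked example (illustrative):  for G1 = 4 * v + 8 and G2 = 8 * v + 20
   we have c/a = 2 and d - b*c/a = 20 - 8*2 = 4, so G2 = 2 * G1 + 4.  */
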
5759 #ifdef ADDRESS_COST
5760 static rtx
5761 express_from (g1, g2)
5762 struct induction *g1, *g2;
5763 {
5764 rtx mult, add;
5765
5766 /* The value that G1 will be multiplied by must be a constant integer. Also,
5767 the only chance we have of getting a valid address is if b*c/a (see above
5768 for notation) is also an integer. */
5769 if (GET_CODE (g1->mult_val) != CONST_INT
5770 || GET_CODE (g2->mult_val) != CONST_INT
5771 || GET_CODE (g1->add_val) != CONST_INT
5772 || g1->mult_val == const0_rtx
5773 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
5774 return 0;
5775
5776 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
5777 add = plus_constant (g2->add_val, - INTVAL (g1->add_val) * INTVAL (mult));
5778
5779 /* Form simplified final result. */
5780 if (mult == const0_rtx)
5781 return add;
5782 else if (mult == const1_rtx)
5783 mult = g1->dest_reg;
5784 else
5785 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
5786
5787 if (add == const0_rtx)
5788 return mult;
5789 else
5790 return gen_rtx_PLUS (g2->mode, mult, add);
5791 }
5792 #endif
5793 \f
5794 /* Return 1 if giv G2 can be combined with G1. This means that G2 can use
5795 (either directly or via an address expression) a register used to represent
   G1.  Set g2->new_reg to a representation of G1 (normally just
5797 g1->dest_reg). */
5798
5799 static int
5800 combine_givs_p (g1, g2)
5801 struct induction *g1, *g2;
5802 {
5803 rtx tem;
5804
5805 /* If these givs are identical, they can be combined. */
5806 if (rtx_equal_p (g1->mult_val, g2->mult_val)
5807 && rtx_equal_p (g1->add_val, g2->add_val))
5808 {
5809 g2->new_reg = g1->dest_reg;
5810 return 1;
5811 }
5812
5813 #ifdef ADDRESS_COST
5814 /* If G2 can be expressed as a function of G1 and that function is valid
5815 as an address and no more expensive than using a register for G2,
5816 the expression of G2 in terms of G1 can be used. */
5817 if (g2->giv_type == DEST_ADDR
5818 && (tem = express_from (g1, g2)) != 0
5819 && memory_address_p (g2->mem_mode, tem)
5820 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location))
5821 {
5822 g2->new_reg = tem;
5823 return 1;
5824 }
5825 #endif
5826
5827 return 0;
5828 }
5829 \f
5830 #ifdef GIV_SORT_CRITERION
5831 /* Compare two givs and sort the most desirable one for combinations first.
5832 This is used only in one qsort call below. */
5833
5834 static int
5835 giv_sort (x, y)
5836 struct induction **x, **y;
5837 {
5838 GIV_SORT_CRITERION (*x, *y);
5839
5840 return 0;
5841 }
5842 #endif
5843
5844 /* Check all pairs of givs for iv_class BL and see if any can be combined with
5845 any other. If so, point SAME to the giv combined with and set NEW_REG to
5846 be an expression (in terms of the other giv's DEST_REG) equivalent to the
5847 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
5848
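/* For instance (illustrative), DEST_ADDR givs G1 = 4*i + 8 and
   G2 = 4*i + 12 have equal mult_val, so express_from can rewrite the
   address of G2 as (plus G1_reg (const_int 4)); if that is a valid and
   no more expensive address, the givs are combined and only one
   register need be strength-reduced.  */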
5849 static void
5850 combine_givs (bl)
5851 struct iv_class *bl;
5852 {
5853 struct induction *g1, *g2, **giv_array;
5854 int i, j, giv_count, pass;
5855
5856 /* Count givs, because bl->giv_count is incorrect here. */
5857 giv_count = 0;
5858 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5859 giv_count++;
5860
5861 giv_array
5862 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
5863 i = 0;
5864 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5865 giv_array[i++] = g1;
5866
5867 #ifdef GIV_SORT_CRITERION
5868 /* Sort the givs if GIV_SORT_CRITERION is defined.
5869 This is usually defined for processors which lack
5870 negative register offsets so more givs may be combined. */
5871
5872 if (loop_dump_stream)
5873 fprintf (loop_dump_stream, "%d givs counted, sorting...\n", giv_count);
5874
5875 qsort (giv_array, giv_count, sizeof (struct induction *), giv_sort);
5876 #endif
5877
5878 for (i = 0; i < giv_count; i++)
5879 {
5880 g1 = giv_array[i];
5881 for (pass = 0; pass <= 1; pass++)
5882 for (j = 0; j < giv_count; j++)
5883 {
5884 g2 = giv_array[j];
5885 if (g1 != g2
5886 /* First try to combine with replaceable givs, then all givs. */
5887 && (g1->replaceable || pass == 1)
5888 /* If either has already been combined or is to be ignored, can't
5889 combine. */
5890 && ! g1->ignore && ! g2->ignore && ! g1->same && ! g2->same
5891 /* If something has been based on G2, G2 cannot itself be based
5892 on something else. */
5893 && ! g2->combined_with
5894 && combine_givs_p (g1, g2))
5895 {
5896 /* g2->new_reg set by `combine_givs_p' */
5897 g2->same = g1;
5898 g1->combined_with = 1;
5899
5900 /* If one of these givs is a DEST_REG that was only used
5901 once, by the other giv, this is actually a single use.
5902 The DEST_REG has the correct cost, while the other giv
5903 counts the REG use too often. */
5904 if (g2->giv_type == DEST_REG
5905 && n_times_used[REGNO (g2->dest_reg)] == 1
5906 && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
5907 g1->benefit = g2->benefit;
5908 else if (g1->giv_type != DEST_REG
5909 || n_times_used[REGNO (g1->dest_reg)] != 1
5910 || ! reg_mentioned_p (g1->dest_reg,
5911 PATTERN (g2->insn)))
5912 {
5913 g1->benefit += g2->benefit;
5914 g1->times_used += g2->times_used;
5915 }
5916 /* ??? The new final_[bg]iv_value code does a much better job
5917 of finding replaceable giv's, and hence this code may no
5918 longer be necessary. */
5919 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
5920 g1->benefit -= copy_cost;
5921 g1->lifetime += g2->lifetime;
5922
5923 if (loop_dump_stream)
5924 fprintf (loop_dump_stream, "giv at %d combined with giv at %d\n",
5925 INSN_UID (g2->insn), INSN_UID (g1->insn));
5926 }
5927 }
5928 }
5929 }
5930 \f
/* Emit code before INSERT_BEFORE to set REG = B * M + A.  */
5932
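/* E.g. (illustrative), for B = (reg i), M = (const_int 4) and
   A = (const_int 12) we emit a sequence computing REG = i * 4 + 12;
   expand_mult_add will usually open-code the multiplication by 4
   as a shift.  */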
5933 void
5934 emit_iv_add_mult (b, m, a, reg, insert_before)
5935 rtx b; /* initial value of basic induction variable */
5936 rtx m; /* multiplicative constant */
5937 rtx a; /* additive constant */
5938 rtx reg; /* destination register */
5939 rtx insert_before;
5940 {
5941 rtx seq;
5942 rtx result;
5943
5944 /* Prevent unexpected sharing of these rtx. */
5945 a = copy_rtx (a);
5946 b = copy_rtx (b);
5947
5948 /* Increase the lifetime of any invariants moved further in code. */
5949 update_reg_last_use (a, insert_before);
5950 update_reg_last_use (b, insert_before);
5951 update_reg_last_use (m, insert_before);
5952
5953 start_sequence ();
5954 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
5955 if (reg != result)
5956 emit_move_insn (reg, result);
5957 seq = gen_sequence ();
5958 end_sequence ();
5959
5960 emit_insn_before (seq, insert_before);
5961
5962 record_base_value (REGNO (reg), b);
5963 }
5964 \f
5965 /* Test whether A * B can be computed without
5966 an actual multiply insn. Value is 1 if so. */
5967
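/* E.g. (illustrative), multiplying by 5 usually expands to a short
   shift-and-add sequence and is considered cheap, while multiplying by
   a constant such as 2863 may need a real multiply insn or a long
   sequence of insns, and is not.  */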
5968 static int
5969 product_cheap_p (a, b)
5970 rtx a;
5971 rtx b;
5972 {
5973 int i;
5974 rtx tmp;
5975 struct obstack *old_rtl_obstack = rtl_obstack;
5976 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
5977 int win = 1;
5978
5979 /* If only one is constant, make it B. */
5980 if (GET_CODE (a) == CONST_INT)
5981 tmp = a, a = b, b = tmp;
5982
5983 /* If first constant, both constant, so don't need multiply. */
5984 if (GET_CODE (a) == CONST_INT)
5985 return 1;
5986
5987 /* If second not constant, neither is constant, so would need multiply. */
5988 if (GET_CODE (b) != CONST_INT)
5989 return 0;
5990
5991 /* One operand is constant, so might not need multiply insn. Generate the
5992 code for the multiply and see if a call or multiply, or long sequence
5993 of insns is generated. */
5994
5995 rtl_obstack = &temp_obstack;
5996 start_sequence ();
5997 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
5998 tmp = gen_sequence ();
5999 end_sequence ();
6000
6001 if (GET_CODE (tmp) == SEQUENCE)
6002 {
6003 if (XVEC (tmp, 0) == 0)
6004 win = 1;
6005 else if (XVECLEN (tmp, 0) > 3)
6006 win = 0;
6007 else
6008 for (i = 0; i < XVECLEN (tmp, 0); i++)
6009 {
6010 rtx insn = XVECEXP (tmp, 0, i);
6011
6012 if (GET_CODE (insn) != INSN
6013 || (GET_CODE (PATTERN (insn)) == SET
6014 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
6015 || (GET_CODE (PATTERN (insn)) == PARALLEL
6016 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
6017 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
6018 {
6019 win = 0;
6020 break;
6021 }
6022 }
6023 }
6024 else if (GET_CODE (tmp) == SET
6025 && GET_CODE (SET_SRC (tmp)) == MULT)
6026 win = 0;
6027 else if (GET_CODE (tmp) == PARALLEL
6028 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
6029 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
6030 win = 0;
6031
6032 /* Free any storage we obtained in generating this multiply and restore rtl
6033 allocation to its normal obstack. */
6034 obstack_free (&temp_obstack, storage);
6035 rtl_obstack = old_rtl_obstack;
6036
6037 return win;
6038 }
6039 \f
/* Check to see if loop can be terminated by a "decrement and branch until
   zero" instruction.  If so, add a REG_NONNEG note to the branch insn.
6042 Also try reversing an increment loop to a decrement loop
6043 to see if the optimization can be performed.
6044 Value is nonzero if optimization was performed. */
6045
6046 /* This is useful even if the architecture doesn't have such an insn,
   because it might change a loop which increments from 0 to n to a loop
6048 which decrements from n to 0. A loop that decrements to zero is usually
6049 faster than one that increments from zero. */
6050
6051 /* ??? This could be rewritten to use some of the loop unrolling procedures,
6052 such as approx_final_value, biv_total_increment, loop_iterations, and
6053 final_[bg]iv_value. */
6054
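/* Illustrative example:  when the biv is used only for counting, a loop
   equivalent to

	for (i = 0; i < n; i++)
	  body;

   can be rewritten as

	for (i = n - 1; i >= 0; i--)
	  body;

   so that the exit test becomes a compare against zero and the branch
   can carry a REG_NONNEG note.  */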
6055 static int
6056 check_dbra_loop (loop_end, insn_count, loop_start)
6057 rtx loop_end;
6058 int insn_count;
6059 rtx loop_start;
6060 {
6061 struct iv_class *bl;
6062 rtx reg;
6063 rtx jump_label;
6064 rtx final_value;
6065 rtx start_value;
6066 rtx new_add_val;
6067 rtx comparison;
6068 rtx before_comparison;
6069 rtx p;
6070
6071 /* If last insn is a conditional branch, and the insn before tests a
6072 register value, try to optimize it. Otherwise, we can't do anything. */
6073
6074 comparison = get_condition_for_loop (PREV_INSN (loop_end));
6075 if (comparison == 0)
6076 return 0;
6077
6078 /* Check all of the bivs to see if the compare uses one of them.
6079 Skip biv's set more than once because we can't guarantee that
6080 it will be zero on the last iteration. Also skip if the biv is
6081 used between its update and the test insn. */
6082
6083 for (bl = loop_iv_list; bl; bl = bl->next)
6084 {
6085 if (bl->biv_count == 1
6086 && bl->biv->dest_reg == XEXP (comparison, 0)
6087 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
6088 PREV_INSN (PREV_INSN (loop_end))))
6089 break;
6090 }
6091
6092 if (! bl)
6093 return 0;
6094
6095 /* Look for the case where the basic induction variable is always
6096 nonnegative, and equals zero on the last iteration.
6097 In this case, add a reg_note REG_NONNEG, which allows the
6098 m68k DBRA instruction to be used. */
6099
6100 if (((GET_CODE (comparison) == GT
6101 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6102 && INTVAL (XEXP (comparison, 1)) == -1)
6103 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
6104 && GET_CODE (bl->biv->add_val) == CONST_INT
6105 && INTVAL (bl->biv->add_val) < 0)
6106 {
6107 /* Initial value must be greater than 0,
6108 init_val % -dec_value == 0 to ensure that it equals zero on
6109 the last iteration */
6110
6111 if (GET_CODE (bl->initial_value) == CONST_INT
6112 && INTVAL (bl->initial_value) > 0
6113 && (INTVAL (bl->initial_value)
6114 % (-INTVAL (bl->biv->add_val))) == 0)
6115 {
6116 /* register always nonnegative, add REG_NOTE to branch */
6117 REG_NOTES (PREV_INSN (loop_end))
6118 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6119 REG_NOTES (PREV_INSN (loop_end)));
6120 bl->nonneg = 1;
6121
6122 return 1;
6123 }
6124
6125 /* If the decrement is 1 and the value was tested as >= 0 before
6126 the loop, then we can safely optimize. */
6127 for (p = loop_start; p; p = PREV_INSN (p))
6128 {
6129 if (GET_CODE (p) == CODE_LABEL)
6130 break;
6131 if (GET_CODE (p) != JUMP_INSN)
6132 continue;
6133
6134 before_comparison = get_condition_for_loop (p);
6135 if (before_comparison
6136 && XEXP (before_comparison, 0) == bl->biv->dest_reg
6137 && GET_CODE (before_comparison) == LT
6138 && XEXP (before_comparison, 1) == const0_rtx
6139 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
6140 && INTVAL (bl->biv->add_val) == -1)
6141 {
6142 REG_NOTES (PREV_INSN (loop_end))
6143 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6144 REG_NOTES (PREV_INSN (loop_end)));
6145 bl->nonneg = 1;
6146
6147 return 1;
6148 }
6149 }
6150 }
6151 else if (num_mem_sets <= 1)
6152 {
6153 /* Try to change inc to dec, so can apply above optimization. */
6154 /* Can do this if:
6155 all registers modified are induction variables or invariant,
6156 all memory references have non-overlapping addresses
6157 (obviously true if only one write)
6158 allow 2 insns for the compare/jump at the end of the loop. */
6159 /* Also, we must avoid any instructions which use both the reversed
6160 biv and another biv. Such instructions will fail if the loop is
6161 reversed. We meet this condition by requiring that either
6162 no_use_except_counting is true, or else that there is only
6163 one biv. */
6164 int num_nonfixed_reads = 0;
6165 /* 1 if the iteration var is used only to count iterations. */
6166 int no_use_except_counting = 0;
6167 /* 1 if the loop has no memory store, or it has a single memory store
6168 which is reversible. */
6169 int reversible_mem_store = 1;
6170
6171 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6172 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6173 num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
6174
6175 if (bl->giv_count == 0
6176 && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
6177 {
6178 rtx bivreg = regno_reg_rtx[bl->regno];
6179
6180 /* If there are no givs for this biv, and the only exit is the
	     fall through at the end of the loop, then
6182 see if perhaps there are no uses except to count. */
6183 no_use_except_counting = 1;
6184 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6185 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6186 {
6187 rtx set = single_set (p);
6188
6189 if (set && GET_CODE (SET_DEST (set)) == REG
6190 && REGNO (SET_DEST (set)) == bl->regno)
6191 /* An insn that sets the biv is okay. */
6192 ;
6193 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
6194 || p == prev_nonnote_insn (loop_end))
6195 /* Don't bother about the end test. */
6196 ;
6197 else if (reg_mentioned_p (bivreg, PATTERN (p)))
6198 /* Any other use of the biv is no good. */
6199 {
6200 no_use_except_counting = 0;
6201 break;
6202 }
6203 }
6204 }
6205
6206 /* If the loop has a single store, and the destination address is
6207 invariant, then we can't reverse the loop, because this address
6208 might then have the wrong value at loop exit.
6209 This would work if the source was invariant also, however, in that
6210 case, the insn should have been moved out of the loop. */
6211
6212 if (num_mem_sets == 1)
6213 reversible_mem_store
6214 = (! unknown_address_altered
6215 && ! invariant_p (XEXP (loop_store_mems[0], 0)));
6216
6217 /* This code only acts for innermost loops. Also it simplifies
6218 the memory address check by only reversing loops with
6219 zero or one memory access.
6220 Two memory accesses could involve parts of the same array,
6221 and that can't be reversed. */
6222
6223 if (num_nonfixed_reads <= 1
6224 && !loop_has_call
6225 && !loop_has_volatile
6226 && reversible_mem_store
6227 && (no_use_except_counting
6228 || ((bl->giv_count + bl->biv_count + num_mem_sets
6229 + num_movables + 2 == insn_count)
6230 && (bl == loop_iv_list && bl->next == 0))))
6231 {
6232 rtx tem;
6233
6234 /* Loop can be reversed. */
6235 if (loop_dump_stream)
6236 fprintf (loop_dump_stream, "Can reverse loop\n");
6237
6238 /* Now check other conditions:
6239
6240 The increment must be a constant, as must the initial value,
6241 and the comparison code must be LT.
6242
6243 This test can probably be improved since +/- 1 in the constant
6244 can be obtained by changing LT to LE and vice versa; this is
6245 confusing. */
6246
6247 if (comparison
6248 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6249 /* LE gets turned into LT */
6250 && GET_CODE (comparison) == LT
6251 && GET_CODE (bl->initial_value) == CONST_INT)
6252 {
6253 HOST_WIDE_INT add_val, comparison_val;
6254 rtx initial_value;
6255
6256 add_val = INTVAL (bl->biv->add_val);
6257 comparison_val = INTVAL (XEXP (comparison, 1));
6258 initial_value = bl->initial_value;
6259
6260 /* Normalize the initial value if it is an integer and
6261 has no other use except as a counter. This will allow
6262 a few more loops to be reversed. */
6263 if (no_use_except_counting
6264 && GET_CODE (initial_value) == CONST_INT)
6265 {
6266 comparison_val = comparison_val - INTVAL (bl->initial_value);
6267 initial_value = const0_rtx;
6268 }
6269
6270 /* If the initial value is not zero, or if the comparison
6271 value is not an exact multiple of the increment, then we
6272 can not reverse this loop. */
6273 if (initial_value != const0_rtx
6274 || (comparison_val % add_val) != 0)
6275 return 0;
6276
6277 /* Reset these in case we normalized the initial value
6278 and comparison value above. */
6279 bl->initial_value = initial_value;
6280 XEXP (comparison, 1) = GEN_INT (comparison_val);
6281
6282 /* Register will always be nonnegative, with value
6283 0 on last iteration if loop reversed */
6284
6285 /* Save some info needed to produce the new insns. */
6286 reg = bl->biv->dest_reg;
6287 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
6288 if (jump_label == pc_rtx)
6289 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
6290 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
6291
6292 final_value = XEXP (comparison, 1);
6293 start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
6294 - INTVAL (bl->biv->add_val));
6295
6296 /* Initialize biv to start_value before loop start.
6297 The old initializing insn will be deleted as a
6298 dead store by flow.c. */
6299 emit_insn_before (gen_move_insn (reg, start_value), loop_start);
6300
6301 /* Add insn to decrement register, and delete insn
6302 that incremented the register. */
6303 p = emit_insn_before (gen_add2_insn (reg, new_add_val),
6304 bl->biv->insn);
6305 delete_insn (bl->biv->insn);
6306
6307 /* Update biv info to reflect its new status. */
6308 bl->biv->insn = p;
6309 bl->initial_value = start_value;
6310 bl->biv->add_val = new_add_val;
6311
6312 /* Inc LABEL_NUSES so that delete_insn will
6313 not delete the label. */
6314 LABEL_NUSES (XEXP (jump_label, 0)) ++;
6315
6316 /* Emit an insn after the end of the loop to set the biv's
6317 proper exit value if it is used anywhere outside the loop. */
6318 if ((REGNO_LAST_UID (bl->regno)
6319 != INSN_UID (PREV_INSN (PREV_INSN (loop_end))))
6320 || ! bl->init_insn
6321 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
6322 emit_insn_after (gen_move_insn (reg, final_value),
6323 loop_end);
6324
6325 /* Delete compare/branch at end of loop. */
6326 delete_insn (PREV_INSN (loop_end));
6327 delete_insn (PREV_INSN (loop_end));
6328
6329 /* Add new compare/branch insn at end of loop. */
6330 start_sequence ();
6331 emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
6332 GET_MODE (reg), 0, 0);
6333 emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
6334 tem = gen_sequence ();
6335 end_sequence ();
6336 emit_jump_insn_before (tem, loop_end);
6337
6338 for (tem = PREV_INSN (loop_end);
6339 tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
6340 ;
6341 if (tem)
6342 {
6343 JUMP_LABEL (tem) = XEXP (jump_label, 0);
6344
6345 /* Increment of LABEL_NUSES done above. */
6346 /* Register is now always nonnegative,
6347 so add REG_NONNEG note to the branch. */
6348 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6349 REG_NOTES (tem));
6350 }
6351
6352 bl->nonneg = 1;
6353
6354 /* Mark that this biv has been reversed. Each giv which depends
6355 on this biv, and which is also live past the end of the loop
6356 will have to be fixed up. */
6357
6358 bl->reversed = 1;
6359
6360 if (loop_dump_stream)
6361 fprintf (loop_dump_stream,
6362 "Reversed loop and added reg_nonneg\n");
6363
6364 return 1;
6365 }
6366 }
6367 }
6368
6369 return 0;
6370 }
6371 \f
6372 /* Verify whether the biv BL appears to be eliminable,
6373 based on the insns in the loop that refer to it.
6374 LOOP_START is the first insn of the loop, and END is the end insn.
6375
6376 If ELIMINATE_P is non-zero, actually do the elimination.
6377
6378 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
6379 determine whether invariant insns should be placed inside or at the
6380 start of the loop. */
6381
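/* For example (illustrative), if biv `i' is used only in the exit test
   `i < n' and the giv `p = 4*i + base' has been strength-reduced, the
   test can be rewritten as `p < 4*n + base'; then `i' itself need never
   be computed and its increment can be deleted.  */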
6382 static int
6383 maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
6384 struct iv_class *bl;
6385 rtx loop_start;
6386 rtx end;
6387 int eliminate_p;
6388 int threshold, insn_count;
6389 {
6390 rtx reg = bl->biv->dest_reg;
6391 rtx p;
6392
6393 /* Scan all insns in the loop, stopping if we find one that uses the
6394 biv in a way that we cannot eliminate. */
6395
6396 for (p = loop_start; p != end; p = NEXT_INSN (p))
6397 {
6398 enum rtx_code code = GET_CODE (p);
6399 rtx where = threshold >= insn_count ? loop_start : p;
6400
6401 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
6402 && reg_mentioned_p (reg, PATTERN (p))
6403 && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
6404 {
6405 if (loop_dump_stream)
6406 fprintf (loop_dump_stream,
6407 "Cannot eliminate biv %d: biv used in insn %d.\n",
6408 bl->regno, INSN_UID (p));
6409 break;
6410 }
6411 }
6412
6413 if (p == end)
6414 {
6415 if (loop_dump_stream)
6416 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
6417 bl->regno, eliminate_p ? "was" : "can be");
6418 return 1;
6419 }
6420
6421 return 0;
6422 }
6423 \f
6424 /* If BL appears in X (part of the pattern of INSN), see if we can
6425 eliminate its use. If so, return 1. If not, return 0.
6426
6427 If BIV does not appear in X, return 1.
6428
6429 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
6430 where extra insns should be added. Depending on how many items have been
6431 moved out of the loop, it will either be before INSN or at the start of
6432 the loop. */
6433
6434 static int
6435 maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
6436 rtx x, insn;
6437 struct iv_class *bl;
6438 int eliminate_p;
6439 rtx where;
6440 {
6441 enum rtx_code code = GET_CODE (x);
6442 rtx reg = bl->biv->dest_reg;
6443 enum machine_mode mode = GET_MODE (reg);
6444 struct induction *v;
6445 rtx arg, tem;
6446 #ifdef HAVE_cc0
6447 rtx new;
6448 #endif
6449 int arg_operand;
6450 char *fmt;
6451 int i, j;
6452
6453 switch (code)
6454 {
6455 case REG:
6456 /* If we haven't already been able to do something with this BIV,
6457 we can't eliminate it. */
6458 if (x == reg)
6459 return 0;
6460 return 1;
6461
6462 case SET:
6463 /* If this sets the BIV, it is not a problem. */
6464 if (SET_DEST (x) == reg)
6465 return 1;
6466
6467 /* If this is an insn that defines a giv, it is also ok because
6468 it will go away when the giv is reduced. */
6469 for (v = bl->giv; v; v = v->next_iv)
6470 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
6471 return 1;
6472
6473 #ifdef HAVE_cc0
6474 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
6475 {
6476 /* Can replace with any giv that was reduced and
6477 that has (MULT_VAL != 0) and (ADD_VAL == 0).
6478 Require a constant for MULT_VAL, so we know it's nonzero.
6479 ??? We disable this optimization to avoid potential
6480 overflows. */
6481
6482 for (v = bl->giv; v; v = v->next_iv)
6483 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6484 && v->add_val == const0_rtx
6485 && ! v->ignore && ! v->maybe_dead && v->always_computable
6486 && v->mode == mode
6487 && 0)
6488 {
6489 /* If the giv V had the auto-inc address optimization applied
6490 to it, and INSN occurs between the giv insn and the biv
6491 insn, then we must adjust the value used here.
6492 This is rare, so we don't bother to do so. */
6493 if (v->auto_inc_opt
6494 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6495 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6496 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6497 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6498 continue;
6499
6500 if (! eliminate_p)
6501 return 1;
6502
6503 /* If the giv has the opposite direction of change,
6504 then reverse the comparison. */
6505 if (INTVAL (v->mult_val) < 0)
6506 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
6507 const0_rtx, v->new_reg);
6508 else
6509 new = v->new_reg;
6510
6511 /* We can probably test that giv's reduced reg. */
6512 if (validate_change (insn, &SET_SRC (x), new, 0))
6513 return 1;
6514 }
6515
6516 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
6517 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
6518 Require a constant for MULT_VAL, so we know it's nonzero.
6519 ??? Do this only if ADD_VAL is a pointer to avoid a potential
6520 overflow problem. */
6521
6522 for (v = bl->giv; v; v = v->next_iv)
6523 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6524 && ! v->ignore && ! v->maybe_dead && v->always_computable
6525 && v->mode == mode
6526 && (GET_CODE (v->add_val) == SYMBOL_REF
6527 || GET_CODE (v->add_val) == LABEL_REF
6528 || GET_CODE (v->add_val) == CONST
6529 || (GET_CODE (v->add_val) == REG
6530 && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
6531 {
6532 /* If the giv V had the auto-inc address optimization applied
6533 to it, and INSN occurs between the giv insn and the biv
6534 insn, then we must adjust the value used here.
6535 This is rare, so we don't bother to do so. */
6536 if (v->auto_inc_opt
6537 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6538 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6539 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6540 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6541 continue;
6542
6543 if (! eliminate_p)
6544 return 1;
6545
6546 /* If the giv has the opposite direction of change,
6547 then reverse the comparison. */
6548 if (INTVAL (v->mult_val) < 0)
6549 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
6550 v->new_reg);
6551 else
6552 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
6553 copy_rtx (v->add_val));
6554
6555 /* Replace biv with the giv's reduced register. */
6556 update_reg_last_use (v->add_val, insn);
6557 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6558 return 1;
6559
6560 /* Insn doesn't support that constant or invariant. Copy it
6561 into a register (it will be a loop invariant.) */
6562 tem = gen_reg_rtx (GET_MODE (v->new_reg));
6563
6564 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
6565 where);
6566
6567 /* Substitute the new register for its invariant value in
6568 the compare expression. */
6569 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
6570 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6571 return 1;
6572 }
6573 }
6574 #endif
6575 break;
6576
6577 case COMPARE:
6578 case EQ: case NE:
6579 case GT: case GE: case GTU: case GEU:
6580 case LT: case LE: case LTU: case LEU:
6581 /* See if either argument is the biv. */
6582 if (XEXP (x, 0) == reg)
6583 arg = XEXP (x, 1), arg_operand = 1;
6584 else if (XEXP (x, 1) == reg)
6585 arg = XEXP (x, 0), arg_operand = 0;
6586 else
6587 break;
6588
6589 if (CONSTANT_P (arg))
6590 {
6591 /* First try to replace with any giv that has constant positive
6592 mult_val and constant add_val. We might be able to support
6593 negative mult_val, but it seems complex to do it in general. */
6594
6595 for (v = bl->giv; v; v = v->next_iv)
6596 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6597 && (GET_CODE (v->add_val) == SYMBOL_REF
6598 || GET_CODE (v->add_val) == LABEL_REF
6599 || GET_CODE (v->add_val) == CONST
6600 || (GET_CODE (v->add_val) == REG
6601 && REGNO_POINTER_FLAG (REGNO (v->add_val))))
6602 && ! v->ignore && ! v->maybe_dead && v->always_computable
6603 && v->mode == mode)
6604 {
6605 /* If the giv V had the auto-inc address optimization applied
6606 to it, and INSN occurs between the giv insn and the biv
6607 insn, then we must adjust the value used here.
6608 This is rare, so we don't bother to do so. */
6609 if (v->auto_inc_opt
6610 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6611 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6612 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6613 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6614 continue;
6615
6616 if (! eliminate_p)
6617 return 1;
6618
6619 /* Replace biv with the giv's reduced reg. */
6620 XEXP (x, 1-arg_operand) = v->new_reg;
6621
6622 /* If all constants are actually constant integers and
6623 the derived constant can be directly placed in the COMPARE,
6624 do so. */
6625 if (GET_CODE (arg) == CONST_INT
6626 && GET_CODE (v->mult_val) == CONST_INT
6627 && GET_CODE (v->add_val) == CONST_INT
6628 && validate_change (insn, &XEXP (x, arg_operand),
6629 GEN_INT (INTVAL (arg)
6630 * INTVAL (v->mult_val)
6631 + INTVAL (v->add_val)), 0))
6632 return 1;
6633
6634 /* Otherwise, load it into a register. */
6635 tem = gen_reg_rtx (mode);
6636 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6637 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
6638 return 1;
6639
6640 /* If that failed, put back the change we made above. */
6641 XEXP (x, 1-arg_operand) = reg;
6642 }
6643
6644 /* Look for giv with positive constant mult_val and nonconst add_val.
6645 Insert insns to calculate new compare value.
6646 ??? Turn this off due to possible overflow. */
6647
6648 for (v = bl->giv; v; v = v->next_iv)
6649 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6650 && ! v->ignore && ! v->maybe_dead && v->always_computable
6651 && v->mode == mode
6652 && 0)
6653 {
6654 rtx tem;
6655
6656 /* If the giv V had the auto-inc address optimization applied
6657 to it, and INSN occurs between the giv insn and the biv
6658 insn, then we must adjust the value used here.
6659 This is rare, so we don't bother to do so. */
6660 if (v->auto_inc_opt
6661 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6662 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6663 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6664 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6665 continue;
6666
6667 if (! eliminate_p)
6668 return 1;
6669
6670 tem = gen_reg_rtx (mode);
6671
6672 /* Replace biv with giv's reduced register. */
6673 validate_change (insn, &XEXP (x, 1 - arg_operand),
6674 v->new_reg, 1);
6675
6676 /* Compute value to compare against. */
6677 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6678 /* Use it in this insn. */
6679 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6680 if (apply_change_group ())
6681 return 1;
6682 }
6683 }
6684 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
6685 {
6686 if (invariant_p (arg) == 1)
6687 {
6688 /* Look for giv with constant positive mult_val and nonconst
6689 add_val. Insert insns to compute new compare value.
6690 ??? Turn this off due to possible overflow. */
6691
6692 for (v = bl->giv; v; v = v->next_iv)
6693 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6694 && ! v->ignore && ! v->maybe_dead && v->always_computable
6695 && v->mode == mode
6696 && 0)
6697 {
6698 rtx tem;
6699
6700 /* If the giv V had the auto-inc address optimization applied
6701 to it, and INSN occurs between the giv insn and the biv
6702 insn, then we must adjust the value used here.
6703 This is rare, so we don't bother to do so. */
6704 if (v->auto_inc_opt
6705 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6706 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6707 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6708 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6709 continue;
6710
6711 if (! eliminate_p)
6712 return 1;
6713
6714 tem = gen_reg_rtx (mode);
6715
6716 /* Replace biv with giv's reduced register. */
6717 validate_change (insn, &XEXP (x, 1 - arg_operand),
6718 v->new_reg, 1);
6719
6720 /* Compute value to compare against. */
6721 emit_iv_add_mult (arg, v->mult_val, v->add_val,
6722 tem, where);
6723 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6724 if (apply_change_group ())
6725 return 1;
6726 }
6727 }
6728
6729 /* This code has problems. Basically, you can't know when
6730 seeing if we will eliminate BL, whether a particular giv
6731 of ARG will be reduced. If it isn't going to be reduced,
6732 we can't eliminate BL. We can try forcing it to be reduced,
6733 but that can generate poor code.
6734
	     The problem is that the benefit of reducing TV, below, should
	     be increased if BL can actually be eliminated, but this means
	     we might have to do a topological sort of the order in which
	     we try to process bivs.  It doesn't seem worthwhile to do
6739 this sort of thing now. */
6740
6741 #if 0
6742 /* Otherwise the reg compared with had better be a biv. */
6743 if (GET_CODE (arg) != REG
6744 || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
6745 return 0;
6746
6747 /* Look for a pair of givs, one for each biv,
6748 with identical coefficients. */
6749 for (v = bl->giv; v; v = v->next_iv)
6750 {
6751 struct induction *tv;
6752
6753 if (v->ignore || v->maybe_dead || v->mode != mode)
6754 continue;
6755
6756 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
6757 if (! tv->ignore && ! tv->maybe_dead
6758 && rtx_equal_p (tv->mult_val, v->mult_val)
6759 && rtx_equal_p (tv->add_val, v->add_val)
6760 && tv->mode == mode)
6761 {
6762 /* If the giv V had the auto-inc address optimization applied
6763 to it, and INSN occurs between the giv insn and the biv
6764 insn, then we must adjust the value used here.
6765 This is rare, so we don't bother to do so. */
6766 if (v->auto_inc_opt
6767 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6768 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6769 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6770 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6771 continue;
6772
6773 if (! eliminate_p)
6774 return 1;
6775
6776 /* Replace biv with its giv's reduced reg. */
6777 XEXP (x, 1-arg_operand) = v->new_reg;
6778 /* Replace other operand with the other giv's
6779 reduced reg. */
6780 XEXP (x, arg_operand) = tv->new_reg;
6781 return 1;
6782 }
6783 }
6784 #endif
6785 }
6786
6787 /* If we get here, the biv can't be eliminated. */
6788 return 0;
6789
6790 case MEM:
6791 /* If this address is a DEST_ADDR giv, it doesn't matter if the
6792 biv is used in it, since it will be replaced. */
6793 for (v = bl->giv; v; v = v->next_iv)
6794 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
6795 return 1;
6796 break;
6797
6798 default:
6799 break;
6800 }
6801
6802 /* See if any subexpression fails elimination. */
6803 fmt = GET_RTX_FORMAT (code);
6804 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6805 {
6806 switch (fmt[i])
6807 {
6808 case 'e':
6809 if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
6810 eliminate_p, where))
6811 return 0;
6812 break;
6813
6814 case 'E':
6815 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6816 if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
6817 eliminate_p, where))
6818 return 0;
6819 break;
6820 }
6821 }
6822
6823 return 1;
6824 }
6825 \f
/* Return nonzero if the last use of REG
   is in INSN or an insn following it in the same basic block.  */
6828
6829 static int
6830 last_use_this_basic_block (reg, insn)
6831 rtx reg;
6832 rtx insn;
6833 {
6834 rtx n;
6835 for (n = insn;
6836 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
6837 n = NEXT_INSN (n))
6838 {
6839 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
6840 return 1;
6841 }
6842 return 0;
6843 }
6844 \f
6845 /* Called via `note_stores' to record the initial value of a biv. Here we
6846 just record the location of the set and process it later. */
6847
6848 static void
6849 record_initial (dest, set)
6850 rtx dest;
6851 rtx set;
6852 {
6853 struct iv_class *bl;
6854
6855 if (GET_CODE (dest) != REG
6856 || REGNO (dest) >= max_reg_before_loop
6857 || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
6858 return;
6859
6860 bl = reg_biv_class[REGNO (dest)];
6861
6862 /* If this is the first set found, record it. */
6863 if (bl->init_insn == 0)
6864 {
6865 bl->init_insn = note_insn;
6866 bl->init_set = set;
6867 }
6868 }
6869 \f
6870 /* If any of the registers in X are "old" and currently have a last use earlier
6871 than INSN, update them to have a last use of INSN. Their actual last use
6872 will be the previous insn but it will not have a valid uid_luid so we can't
6873 use it. */
6874
6875 static void
6876 update_reg_last_use (x, insn)
6877 rtx x;
6878 rtx insn;
6879 {
6880 /* Check for the case where INSN does not have a valid luid. In this case,
6881 there is no need to modify the regno_last_uid, as this can only happen
6882 when code is inserted after the loop_end to set a pseudo's final value,
6883 and hence this insn will never be the last use of x. */
6884 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
6885 && INSN_UID (insn) < max_uid_for_loop
6886 && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
6887 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
6888 else
6889 {
6890 register int i, j;
6891 register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
6892 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6893 {
6894 if (fmt[i] == 'e')
6895 update_reg_last_use (XEXP (x, i), insn);
6896 else if (fmt[i] == 'E')
6897 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6898 update_reg_last_use (XVECEXP (x, i, j), insn);
6899 }
6900 }
6901 }
6902 \f
6903 /* Given a jump insn JUMP, return the condition that will cause it to branch
6904 to its JUMP_LABEL. If the condition cannot be understood, or is an
6905 inequality floating-point comparison which needs to be reversed, 0 will
6906 be returned.
6907
6908 If EARLIEST is non-zero, it is a pointer to a place where the earliest
6909 insn used in locating the condition was found. If a replacement test
6910 of the condition is desired, it should be placed in front of that
6911 insn and we will be sure that the inputs are still valid.
6912
6913 The condition will be returned in a canonical form to simplify testing by
6914 callers. Specifically:
6915
6916 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
6917 (2) Both operands will be machine operands; (cc0) will have been replaced.
6918 (3) If an operand is a constant, it will be the second operand.
6919 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
6920 for GE, GEU, and LEU. */
6921
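/* E.g. (illustrative), for a branch taken when `i <= 10' the condition
   returned is the canonicalized (lt (reg i) (const_int 11)), per rule
   (4) above.  */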
6922 rtx
6923 get_condition (jump, earliest)
6924 rtx jump;
6925 rtx *earliest;
6926 {
6927 enum rtx_code code;
6928 rtx prev = jump;
6929 rtx set;
6930 rtx tem;
6931 rtx op0, op1;
6932 int reverse_code = 0;
6933 int did_reverse_condition = 0;
6934
6935 /* If this is not a standard conditional jump, we can't parse it. */
6936 if (GET_CODE (jump) != JUMP_INSN
6937 || ! condjump_p (jump) || simplejump_p (jump))
6938 return 0;
6939
6940 code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
6941 op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
6942 op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
6943
6944 if (earliest)
6945 *earliest = jump;
6946
6947 /* If this branches to JUMP_LABEL when the condition is false, reverse
6948 the condition. */
6949 if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
6950 && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
6951 code = reverse_condition (code), did_reverse_condition ^= 1;
6952
6953 /* If we are comparing a register with zero, see if the register is set
6954 in the previous insn to a COMPARE or a comparison operation. Perform
6955 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
6956 in cse.c */
6957
6958 while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
6959 {
6960 /* Set non-zero when we find something of interest. */
6961 rtx x = 0;
6962
6963 #ifdef HAVE_cc0
6964 /* If comparison with cc0, import actual comparison from compare
6965 insn. */
6966 if (op0 == cc0_rtx)
6967 {
6968 if ((prev = prev_nonnote_insn (prev)) == 0
6969 || GET_CODE (prev) != INSN
6970 || (set = single_set (prev)) == 0
6971 || SET_DEST (set) != cc0_rtx)
6972 return 0;
6973
6974 op0 = SET_SRC (set);
6975 op1 = CONST0_RTX (GET_MODE (op0));
6976 if (earliest)
6977 *earliest = prev;
6978 }
6979 #endif
6980
6981 /* If this is a COMPARE, pick up the two things being compared. */
6982 if (GET_CODE (op0) == COMPARE)
6983 {
6984 op1 = XEXP (op0, 1);
6985 op0 = XEXP (op0, 0);
6986 continue;
6987 }
6988 else if (GET_CODE (op0) != REG)
6989 break;
6990
6991 /* Go back to the previous insn. Stop if it is not an INSN. We also
6992 stop if it isn't a single set or if it has a REG_INC note because
6993 we don't want to bother dealing with it. */
6994
6995 if ((prev = prev_nonnote_insn (prev)) == 0
6996 || GET_CODE (prev) != INSN
6997 || FIND_REG_INC_NOTE (prev, 0)
6998 || (set = single_set (prev)) == 0)
6999 break;
7000
7001 /* If this is setting OP0, get what it sets it to if it looks
7002 relevant. */
7003 if (rtx_equal_p (SET_DEST (set), op0))
7004 {
7005 enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
7006
7007 if ((GET_CODE (SET_SRC (set)) == COMPARE
7008 || (((code == NE
7009 || (code == LT
7010 && GET_MODE_CLASS (inner_mode) == MODE_INT
7011 && (GET_MODE_BITSIZE (inner_mode)
7012 <= HOST_BITS_PER_WIDE_INT)
7013 && (STORE_FLAG_VALUE
7014 & ((HOST_WIDE_INT) 1
7015 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7016 #ifdef FLOAT_STORE_FLAG_VALUE
7017 || (code == LT
7018 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7019 && FLOAT_STORE_FLAG_VALUE < 0)
7020 #endif
7021 ))
7022 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')))
7023 x = SET_SRC (set);
7024 else if (((code == EQ
7025 || (code == GE
7026 && (GET_MODE_BITSIZE (inner_mode)
7027 <= HOST_BITS_PER_WIDE_INT)
7028 && GET_MODE_CLASS (inner_mode) == MODE_INT
7029 && (STORE_FLAG_VALUE
7030 & ((HOST_WIDE_INT) 1
7031 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7032 #ifdef FLOAT_STORE_FLAG_VALUE
7033 || (code == GE
7034 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7035 && FLOAT_STORE_FLAG_VALUE < 0)
7036 #endif
7037 ))
7038 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')
7039 {
7040 /* We might have reversed a LT to get a GE here. But this wasn't
7041 actually the comparison of data, so we don't flag that we
7042 have had to reverse the condition. */
7043 did_reverse_condition ^= 1;
7044 reverse_code = 1;
7045 x = SET_SRC (set);
7046 }
7047 else
7048 break;
7049 }
7050
7051 else if (reg_set_p (op0, prev))
7052 /* If this sets OP0, but not directly, we have to give up. */
7053 break;
7054
7055 if (x)
7056 {
7057 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
7058 code = GET_CODE (x);
7059 if (reverse_code)
7060 {
7061 code = reverse_condition (code);
7062 did_reverse_condition ^= 1;
7063 reverse_code = 0;
7064 }
7065
7066 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
7067 if (earliest)
7068 *earliest = prev;
7069 }
7070 }
7071
7072 /* If constant is first, put it last. */
7073 if (CONSTANT_P (op0))
7074 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
7075
7076 /* If OP0 is the result of a comparison, we weren't able to find what
7077 was really being compared, so fail. */
7078 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
7079 return 0;
7080
7081 /* Canonicalize any ordered comparison with integers involving equality
7082 if we can do computations in the relevant mode and we do not
7083 overflow. */
7084
7085 if (GET_CODE (op1) == CONST_INT
7086 && GET_MODE (op0) != VOIDmode
7087 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
7088 {
7089 HOST_WIDE_INT const_val = INTVAL (op1);
7090 unsigned HOST_WIDE_INT uconst_val = const_val;
7091 unsigned HOST_WIDE_INT max_val
7092 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
7093
7094 switch (code)
7095 {
7096 case LE:
7097 if (const_val != max_val >> 1)
7098 code = LT, op1 = GEN_INT (const_val + 1);
7099 break;
7100
7101 /* When cross-compiling, const_val might be sign-extended from
7102 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
7103 case GE:
7104 if ((const_val & max_val)
7105 != (((HOST_WIDE_INT) 1
7106 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
7107 code = GT, op1 = GEN_INT (const_val - 1);
7108 break;
7109
7110 case LEU:
7111 if (uconst_val < max_val)
7112 code = LTU, op1 = GEN_INT (uconst_val + 1);
7113 break;
7114
7115 case GEU:
7116 if (uconst_val != 0)
7117 code = GTU, op1 = GEN_INT (uconst_val - 1);
7118 break;
7119
7120 default:
7121 break;
7122 }
7123 }
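  /* A concrete instance of the LEU case above (illustrative only): with
     OP0 in QImode, where GET_MODE_MASK gives 255, (leu x (const_int 254))
     becomes (ltu x (const_int 255)), while (leu x (const_int 255)) is
     left alone because uconst_val equals max_val. */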
7124
7125 /* If this was floating-point and we reversed anything other than an
7126 EQ or NE, return zero. */
7127 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
7128 && did_reverse_condition && code != NE && code != EQ
7129 && ! flag_fast_math
7130 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
7131 return 0;
7132
7133 #ifdef HAVE_cc0
7134 /* Never return CC0; return zero instead. */
7135 if (op0 == cc0_rtx)
7136 return 0;
7137 #endif
7138
7139 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
7140 }
7141
7142 /* Similar to the routine above, except that we also put an invariant last
7143 unless both operands are invariants. */
7144
7145 rtx
7146 get_condition_for_loop (x)
7147 rtx x;
7148 {
7149 rtx comparison = get_condition (x, NULL_PTR);
7150
7151 if (comparison == 0
7152 || ! invariant_p (XEXP (comparison, 0))
7153 || invariant_p (XEXP (comparison, 1)))
7154 return comparison;
7155
7156 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
7157 XEXP (comparison, 1), XEXP (comparison, 0));
7158 }
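#if 0
/* A minimal usage sketch (illustrative, not compiled in): recover the
   canonicalized exit test of a loop whose last insn is a conditional
   jump. The names mirror analyze_loop_iterations () below. */
{
  rtx last_loop_insn = PREV_INSN (loop_end);
  rtx comparison = get_condition_for_loop (last_loop_insn);

  if (comparison != 0)
    {
      enum rtx_code code = GET_CODE (comparison); /* Always a comparison. */
      rtx iteration_var = XEXP (comparison, 0);   /* The varying operand. */
      rtx bound = XEXP (comparison, 1);           /* Invariant, if any. */
    }
}
#endif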
7159
7160 #ifdef HAIFA
7161 /* Analyze a loop in order to instrument it with the use of a count register.
7162 LOOP_START and LOOP_END are the first and last insns of the loop.
7163 This function works in cooperation with insert_bct ().
7164 loop_can_insert_bct[loop_num] is set according to whether the optimization
7165 is applicable to the loop. When it is applicable, the following variables
7166 are also set:
7167 loop_start_value[loop_num]
7168 loop_comparison_value[loop_num]
7169 loop_increment[loop_num]
7170 loop_comparison_code[loop_num] */
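/* For instance (a hedged illustration; the exact rtl varies by target and
   by how get_condition canonicalizes the test): for a source loop like
   "for (i = 0; i < n; i++)" one would expect loop_start_value[loop_num]
   to be const0_rtx, loop_increment[loop_num] to be const1_rtx,
   loop_comparison_code[loop_num] to be LT, and
   loop_comparison_value[loop_num] to be the rtx holding n. */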
7171
7172 #ifdef HAVE_decrement_and_branch_on_count
7173 static void
7174 analyze_loop_iterations (loop_start, loop_end)
7175 rtx loop_start, loop_end;
7176 {
7177 rtx comparison, comparison_value;
7178 rtx iteration_var, initial_value, increment;
7179 enum rtx_code comparison_code;
7180
7181 rtx last_loop_insn;
7182 rtx insn;
7183 int i;
7184
7185 /* Mode of the loop variable. */
7186 enum machine_mode original_mode;
7187 
7188 /* Find the number of the loop. */
7189 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7190 
7191 /* We change our mind only when we are sure that the loop will be instrumented. */
7192 loop_can_insert_bct[loop_num] = 0;
7193 
7194 /* Is the optimization suppressed? */
7195 if (! flag_branch_on_count_reg)
7196 return;
7197
7198 /* Make sure that the count register is not in use. */
7199 if (loop_used_count_register[loop_num]){
7200 if (loop_dump_stream)
7201 fprintf (loop_dump_stream,
7202 "analyze_loop_iterations %d: BCT instrumentation failed: count register already in use\n",
7203 loop_num);
7204 return;
7205 }
7206
7207 /* Make sure that the function has no indirect jumps. */
7208 if (indirect_jump_in_function){
7209 if (loop_dump_stream)
7210 fprintf (loop_dump_stream,
7211 "analyze_loop_iterations %d: BCT instrumentation failed: indirect jump in function\n",
7212 loop_num);
7213 return;
7214 }
7215
7216 /* Make sure that the last loop insn is a conditional jump. */
7217 last_loop_insn = PREV_INSN (loop_end);
7218 if (GET_CODE (last_loop_insn) != JUMP_INSN || !condjump_p (last_loop_insn)) {
7219 if (loop_dump_stream)
7220 fprintf (loop_dump_stream,
7221 "analyze_loop_iterations %d: BCT instrumentation failed: invalid jump at loop end\n",
7222 loop_num);
7223 return;
7224 }
7225
7226 /* First find the iteration variable. If the last insn is a conditional
7227 branch, and the insn preceding it tests a register value, make that
7228 register the iteration variable. */
7229
7230 /* We used to use prev_nonnote_insn here, but that fails because it might
7231 accidentally get the branch for a contained loop if the branch for this
7232 loop was deleted. We can only trust branches immediately before the
7233 loop_end. */
7234
7235 comparison = get_condition_for_loop (last_loop_insn);
7236 /* ??? Get_condition may switch position of induction variable and
7237 invariant register when it canonicalizes the comparison. */
7238
7239 if (comparison == 0) {
7240 if (loop_dump_stream)
7241 fprintf (loop_dump_stream,
7242 "analyze_loop_iterations %d: BCT instrumentation failed: comparison not found\n",
7243 loop_num);
7244 return;
7245 }
7246
7247 comparison_code = GET_CODE (comparison);
7248 iteration_var = XEXP (comparison, 0);
7249 comparison_value = XEXP (comparison, 1);
7250
7251 original_mode = GET_MODE (iteration_var);
7252 if (GET_MODE_CLASS (original_mode) != MODE_INT
7253 || GET_MODE_SIZE (original_mode) != UNITS_PER_WORD) {
7254 if (loop_dump_stream)
7255 fprintf (loop_dump_stream,
7256 "analyze_loop_iterations %d: BCT Instrumentation failed: loop variable not integer\n",
7257 loop_num);
7258 return;
7259 }
7260
7261 /* Get info about the loop bounds and increment. */
7262 iteration_info (iteration_var, &initial_value, &increment,
7263 loop_start, loop_end);
7264 
7265 /* Make sure that all the required loop data were found. */
7266 if (!(initial_value && increment && comparison_value
7267 && invariant_p (comparison_value) && invariant_p (increment)
7268 && ! indirect_jump_in_function))
7269 {
7270 if (loop_dump_stream) {
7271 fprintf (loop_dump_stream,
7272 "analyze_loop_iterations %d: BCT instrumentation failed because of wrong loop: ", loop_num);
7273 if (!(initial_value && increment && comparison_value)) {
7274 fprintf (loop_dump_stream, "\tbounds not available: ");
7275 if ( ! initial_value )
7276 fprintf (loop_dump_stream, "initial ");
7277 if ( ! increment )
7278 fprintf (loop_dump_stream, "increment ");
7279 if ( ! comparison_value )
7280 fprintf (loop_dump_stream, "comparison ");
7281 fprintf (loop_dump_stream, "\n");
7282 }
7283 if (!invariant_p (comparison_value) || !invariant_p (increment))
7284 fprintf (loop_dump_stream, "\tloop bounds not invariant\n");
7285 }
7286 return;
7287 }
7288
7289 /* Make sure that the increment is constant. */
7290 if (GET_CODE (increment) != CONST_INT) {
7291 if (loop_dump_stream)
7292 fprintf (loop_dump_stream,
7293 "analyze_loop_iterations %d: instrumentation failed: not arithmetic loop\n",
7294 loop_num);
7295 return;
7296 }
7297
7298 /* Make sure that the loop contains neither a function call nor a jump
7299 table (the count register might be altered by the called function, or
7300 might be used for the table branch). */
7301 for (insn = loop_start; insn && insn != loop_end; insn = NEXT_INSN (insn)) {
7302 if (GET_CODE (insn) == CALL_INSN){
7303 if (loop_dump_stream)
7304 fprintf (loop_dump_stream,
7305 "analyze_loop_iterations %d: BCT instrumentation failed: function call in the loop\n",
7306 loop_num);
7307 return;
7308 }
7309
7310 if (GET_CODE (insn) == JUMP_INSN
7311 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
7312 || GET_CODE (PATTERN (insn)) == ADDR_VEC)){
7313 if (loop_dump_stream)
7314 fprintf (loop_dump_stream,
7315 "analyze_loop_iterations %d: BCT instrumentation failed: computed branch in the loop\n",
7316 loop_num);
7317 return;
7318 }
7319 }
7320
7321 /* At this point, we are sure that the loop can be instrumented with BCT.
7322 Some of the loops, however, will not be instrumented - the final decision
7323 is made by insert_bct (). */
7324 if (loop_dump_stream)
7325 fprintf (loop_dump_stream,
7326 "analyze_loop_iterations: loop %d can be BCT instrumented.\n",
7327 loop_num);
7328
7329 /* Mark all enclosing loops as unable to use the count register. */
7330 /* ???: In fact, since insert_bct may decide not to instrument this loop,
7331 marking here may prevent instrumenting an enclosing loop that could
7332 actually be instrumented. But since this is rare, it is safer to mark
7333 here in case the order of calling (analyze/insert)_bct would be changed. */
7334 for (i = loop_num; i != -1; i = loop_outer_loop[i])
7335 loop_used_count_register[i] = 1;
7336 
7337 /* Set the data structures which will be used by the instrumentation phase. */
7338 loop_start_value[loop_num] = initial_value;
7339 loop_comparison_value[loop_num] = comparison_value;
7340 loop_increment[loop_num] = increment;
7341 loop_comparison_code[loop_num] = comparison_code;
7342 loop_can_insert_bct[loop_num] = 1;
7343 }
7344
7345
7346 /* Instrument loop for insertion of a bct instruction. We distinguish between
7347 loops with compile-time bounds and loops with run-time bounds. The loop
7348 behaviour is analyzed according to the following characteristics/variables:
7349 ; Input variables:
7350 ; comparison-value: the value to which the iteration counter is compared.
7351 ; initial-value: iteration-counter initial value.
7352 ; increment: iteration-counter increment.
7353 ; Computed variables:
7354 ; increment-direction: the sign of the increment.
7355 ; compare-direction: '1' for LT, LE, '-1' for GT, GE, '0' for NE.
7356 ; range-direction: sign (comparison-value - initial-value)
7357 We give up on the following cases:
7358 ; loop variable overflow.
7359 ; run-time loop bounds with comparison code NE.
7360 */
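/* Illustrative instance: for a loop equivalent to "for (i = 0; i <= 9; i += 2)",
   initial-value is 0, comparison-value is 9 and increment is 2, so
   increment-direction is 1, compare-direction is 1 (LE), and
   range-direction is sign (9 - 0) = 1. */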
7361
7362 static void
7363 insert_bct (loop_start, loop_end)
7364 rtx loop_start, loop_end;
7365 {
7366 rtx initial_value, comparison_value, increment;
7367 enum rtx_code comparison_code;
7368
7369 int increment_direction, compare_direction;
7370 int unsigned_p = 0;
7371
7372 /* If the loop condition is <= or >=, the number of iterations
7373 is one more than the range of the bounds of the loop. */
7374 int add_iteration = 0;
7375 
7376 /* The only machine mode we work with is the machine-word-sized integer
7377 mode, here SImode. */
7378 enum machine_mode loop_var_mode = SImode;
7379
7380 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7381
7382 /* Get the loop variables. No need to check that these are valid - they were
7383 already checked in analyze_loop_iterations (). */
7384 comparison_code = loop_comparison_code[loop_num];
7385 initial_value = loop_start_value[loop_num];
7386 comparison_value = loop_comparison_value[loop_num];
7387 increment = loop_increment[loop_num];
7388
7389 /* Check the decision of analyze_loop_iterations () for this loop. */
7390 if (! loop_can_insert_bct[loop_num]){
7391 if (loop_dump_stream)
7392 fprintf (loop_dump_stream,
7393 "insert_bct: [%d] - analyze_loop_iterations () decided not to instrument\n",
7394 loop_num);
7395 return;
7396 }
7397
7398 /* It's impossible to instrument a completely unrolled loop. */
7399 if (loop_unroll_factor [loop_num] == -1)
7400 return;
7401
7402 /* Make sure that the last loop insn is a conditional jump.
7403 This check is repeated from analyze_loop_iterations (),
7404 because unrolling might have changed that. */
7405 if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
7406 || !condjump_p (PREV_INSN (loop_end))) {
7407 if (loop_dump_stream)
7408 fprintf (loop_dump_stream,
7409 "insert_bct: not instrumenting BCT because of invalid branch\n");
7410 return;
7411 }
7412
7413 /* Fix the increment in case the loop was unrolled. */
7414 if (loop_unroll_factor [loop_num] > 1)
7415 increment = GEN_INT (INTVAL (increment) * loop_unroll_factor [loop_num]);
7416 
7417 /* Determine properties and directions of the loop. */
7418 increment_direction = (INTVAL (increment) > 0) ? 1 : -1;
7419 switch (comparison_code) {
7420 case LEU:
7421 unsigned_p = 1;
7422 /* fallthrough */
7423 case LE:
7424 compare_direction = 1;
7425 add_iteration = 1;
7426 break;
7427 case GEU:
7428 unsigned_p = 1;
7429 /* fallthrough */
7430 case GE:
7431 compare_direction = -1;
7432 add_iteration = 1;
7433 break;
7434 case EQ:
7435 /* In this case we cannot know the number of iterations. */
7436 if (loop_dump_stream)
7437 fprintf (loop_dump_stream,
7438 "insert_bct: %d: loop cannot be instrumented: == in condition\n",
7439 loop_num);
7440 return;
7441 case LTU:
7442 unsigned_p = 1;
7443 /* fallthrough */
7444 case LT:
7445 compare_direction = 1;
7446 break;
7447 case GTU:
7448 unsigned_p = 1;
7449 /* fallthrough */
7450 case GT:
7451 compare_direction = -1;
7452 break;
7453 case NE:
7454 compare_direction = 0;
7455 break;
7456 default:
7457 abort ();
7458 }
7459
7460
7461 /* Make sure that the loop does not terminate by overflow. */
7462 if (compare_direction != increment_direction) {
7463 if (loop_dump_stream)
7464 fprintf (loop_dump_stream,
7465 "insert_bct: %d: loop cannot be instrumented: terminated by overflow\n",
7466 loop_num);
7467 return;
7468 }
7469
7470 /* Try to instrument the loop. */
7471
7472 /* Handle the simpler case, where the bounds are known at compile time. */
7473 if (GET_CODE (initial_value) == CONST_INT && GET_CODE (comparison_value) == CONST_INT)
7474 {
7475 int n_iterations;
7476 int increment_value_abs = INTVAL (increment) * increment_direction;
7477
7478 /* Check the relation between compare-val and initial-val. */
7479 int difference = INTVAL (comparison_value) - INTVAL (initial_value);
7480 int range_direction = (difference > 0) ? 1 : -1;
7481 
7482 /* Make sure the loop executes enough iterations to gain from BCT. */
7483 if (difference > -3 && difference < 3) {
7484 if (loop_dump_stream)
7485 fprintf (loop_dump_stream,
7486 "insert_bct: loop %d not BCT instrumented: too small iteration count.\n",
7487 loop_num);
7488 return;
7489 }
7490
7491 /* Make sure that the loop executes at least once. */
7492 if ((range_direction == 1 && compare_direction == -1)
7493 || (range_direction == -1 && compare_direction == 1))
7494 {
7495 if (loop_dump_stream)
7496 fprintf (loop_dump_stream,
7497 "insert_bct: loop %d: does not iterate even once. Not instrumenting.\n",
7498 loop_num);
7499 return;
7500 }
7501
7502 /* Make sure that the loop does not end by an overflow. (With compile-time
7503 bounds we must have an additional check for overflow, because here
7504 we also support the comparison code 'NE'.) */
7505 if (comparison_code == NE
7506 && increment_direction != range_direction) {
7507 if (loop_dump_stream)
7508 fprintf (loop_dump_stream,
7509 "insert_bct (compile time bounds): %d: loop not instrumented: terminated by overflow\n",
7510 loop_num);
7511 return;
7512 }
7513
7514 /* Determine the number of iterations by:
7515 ;
7516 ; compare-val - initial-val + (increment -1) + additional-iteration
7517 ; num_iterations = -----------------------------------------------------------------
7518 ; increment
7519 */
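      /* Worked example (illustrative): for a loop equivalent to
	 "for (i = 0; i <= 9; i += 2)", difference = 9, increment_value_abs = 2
	 and add_iteration = 1 (LE), so n_iterations = (9 + 2 - 1 + 1) / 2 = 5,
	 matching the five executions i = 0, 2, 4, 6, 8. */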
7520 difference = (range_direction > 0) ? difference : -difference;
7521 #if 0
7522 fprintf (stderr, "difference is: %d\n", difference); /* @*/
7523 fprintf (stderr, "increment_value_abs is: %d\n", increment_value_abs); /* @*/
7524 fprintf (stderr, "add_iteration is: %d\n", add_iteration); /* @*/
7525 fprintf (stderr, "INTVAL (comparison_value) is: %d\n", INTVAL (comparison_value)); /* @*/
7526 fprintf (stderr, "INTVAL (initial_value) is: %d\n", INTVAL (initial_value)); /* @*/
7527 #endif
7528
7529 if (increment_value_abs == 0) {
7530 fprintf (stderr, "insert_bct: error: increment == 0 !!!\n");
7531 abort ();
7532 }
7533 n_iterations = (difference + increment_value_abs - 1 + add_iteration)
7534 / increment_value_abs;
7535
7536 #if 0
7537 fprintf (stderr, "number of iterations is: %d\n", n_iterations); /* @*/
7538 #endif
7539 instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));
7540
7541 /* Done with this loop. */
7542 return;
7543 }
7544
7545 /* Handle the more complex case, where the bounds are NOT known at compile time. */
7546 /* In this case we generate a run-time calculation of the number of iterations. */
7547
7548 /* With run-time bounds, if the comparison is of the form '!=', we give up. */
7549 if (comparison_code == NE) {
7550 if (loop_dump_stream)
7551 fprintf (loop_dump_stream,
7552 "insert_bct: fail for loop %d: runtime bounds with != comparison\n",
7553 loop_num);
7554 return;
7555 }
7556
7557 else {
7558 /* We rely on the existence of a run-time guard to ensure that the
7559 loop executes at least once. */
7560 rtx sequence;
7561 rtx iterations_num_reg;
7562
7563 int increment_value_abs = INTVAL (increment) * increment_direction;
7564
7565 /* Make sure that the increment is a power of two; otherwise an
7566 (expensive) divide would be needed. */
7567 if (exact_log2 (increment_value_abs) == -1)
7568 {
7569 if (loop_dump_stream)
7570 fprintf (loop_dump_stream,
7571 "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
7572 return;
7573 }
7574
7575 /* Compute the number of iterations. */
7576 start_sequence ();
7577 {
7578 rtx temp_reg;
7579
7580 /* Again, the number of iterations is calculated by:
7581 ;
7582 ; compare-val - initial-val + (increment -1) + additional-iteration
7583 ; num_iterations = -----------------------------------------------------------------
7584 ; increment
7585 */
7586 /* ??? Do we have to call copy_rtx here before passing rtx to
7587 expand_binop? */
7588 if (compare_direction > 0) {
7589 /* <, <=: the loop variable is increasing. */
7590 temp_reg = expand_binop (loop_var_mode, sub_optab, comparison_value,
7591 initial_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
7592 }
7593 else {
7594 temp_reg = expand_binop (loop_var_mode, sub_optab, initial_value,
7595 comparison_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
7596 }
7597
7598 if (increment_value_abs - 1 + add_iteration != 0)
7599 temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
7600 GEN_INT (increment_value_abs - 1 + add_iteration),
7601 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7602
7603 if (increment_value_abs != 1)
7604 {
7605 /* ??? This will generate an expensive divide instruction for
7606 most targets. The original authors apparently expected this
7607 to be a shift, since they test for power-of-2 divisors above,
7608 but just naively generating a divide instruction will not give
7609 a shift. It happens to work for the PowerPC target because
7610 the rs6000.md file has a divide pattern that emits shifts.
7611 It will probably not work for any other target. */
7612 iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
7613 temp_reg,
7614 GEN_INT (increment_value_abs),
7615 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7616 }
7617 else
7618 iterations_num_reg = temp_reg;
7619 }
7620 sequence = gen_sequence ();
7621 end_sequence ();
7622 emit_insn_before (sequence, loop_start);
7623 instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
7624 }
7625 }
7626
7627 /* Instrument loop by inserting a bct in it. This is done in the following way:
7628 1. A new register is created and assigned the hard register number of the
7629 count register.
7630 2. In the head of the loop the new variable is initialized to the value
7631 passed in the loop_num_iterations parameter.
7632 3. At the end of the loop, a comparison of the register with 0 is generated.
7633 The created comparison follows the pattern defined for the
7634 decrement_and_branch_on_count insn, so this insn will be generated in the
7635 assembly generation phase.
7636 4. The compare&branch on the old variable is deleted, so if the loop variable
7637 was not used elsewhere, it will be eliminated by data-flow analysis. */
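/* Schematically (an illustrative sketch; the exact rtl depends on the
   target's decrement_and_branch_on_count pattern), the result is:

     loop_start:
       (set (reg tmp1) loop_num_iterations)
       (set (reg COUNT_REGISTER_REGNUM) (reg tmp1))
     start_label:
       ... loop body ...
       ;; decrement the count register and branch to start_label
       ;; while it is non-zero
     loop_end: */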
7638
7639 static void
7640 instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
7641 rtx loop_start, loop_end;
7642 rtx loop_num_iterations;
7643 {
7644 rtx temp_reg1, temp_reg2;
7645 rtx start_label;
7646
7647 rtx sequence;
7648 enum machine_mode loop_var_mode = SImode;
7649
7650 if (HAVE_decrement_and_branch_on_count)
7651 {
7652 if (loop_dump_stream)
7653 fprintf (loop_dump_stream, "Loop: Inserting BCT\n");
7654
7655 /* Eliminate the check on the old variable: delete the compare and the branch. */
7656 delete_insn (PREV_INSN (loop_end));
7657 delete_insn (PREV_INSN (loop_end));
7658
7659 /* Insert the label which will delimit the start of the loop. */
7660 start_label = gen_label_rtx ();
7661 emit_label_after (start_label, loop_start);
7662 
7663 /* Insert initialization of the count register into the loop header. */
7664 start_sequence ();
7665 temp_reg1 = gen_reg_rtx (loop_var_mode);
7666 emit_insn (gen_move_insn (temp_reg1, loop_num_iterations));
7667
7668 /* This will be the count register. */
7669 temp_reg2 = gen_rtx_REG (loop_var_mode, COUNT_REGISTER_REGNUM);
7670 /* We have to move the value to the count register from a GPR
7671 because the rtx pointed to by loop_num_iterations could contain
7672 an expression which cannot be moved into the count register. */
7673 emit_insn (gen_move_insn (temp_reg2, temp_reg1));
7674
7675 sequence = gen_sequence ();
7676 end_sequence ();
7677 emit_insn_after (sequence, loop_start);
7678
7679 /* Insert a new comparison on the count register instead of the
7680 old one, generating the needed BCT pattern (that will later be
7681 recognized by the assembly generation phase). */
7682 emit_jump_insn_before (gen_decrement_and_branch_on_count (temp_reg2, start_label),
7683 loop_end);
7684 LABEL_NUSES (start_label)++;
7685 }
7686
7687 }
7688 #endif /* HAVE_decrement_and_branch_on_count */
7689
7690 #endif /* HAIFA */
7691
7692 /* Scan the function and determine whether it has indirect (computed) jumps.
7693
7694 This is taken mostly from flow.c; similar code exists elsewhere
7695 in the compiler. It may be useful to put this into rtlanal.c. */
7696 static int
7697 indirect_jump_in_function_p (start)
7698 rtx start;
7699 {
7700 rtx insn;
7701
7702 for (insn = start; insn; insn = NEXT_INSN (insn))
7703 if (computed_jump_p (insn))
7704 return 1;
7705
7706 return 0;
7707 }