/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 88, 89, 91-7, 1998 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */


/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value and changes these to zero the
   entire register once before the loop and merely copy the low part
   within the loop.

   Most of the complexity is in heuristics to decide when it is
   worthwhile to do these things.  */

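/* Illustration of the zero-extension case above (not part of the
   original sources): inside the loop,

       (set (reg:SI r) (const_int 0))
       (set (strict_low_part (subreg:HI (reg:SI r) 0)) (...))

   becomes a single clear of r placed before the loop, with only the
   STRICT_LOW_PART store remaining inside it; see the corresponding
   test in scan_loop below.  */
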
#include "config.h"
#include <stdio.h>
#include "rtl.h"
#include "obstack.h"
#include "expr.h"
#include "insn-config.h"
#include "insn-flags.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "except.h"

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but always increase monotonically.
   We use them to see whether a jump comes from outside a given loop.  */
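/* (Illustrative, not from the original sources: if insns appear in the
   order uid 7, uid 3, uid 12, their luids are simply 1, 2, 3, so luid
   comparisons give insn order even when uids were assigned out of
   sequence.)  */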

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

int *uid_loop_num;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;

/* For each loop, gives the containing loop number, -1 if none.  */

int *loop_outer_loop;

#ifdef HAIFA
/* The main output of analyze_loop_iterations is placed here.  */

int *loop_can_insert_bct;

/* For each loop, records whether any of its inner loops has used the
   count register.  */

int *loop_used_count_register;

/* Loop parameters for arithmetic loops.  These loops have a loop
   variable which is initialized to loop_start_value and incremented in
   each iteration by "loop_increment".  At the end of the iteration the
   loop variable is compared to the loop_comparison_value (using
   loop_comparison_code).  */

rtx *loop_increment;
rtx *loop_comparison_value;
rtx *loop_start_value;
enum rtx_code *loop_comparison_code;
#endif  /* HAIFA */

/* For each loop, keep track of its unrolling factor.
   Potential values:
      0: unrolled
      1: not unrolled
     -1: completely unrolled
     >0: holds the exact unroll factor.  */
int *loop_unroll_factor;

/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;

/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include
   return instructions on this list.  */

rtx *loop_number_exit_labels;

/* Indexed by loop number, counts the number of LABEL_REFs on
   loop_number_exit_labels for this loop and all loops nested inside it.  */

int *loop_number_exit_count;

/* Holds the number of loop iterations.  It is zero if the number could not
   be calculated.  Must be unsigned since the number of iterations can
   be as high as 2^wordsize-1.  For loops with a wider iterator, this
   number will be zero if the number of loop iterations is too large for
   an unsigned integer to hold.  */

unsigned HOST_WIDE_INT loop_n_iterations;

/* Nonzero if there is a subroutine call in the current loop.  */

static int loop_has_call;

/* Nonzero if there is a volatile memory reference in the current
   loop.  */

static int loop_has_volatile;

/* loop_continue is the NOTE_INSN_LOOP_CONT of the current loop.
   A continue statement will generate a branch to
   NEXT_INSN (loop_continue).  */

static rtx loop_continue;

/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static int *n_times_set;

/* Original value of n_times_set; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static int *n_times_used;

/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */

static char *may_not_optimize;

/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;

/* Array of MEMs that are stored in this loop.  If there are too many to fit
   here, we just turn on unknown_address_altered.  */

#define NUM_STORES 30
static rtx loop_store_mems[NUM_STORES];

/* Index of first available slot in above array.  */
static int loop_store_mems_idx;

/* Nonzero if we don't know what MEMs were changed in the current loop.
   This happens if the loop contains a call (in which case `loop_has_call'
   will also be set) or if we store into more than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Number of loops contained within the current one, including itself.  */
static int loops_enclosed;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;

/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

extern char *oballoc ();
\f
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;                     /* A movable insn */
  rtx set_src;                  /* The expression this reg is set from.  */
  rtx set_dest;                 /* The destination of this SET.  */
  rtx dependencies;             /* When INSN is a libcall, this is an EXPR_LIST
                                   of any registers used within the LIBCALL.  */
  int consec;                   /* Number of consecutive following insns
                                   that must be moved with this one.  */
  int regno;                    /* The register it sets */
  short lifetime;               /* lifetime of that register;
                                   may be adjusted when matching movables
                                   that load the same value are found.  */
  short savings;                /* Number of insns we can move for this reg,
                                   including other movables that force this
                                   or match this one.  */
  unsigned int cond : 1;        /* 1 if only conditionally movable */
  unsigned int force : 1;       /* 1 means MUST move this insn */
  unsigned int global : 1;      /* 1 means reg is live outside this loop */
                /* If PARTIAL is 1, GLOBAL means something different:
                   that the reg is live outside the range from where it is set
                   to the following label.  */
  unsigned int done : 1;        /* 1 inhibits further processing of this */

  unsigned int partial : 1;     /* 1 means this reg is used for zero-extending.
                                   In particular, moving it does not make it
                                   invariant.  */
  unsigned int move_insn : 1;   /* 1 means that we call emit_move_insn to
                                   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first : 1;
                                /* Same as above, if this is necessary for the
                                   first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;    /* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;   /* Nonzero means it is a mode for a low part
                                   that we should avoid changing when clearing
                                   the rest of the reg.  */
  struct movable *match;        /* First entry for same value */
  struct movable *forces;       /* An insn that must be moved if this is */
  struct movable *next;
};

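/* (Illustrative example, not from the original sources: for a loop
   containing an invariant insn such as

       (set (reg 105) (mult:SI (reg 99) (const_int 10)))

   where reg 99 is unchanged inside the loop, scan_loop below builds a
   movable with insn pointing at that insn, set_dest = (reg 105),
   set_src = the MULT, regno = 105 and consec = 0; move_movables then
   uses `lifetime' and `savings' to decide whether to hoist it.)  */
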
FILE *loop_dump_stream;

/* Forward declarations.  */

static void find_and_verify_loops ();
static void mark_loop_jump ();
static void prescan_loop ();
static int reg_in_basic_block_p ();
static int consec_sets_invariant_p ();
static rtx libcall_other_reg ();
static int labels_in_range_p ();
static void count_loop_regs_set ();
static void note_addr_stored ();
static int loop_reg_used_before_p ();
static void scan_loop ();
#if 0
static void replace_call_address ();
#endif
static rtx skip_consec_insns ();
static int libcall_benefit ();
static void ignore_some_movables ();
static void force_movables ();
static void combine_movables ();
static int rtx_equal_for_loop_p ();
static void move_movables ();
static void strength_reduce ();
static int valid_initial_value_p ();
static void find_mem_givs ();
static void record_biv ();
static void check_final_value ();
static void record_giv ();
static void update_giv_derive ();
static int basic_induction_var ();
static rtx simplify_giv_expr ();
static int general_induction_var ();
static int consec_sets_giv ();
static int check_dbra_loop ();
static rtx express_from ();
static int combine_givs_p ();
static void combine_givs ();
static int product_cheap_p ();
static int maybe_eliminate_biv ();
static int maybe_eliminate_biv_1 ();
static int last_use_this_basic_block ();
static void record_initial ();
static void update_reg_last_use ();

#ifdef HAIFA
/* This is extern from unroll.c.  */
void iteration_info ();

/* Two main functions for implementing bct: the first to be called
   before loop unrolling, and the second after.  */
#ifdef HAVE_decrement_and_branch_on_count
static void analyze_loop_iterations ();
static void insert_bct ();

/* Auxiliary function that inserts the bct pattern into the loop.  */
static void instrument_loop_bct ();
#endif /* HAVE_decrement_and_branch_on_count */
#endif  /* HAIFA */

/* Indirect_jump_in_function is computed once per function.  */
int indirect_jump_in_function = 0;
static int indirect_jump_in_function_p ();

\f
/* Relative gain of eliminating various kinds of operations.  */
int add_cost;
#if 0
int shift_cost;
int mult_cost;
#endif

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
int copy_cost;

void
init_loop ()
{
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  copy_cost = 2 * 2;

  /* Free the objects we just allocated.  */
  obfree (free_point);

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
}
\f
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile, unroll_p)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int unroll_p;
{
  register rtx insn;
  register int i;
  rtx last_insn;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();
  init_alias_analysis ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
          && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
        max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
  bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));

  /* Allocate tables for recording each loop.  We set each entry, so they
     need not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));

  /* This is initialized by the unrolling code, so we go ahead
     and clear it just in case we are not performing loop
     unrolling.  */
  loop_unroll_factor = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_unroll_factor, max_loop_num * sizeof (int));

#ifdef HAIFA
  /* Allocate for BCT optimization.  */
  loop_can_insert_bct = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_can_insert_bct, max_loop_num * sizeof (int));

  loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));

  loop_increment = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_comparison_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_start_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  bzero ((char *) loop_increment, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_comparison_value, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_start_value, max_loop_num * sizeof (rtx));

  loop_comparison_code
    = (enum rtx_code *) alloca (max_loop_num * sizeof (enum rtx_code));
  bzero ((char *) loop_comparison_code, max_loop_num * sizeof (enum rtx_code));
#endif  /* HAIFA */

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* See if we went too far.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();

  /* Compute the mapping from uids to luids.
     LUIDs are numbers assigned to insns, like uids,
     except that luids increase monotonically through the code.
     Don't assign luids to line-number NOTEs, so that the distance in luids
     between two insns is not affected by -g.  */

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      last_insn = insn;
      if (GET_CODE (insn) != NOTE
          || NOTE_LINE_NUMBER (insn) <= 0)
        uid_luid[INSN_UID (insn)] = ++i;
      else
        /* Give a line number note the same luid as preceding insn.  */
        uid_luid[INSN_UID (insn)] = i;
    }

  max_luid = i + 1;

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
        break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];
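
  /* (Illustrative, not from the original sources: the first loop above
     seeds uid_luid[0] with the first nonzero luid, so a table such as
     {0, 0, 3, 0, 5} first becomes {3, 0, 3, 0, 5} and is then
     backfilled by the second loop to {3, 3, 3, 3, 5}; a deleted insn's
     uid thus maps near where the insn used to be.)  */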

  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Determine if the function has an indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
                 max_reg_num (), unroll_p);

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original one
     to one mapping will remain.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    unroll_block_trees ();
}
\f
/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (loop_start, end, nregs, unroll_p)
     rtx loop_start, end;
     int nregs;
     int unroll_p;
{
  register int i;
  register rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  rtx loop_top = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  rtx scan_start;
  /* Number of insns in the loop.  */
  int insn_count;
  int in_libcall = 0;
  int tem;
  rtx temp;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* If we have calls, contains the insn in which a register was used
     if it was used exactly once; contains const0_rtx if it was used more
     than once.  */
  rtx *reg_single_usage = 0;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;

  n_times_set = (int *) alloca (nregs * sizeof (int));
  n_times_used = (int *) alloca (nregs * sizeof (int));
  may_not_optimize = (char *) alloca (nregs);

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */

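  /* (Illustrative shape, not from the original sources: a loop entered
     near the bottom looks roughly like

          NOTE_INSN_LOOP_BEG
          jump L2                         <- loop_entry_jump
        L1:                               <- loop_top
          ... loop body ...
        L2:                               <- scan_start
          exit test; conditional jump L1
          NOTE_INSN_LOOP_END

     so scanning begins at the target of the entry jump.)  */
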
  for (p = NEXT_INSN (loop_start);
       p != end
       && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
       && (GET_CODE (p) != NOTE
           || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
               && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  scan_start = p;

  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end);
  threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
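
  /* (Illustrative arithmetic, not from the original sources: with no
     call in the loop and, say, 28 non-fixed registers, threshold is
     2 * (1 + 28) = 58; move_movables later moves an insn when
     threshold * savings * lifetime >= insn_count.)  */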

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN).  */
      if (simplejump_p (p)
          && JUMP_LABEL (p) != 0
          /* Check to see whether the jump actually
             jumps out of the loop (meaning it's no loop).
             This case can happen for things like
             do {..} while (0).  If this label was generated previously
             by loop, we can't tell anything about it and have to reject
             the loop.  */
          && INSN_UID (JUMP_LABEL (p)) < max_uid_for_loop
          && INSN_LUID (JUMP_LABEL (p)) >= INSN_LUID (loop_start)
          && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (end))
        {
          loop_top = next_label (scan_start);
          scan_start = JUMP_LABEL (p);
        }
    }

  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
                 INSN_UID (loop_start), INSN_UID (end));
      return;
    }

  /* Count number of times each reg is set during this loop.
     Set may_not_optimize[I] if it is not safe to move out
     the setting of register I.  If this loop has calls, set
     reg_single_usage[I].  */

  bzero ((char *) n_times_set, nregs * sizeof (int));
  bzero (may_not_optimize, nregs);

  if (loop_has_call)
    {
      reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
      bzero ((char *) reg_single_usage, nregs * sizeof (rtx));
    }

  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
                       may_not_optimize, reg_single_usage, &insn_count, nregs);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    may_not_optimize[i] = 1, n_times_set[i] = 1;
  bcopy ((char *) n_times_set, (char *) n_times_used, nregs * sizeof (int));

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
               INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_continue)
        fprintf (loop_dump_stream, "Continue at insn %d.\n",
                 INSN_UID (loop_continue));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set n_times_set negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  p = scan_start;
  while (1)
    {
      p = NEXT_INSN (p);
      /* At end of a straight-in loop, we are done.
         At end of a loop entered at the bottom, scan the top.  */
      if (p == scan_start)
        break;
      if (p == end)
        {
          if (loop_top != 0)
            p = loop_top;
          else
            break;
          if (p == scan_start)
            break;
        }

      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
          && find_reg_note (p, REG_LIBCALL, NULL_RTX))
        in_libcall = 1;
      else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
               && find_reg_note (p, REG_RETVAL, NULL_RTX))
        in_libcall = 0;

      if (GET_CODE (p) == INSN
          && (set = single_set (p))
          && GET_CODE (SET_DEST (set)) == REG
          && ! may_not_optimize[REGNO (SET_DEST (set))])
        {
          int tem1 = 0;
          int tem2 = 0;
          int move_insn = 0;
          rtx src = SET_SRC (set);
          rtx dependencies = 0;

          /* Figure out what to use as a source of this insn.  If a REG_EQUIV
             note is given or if a REG_EQUAL note with a constant operand is
             specified, use it as the source and mark that we should move
             this insn by calling emit_move_insn rather than duplicating the
             insn.

             Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
             is present.  */
          temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
          if (temp)
            src = XEXP (temp, 0), move_insn = 1;
          else
            {
              temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
              if (temp && CONSTANT_P (XEXP (temp, 0)))
                src = XEXP (temp, 0), move_insn = 1;
              if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
                {
                  src = XEXP (temp, 0);
                  /* A libcall block can use regs that don't appear in
                     the equivalent expression.  To move the libcall,
                     we must move those regs too.  */
                  dependencies = libcall_other_reg (p, src);
                }
            }

          /* Don't try to optimize a register that was made
             by loop-optimization for an inner loop.
             We don't know its life-span, so we can't compute the benefit.  */
          if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
            ;
          /* In order to move a register, we need to have one of three cases:
             (1) it is used only in the same basic block as the set
             (2) it is not a user variable and it is not used in the
                 exit test (this can cause the variable to be used
                 before it is set just like a user-variable).
             (3) the set is guaranteed to be executed once the loop starts,
                 and the reg is not used until after that.  */
          else if (! ((! maybe_never
                       && ! loop_reg_used_before_p (set, p, loop_start,
                                                    scan_start, end))
                      || (! REG_USERVAR_P (SET_DEST (set))
                          && ! REG_LOOP_TEST_P (SET_DEST (set)))
                      || reg_in_basic_block_p (p, SET_DEST (set))))
            ;
          else if ((tem = invariant_p (src))
                   && (dependencies == 0
                       || (tem2 = invariant_p (dependencies)) != 0)
                   && (n_times_set[REGNO (SET_DEST (set))] == 1
                       || (tem1
                           = consec_sets_invariant_p (SET_DEST (set),
                                                      n_times_set[REGNO (SET_DEST (set))],
                                                      p)))
                   /* If the insn can cause a trap (such as divide by zero),
                      can't move it unless it's guaranteed to be executed
                      once loop is entered.  Even a function call might
                      prevent the trap insn from being reached
                      (since it might exit!)  */
                   && ! ((maybe_never || call_passed)
                         && may_trap_p (src)))
            {
              register struct movable *m;
              register int regno = REGNO (SET_DEST (set));

              /* A potential lossage is the case where two insns can be
                 combined as long as they are both in the loop, but we move
                 one of them outside the loop.  For large loops, this can
                 lose.  The most common case of this is the address of a
                 function being called.

                 Therefore, if this register is marked as being used exactly
                 once and we are in a loop with calls (a "large loop"), see
                 if we can replace the usage of this register with the
                 source of this SET.  If we can, delete this insn.

                 Don't do this if P has a REG_RETVAL note or if we have
                 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */
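
              /* (Illustrative instance, not from the original sources:
                 in a loop containing

                     (set (reg 120) (symbol_ref "f"))
                     ...
                     (call (mem (reg 120)) ...)

                 with reg 120 used exactly once, the call can be made
                 to use the symbol_ref directly and the set deleted.)  */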

              if (reg_single_usage && reg_single_usage[regno] != 0
                  && reg_single_usage[regno] != const0_rtx
                  && REGNO_FIRST_UID (regno) == INSN_UID (p)
                  && (REGNO_LAST_UID (regno)
                      == INSN_UID (reg_single_usage[regno]))
                  && n_times_set[REGNO (SET_DEST (set))] == 1
                  && ! side_effects_p (SET_SRC (set))
                  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
                  && (! SMALL_REGISTER_CLASSES
                      || (! (GET_CODE (SET_SRC (set)) == REG
                             && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
                  /* This test is not redundant; SET_SRC (set) might be
                     a call-clobbered register and the life of REGNO
                     might span a call.  */
                  && ! modified_between_p (SET_SRC (set), p,
                                           reg_single_usage[regno])
                  && no_labels_between_p (p, reg_single_usage[regno])
                  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
                                           reg_single_usage[regno]))
                {
                  /* Replace any usage in a REG_EQUAL note.  Must copy the
                     new source, so that we don't get rtx sharing between the
                     SET_SOURCE and REG_NOTES of insn p.  */
                  REG_NOTES (reg_single_usage[regno])
                    = replace_rtx (REG_NOTES (reg_single_usage[regno]),
                                   SET_DEST (set), copy_rtx (SET_SRC (set)));

                  PUT_CODE (p, NOTE);
                  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
                  NOTE_SOURCE_FILE (p) = 0;
                  n_times_set[regno] = 0;
                  continue;
                }

              m = (struct movable *) alloca (sizeof (struct movable));
              m->next = 0;
              m->insn = p;
              m->set_src = src;
              m->dependencies = dependencies;
              m->set_dest = SET_DEST (set);
              m->force = 0;
              m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
              m->done = 0;
              m->forces = 0;
              m->partial = 0;
              m->move_insn = move_insn;
              m->move_insn_first = 0;
              m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
              m->savemode = VOIDmode;
              m->regno = regno;
              /* Set M->cond if either invariant_p or consec_sets_invariant_p
                 returned 2 (only conditionally invariant).  */
              m->cond = ((tem | tem1 | tem2) > 1);
              m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
                           || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
              m->match = 0;
              m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                             - uid_luid[REGNO_FIRST_UID (regno)]);
              m->savings = n_times_used[regno];
              if (find_reg_note (p, REG_RETVAL, NULL_RTX))
                m->savings += libcall_benefit (p);
              n_times_set[regno] = move_insn ? -2 : -1;
              /* Add M to the end of the chain MOVABLES.  */
              if (movables == 0)
                movables = m;
              else
                last_movable->next = m;
              last_movable = m;

              if (m->consec > 0)
                {
                  /* It is possible for the first instruction to have a
                     REG_EQUAL note but a non-invariant SET_SRC, so we must
                     remember the status of the first instruction in case
                     the last instruction doesn't have a REG_EQUAL note.  */
                  m->move_insn_first = m->move_insn;

                  /* Skip this insn, not checking REG_LIBCALL notes.  */
                  p = next_nonnote_insn (p);
                  /* Skip the consecutive insns, if there are any.  */
                  p = skip_consec_insns (p, m->consec);
                  /* Back up to the last insn of the consecutive group.  */
                  p = prev_nonnote_insn (p);

                  /* We must now reset m->move_insn, m->is_equiv, and possibly
                     m->set_src to correspond to the effects of all the
                     insns.  */
                  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
                  if (temp)
                    m->set_src = XEXP (temp, 0), m->move_insn = 1;
                  else
                    {
                      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
                      if (temp && CONSTANT_P (XEXP (temp, 0)))
                        m->set_src = XEXP (temp, 0), m->move_insn = 1;
                      else
                        m->move_insn = 0;
                    }
                  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
                }
            }
          /* If this register is always set within a STRICT_LOW_PART
             or set to zero, then its high bytes are constant.
             So clear them outside the loop and within the loop
             just load the low bytes.
             We must check that the machine has an instruction to do so.
             Also, if the value loaded into the register
             depends on the same register, this cannot be done.  */
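          /* (Illustrative rtl for this case, not from the original
             sources:

                 (set (reg:SI 117) (const_int 0))
                 (set (strict_low_part (subreg:HI (reg:SI 117) 0))
                      (mem:HI ...))

             Only the clear of reg 117 is hoisted out of the loop.)  */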
          else if (SET_SRC (set) == const0_rtx
                   && GET_CODE (NEXT_INSN (p)) == INSN
                   && (set1 = single_set (NEXT_INSN (p)))
                   && GET_CODE (set1) == SET
                   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
                   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
                   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
                       == SET_DEST (set))
                   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
            {
              register int regno = REGNO (SET_DEST (set));
              if (n_times_set[regno] == 2)
                {
                  register struct movable *m;
                  m = (struct movable *) alloca (sizeof (struct movable));
                  m->next = 0;
                  m->insn = p;
                  m->set_dest = SET_DEST (set);
                  m->dependencies = 0;
                  m->force = 0;
                  m->consec = 0;
                  m->done = 0;
                  m->forces = 0;
                  m->move_insn = 0;
                  m->partial = 1;
                  /* If the insn may not be executed on some cycles,
                     we can't clear the whole reg; clear just high part.
                     Not even if the reg is used only within this loop.
                     Consider this:
                     while (1)
                       while (s != t) {
                         if (foo ()) x = *s;
                         use (x);
                       }
                     Clearing x before the inner loop could clobber a value
                     being saved from the last time around the outer loop.
                     However, if the reg is not used outside this loop
                     and all uses of the register are in the same
                     basic block as the store, there is no problem.

                     If this insn was made by loop, we don't know its
                     INSN_LUID and hence must make a conservative
                     assumption.  */
                  m->global = (INSN_UID (p) >= max_uid_for_loop
                               || (uid_luid[REGNO_LAST_UID (regno)]
                                   > INSN_LUID (end))
                               || (uid_luid[REGNO_FIRST_UID (regno)]
                                   < INSN_LUID (p))
                               || (labels_in_range_p
                                   (p, uid_luid[REGNO_FIRST_UID (regno)])));
                  if (maybe_never && m->global)
                    m->savemode = GET_MODE (SET_SRC (set1));
                  else
                    m->savemode = VOIDmode;
                  m->regno = regno;
                  m->cond = 0;
                  m->match = 0;
                  m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                                 - uid_luid[REGNO_FIRST_UID (regno)]);
                  m->savings = 1;
                  n_times_set[regno] = -1;
                  /* Add M to the end of the chain MOVABLES.  */
                  if (movables == 0)
                    movables = m;
                  else
                    last_movable->next = m;
                  last_movable = m;
                }
            }
        }
      /* Past a call insn, we get to insns which might not be executed
         because the call might exit.  This matters for insns that trap.
         Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
         so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
        call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
         can't count on whether or how many times they will be
         executed during each iteration.  Therefore, we can
         only move out sets of trivial variables
         (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
               /* If we enter the loop in the middle, and scan around to the
                  beginning, don't set maybe_never for that.  This must be an
                  unconditional jump, otherwise the code at the top of the
                  loop might never be executed.  Unconditional jumps are
                  followed by a barrier and then the loop end.  */
               && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
                     && NEXT_INSN (NEXT_INSN (p)) == end
                     && simplejump_p (p)))
        maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
        {
          /* At the virtual top of a converted loop, insns are again known to
             be executed: logically, the loop begins here even though the exit
             code has been duplicated.  */
          if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
            maybe_never = call_passed = 0;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
            loop_depth++;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
            loop_depth--;
        }
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg it loads, at the point where
     that reg dies, feeds right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in n_times_set for each reg that is moved.  */

  move_movables (movables, threshold,
                 insn_count, loop_start, end, nregs);

  /* Now candidates that still are negative are those not moved.
     Change n_times_set to indicate that those are not actually
     invariant.  */
  for (i = 0; i < nregs; i++)
    if (n_times_set[i] < 0)
      n_times_set[i] = n_times_used[i];

  if (flag_strength_reduce)
    strength_reduce (scan_start, end, loop_top,
                     insn_count, loop_start, end, unroll_p);
}
\f
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
          && ! reg_mentioned_p (in_this, not_in_this))
        *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
        {
        case 'E':
          for (j = 0; j < XVECLEN (in_this, i); j++)
            record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
          break;

        case 'e':
          record_excess_regs (XEXP (in_this, i), not_in_this, output);
          break;
        }
    }
}
\f
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

static rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
          || GET_CODE (p) == CALL_INSN)
        record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
\f
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
        {
        case NOTE:
          break;

        case INSN:
        case CALL_INSN:
          /* Ordinary insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          break;

        case JUMP_INSN:
          /* Jump insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          /* Otherwise, it's the end of the basic block, so we lose.  */
          return 0;

        case CODE_LABEL:
        case BARRIER:
          /* It's the end of the basic block, so we lose.  */
          return 0;

        default:
          break;
        }
    }

  /* The "last use" doesn't follow the "first use"??  */
  abort ();
}
\f
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
        benefit += 10;          /* Assume at least this many insns in a
                                   library routine.  */
      else if (GET_CODE (insn) == INSN
               && GET_CODE (PATTERN (insn)) != USE
               && GET_CODE (PATTERN (insn)) != CLOBBER)
        benefit++;
    }

  return benefit;
}
\f
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
         be an insn here.  */
      if (GET_CODE (insn) != NOTE
          && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
        insn = XEXP (temp, 0);

      do insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}

/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
        {
          rtx insn;
          /* Check for earlier movables inside that range,
             and mark them invalid.  We cannot use LUIDs here because
             insns created by loop.c for prior loops don't have LUIDs.
             Rather than reject all such insns from movables, we just
             explicitly check each insn in the libcall (since invariant
             libcalls aren't that common).  */
          for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1->insn == insn)
                m1->done = 1;
        }
    }
}

/* For each movable insn, see if the reg it loads, at the point where
   that reg dies, feeds right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

static void
force_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;
  for (m1 = movables; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
        int regno = m1->regno;
        for (m = m1->next; m; m = m->next)
          /* ??? Could this be a bug?  What if CSE caused the
             register of M1 to be used after this insn?
             Since CSE does not update regno_last_uid,
             this insn M->insn might not be where it dies.
             But very likely this doesn't matter; what matters is
             that M's reg is computed from M1's reg.  */
          if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
              && !m->done)
            break;
        if (m != 0 && m->set_src == m1->set_dest
            /* If m->consec, m->set_src isn't valid.  */
            && m->consec == 0)
          m = 0;

        /* Increase the priority of moving the first insn
           since it permits the second to be moved as well.  */
        if (m != 0)
          {
            m->forces = m1;
            m1->lifetime += m->lifetime;
            m1->savings += m->savings;
          }
      }
}
\f
/* Find invariant expressions that are equal and can be combined into
   one register.  */
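
/* (Illustrative, not from the original sources: if the loop contains
   both `r5 = 1000' and `r9 = 1000' as invariant movables, this pass
   points r9's movable at r5's through the `match' field; move_movables
   then emits one load before the loop and maps r9 to r5.)  */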

static void
combine_movables (movables, nregs)
     struct movable *movables;
     int nregs;
{
  register struct movable *m;
  char *matched_regs = (char *) alloca (nregs);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables; m; m = m->next)
    if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
      {
        register struct movable *m1;
        int regno = m->regno;

        bzero (matched_regs, nregs);
        matched_regs[regno] = 1;

        /* We want later insns to match the first one.  Don't make the first
           one match any later ones.  So start this loop at m->next.  */
        for (m1 = m->next; m1; m1 = m1->next)
          if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
              /* A reg used outside the loop mustn't be eliminated.  */
              && !m1->global
              /* A reg used for zero-extending mustn't be eliminated.  */
              && !m1->partial
              && (matched_regs[m1->regno]
                  ||
                  (
                   /* Can combine regs with different modes loaded from the
                      same constant only if the modes are the same or
                      if both are integer modes with M wider or the same
                      width as M1.  The check for integer is redundant, but
                      safe, since the only case of differing destination
                      modes with equal sources is when both sources are
                      VOIDmode, i.e., CONST_INT.  */
                   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
                    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
                        && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
                        && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
                            >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
                   /* See if the source of M1 says it matches M.  */
                   && ((GET_CODE (m1->set_src) == REG
                        && matched_regs[REGNO (m1->set_src)])
                       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
                                                movables))))
              && ((m->dependencies == m1->dependencies)
                  || rtx_equal_p (m->dependencies, m1->dependencies)))
            {
              m->lifetime += m1->lifetime;
              m->savings += m1->savings;
              m1->done = 1;
              m1->match = m;
              matched_regs[m1->regno] = 1;
            }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
         Don't combine any that are used outside this loop.  */
      for (m = movables; m; m = m->next)
        if (m->partial && ! m->global
            && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
          {
            register struct movable *m1;
            int first = uid_luid[REGNO_FIRST_UID (m->regno)];
            int last = uid_luid[REGNO_LAST_UID (m->regno)];

            if (m0 == 0)
              {
                /* First one: don't check for overlap, just record it.  */
                m0 = m;
                continue;
              }

            /* Make sure they extend to the same mode.
               (Almost always true.)  */
            if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
              continue;

            /* We already have one: check for overlap with those
               already combined together.  */
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1 == m0 || (m1->partial && m1->match == m0))
                if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
                       || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
                  goto overlap;

            /* No overlap: we can combine this with the others.  */
            m0->lifetime += m->lifetime;
            m0->savings += m->savings;
            m->done = 1;
            m->match = m0;

          overlap: ;
          }
    }
}
\f
/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  int xn = REGNO (x);
  int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
          && ((mx->match == my->match && mx->match != 0)
              || mx->match == my
              || mx == my->match));
}
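
/* (Illustrative, not from the original sources: if the movables for
   regs 5 and 9 both have `match' pointing at the same movable, or one
   is the other's match, the two regs will end up loaded from a single
   moved register, so they are reported as matching.)  */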

/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */

static int
rtx_equal_for_loop_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  register int i;
  register int j;
  register struct movable *m;
  register enum rtx_code code;
  register char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
      && CONSTANT_P (y))
    {
      for (m = movables; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (x)
            && rtx_equal_p (m->set_src, y))
          return 1;
    }
  else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
           && CONSTANT_P (x))
    {
      for (m = movables; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (y)
            && rtx_equal_p (m->set_src, x))
          return 1;
    }

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'w':
          if (XWINT (x, i) != XWINT (y, i))
            return 0;
          break;

        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return 0;
          break;

        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return 0;

          /* And the corresponding elements must match.  */
          for (j = 0; j < XVECLEN (x, i); j++)
            if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
                                      movables) == 0)
              return 0;
          break;

        case 'e':
          if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
            return 0;
          break;

        case 's':
          if (strcmp (XSTR (x, i), XSTR (y, i)))
            return 0;
          break;

        case 'u':
          /* These are just backpointers, so they don't matter.  */
          break;

        case '0':
          break;

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's,
             except for within LABEL_REFs and SYMBOL_REFs.  */
        default:
          abort ();
        }
    }
  return 1;
}
\f
/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use that reference.  */

static void
add_label_notes (x, insns)
     rtx x;
     rtx insns;
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  char *fmt;
  rtx insn;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      rtx next = next_real_insn (XEXP (x, 0));

      /* Don't record labels that refer to dispatch tables.
         This is not necessary, since the tablejump references the same label.
         And if we did record them, flow.c would make worse code.  */
      if (next == 0
          || ! (GET_CODE (next) == JUMP_INSN
                && (GET_CODE (PATTERN (next)) == ADDR_VEC
                    || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)))
        {
          for (insn = insns; insn; insn = NEXT_INSN (insn))
            if (reg_mentioned_p (XEXP (x, 0), insn))
              REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
                                                    REG_NOTES (insn));
        }
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        add_label_notes (XEXP (x, i), insns);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          add_label_notes (XVECEXP (x, i, j), insns);
    }
}
\f
/* Scan MOVABLES, and move the insns that deserve to be moved.
   If two matching movables are combined, replace one reg with the
   other throughout.  */

static void
move_movables (movables, threshold, insn_count, loop_start, end, nregs)
     struct movable *movables;
     int threshold;
     int insn_count;
     rtx loop_start;
     rtx end;
     int nregs;
{
  rtx new_start = 0;
  register struct movable *m;
  register rtx p;
  /* Map of pseudo-register replacements to handle combining
     when we move several insns that load the same value
     into different pseudo-registers.  */
  rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
  char *already_moved = (char *) alloca (nregs);

  bzero (already_moved, nregs);
  bzero ((char *) reg_map, nregs * sizeof (rtx));

  num_movables = 0;

  for (m = movables; m; m = m->next)
    {
      /* Describe this movable insn.  */

      if (loop_dump_stream)
        {
          fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
                   INSN_UID (m->insn), m->regno, m->lifetime);
          if (m->consec > 0)
            fprintf (loop_dump_stream, "consec %d, ", m->consec);
          if (m->cond)
            fprintf (loop_dump_stream, "cond ");
          if (m->force)
            fprintf (loop_dump_stream, "force ");
          if (m->global)
            fprintf (loop_dump_stream, "global ");
          if (m->done)
            fprintf (loop_dump_stream, "done ");
          if (m->move_insn)
            fprintf (loop_dump_stream, "move-insn ");
          if (m->match)
            fprintf (loop_dump_stream, "matches %d ",
                     INSN_UID (m->match->insn));
          if (m->forces)
            fprintf (loop_dump_stream, "forces %d ",
                     INSN_UID (m->forces->insn));
        }

1680 /* Count movables. Value used in heuristics in strength_reduce. */
1681 num_movables++;
1682
1683 /* Ignore the insn if it's already done (it matched something else).
1684 Otherwise, see if it is now safe to move. */
1685
1686 if (!m->done
1687 && (! m->cond
1688 || (1 == invariant_p (m->set_src)
1689 && (m->dependencies == 0
1690 || 1 == invariant_p (m->dependencies))
1691 && (m->consec == 0
1692 || 1 == consec_sets_invariant_p (m->set_dest,
1693 m->consec + 1,
1694 m->insn))))
1695 && (! m->forces || m->forces->done))
1696 {
1697 register int regno;
1698 register rtx p;
1699 int savings = m->savings;
1700
1701 /* We have an insn that is safe to move.
1702 Compute its desirability. */
1703
1704 p = m->insn;
1705 regno = m->regno;
1706
1707 if (loop_dump_stream)
1708 fprintf (loop_dump_stream, "savings %d ", savings);
1709
1710 if (moved_once[regno])
1711 {
1712 insn_count *= 2;
1713
1714 if (loop_dump_stream)
1715 fprintf (loop_dump_stream, "halved since already moved ");
1716 }
1717
1718 /* An insn MUST be moved if we already moved something else
1719 which is safe only if this one is moved too: that is,
1720 if already_moved[REGNO] is nonzero. */
1721
1722 /* An insn is desirable to move if the new lifetime of the
1723 register is no more than THRESHOLD times the old lifetime.
1724 If it's not desirable, it means the loop is so big
1725 that moving won't speed things up much,
1726 and it is liable to make register usage worse. */
1727
1728 /* It is also desirable to move if it can be moved at no
1729 extra cost because something else was already moved. */
1730
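/* A worked reading of the threshold test below (hypothetical
   numbers): with threshold == 6, savings == 2 and a register
   lifetime of 10 insns, 6 * 2 * 10 == 120, so the move is taken
   whenever the loop body has at most 120 insns; in a bigger loop
   the lengthened register lifetime is judged too costly.  */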
1731 if (already_moved[regno]
1732 || flag_move_all_movables
1733 || (threshold * savings * m->lifetime) >= insn_count
1734 || (m->forces && m->forces->done
1735 && n_times_used[m->forces->regno] == 1))
1736 {
1737 int count;
1738 register struct movable *m1;
1739 rtx first;
1740
1741 /* Now move the insns that set the reg. */
1742
1743 if (m->partial && m->match)
1744 {
1745 rtx newpat, i1;
1746 rtx r1, r2;
1747 /* Find the end of this chain of matching regs.
1748 Thus, we load each reg in the chain from that one reg.
1749 And that reg is loaded with 0 directly,
1750 since it has ->match == 0. */
1751 for (m1 = m; m1->match; m1 = m1->match);
1752 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1753 SET_DEST (PATTERN (m1->insn)));
1754 i1 = emit_insn_before (newpat, loop_start);
1755
1756 /* Mark the moved, invariant reg as being allowed to
1757 share a hard reg with the other matching invariant. */
1758 REG_NOTES (i1) = REG_NOTES (m->insn);
1759 r1 = SET_DEST (PATTERN (m->insn));
1760 r2 = SET_DEST (PATTERN (m1->insn));
1761 regs_may_share
1762 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1763 gen_rtx_EXPR_LIST (VOIDmode, r2,
1764 regs_may_share));
1765 delete_insn (m->insn);
1766
1767 if (new_start == 0)
1768 new_start = i1;
1769
1770 if (loop_dump_stream)
1771 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1772 }
1773 /* If we are to re-generate the item being moved with a
1774 new move insn, first delete what we have and then emit
1775 the move insn before the loop. */
1776 else if (m->move_insn)
1777 {
1778 rtx i1, temp;
1779
1780 for (count = m->consec; count >= 0; count--)
1781 {
1782 /* If this is the first insn of a library call sequence,
1783 skip to the end. */
1784 if (GET_CODE (p) != NOTE
1785 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1786 p = XEXP (temp, 0);
1787
1788 /* If this is the last insn of a libcall sequence, then
1789 delete every insn in the sequence except the last.
1790 The last insn is handled in the normal manner. */
1791 if (GET_CODE (p) != NOTE
1792 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1793 {
1794 temp = XEXP (temp, 0);
1795 while (temp != p)
1796 temp = delete_insn (temp);
1797 }
1798
1799 p = delete_insn (p);
1800 while (p && GET_CODE (p) == NOTE)
1801 p = NEXT_INSN (p);
1802 }
1803
1804 start_sequence ();
1805 emit_move_insn (m->set_dest, m->set_src);
1806 temp = get_insns ();
1807 end_sequence ();
1808
1809 add_label_notes (m->set_src, temp);
1810
1811 i1 = emit_insns_before (temp, loop_start);
1812 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1813 REG_NOTES (i1)
1814 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1815 m->set_src, REG_NOTES (i1));
1816
1817 if (loop_dump_stream)
1818 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1819
1820 /* The more regs we move, the less we like moving them. */
1821 threshold -= 3;
1822 }
1823 else
1824 {
1825 for (count = m->consec; count >= 0; count--)
1826 {
1827 rtx i1, temp;
1828
1829 /* If first insn of libcall sequence, skip to end. */
1830 /* Do this at start of loop, since p is guaranteed to
1831 be an insn here. */
1832 if (GET_CODE (p) != NOTE
1833 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1834 p = XEXP (temp, 0);
1835
1836 /* If last insn of libcall sequence, move all
1837 insns except the last before the loop. The last
1838 insn is handled in the normal manner. */
1839 if (GET_CODE (p) != NOTE
1840 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1841 {
1842 rtx fn_address = 0;
1843 rtx fn_reg = 0;
1844 rtx fn_address_insn = 0;
1845
1846 first = 0;
1847 for (temp = XEXP (temp, 0); temp != p;
1848 temp = NEXT_INSN (temp))
1849 {
1850 rtx body;
1851 rtx n;
1852 rtx next;
1853
1854 if (GET_CODE (temp) == NOTE)
1855 continue;
1856
1857 body = PATTERN (temp);
1858
1859 /* Find the next insn after TEMP,
1860 not counting USE or NOTE insns. */
1861 for (next = NEXT_INSN (temp); next != p;
1862 next = NEXT_INSN (next))
1863 if (! (GET_CODE (next) == INSN
1864 && GET_CODE (PATTERN (next)) == USE)
1865 && GET_CODE (next) != NOTE)
1866 break;
1867
1868 /* If that is the call, this may be the insn
1869 that loads the function address.
1870
1871 Extract the function address from the insn
1872 that loads it into a register.
1873 If this insn was cse'd, we get incorrect code.
1874
1875 So emit a new move insn that copies the
1876 function address into the register that the
1877 call insn will use. flow.c will delete any
1878 redundant stores that we have created. */
1879 if (GET_CODE (next) == CALL_INSN
1880 && GET_CODE (body) == SET
1881 && GET_CODE (SET_DEST (body)) == REG
1882 && (n = find_reg_note (temp, REG_EQUAL,
1883 NULL_RTX)))
1884 {
1885 fn_reg = SET_SRC (body);
1886 if (GET_CODE (fn_reg) != REG)
1887 fn_reg = SET_DEST (body);
1888 fn_address = XEXP (n, 0);
1889 fn_address_insn = temp;
1890 }
1891 /* We have the call insn.
1892 If it uses the register we suspect it might,
1893 load it with the correct address directly. */
1894 if (GET_CODE (temp) == CALL_INSN
1895 && fn_address != 0
1896 && reg_referenced_p (fn_reg, body))
1897 emit_insn_after (gen_move_insn (fn_reg,
1898 fn_address),
1899 fn_address_insn);
1900
1901 if (GET_CODE (temp) == CALL_INSN)
1902 {
1903 i1 = emit_call_insn_before (body, loop_start);
1904 /* Because the USAGE information potentially
1905 contains objects other than hard registers
1906 we need to copy it. */
1907 if (CALL_INSN_FUNCTION_USAGE (temp))
1908 CALL_INSN_FUNCTION_USAGE (i1)
1909 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
1910 }
1911 else
1912 i1 = emit_insn_before (body, loop_start);
1913 if (first == 0)
1914 first = i1;
1915 if (temp == fn_address_insn)
1916 fn_address_insn = i1;
1917 REG_NOTES (i1) = REG_NOTES (temp);
1918 delete_insn (temp);
1919 }
1920 }
1921 if (m->savemode != VOIDmode)
1922 {
1923 /* P sets REG to zero; but we should clear only
1924 the bits that are not covered by the mode
1925 m->savemode. */
1926 rtx reg = m->set_dest;
1927 rtx sequence;
1928 rtx tem;
1929
1930 start_sequence ();
1931 tem = expand_binop
1932 (GET_MODE (reg), and_optab, reg,
1933 GEN_INT ((((HOST_WIDE_INT) 1
1934 << GET_MODE_BITSIZE (m->savemode)))
1935 - 1),
1936 reg, 1, OPTAB_LIB_WIDEN);
1937 if (tem == 0)
1938 abort ();
1939 if (tem != reg)
1940 emit_move_insn (reg, tem);
1941 sequence = gen_sequence ();
1942 end_sequence ();
1943 i1 = emit_insn_before (sequence, loop_start);
1944 }
1945 else if (GET_CODE (p) == CALL_INSN)
1946 {
1947 i1 = emit_call_insn_before (PATTERN (p), loop_start);
1948 /* Because the USAGE information potentially
1949 contains objects other than hard registers
1950 we need to copy it. */
1951 if (CALL_INSN_FUNCTION_USAGE (p))
1952 CALL_INSN_FUNCTION_USAGE (i1)
1953 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
1954 }
1955 else if (count == m->consec && m->move_insn_first)
1956 {
1957 /* The SET_SRC might not be invariant, so we must
1958 use the REG_EQUAL note. */
1959 start_sequence ();
1960 emit_move_insn (m->set_dest, m->set_src);
1961 temp = get_insns ();
1962 end_sequence ();
1963
1964 add_label_notes (m->set_src, temp);
1965
1966 i1 = emit_insns_before (temp, loop_start);
1967 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1968 REG_NOTES (i1)
1969 = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
1970 : REG_EQUAL),
1971 m->set_src, REG_NOTES (i1));
1972 }
1973 else
1974 i1 = emit_insn_before (PATTERN (p), loop_start);
1975
1976 if (REG_NOTES (i1) == 0)
1977 {
1978 REG_NOTES (i1) = REG_NOTES (p);
1979
1980 /* If there is a REG_EQUAL note present whose value
1981 is not loop invariant, then delete it, since it
1982 may cause problems with later optimization passes.
1983 It is possible for cse to create such notes
1984 like this as a result of record_jump_cond. */
1985
1986 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
1987 && ! invariant_p (XEXP (temp, 0)))
1988 remove_note (i1, temp);
1989 }
1990
1991 if (new_start == 0)
1992 new_start = i1;
1993
1994 if (loop_dump_stream)
1995 fprintf (loop_dump_stream, " moved to %d",
1996 INSN_UID (i1));
1997
1998 /* If library call, now fix the REG_NOTES that contain
1999 insn pointers, namely REG_LIBCALL on FIRST
2000 and REG_RETVAL on I1. */
2001 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2002 {
2003 XEXP (temp, 0) = first;
2004 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2005 XEXP (temp, 0) = i1;
2006 }
2007
2008 delete_insn (p);
2009 do p = NEXT_INSN (p);
2010 while (p && GET_CODE (p) == NOTE);
2011 }
2012
2013 /* The more regs we move, the less we like moving them. */
2014 threshold -= 3;
2015 }
2016
2017 /* Any other movable that loads the same register
2018 MUST be moved. */
2019 already_moved[regno] = 1;
2020
2021 /* This reg has been moved out of one loop. */
2022 moved_once[regno] = 1;
2023
2024 /* The reg set here is now invariant. */
2025 if (! m->partial)
2026 n_times_set[regno] = 0;
2027
2028 m->done = 1;
2029
2030 /* Change the length-of-life info for the register
2031 to say it lives at least the full length of this loop.
2032 This will help guide optimizations in outer loops. */
2033
2034 if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
2035 /* This is the old insn before all the moved insns.
2036 We can't use the moved insn because it is out of range
2037 in uid_luid. Only the old insns have luids. */
2038 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2039 if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
2040 REGNO_LAST_UID (regno) = INSN_UID (end);
2041
2042 /* Combine with this moved insn any other matching movables. */
2043
2044 if (! m->partial)
2045 for (m1 = movables; m1; m1 = m1->next)
2046 if (m1->match == m)
2047 {
2048 rtx temp;
2049
2050 /* Schedule the reg loaded by M1
2051 for replacement so that it shares the reg of M.
2052 If the modes differ (only possible in restricted
2053 circumstances), make a SUBREG.  */
2054 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2055 reg_map[m1->regno] = m->set_dest;
2056 else
2057 reg_map[m1->regno]
2058 = gen_lowpart_common (GET_MODE (m1->set_dest),
2059 m->set_dest);
2060
2061 /* Get rid of the matching insn
2062 and prevent further processing of it. */
2063 m1->done = 1;
2064
2065 /* If a library call, delete all insns except the last,
2066 which is deleted below.  */
2067 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2068 NULL_RTX)))
2069 {
2070 for (temp = XEXP (temp, 0); temp != m1->insn;
2071 temp = NEXT_INSN (temp))
2072 delete_insn (temp);
2073 }
2074 delete_insn (m1->insn);
2075
2076 /* Any other movable that loads the same register
2077 MUST be moved. */
2078 already_moved[m1->regno] = 1;
2079
2080 /* The reg merged here is now invariant,
2081 if the reg it matches is invariant. */
2082 if (! m->partial)
2083 n_times_set[m1->regno] = 0;
2084 }
2085 }
2086 else if (loop_dump_stream)
2087 fprintf (loop_dump_stream, "not desirable");
2088 }
2089 else if (loop_dump_stream && !m->match)
2090 fprintf (loop_dump_stream, "not safe");
2091
2092 if (loop_dump_stream)
2093 fprintf (loop_dump_stream, "\n");
2094 }
2095
2096 if (new_start == 0)
2097 new_start = loop_start;
2098
2099 /* Go through all the instructions in the loop, making
2100 all the register substitutions scheduled in REG_MAP. */
2101 for (p = new_start; p != end; p = NEXT_INSN (p))
2102 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2103 || GET_CODE (p) == CALL_INSN)
2104 {
2105 replace_regs (PATTERN (p), reg_map, nregs, 0);
2106 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2107 INSN_CODE (p) = -1;
2108 }
2109 }
2110 \f
2111 #if 0
2112 /* Scan X and replace the address of any MEM in it with ADDR.
2113 REG is the address that MEM should have before the replacement. */
2114
2115 static void
2116 replace_call_address (x, reg, addr)
2117 rtx x, reg, addr;
2118 {
2119 register enum rtx_code code;
2120 register int i;
2121 register char *fmt;
2122
2123 if (x == 0)
2124 return;
2125 code = GET_CODE (x);
2126 switch (code)
2127 {
2128 case PC:
2129 case CC0:
2130 case CONST_INT:
2131 case CONST_DOUBLE:
2132 case CONST:
2133 case SYMBOL_REF:
2134 case LABEL_REF:
2135 case REG:
2136 return;
2137
2138 case SET:
2139 /* Short cut for very common case. */
2140 replace_call_address (XEXP (x, 1), reg, addr);
2141 return;
2142
2143 case CALL:
2144 /* Short cut for very common case. */
2145 replace_call_address (XEXP (x, 0), reg, addr);
2146 return;
2147
2148 case MEM:
2149 /* If this MEM uses a reg other than the one we expected,
2150 something is wrong. */
2151 if (XEXP (x, 0) != reg)
2152 abort ();
2153 XEXP (x, 0) = addr;
2154 return;
2155
2156 default:
2157 break;
2158 }
2159
2160 fmt = GET_RTX_FORMAT (code);
2161 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2162 {
2163 if (fmt[i] == 'e')
2164 replace_call_address (XEXP (x, i), reg, addr);
2165 if (fmt[i] == 'E')
2166 {
2167 register int j;
2168 for (j = 0; j < XVECLEN (x, i); j++)
2169 replace_call_address (XVECEXP (x, i, j), reg, addr);
2170 }
2171 }
2172 }
2173 #endif
2174 \f
2175 /* Return the number of memory refs to addresses that vary
2176 in the rtx X. */
2177
2178 static int
2179 count_nonfixed_reads (x)
2180 rtx x;
2181 {
2182 register enum rtx_code code;
2183 register int i;
2184 register char *fmt;
2185 int value;
2186
2187 if (x == 0)
2188 return 0;
2189
2190 code = GET_CODE (x);
2191 switch (code)
2192 {
2193 case PC:
2194 case CC0:
2195 case CONST_INT:
2196 case CONST_DOUBLE:
2197 case CONST:
2198 case SYMBOL_REF:
2199 case LABEL_REF:
2200 case REG:
2201 return 0;
2202
2203 case MEM:
2204 return ((invariant_p (XEXP (x, 0)) != 1)
2205 + count_nonfixed_reads (XEXP (x, 0)));
2206
2207 default:
2208 break;
2209 }
2210
2211 value = 0;
2212 fmt = GET_RTX_FORMAT (code);
2213 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2214 {
2215 if (fmt[i] == 'e')
2216 value += count_nonfixed_reads (XEXP (x, i));
2217 if (fmt[i] == 'E')
2218 {
2219 register int j;
2220 for (j = 0; j < XVECLEN (x, i); j++)
2221 value += count_nonfixed_reads (XVECEXP (x, i, j));
2222 }
2223 }
2224 return value;
2225 }
2226
2227 \f
2228 #if 0
2229 /* P is an instruction that sets a register to the result of a ZERO_EXTEND.
2230 Replace it with an instruction to load just the low bytes
2231 if the machine supports such an instruction,
2232 and insert above LOOP_START an instruction to clear the register. */
2233
2234 static void
2235 constant_high_bytes (p, loop_start)
2236 rtx p, loop_start;
2237 {
2238 register rtx new;
2239 register int insn_code_number;
2240
2241 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2242 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2243
2244 new = gen_rtx_SET (VOIDmode,
2245 gen_rtx_STRICT_LOW_PART (VOIDmode,
2246 gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2247 SET_DEST (PATTERN (p)),
2248 0)),
2249 XEXP (SET_SRC (PATTERN (p)), 0));
2250 insn_code_number = recog (new, p);
2251
2252 if (insn_code_number)
2253 {
2254 register int i;
2255
2256 /* Clear destination register before the loop. */
2257 emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
2258 const0_rtx),
2259 loop_start);
2260
2261 /* Inside the loop, just load the low part. */
2262 PATTERN (p) = new;
2263 }
2264 }
2265 #endif
2266 \f
2267 /* Scan a loop setting the variables `unknown_address_altered',
2268 `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
2269 and `loop_has_volatile'.
2270 Also, fill in the array `loop_store_mems'. */
2271
2272 static void
2273 prescan_loop (start, end)
2274 rtx start, end;
2275 {
2276 register int level = 1;
2277 register rtx insn;
2278
2279 unknown_address_altered = 0;
2280 loop_has_call = 0;
2281 loop_has_volatile = 0;
2282 loop_store_mems_idx = 0;
2283
2284 num_mem_sets = 0;
2285 loops_enclosed = 1;
2286 loop_continue = 0;
2287
2288 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2289 insn = NEXT_INSN (insn))
2290 {
2291 if (GET_CODE (insn) == NOTE)
2292 {
2293 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2294 {
2295 ++level;
2296 /* Count number of loops contained in this one. */
2297 loops_enclosed++;
2298 }
2299 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2300 {
2301 --level;
2302 if (level == 0)
2303 {
2304 end = insn;
2305 break;
2306 }
2307 }
2308 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2309 {
2310 if (level == 1)
2311 loop_continue = insn;
2312 }
2313 }
2314 else if (GET_CODE (insn) == CALL_INSN)
2315 {
2316 if (! CONST_CALL_P (insn))
2317 unknown_address_altered = 1;
2318 loop_has_call = 1;
2319 }
2320 else
2321 {
2322 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2323 {
2324 if (volatile_refs_p (PATTERN (insn)))
2325 loop_has_volatile = 1;
2326
2327 note_stores (PATTERN (insn), note_addr_stored);
2328 }
2329 }
2330 }
2331 }
2332 \f
2333 /* Scan the function looking for loops. Record the start and end of each loop.
2334 Also mark as invalid loops any loops that contain a setjmp or are branched
2335 to from outside the loop. */
2336
2337 static void
2338 find_and_verify_loops (f)
2339 rtx f;
2340 {
2341 rtx insn, label;
2342 int current_loop = -1;
2343 int next_loop = -1;
2344 int loop;
2345
2346 /* If there are jumps to undefined labels,
2347 treat them as jumps out of any/all loops.
2348 This also avoids writing past end of tables when there are no loops. */
2349 uid_loop_num[0] = -1;
2350
2351 /* Find boundaries of loops, mark which loops are contained within
2352 loops, and invalidate loops that have setjmp. */
2353
2354 for (insn = f; insn; insn = NEXT_INSN (insn))
2355 {
2356 if (GET_CODE (insn) == NOTE)
2357 switch (NOTE_LINE_NUMBER (insn))
2358 {
2359 case NOTE_INSN_LOOP_BEG:
2360 loop_number_loop_starts[++next_loop] = insn;
2361 loop_number_loop_ends[next_loop] = 0;
2362 loop_outer_loop[next_loop] = current_loop;
2363 loop_invalid[next_loop] = 0;
2364 loop_number_exit_labels[next_loop] = 0;
2365 loop_number_exit_count[next_loop] = 0;
2366 current_loop = next_loop;
2367 break;
2368
2369 case NOTE_INSN_SETJMP:
2370 /* In this case, we must invalidate our current loop and any
2371 enclosing loop. */
2372 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2373 {
2374 loop_invalid[loop] = 1;
2375 if (loop_dump_stream)
2376 fprintf (loop_dump_stream,
2377 "\nLoop at %d ignored due to setjmp.\n",
2378 INSN_UID (loop_number_loop_starts[loop]));
2379 }
2380 break;
2381
2382 case NOTE_INSN_LOOP_END:
2383 if (current_loop == -1)
2384 abort ();
2385
2386 loop_number_loop_ends[current_loop] = insn;
2387 current_loop = loop_outer_loop[current_loop];
2388 break;
2389
2390 default:
2391 break;
2392 }
2393
2394 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2395 enclosing loop, but this doesn't matter. */
2396 uid_loop_num[INSN_UID (insn)] = current_loop;
2397 }
2398
2399 /* Any loop containing a label used in an initializer must be invalidated,
2400 because it can be jumped into from anywhere. */
2401
2402 for (label = forced_labels; label; label = XEXP (label, 1))
2403 {
2404 int loop_num;
2405
2406 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2407 loop_num != -1;
2408 loop_num = loop_outer_loop[loop_num])
2409 loop_invalid[loop_num] = 1;
2410 }
2411
2412 /* Any loop containing a label used for an exception handler must be
2413 invalidated, because it can be jumped into from anywhere. */
2414
2415 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2416 {
2417 int loop_num;
2418
2419 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2420 loop_num != -1;
2421 loop_num = loop_outer_loop[loop_num])
2422 loop_invalid[loop_num] = 1;
2423 }
2424
2425 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2426 loop that it is not contained within, that loop is marked invalid.
2427 If any INSN or CALL_INSN uses a label's address, then the loop containing
2428 that label is marked invalid, because it could be jumped into from
2429 anywhere.
2430
2431 Also look for blocks of code ending in an unconditional branch that
2432 exits the loop. If such a block is surrounded by a conditional
2433 branch around the block, move the block elsewhere (see below) and
2434 invert the jump to point to the code block. This may eliminate a
2435 label in our loop and will simplify processing by both us and a
2436 possible second cse pass. */
2437
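/* Schematically (hypothetical pseudo assembly):

     before:			after:
	  beq  L1		     bne  L2
	  <block>		  L1: ...
	  jmp  exit
       L1: ...			...and after a BARRIER elsewhere:
				  L2: <block>
				      jmp  exit

   The conditional branch around the block is inverted to target a
   new label L2 at the head of the block, and the block is moved next
   to a BARRIER at the same loop depth, so the loop body loses both a
   label and a rarely executed block.  */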
2438 for (insn = f; insn; insn = NEXT_INSN (insn))
2439 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2440 {
2441 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2442
2443 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2444 {
2445 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2446 if (note)
2447 {
2448 int loop_num;
2449
2450 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2451 loop_num != -1;
2452 loop_num = loop_outer_loop[loop_num])
2453 loop_invalid[loop_num] = 1;
2454 }
2455 }
2456
2457 if (GET_CODE (insn) != JUMP_INSN)
2458 continue;
2459
2460 mark_loop_jump (PATTERN (insn), this_loop_num);
2461
2462 /* See if this is an unconditional branch outside the loop. */
2463 if (this_loop_num != -1
2464 && (GET_CODE (PATTERN (insn)) == RETURN
2465 || (simplejump_p (insn)
2466 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2467 != this_loop_num)))
2468 && get_max_uid () < max_uid_for_loop)
2469 {
2470 rtx p;
2471 rtx our_next = next_real_insn (insn);
2472 int dest_loop;
2473 int outer_loop = -1;
2474
2475 /* Go backwards until we reach the start of the loop, a label,
2476 or a JUMP_INSN. */
2477 for (p = PREV_INSN (insn);
2478 GET_CODE (p) != CODE_LABEL
2479 && ! (GET_CODE (p) == NOTE
2480 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2481 && GET_CODE (p) != JUMP_INSN;
2482 p = PREV_INSN (p))
2483 ;
2484
2485 /* Check for the case where we have a jump to an inner nested
2486 loop, and do not perform the optimization in that case. */
2487
2488 if (JUMP_LABEL (insn))
2489 {
2490 dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
2491 if (dest_loop != -1)
2492 {
2493 for (outer_loop = dest_loop; outer_loop != -1;
2494 outer_loop = loop_outer_loop[outer_loop])
2495 if (outer_loop == this_loop_num)
2496 break;
2497 }
2498 }
2499
2500 /* Make sure that the target of P is within the current loop. */
2501
2502 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2503 && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
2504 outer_loop = this_loop_num;
2505
2506 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2507 we have a block of code to try to move.
2508
2509 We look backward and then forward from the target of INSN
2510 to find a BARRIER at the same loop depth as the target.
2511 If we find such a BARRIER, we make a new label for the start
2512 of the block, invert the jump in P and point it to that label,
2513 and move the block of code to the spot we found. */
2514
2515 if (outer_loop == -1
2516 && GET_CODE (p) == JUMP_INSN
2517 && JUMP_LABEL (p) != 0
2518 /* Just ignore jumps to labels that were never emitted.
2519 These always indicate compilation errors. */
2520 && INSN_UID (JUMP_LABEL (p)) != 0
2521 && condjump_p (p)
2522 && ! simplejump_p (p)
2523 && next_real_insn (JUMP_LABEL (p)) == our_next)
2524 {
2525 rtx target
2526 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2527 int target_loop_num = uid_loop_num[INSN_UID (target)];
2528 rtx loc;
2529
2530 for (loc = target; loc; loc = PREV_INSN (loc))
2531 if (GET_CODE (loc) == BARRIER
2532 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2533 break;
2534
2535 if (loc == 0)
2536 for (loc = target; loc; loc = NEXT_INSN (loc))
2537 if (GET_CODE (loc) == BARRIER
2538 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2539 break;
2540
2541 if (loc)
2542 {
2543 rtx cond_label = JUMP_LABEL (p);
2544 rtx new_label = get_label_after (p);
2545
2546 /* Ensure our label doesn't go away. */
2547 LABEL_NUSES (cond_label)++;
2548
2549 /* Verify that uid_loop_num is large enough and that
2550 we can invert P. */
2551 if (invert_jump (p, new_label))
2552 {
2553 rtx q, r;
2554
2555 /* If no suitable BARRIER was found, create a suitable
2556 one before TARGET. Since TARGET is a fall through
2557 path, we'll need to insert a jump around our block
2558 and add a BARRIER before TARGET.
2559
2560 This creates an extra unconditional jump outside
2561 the loop. However, the benefits of removing rarely
2562 executed instructions from inside the loop usually
2563 outweigh the cost of the extra unconditional jump
2564 outside the loop. */
2565 if (loc == 0)
2566 {
2567 rtx temp;
2568
2569 temp = gen_jump (JUMP_LABEL (insn));
2570 temp = emit_jump_insn_before (temp, target);
2571 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2572 LABEL_NUSES (JUMP_LABEL (insn))++;
2573 loc = emit_barrier_before (target);
2574 }
2575
2576 /* Include the BARRIER after INSN and copy the
2577 block after LOC. */
2578 new_label = squeeze_notes (new_label, NEXT_INSN (insn));
2579 reorder_insns (new_label, NEXT_INSN (insn), loc);
2580
2581 /* All those insns are now in TARGET_LOOP_NUM. */
2582 for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
2583 q = NEXT_INSN (q))
2584 uid_loop_num[INSN_UID (q)] = target_loop_num;
2585
2586 /* The label jumped to by INSN is no longer a loop exit.
2587 Unless INSN does not have a label (e.g., it is a
2588 RETURN insn), search loop_number_exit_labels to find
2589 its label_ref, and remove it. Also turn off
2590 LABEL_OUTSIDE_LOOP_P bit. */
2591 if (JUMP_LABEL (insn))
2592 {
2593 int loop_num;
2594
2595 for (q = 0,
2596 r = loop_number_exit_labels[this_loop_num];
2597 r; q = r, r = LABEL_NEXTREF (r))
2598 if (XEXP (r, 0) == JUMP_LABEL (insn))
2599 {
2600 LABEL_OUTSIDE_LOOP_P (r) = 0;
2601 if (q)
2602 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2603 else
2604 loop_number_exit_labels[this_loop_num]
2605 = LABEL_NEXTREF (r);
2606 break;
2607 }
2608
2609 for (loop_num = this_loop_num;
2610 loop_num != -1 && loop_num != target_loop_num;
2611 loop_num = loop_outer_loop[loop_num])
2612 loop_number_exit_count[loop_num]--;
2613
2614 /* If we didn't find it, then something is wrong. */
2615 if (! r)
2616 abort ();
2617 }
2618
2619 /* P is now a jump outside the loop, so it must be put
2620 in loop_number_exit_labels, and marked as such.
2621 The easiest way to do this is to just call
2622 mark_loop_jump again for P. */
2623 mark_loop_jump (PATTERN (p), this_loop_num);
2624
2625 /* If INSN now jumps to the insn after it,
2626 delete INSN. */
2627 if (JUMP_LABEL (insn) != 0
2628 && (next_real_insn (JUMP_LABEL (insn))
2629 == next_real_insn (insn)))
2630 delete_insn (insn);
2631 }
2632
2633 /* Continue the loop after where the conditional
2634 branch used to jump, since the only branch insn
2635 in the block (if it still remains) is an inter-loop
2636 branch and hence needs no processing. */
2637 insn = NEXT_INSN (cond_label);
2638
2639 if (--LABEL_NUSES (cond_label) == 0)
2640 delete_insn (cond_label);
2641
2642 /* This loop will be continued with NEXT_INSN (insn). */
2643 insn = PREV_INSN (insn);
2644 }
2645 }
2646 }
2647 }
2648 }
2649
2650 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2651 loops it is contained in, mark the target loop invalid.
2652
2653 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2654
2655 static void
2656 mark_loop_jump (x, loop_num)
2657 rtx x;
2658 int loop_num;
2659 {
2660 int dest_loop;
2661 int outer_loop;
2662 int i;
2663
2664 switch (GET_CODE (x))
2665 {
2666 case PC:
2667 case USE:
2668 case CLOBBER:
2669 case REG:
2670 case MEM:
2671 case CONST_INT:
2672 case CONST_DOUBLE:
2673 case RETURN:
2674 return;
2675
2676 case CONST:
2677 /* There could be a label reference in here. */
2678 mark_loop_jump (XEXP (x, 0), loop_num);
2679 return;
2680
2681 case PLUS:
2682 case MINUS:
2683 case MULT:
2684 mark_loop_jump (XEXP (x, 0), loop_num);
2685 mark_loop_jump (XEXP (x, 1), loop_num);
2686 return;
2687
2688 case SIGN_EXTEND:
2689 case ZERO_EXTEND:
2690 mark_loop_jump (XEXP (x, 0), loop_num);
2691 return;
2692
2693 case LABEL_REF:
2694 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
2695
2696 /* Link together all labels that branch outside the loop. This
2697 is used by final_[bg]iv_value and the loop unrolling code. Also
2698 mark this LABEL_REF so we know that this branch should predict
2699 false. */
2700
2701 /* A check to make sure the label is not in an inner nested loop,
2702 since this does not count as a loop exit. */
2703 if (dest_loop != -1)
2704 {
2705 for (outer_loop = dest_loop; outer_loop != -1;
2706 outer_loop = loop_outer_loop[outer_loop])
2707 if (outer_loop == loop_num)
2708 break;
2709 }
2710 else
2711 outer_loop = -1;
2712
2713 if (loop_num != -1 && outer_loop == -1)
2714 {
2715 LABEL_OUTSIDE_LOOP_P (x) = 1;
2716 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2717 loop_number_exit_labels[loop_num] = x;
2718
2719 for (outer_loop = loop_num;
2720 outer_loop != -1 && outer_loop != dest_loop;
2721 outer_loop = loop_outer_loop[outer_loop])
2722 loop_number_exit_count[outer_loop]++;
2723 }
2724
2725 /* If this is inside a loop, but not in the current loop or one enclosed
2726 by it, it invalidates at least one loop. */
2727
2728 if (dest_loop == -1)
2729 return;
2730
2731 /* We must invalidate every nested loop containing the target of this
2732 label, except those that also contain the jump insn. */
2733
2734 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
2735 {
2736 /* Stop when we reach a loop that also contains the jump insn. */
2737 for (outer_loop = loop_num; outer_loop != -1;
2738 outer_loop = loop_outer_loop[outer_loop])
2739 if (dest_loop == outer_loop)
2740 return;
2741
2742 /* If we get here, we know we need to invalidate a loop. */
2743 if (loop_dump_stream && ! loop_invalid[dest_loop])
2744 fprintf (loop_dump_stream,
2745 "\nLoop at %d ignored due to multiple entry points.\n",
2746 INSN_UID (loop_number_loop_starts[dest_loop]));
2747
2748 loop_invalid[dest_loop] = 1;
2749 }
2750 return;
2751
2752 case SET:
2753 /* Unless this is a set of pc, there is nothing to mark.  */
2754 if (SET_DEST (x) == pc_rtx)
2755 mark_loop_jump (SET_SRC (x), loop_num);
2756 return;
2757
2758 case IF_THEN_ELSE:
2759 mark_loop_jump (XEXP (x, 1), loop_num);
2760 mark_loop_jump (XEXP (x, 2), loop_num);
2761 return;
2762
2763 case PARALLEL:
2764 case ADDR_VEC:
2765 for (i = 0; i < XVECLEN (x, 0); i++)
2766 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
2767 return;
2768
2769 case ADDR_DIFF_VEC:
2770 for (i = 0; i < XVECLEN (x, 1); i++)
2771 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
2772 return;
2773
2774 default:
2775 /* Treat anything else (such as a symbol_ref)
2776 as a branch out of this loop, but not into any loop. */
2777
2778 if (loop_num != -1)
2779 {
2780 #ifdef HAIFA
2781 LABEL_OUTSIDE_LOOP_P (x) = 1;
2782 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2783 #endif /* HAIFA */
2784
2785 loop_number_exit_labels[loop_num] = x;
2786
2787 for (outer_loop = loop_num; outer_loop != -1;
2788 outer_loop = loop_outer_loop[outer_loop])
2789 loop_number_exit_count[outer_loop]++;
2790 }
2791 return;
2792 }
2793 }
2794 \f
2795 /* Return nonzero if there is a label in the range from
2796 insn INSN to and including the insn whose luid is END.
2797 INSN must have an assigned luid (i.e., it must not have
2798 been previously created by loop.c). */
2799
2800 static int
2801 labels_in_range_p (insn, end)
2802 rtx insn;
2803 int end;
2804 {
2805 while (insn && INSN_LUID (insn) <= end)
2806 {
2807 if (GET_CODE (insn) == CODE_LABEL)
2808 return 1;
2809 insn = NEXT_INSN (insn);
2810 }
2811
2812 return 0;
2813 }
2814
2815 /* Record that a memory reference X is being set. */
2816
2817 static void
2818 note_addr_stored (x)
2819 rtx x;
2820 {
2821 register int i;
2822
2823 if (x == 0 || GET_CODE (x) != MEM)
2824 return;
2825
2826 /* Count number of memory writes.
2827 This affects heuristics in strength_reduce. */
2828 num_mem_sets++;
2829
2830 /* BLKmode MEM means all memory is clobbered. */
2831 if (GET_MODE (x) == BLKmode)
2832 unknown_address_altered = 1;
2833
2834 if (unknown_address_altered)
2835 return;
2836
2837 for (i = 0; i < loop_store_mems_idx; i++)
2838 if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
2839 && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
2840 {
2841 /* We are storing at the same address as previously noted. Save the
2842 wider reference. */
2843 if (GET_MODE_SIZE (GET_MODE (x))
2844 > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
2845 loop_store_mems[i] = x;
2846 break;
2847 }
2848
2849 if (i == NUM_STORES)
2850 unknown_address_altered = 1;
2851
2852 else if (i == loop_store_mems_idx)
2853 loop_store_mems[loop_store_mems_idx++] = x;
2854 }
2855 \f
2856 /* Return nonzero if the rtx X is invariant over the current loop.
2857
2858 The value is 2 if we refer to something only conditionally invariant.
2859
2860 If `unknown_address_altered' is nonzero, no memory ref is invariant.
2861 Otherwise, a memory ref is invariant if it does not conflict with
2862 anything stored in `loop_store_mems'. */
2863
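/* A concrete reading of the protocol (restating the cases below, not
   adding to them): invariant_p ((plus (reg 60) (const_int 4))) is 1
   when reg 60 is never set in the loop (n_times_set == 0), 2 when
   reg 60 has been provisionally marked (n_times_set < 0) and so is
   only conditionally invariant, and 0 otherwise.  Callers that need
   unconditional invariance test for == 1, not merely nonzero.  */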
2864 int
2865 invariant_p (x)
2866 register rtx x;
2867 {
2868 register int i;
2869 register enum rtx_code code;
2870 register char *fmt;
2871 int conditional = 0;
2872
2873 if (x == 0)
2874 return 1;
2875 code = GET_CODE (x);
2876 switch (code)
2877 {
2878 case CONST_INT:
2879 case CONST_DOUBLE:
2880 case SYMBOL_REF:
2881 case CONST:
2882 return 1;
2883
2884 case LABEL_REF:
2885 /* A LABEL_REF is normally invariant, however, if we are unrolling
2886 loops, and this label is inside the loop, then it isn't invariant.
2887 This is because each unrolled copy of the loop body will have
2888 a copy of this label. If this was invariant, then an insn loading
2889 the address of this label into a register might get moved outside
2890 the loop, and then each loop body would end up using the same label.
2891
2892 We don't know the loop bounds here though, so just fail for all
2893 labels. */
2894 if (flag_unroll_loops)
2895 return 0;
2896 else
2897 return 1;
2898
2899 case PC:
2900 case CC0:
2901 case UNSPEC_VOLATILE:
2902 return 0;
2903
2904 case REG:
2905 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
2906 since the reg might be set by initialization within the loop. */
2907
2908 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
2909 || x == arg_pointer_rtx)
2910 && ! current_function_has_nonlocal_goto)
2911 return 1;
2912
2913 if (loop_has_call
2914 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
2915 return 0;
2916
2917 if (n_times_set[REGNO (x)] < 0)
2918 return 2;
2919
2920 return n_times_set[REGNO (x)] == 0;
2921
2922 case MEM:
2923 /* Volatile memory references must be rejected. Do this before
2924 checking for read-only items, so that volatile read-only items
2925 will be rejected also. */
2926 if (MEM_VOLATILE_P (x))
2927 return 0;
2928
2929 /* Read-only items (such as constants in a constant pool) are
2930 invariant if their address is. */
2931 if (RTX_UNCHANGING_P (x))
2932 break;
2933
2934 /* If we filled the table (or had a subroutine call), any location
2935 in memory could have been clobbered. */
2936 if (unknown_address_altered)
2937 return 0;
2938
2939 /* See if there is any dependence between a store and this load. */
2940 for (i = loop_store_mems_idx - 1; i >= 0; i--)
2941 if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
2942 return 0;
2943
2944 /* It's not invalidated by a store in memory
2945 but we must still verify the address is invariant. */
2946 break;
2947
2948 case ASM_OPERANDS:
2949 /* Don't mess with insns declared volatile. */
2950 if (MEM_VOLATILE_P (x))
2951 return 0;
2952 break;
2953
2954 default:
2955 break;
2956 }
2957
2958 fmt = GET_RTX_FORMAT (code);
2959 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2960 {
2961 if (fmt[i] == 'e')
2962 {
2963 int tem = invariant_p (XEXP (x, i));
2964 if (tem == 0)
2965 return 0;
2966 if (tem == 2)
2967 conditional = 1;
2968 }
2969 else if (fmt[i] == 'E')
2970 {
2971 register int j;
2972 for (j = 0; j < XVECLEN (x, i); j++)
2973 {
2974 int tem = invariant_p (XVECEXP (x, i, j));
2975 if (tem == 0)
2976 return 0;
2977 if (tem == 2)
2978 conditional = 1;
2979 }
2980
2981 }
2982 }
2983
2984 return 1 + conditional;
2985 }
2986
2987 \f
2988 /* Return nonzero if all the insns in the loop that set REG
2989 are INSN and the immediately following insns,
2990 and if each of those insns sets REG in an invariant way
2991 (not counting uses of REG in them).
2992
2993 The value is 2 if some of these insns are only conditionally invariant.
2994
2995 We assume that INSN itself is the first set of REG
2996 and that its source is invariant. */
2997
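/* A hypothetical case this handles: on targets that build an address
   in two steps, e.g.

	(set (reg 70) (high:SI (symbol_ref "x")))
	(set (reg 70) (lo_sum:SI (reg 70) (symbol_ref "x")))

   reg 70 is set twice, but apart from the use of reg 70 itself both
   sources are invariant, so the pair can be treated as movable
   together.  */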
2998 static int
2999 consec_sets_invariant_p (reg, n_sets, insn)
3000 int n_sets;
3001 rtx reg, insn;
3002 {
3003 register rtx p = insn;
3004 register int regno = REGNO (reg);
3005 rtx temp;
3006 /* Number of sets we have to insist on finding after INSN. */
3007 int count = n_sets - 1;
3008 int old = n_times_set[regno];
3009 int value = 0;
3010 int this;
3011
3012 /* If N_SETS hit the limit, we can't rely on its value. */
3013 if (n_sets == 127)
3014 return 0;
3015
3016 n_times_set[regno] = 0;
3017
3018 while (count > 0)
3019 {
3020 register enum rtx_code code;
3021 rtx set;
3022
3023 p = NEXT_INSN (p);
3024 code = GET_CODE (p);
3025
3026 /* If a library call, skip to the end of it.  */
3027 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3028 p = XEXP (temp, 0);
3029
3030 this = 0;
3031 if (code == INSN
3032 && (set = single_set (p))
3033 && GET_CODE (SET_DEST (set)) == REG
3034 && REGNO (SET_DEST (set)) == regno)
3035 {
3036 this = invariant_p (SET_SRC (set));
3037 if (this != 0)
3038 value |= this;
3039 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3040 {
3041 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3042 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3043 notes are OK. */
3044 this = (CONSTANT_P (XEXP (temp, 0))
3045 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3046 && invariant_p (XEXP (temp, 0))));
3047 if (this != 0)
3048 value |= this;
3049 }
3050 }
3051 if (this != 0)
3052 count--;
3053 else if (code != NOTE)
3054 {
3055 n_times_set[regno] = old;
3056 return 0;
3057 }
3058 }
3059
3060 n_times_set[regno] = old;
3061 /* If invariant_p ever returned 2, return 3 (not 1): only conditionally invariant.  */
3062 return 1 + (value & 2);
3063 }
3064
3065 #if 0
3066 /* I don't think this condition is sufficient to allow INSN
3067 to be moved, so we no longer test it. */
3068
3069 /* Return 1 if all insns in the basic block of INSN and following INSN
3070 that set REG are invariant according to TABLE. */
3071
3072 static int
3073 all_sets_invariant_p (reg, insn, table)
3074 rtx reg, insn;
3075 short *table;
3076 {
3077 register rtx p = insn;
3078 register int regno = REGNO (reg);
3079
3080 while (1)
3081 {
3082 register enum rtx_code code;
3083 p = NEXT_INSN (p);
3084 code = GET_CODE (p);
3085 if (code == CODE_LABEL || code == JUMP_INSN)
3086 return 1;
3087 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3088 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3089 && REGNO (SET_DEST (PATTERN (p))) == regno)
3090 {
3091 if (!invariant_p (SET_SRC (PATTERN (p)), table))
3092 return 0;
3093 }
3094 }
3095 }
3096 #endif /* 0 */
3097 \f
3098 /* Look at all uses (not sets) of registers in X. For each, if it is
3099 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3100 a different insn, set USAGE[REGNO] to const0_rtx. */
3101
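/* After the whole loop body has been scanned, each entry is thus one
   of three values (a summary of the rule above):
	usage[r] == 0           no use of reg r was seen
	usage[r] == some insn   exactly one insn uses reg r
	usage[r] == const0_rtx  reg r is used by two or more insns  */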
3102 static void
3103 find_single_use_in_loop (insn, x, usage)
3104 rtx insn;
3105 rtx x;
3106 rtx *usage;
3107 {
3108 enum rtx_code code = GET_CODE (x);
3109 char *fmt = GET_RTX_FORMAT (code);
3110 int i, j;
3111
3112 if (code == REG)
3113 usage[REGNO (x)]
3114 = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
3115 ? const0_rtx : insn;
3116
3117 else if (code == SET)
3118 {
3119 /* Don't count SET_DEST if it is a REG; otherwise count things
3120 in SET_DEST because if a register is partially modified, it won't
3121 show up as a potential movable so we don't care how USAGE is set
3122 for it. */
3123 if (GET_CODE (SET_DEST (x)) != REG)
3124 find_single_use_in_loop (insn, SET_DEST (x), usage);
3125 find_single_use_in_loop (insn, SET_SRC (x), usage);
3126 }
3127 else
3128 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3129 {
3130 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3131 find_single_use_in_loop (insn, XEXP (x, i), usage);
3132 else if (fmt[i] == 'E')
3133 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3134 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
3135 }
3136 }
3137 \f
3138 /* Increment N_TIMES_SET at the index of each register
3139 that is modified by an insn between FROM and TO.
3140 If the value of an element of N_TIMES_SET becomes 127 or more,
3141 stop incrementing it, to avoid overflow.
3142
3143 Store in SINGLE_USAGE[I] the single insn in which register I is
3144 used, if it is only used once. Otherwise, it is set to 0 (for no
3145 uses) or const0_rtx for more than one use. This parameter may be zero,
3146 in which case this processing is not done.
3147
3148 Store in *COUNT_PTR the number of actual instructions
3149 in the loop. We use this to decide what is worth moving out. */
3150
3151 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
3152 In that case, it is the insn that last set reg n. */
3153
3154 static void
3155 count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
3156 register rtx from, to;
3157 char *may_not_move;
3158 rtx *single_usage;
3159 int *count_ptr;
3160 int nregs;
3161 {
3162 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
3163 register rtx insn;
3164 register int count = 0;
3165 register rtx dest;
3166
3167 bzero ((char *) last_set, nregs * sizeof (rtx));
3168 for (insn = from; insn != to; insn = NEXT_INSN (insn))
3169 {
3170 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3171 {
3172 ++count;
3173
3174 /* If requested, record registers that have exactly one use. */
3175 if (single_usage)
3176 {
3177 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
3178
3179 /* Include uses in REG_EQUAL notes. */
3180 if (REG_NOTES (insn))
3181 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
3182 }
3183
3184 if (GET_CODE (PATTERN (insn)) == CLOBBER
3185 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
3186 /* Don't move a reg that has an explicit clobber.
3187 We might do so sometimes, but it's not worth the pain. */
3188 may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;
3189
3190 if (GET_CODE (PATTERN (insn)) == SET
3191 || GET_CODE (PATTERN (insn)) == CLOBBER)
3192 {
3193 dest = SET_DEST (PATTERN (insn));
3194 while (GET_CODE (dest) == SUBREG
3195 || GET_CODE (dest) == ZERO_EXTRACT
3196 || GET_CODE (dest) == SIGN_EXTRACT
3197 || GET_CODE (dest) == STRICT_LOW_PART)
3198 dest = XEXP (dest, 0);
3199 if (GET_CODE (dest) == REG)
3200 {
3201 register int regno = REGNO (dest);
3202 /* If this is the first setting of this reg
3203 in current basic block, and it was set before,
3204 it must be set in two basic blocks, so it cannot
3205 be moved out of the loop. */
3206 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3207 may_not_move[regno] = 1;
3208 /* If this is not first setting in current basic block,
3209 see if reg was used in between previous one and this.
3210 If so, neither one can be moved. */
3211 if (last_set[regno] != 0
3212 && reg_used_between_p (dest, last_set[regno], insn))
3213 may_not_move[regno] = 1;
3214 if (n_times_set[regno] < 127)
3215 ++n_times_set[regno];
3216 last_set[regno] = insn;
3217 }
3218 }
3219 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3220 {
3221 register int i;
3222 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3223 {
3224 register rtx x = XVECEXP (PATTERN (insn), 0, i);
3225 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3226 /* Don't move a reg that has an explicit clobber.
3227 It's not worth the pain to try to do it correctly. */
3228 may_not_move[REGNO (XEXP (x, 0))] = 1;
3229
3230 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3231 {
3232 dest = SET_DEST (x);
3233 while (GET_CODE (dest) == SUBREG
3234 || GET_CODE (dest) == ZERO_EXTRACT
3235 || GET_CODE (dest) == SIGN_EXTRACT
3236 || GET_CODE (dest) == STRICT_LOW_PART)
3237 dest = XEXP (dest, 0);
3238 if (GET_CODE (dest) == REG)
3239 {
3240 register int regno = REGNO (dest);
3241 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3242 may_not_move[regno] = 1;
3243 if (last_set[regno] != 0
3244 && reg_used_between_p (dest, last_set[regno], insn))
3245 may_not_move[regno] = 1;
3246 if (n_times_set[regno] < 127)
3247 ++n_times_set[regno];
3248 last_set[regno] = insn;
3249 }
3250 }
3251 }
3252 }
3253 }
3254
3255 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3256 bzero ((char *) last_set, nregs * sizeof (rtx));
3257 }
3258 *count_ptr = count;
3259 }
3260 \f
3261 /* Given a loop that is bounded by LOOP_START and LOOP_END
3262 and that is entered at SCAN_START,
3263 return 1 if the register set in SET contained in insn INSN is used by
3264 any insn that precedes INSN in cyclic order starting
3265 from the loop entry point.
3266
3267 We don't want to use INSN_LUID here because if we restrict INSN to those
3268 that have a valid INSN_LUID, it means we cannot move an invariant out
3269 from an inner loop past two loops. */
3270
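/* For instance (hypothetically), in a loop entered at the bottom the
   scan starts at SCAN_START near LOOP_END, runs forward, wraps from
   LOOP_END back to LOOP_START, and stops at INSN; a use found along
   the way precedes INSN in execution order even if it follows INSN
   textually.  */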
3271 static int
3272 loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3273 rtx set, insn, loop_start, scan_start, loop_end;
3274 {
3275 rtx reg = SET_DEST (set);
3276 rtx p;
3277
3278 /* Scan forward checking for register usage. If we hit INSN, we
3279 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3280 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3281 {
3282 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3283 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3284 return 1;
3285
3286 if (p == loop_end)
3287 p = loop_start;
3288 }
3289
3290 return 0;
3291 }
3292 \f
3293 /* A "basic induction variable" or biv is a pseudo reg that is set
3294 (within this loop) only by incrementing or decrementing it. */
3295 /* A "general induction variable" or giv is a pseudo reg whose
3296 value is a linear function of a biv. */
3297
3298 /* Bivs are recognized by `basic_induction_var';
3299 Givs by `general_induction_var'.  */
3300
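/* A hypothetical source-level example: in

	for (i = 0; i < n; i++)
	  a[i] = 0;

   `i' is a biv (set only by i = i + 1), and the store address
   a + 4*i is a giv (mult_val * biv + add_val with mult_val == 4 and
   add_val == a, assuming 4-byte elements).  */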
3301 /* Indexed by register number, indicates whether or not register is an
3302 induction variable, and if so what type. */
3303
3304 enum iv_mode *reg_iv_type;
3305
3306 /* Indexed by register number, contains pointer to `struct induction'
3307 if register is an induction variable. This holds general info for
3308 all induction variables. */
3309
3310 struct induction **reg_iv_info;
3311
3312 /* Indexed by register number, contains pointer to `struct iv_class'
3313 if register is a basic induction variable. This holds info describing
3314 the class (a related group) of induction variables that the biv belongs
3315 to. */
3316
3317 struct iv_class **reg_biv_class;
3318
3319 /* The head of a list which links together (via the next field)
3320 every iv class for the current loop. */
3321
3322 struct iv_class *loop_iv_list;
3323
3324 /* Communication with routines called via `note_stores'. */
3325
3326 static rtx note_insn;
3327
3328 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3329
3330 static rtx addr_placeholder;
3331
3332 /* ??? Unfinished optimizations, and possible future optimizations,
3333 for the strength reduction code. */
3334
3335 /* ??? There is one more optimization you might be interested in doing: to
3336 allocate pseudo registers for frequently-accessed memory locations.
3337 If the same memory location is referenced each time around, it might
3338 be possible to copy it into a register before and out after.
3339 This is especially useful when the memory location is a variable which
3340 is in a stack slot because somewhere its address is taken. If the
3341 loop doesn't contain a function call and the variable isn't volatile,
3342 it is safe to keep the value in a register for the duration of the
3343 loop. One tricky thing is that the copying of the value back from the
3344 register has to be done on all exits from the loop. You need to check that
3345 all the exits from the loop go to the same place. */
3346
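/* At the source level the idea is (a hypothetical sketch):

	before:  for (i = 0; i < n; i++) sum = sum + b[i];
		 (where `sum' lives in a stack slot)

	after:   t = sum;
		 for (i = 0; i < n; i++) t = t + b[i];
		 sum = t;

   with the store back into `sum' duplicated on every exit if the
   loop has more than one.  */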
3347 /* ??? The interaction of biv elimination, and recognition of 'constant'
3348 bivs, may cause problems. */
3349
3350 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3351 performance problems.
3352
3353 Perhaps don't eliminate things that can be combined with an addressing
3354 mode. Find all givs that have the same biv, mult_val, and add_val;
3355 then for each giv, check to see if its only use dies in a following
3356 memory address. If so, generate a new memory address and check to see
3357 if it is valid. If it is valid, then store the modified memory address,
3358 otherwise, mark the giv as not done so that it will get its own iv. */
3359
3360 /* ??? Could try to optimize branches when it is known that a biv is always
3361 positive. */
3362
3363 /* ??? When replacing a biv in a compare insn, we should replace it with the closest
3364 giv so that an optimized branch can still be recognized by the combiner,
3365 e.g. the VAX acb insn. */
3366
3367 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3368 was rerun in loop_optimize whenever a register was added or moved.
3369 Also, some of the optimizations could be a little less conservative. */
3370 \f
3371 /* Perform strength reduction and induction variable elimination. */
3372
3373 /* Pseudo registers created during this function will be beyond the last
3374 valid index in several tables including n_times_set and regno_last_uid.
3375 This does not cause a problem here, because the added registers cannot be
3376 givs outside of their loop, and hence will never be reconsidered.
3377 But scan_loop must check regnos to make sure they are in bounds. */
3378
3379 static void
3380 strength_reduce (scan_start, end, loop_top, insn_count,
3381 loop_start, loop_end, unroll_p)
3382 rtx scan_start;
3383 rtx end;
3384 rtx loop_top;
3385 int insn_count;
3386 rtx loop_start;
3387 rtx loop_end;
3388 int unroll_p;
3389 {
3390 rtx p;
3391 rtx set;
3392 rtx inc_val;
3393 rtx mult_val;
3394 rtx dest_reg;
3395 /* This is 1 if current insn is not executed at least once for every loop
3396 iteration. */
3397 int not_every_iteration = 0;
3398 /* This is 1 if current insn may be executed more than once for every
3399 loop iteration. */
3400 int maybe_multiple = 0;
3401 /* Temporary list pointers for traversing loop_iv_list. */
3402 struct iv_class *bl, **backbl;
3403 /* Ratio of extra register life span we can justify
3404 for saving an instruction. More if loop doesn't call subroutines
3405 since in that case saving an insn makes more difference
3406 and more registers are available. */
3407 /* ??? could set this to last value of threshold in move_movables */
3408 int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
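/* E.g. (hypothetical numbers): with 29 non-fixed registers and no
   call in the loop this is 2 * (3 + 29) == 64; with a call it drops
   to 32, since fewer registers survive across calls.  */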
3409 /* Map of pseudo-register replacements. */
3410 rtx *reg_map;
3411 int call_seen;
3412 rtx test;
3413 rtx end_insert_before;
3414 int loop_depth = 0;
3415
3416 reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
3417 * sizeof (enum iv_mode));
3418 bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode));
3419 reg_iv_info = (struct induction **)
3420 alloca (max_reg_before_loop * sizeof (struct induction *));
3421 bzero ((char *) reg_iv_info, (max_reg_before_loop
3422 * sizeof (struct induction *)));
3423 reg_biv_class = (struct iv_class **)
3424 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3425 bzero ((char *) reg_biv_class, (max_reg_before_loop
3426 * sizeof (struct iv_class *)));
3427
3428 loop_iv_list = 0;
3429 addr_placeholder = gen_reg_rtx (Pmode);
3430
3431 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3432 must be put before this insn, so that they will appear in the right
3433 order (i.e. loop order).
3434
3435 If loop_end is the end of the current function, then emit a
3436 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3437 dummy note insn. */
3438 if (NEXT_INSN (loop_end) != 0)
3439 end_insert_before = NEXT_INSN (loop_end);
3440 else
3441 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3442
3443 /* Scan through loop to find all possible bivs. */
3444
3445 p = scan_start;
3446 while (1)
3447 {
3448 p = NEXT_INSN (p);
3449 /* At end of a straight-in loop, we are done.
3450 At end of a loop entered at the bottom, scan the top. */
3451 if (p == scan_start)
3452 break;
3453 if (p == end)
3454 {
3455 if (loop_top != 0)
3456 p = loop_top;
3457 else
3458 break;
3459 if (p == scan_start)
3460 break;
3461 }
3462
3463 if (GET_CODE (p) == INSN
3464 && (set = single_set (p))
3465 && GET_CODE (SET_DEST (set)) == REG)
3466 {
3467 dest_reg = SET_DEST (set);
3468 if (REGNO (dest_reg) < max_reg_before_loop
3469 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3470 && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
3471 {
3472 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3473 dest_reg, p, &inc_val, &mult_val))
3474 {
3475 /* It is a possible basic induction variable.
3476 Create and initialize an induction structure for it. */
3477
3478 struct induction *v
3479 = (struct induction *) alloca (sizeof (struct induction));
3480
3481 record_biv (v, p, dest_reg, inc_val, mult_val,
3482 not_every_iteration, maybe_multiple);
3483 reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
3484 }
3485 else if (REGNO (dest_reg) < max_reg_before_loop)
3486 reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
3487 }
3488 }
3489
3490 /* Past CODE_LABEL, we get to insns that may be executed multiple
3491 times.  The only way we can be sure that they can't is if every
3492 jump insn between here and the end of the loop either
3493 returns, exits the loop, is a forward jump, or is a jump
3494 to the loop start. */
3495
3496 if (GET_CODE (p) == CODE_LABEL)
3497 {
3498 rtx insn = p;
3499
3500 maybe_multiple = 0;
3501
3502 while (1)
3503 {
3504 insn = NEXT_INSN (insn);
3505 if (insn == scan_start)
3506 break;
3507 if (insn == end)
3508 {
3509 if (loop_top != 0)
3510 insn = loop_top;
3511 else
3512 break;
3513 if (insn == scan_start)
3514 break;
3515 }
3516
3517 if (GET_CODE (insn) == JUMP_INSN
3518 && GET_CODE (PATTERN (insn)) != RETURN
3519 && (! condjump_p (insn)
3520 || (JUMP_LABEL (insn) != 0
3521 && JUMP_LABEL (insn) != scan_start
3522 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
3523 || INSN_UID (insn) >= max_uid_for_loop
3524 || (INSN_LUID (JUMP_LABEL (insn))
3525 < INSN_LUID (insn))))))
3526 {
3527 maybe_multiple = 1;
3528 break;
3529 }
3530 }
3531 }
3532
3533 /* Past a jump, we get to insns for which we can't count
3534 on whether they will be executed during each iteration. */
3535 /* This code appears twice in strength_reduce. There is also similar
3536 code in scan_loop. */
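/* For example (illustrative only): in source like

	while (n--)
	  {
	    if (cond)
	      x = x + 1;
	    total = total + x;
	  }

   the insns for `x = x + 1' lie between the conditional jump and its
   target label, so they are not executed on every iteration; passing
   that jump is what sets NOT_EVERY_ITERATION below. */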
3537 if (GET_CODE (p) == JUMP_INSN
3538 /* If we enter the loop in the middle, and scan around to the
3539 beginning, don't set not_every_iteration for that.
3540 This can be any kind of jump, since we want to know if insns
3541 will be executed if the loop is executed. */
3542 && ! (JUMP_LABEL (p) == loop_top
3543 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3544 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3545 {
3546 rtx label = 0;
3547
3548 /* If this is a jump outside the loop, then it also doesn't
3549 matter. Check to see if the target of this branch is on the
3550 loop_number_exit_labels list. */
3551
3552 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3553 label;
3554 label = LABEL_NEXTREF (label))
3555 if (XEXP (label, 0) == JUMP_LABEL (p))
3556 break;
3557
3558 if (! label)
3559 not_every_iteration = 1;
3560 }
3561
3562 else if (GET_CODE (p) == NOTE)
3563 {
3564 /* At the virtual top of a converted loop, insns are again known to
3565 be executed each iteration: logically, the loop begins here
3566 even though the exit code has been duplicated. */
3567 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3568 not_every_iteration = 0;
3569 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3570 loop_depth++;
3571 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3572 loop_depth--;
3573 }
3574
3575 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3576 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3577 or not an insn is known to be executed each iteration of the
3578 loop, whether or not any iterations are known to occur.
3579
3580 Therefore, if we have just passed a label and have no more labels
3581 between here and the test insn of the loop, we know these insns
3582 will be executed each iteration. */
3583
3584 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3585 && no_labels_between_p (p, loop_end))
3586 not_every_iteration = 0;
3587 }
3588
3589 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3590 Make a sanity check against n_times_set. */
3591 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3592 {
3593 if (reg_iv_type[bl->regno] != BASIC_INDUCT
3594 /* The above happens if the register was modified by a subreg, etc.;
3595 make sure it is not recognized as a basic induction variable: */
3596 || n_times_set[bl->regno] != bl->biv_count
3597 /* If it was never incremented, it is an invariant that we decided
3598 not to move, so leave it alone. */
3599 || ! bl->incremented)
3600 {
3601 if (loop_dump_stream)
3602 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3603 bl->regno,
3604 (reg_iv_type[bl->regno] != BASIC_INDUCT
3605 ? "not induction variable"
3606 : (! bl->incremented ? "never incremented"
3607 : "count error")));
3608
3609 reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
3610 *backbl = bl->next;
3611 }
3612 else
3613 {
3614 backbl = &bl->next;
3615
3616 if (loop_dump_stream)
3617 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3618 }
3619 }
3620
3621 /* Exit if there are no bivs. */
3622 if (! loop_iv_list)
3623 {
3624 /* We can still unroll the loop anyway, but indicate that there is no
3625 strength reduction info available. */
3626 if (unroll_p)
3627 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);
3628
3629 return;
3630 }
3631
3632 /* Find initial value for each biv by searching backwards from loop_start,
3633 halting at first label. Also record any test condition. */
3634
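/* Sketch of what this backward scan typically finds (illustrative):

	i = 0;			<- init_insn, giving bl->initial_value
	loop_start:
	...
	i = i + 1;		<- the biv increment inside the loop

   A CODE_LABEL reached before the initializing set stops the scan,
   since control could then enter between that set and the loop. */
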
3635 call_seen = 0;
3636 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3637 {
3638 note_insn = p;
3639
3640 if (GET_CODE (p) == CALL_INSN)
3641 call_seen = 1;
3642
3643 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3644 || GET_CODE (p) == CALL_INSN)
3645 note_stores (PATTERN (p), record_initial);
3646
3647 /* Record any test of a biv that branches around the loop if there is no
3648 store between it and the start of the loop. We only care about tests
3649 with constants and registers, and only certain of those. */
3650 if (GET_CODE (p) == JUMP_INSN
3651 && JUMP_LABEL (p) != 0
3652 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
3653 && (test = get_condition_for_loop (p)) != 0
3654 && GET_CODE (XEXP (test, 0)) == REG
3655 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3656 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
3657 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
3658 && bl->init_insn == 0)
3659 {
3660 /* If an NE test, we have an initial value! */
3661 if (GET_CODE (test) == NE)
3662 {
3663 bl->init_insn = p;
3664 bl->init_set = gen_rtx_SET (VOIDmode,
3665 XEXP (test, 0), XEXP (test, 1));
3666 }
3667 else
3668 bl->initial_test = test;
3669 }
3670 }
3671
3672 /* Look at each biv and see if we can say anything better about its
3673 initial value from any initializing insns set up above. (This is done
3674 in two passes to avoid missing SETs in a PARALLEL.) */
3675 for (bl = loop_iv_list; bl; bl = bl->next)
3676 {
3677 rtx src;
3678 rtx note;
3679
3680 if (! bl->init_insn)
3681 continue;
3682
3683 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3684 is a constant, use that value. */
3685 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3686 && CONSTANT_P (XEXP (note, 0)))
3687 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3688 && CONSTANT_P (XEXP (note, 0))))
3689 src = XEXP (note, 0);
3690 else
3691 src = SET_SRC (bl->init_set);
3692
3693 if (loop_dump_stream)
3694 fprintf (loop_dump_stream,
3695 "Biv %d initialized at insn %d: initial value ",
3696 bl->regno, INSN_UID (bl->init_insn));
3697
3698 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3699 || GET_MODE (src) == VOIDmode)
3700 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
3701 {
3702 bl->initial_value = src;
3703
3704 if (loop_dump_stream)
3705 {
3706 if (GET_CODE (src) == CONST_INT)
3707 {
3708 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
3709 fputc ('\n', loop_dump_stream);
3710 }
3711 else
3712 {
3713 print_rtl (loop_dump_stream, src);
3714 fprintf (loop_dump_stream, "\n");
3715 }
3716 }
3717 }
3718 else
3719 {
3720 /* The biv's initial value is not a simple move,
3721 so let it keep its initial value of "itself". */
3722
3723 if (loop_dump_stream)
3724 fprintf (loop_dump_stream, "is complex\n");
3725 }
3726 }
3727
3728 /* Search the loop for general induction variables. */
3729
3730 /* A register is a giv if: it is only set once, it is a function of a
3731 biv and a constant (or invariant), and it is not a biv. */
3732
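/* Illustrative example (not from the sources): in

	for (i = 0; i < n; i++)
	  a[i] = 0;

   the register holding `i' is a biv, while a register computed as
   `&a[0] + 4*i' (assuming 4-byte elements) is a giv with mult_val 4
   and add_val `&a[0]'. */
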
3733 not_every_iteration = 0;
3734 loop_depth = 0;
3735 p = scan_start;
3736 while (1)
3737 {
3738 p = NEXT_INSN (p);
3739 /* At end of a straight-in loop, we are done.
3740 At end of a loop entered at the bottom, scan the top. */
3741 if (p == scan_start)
3742 break;
3743 if (p == end)
3744 {
3745 if (loop_top != 0)
3746 p = loop_top;
3747 else
3748 break;
3749 if (p == scan_start)
3750 break;
3751 }
3752
3753 /* Look for a general induction variable in a register. */
3754 if (GET_CODE (p) == INSN
3755 && (set = single_set (p))
3756 && GET_CODE (SET_DEST (set)) == REG
3757 && ! may_not_optimize[REGNO (SET_DEST (set))])
3758 {
3759 rtx src_reg;
3760 rtx add_val;
3761 rtx mult_val;
3762 int benefit;
3763 rtx regnote = 0;
3764
3765 dest_reg = SET_DEST (set);
3766 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
3767 continue;
3768
3769 if (/* SET_SRC is a giv. */
3770 ((benefit = general_induction_var (SET_SRC (set),
3771 &src_reg, &add_val,
3772 &mult_val))
3773 /* Equivalent expression is a giv. */
3774 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
3775 && (benefit = general_induction_var (XEXP (regnote, 0),
3776 &src_reg,
3777 &add_val, &mult_val))))
3778 /* Don't try to handle any regs made by loop optimization.
3779 We have nothing on them in regno_first_uid, etc. */
3780 && REGNO (dest_reg) < max_reg_before_loop
3781 /* Don't recognize a BASIC_INDUCT_VAR here. */
3782 && dest_reg != src_reg
3783 /* This must be the only place where the register is set. */
3784 && (n_times_set[REGNO (dest_reg)] == 1
3785 /* or all sets must be consecutive and make a giv. */
3786 || (benefit = consec_sets_giv (benefit, p,
3787 src_reg, dest_reg,
3788 &add_val, &mult_val))))
3789 {
3790 int count;
3791 struct induction *v
3792 = (struct induction *) alloca (sizeof (struct induction));
3793 rtx temp;
3794
3795 /* If this is a library call, increase benefit. */
3796 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
3797 benefit += libcall_benefit (p);
3798
3799 /* Skip the consecutive insns, if there are any. */
3800 for (count = n_times_set[REGNO (dest_reg)] - 1;
3801 count > 0; count--)
3802 {
3803 /* If first insn of libcall sequence, skip to end.
3804 Do this at the start of the loop, since P is guaranteed to
3805 be an insn here. */
3806 if (GET_CODE (p) != NOTE
3807 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3808 p = XEXP (temp, 0);
3809
3810 do p = NEXT_INSN (p);
3811 while (GET_CODE (p) == NOTE);
3812 }
3813
3814 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
3815 DEST_REG, not_every_iteration, NULL_PTR, loop_start,
3816 loop_end);
3817
3818 }
3819 }
3820
3821 #ifndef DONT_REDUCE_ADDR
3822 /* Look for givs which are memory addresses. */
3823 /* This resulted in worse code on a VAX 8600. I wonder if it
3824 still does. */
3825 if (GET_CODE (p) == INSN)
3826 find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
3827 loop_end);
3828 #endif
3829
3830 /* Update the status of whether a giv can derive other givs. This can
3831 change when we pass a label or an insn that updates a biv. */
3832 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3833 || GET_CODE (p) == CODE_LABEL)
3834 update_giv_derive (p);
3835
3836 /* Past a jump, we get to insns for which we can't count
3837 on whether they will be executed during each iteration. */
3838 /* This code appears twice in strength_reduce. There is also similar
3839 code in scan_loop. */
3840 if (GET_CODE (p) == JUMP_INSN
3841 /* If we enter the loop in the middle, and scan around to the
3842 beginning, don't set not_every_iteration for that.
3843 This can be any kind of jump, since we want to know if insns
3844 will be executed if the loop is executed. */
3845 && ! (JUMP_LABEL (p) == loop_top
3846 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3847 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3848 {
3849 rtx label = 0;
3850
3851 /* If this is a jump outside the loop, then it also doesn't
3852 matter. Check to see if the target of this branch is on the
3853 loop_number_exit_labels list. */
3854
3855 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3856 label;
3857 label = LABEL_NEXTREF (label))
3858 if (XEXP (label, 0) == JUMP_LABEL (p))
3859 break;
3860
3861 if (! label)
3862 not_every_iteration = 1;
3863 }
3864
3865 else if (GET_CODE (p) == NOTE)
3866 {
3867 /* At the virtual top of a converted loop, insns are again known to
3868 be executed each iteration: logically, the loop begins here
3869 even though the exit code has been duplicated. */
3870 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3871 not_every_iteration = 0;
3872 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3873 loop_depth++;
3874 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3875 loop_depth--;
3876 }
3877
3878 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3879 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3880 or not an insn is known to be executed each iteration of the
3881 loop, whether or not any iterations are known to occur.
3882
3883 Therefore, if we have just passed a label and have no more labels
3884 between here and the test insn of the loop, we know these insns
3885 will be executed each iteration. */
3886
3887 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3888 && no_labels_between_p (p, loop_end))
3889 not_every_iteration = 0;
3890 }
3891
3892 /* Try to calculate and save the number of loop iterations. This is
3893 set to zero if the actual number can not be calculated. This must
3894 be called after all giv's have been identified, since otherwise it may
3895 fail if the iteration variable is a giv. */
3896
3897 loop_n_iterations = loop_iterations (loop_start, loop_end);
3898
3899 /* Now for each giv for which we still don't know whether or not it is
3900 replaceable, check to see if it is replaceable because its final value
3901 can be calculated. This must be done after loop_iterations is called,
3902 so that final_giv_value will work correctly. */
3903
3904 for (bl = loop_iv_list; bl; bl = bl->next)
3905 {
3906 struct induction *v;
3907
3908 for (v = bl->giv; v; v = v->next_iv)
3909 if (! v->replaceable && ! v->not_replaceable)
3910 check_final_value (v, loop_start, loop_end);
3911 }
3912
3913 /* Try to prove that the loop counter variable (if any) is always
3914 nonnegative; if so, record that fact with a REG_NONNEG note
3915 so that "decrement and branch until zero" insn can be used. */
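/* Rough sketch of the transformation (illustrative): check_dbra_loop
   may turn

	i = 0; do { ... i++; } while (i < n);
   into
	i = n; do { ... } while (--i != 0);

   when `i' is not otherwise needed, so the endtest becomes a
   decrement-and-branch. */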
3916 check_dbra_loop (loop_end, insn_count, loop_start);
3917
3918 #ifdef HAIFA
3919 /* Record the loop variables relevant for BCT optimization before unrolling
3920 the loop. Unrolling may update part of this information, and the
3921 correct data will be used for generating the BCT. */
3922 #ifdef HAVE_decrement_and_branch_on_count
3923 if (HAVE_decrement_and_branch_on_count)
3924 analyze_loop_iterations (loop_start, loop_end);
3925 #endif
3926 #endif /* HAIFA */
3927
3928 /* Create reg_map to hold substitutions for replaceable giv regs. */
3929 reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
3930 bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
3931
3932 /* Examine each iv class for feasibility of strength reduction/induction
3933 variable elimination. */
3934
3935 for (bl = loop_iv_list; bl; bl = bl->next)
3936 {
3937 struct induction *v;
3938 int benefit;
3939 int all_reduced;
3940 rtx final_value = 0;
3941
3942 /* Test whether it will be possible to eliminate this biv
3943 provided all givs are reduced. This is possible if either
3944 the reg is not used outside the loop, or we can compute
3945 what its final value will be.
3946
3947 For architectures with a decrement_and_branch_until_zero insn,
3948 don't do this if we put a REG_NONNEG note on the endtest for
3949 this biv. */
3950
3951 /* Compare against bl->init_insn rather than loop_start.
3952 We aren't concerned with any uses of the biv between
3953 init_insn and loop_start since these won't be affected
3954 by the value of the biv elsewhere in the function, so
3955 long as init_insn doesn't use the biv itself.
3956 March 14, 1989 -- self@bayes.arc.nasa.gov */
3957
3958 if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
3959 && bl->init_insn
3960 && INSN_UID (bl->init_insn) < max_uid_for_loop
3961 && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
3962 #ifdef HAVE_decrement_and_branch_until_zero
3963 && ! bl->nonneg
3964 #endif
3965 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3966 || ((final_value = final_biv_value (bl, loop_start, loop_end))
3967 #ifdef HAVE_decrement_and_branch_until_zero
3968 && ! bl->nonneg
3969 #endif
3970 ))
3971 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
3972 threshold, insn_count);
3973 else
3974 {
3975 if (loop_dump_stream)
3976 {
3977 fprintf (loop_dump_stream,
3978 "Cannot eliminate biv %d.\n",
3979 bl->regno);
3980 fprintf (loop_dump_stream,
3981 "First use: insn %d, last use: insn %d.\n",
3982 REGNO_FIRST_UID (bl->regno),
3983 REGNO_LAST_UID (bl->regno));
3984 }
3985 }
3986
3987 /* Combine all giv's for this iv_class. */
3988 combine_givs (bl);
3989
3990 /* This will be true at the end, if all givs which depend on this
3991 biv have been strength reduced.
3992 We can't (currently) eliminate the biv unless this is so. */
3993 all_reduced = 1;
3994
3995 /* Check each giv in this class to see if we will benefit by reducing
3996 it. Skip giv's combined with others. */
3997 for (v = bl->giv; v; v = v->next_iv)
3998 {
3999 struct induction *tv;
4000
4001 if (v->ignore || v->same)
4002 continue;
4003
4004 benefit = v->benefit;
4005
4006 /* Reduce benefit if not replaceable, since we will insert
4007 a move-insn to replace the insn that calculates this giv.
4008 Don't do this unless the giv is a user variable, since it
4009 will often be marked non-replaceable because of the duplication
4010 of the exit code outside the loop. In such a case, the copies
4011 we insert are dead and will be deleted. So they don't have
4012 a cost. Similar situations exist. */
4013 /* ??? The new final_[bg]iv_value code does a much better job
4014 of finding replaceable giv's, and hence this code may no longer
4015 be necessary. */
4016 if (! v->replaceable && ! bl->eliminable
4017 && REG_USERVAR_P (v->dest_reg))
4018 benefit -= copy_cost;
4019
4020 /* Decrease the benefit to count the add-insns that we will
4021 insert to increment the reduced reg for the giv. */
4022 benefit -= add_cost * bl->biv_count;
4023
4024 /* Decide whether to strength-reduce this giv or to leave the code
4025 unchanged (recompute it from the biv each time it is used).
4026 This decision can be made independently for each giv. */
4027
4028 #ifdef AUTO_INC_DEC
4029 /* Attempt to guess whether autoincrement will handle some of the
4030 new add insns; if so, increase BENEFIT (undo the subtraction of
4031 add_cost that was done above). */
4032 if (v->giv_type == DEST_ADDR
4033 && GET_CODE (v->mult_val) == CONST_INT)
4034 {
4035 #if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
4036 if (INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4037 benefit += add_cost * bl->biv_count;
4038 #endif
4039 #if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
4040 if (-INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4041 benefit += add_cost * bl->biv_count;
4042 #endif
4043 }
4044 #endif
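/* Illustrative example: for a DEST_ADDR giv that walks through an
   array of 4-byte elements in 4-byte steps, a machine with
   post-increment addressing can fold each add into the memory
   reference itself, so the add-insn cost subtracted above is
   given back here. */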
4045
4046 /* If an insn is not to be strength reduced, then set its ignore
4047 flag, and clear all_reduced. */
4048
4049 /* A giv that depends on a reversed biv must be reduced if it is
4050 used after the loop exit; otherwise, it would have the wrong
4051 value after the loop exit. To keep it simple, just reduce all
4052 such givs, whether or not we know they are used after the loop
4053 exit. */
4054
4055 if (! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
4056 && ! bl->reversed)
4057 {
4058 if (loop_dump_stream)
4059 fprintf (loop_dump_stream,
4060 "giv of insn %d not worth while, %d vs %d.\n",
4061 INSN_UID (v->insn),
4062 v->lifetime * threshold * benefit, insn_count);
4063 v->ignore = 1;
4064 all_reduced = 0;
4065 }
4066 else
4067 {
4068 /* Check that we can increment the reduced giv without a
4069 multiply insn. If not, reject it. */
4070
4071 for (tv = bl->biv; tv; tv = tv->next_iv)
4072 if (tv->mult_val == const1_rtx
4073 && ! product_cheap_p (tv->add_val, v->mult_val))
4074 {
4075 if (loop_dump_stream)
4076 fprintf (loop_dump_stream,
4077 "giv of insn %d: would need a multiply.\n",
4078 INSN_UID (v->insn));
4079 v->ignore = 1;
4080 all_reduced = 0;
4081 break;
4082 }
4083 }
4084 }
4085
4086 /* Reduce each giv that we decided to reduce. */
4087
4088 for (v = bl->giv; v; v = v->next_iv)
4089 {
4090 struct induction *tv;
4091 if (! v->ignore && v->same == 0)
4092 {
4093 int auto_inc_opt = 0;
4094
4095 v->new_reg = gen_reg_rtx (v->mode);
4096
4097 #ifdef AUTO_INC_DEC
4098 /* If the target has auto-increment addressing modes, and
4099 this is an address giv, then try to put the increment
4100 immediately after its use, so that flow can create an
4101 auto-increment addressing mode. */
4102 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4103 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4104 /* We don't handle reversed biv's because bl->biv->insn
4105 does not have a valid INSN_LUID. */
4106 && ! bl->reversed
4107 && v->always_executed && ! v->maybe_multiple)
4108 {
4109 /* If other giv's have been combined with this one, then
4110 this will work only if all uses of the other giv's occur
4111 before this giv's insn. This is difficult to check.
4112
4113 We simplify this by looking for the common case where
4114 there is one DEST_REG giv, and this giv's insn is the
4115 last use of the dest_reg of that DEST_REG giv. If the
4116 increment occurs after the address giv, then we can
4117 perform the optimization. (Otherwise, the increment
4118 would have to go before other_giv, and we would not be
4119 able to combine it with the address giv to get an
4120 auto-inc address.) */
4121 if (v->combined_with)
4122 {
4123 struct induction *other_giv = 0;
4124
4125 for (tv = bl->giv; tv; tv = tv->next_iv)
4126 if (tv->same == v)
4127 {
4128 if (other_giv)
4129 break;
4130 else
4131 other_giv = tv;
4132 }
4133 if (! tv && other_giv
4134 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4135 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4136 == INSN_UID (v->insn))
4137 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4138 auto_inc_opt = 1;
4139 }
4140 /* Check for the case where the increment is before the address
4141 giv. Do this test in "loop order". */
4142 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4143 && (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4144 || (INSN_LUID (bl->biv->insn)
4145 > INSN_LUID (scan_start))))
4146 || (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4147 && (INSN_LUID (scan_start)
4148 < INSN_LUID (bl->biv->insn))))
4149 auto_inc_opt = -1;
4150 else
4151 auto_inc_opt = 1;
4152
4153 #ifdef HAVE_cc0
4154 {
4155 rtx prev;
4156
4157 /* We can't put an insn immediately after one setting
4158 cc0, or immediately before one using cc0. */
4159 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4160 || (auto_inc_opt == -1
4161 && (prev = prev_nonnote_insn (v->insn)) != 0
4162 && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
4163 && sets_cc0_p (PATTERN (prev))))
4164 auto_inc_opt = 0;
4165 }
4166 #endif
4167
4168 if (auto_inc_opt)
4169 v->auto_inc_opt = 1;
4170 }
4171 #endif
4172
4173 /* For each place where the biv is incremented, add an insn
4174 to increment the new, reduced reg for the giv. */
4175 for (tv = bl->biv; tv; tv = tv->next_iv)
4176 {
4177 rtx insert_before;
4178
4179 if (! auto_inc_opt)
4180 insert_before = tv->insn;
4181 else if (auto_inc_opt == 1)
4182 insert_before = NEXT_INSN (v->insn);
4183 else
4184 insert_before = v->insn;
4185
4186 if (tv->mult_val == const1_rtx)
4187 emit_iv_add_mult (tv->add_val, v->mult_val,
4188 v->new_reg, v->new_reg, insert_before);
4189 else /* tv->mult_val == const0_rtx */
4190 /* A multiply is acceptable here
4191 since this is presumed to be seldom executed. */
4192 emit_iv_add_mult (tv->add_val, v->mult_val,
4193 v->add_val, v->new_reg, insert_before);
4194 }
4195
4196 /* Add code at loop start to initialize giv's reduced reg. */
4197
4198 emit_iv_add_mult (bl->initial_value, v->mult_val,
4199 v->add_val, v->new_reg, loop_start);
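/* Net effect of the reduction (illustrative sketch): for a giv
   `g = 4*i + base' in a loop where `i = i + 1', we have now
   emitted `new_reg = 4*i_initial + base' before the loop and
   `new_reg = new_reg + 4' beside each increment of `i', so uses
   of `g' can become uses of `new_reg' with no multiply inside
   the loop. */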
4200 }
4201 }
4202
4203 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4204 as not reduced.
4205
4206 For each giv register that can be reduced now: if replaceable,
4207 substitute reduced reg wherever the old giv occurs;
4208 else add new move insn "giv_reg = reduced_reg".
4209
4210 Also check for givs whose first use is their definition and whose
4211 last use is the definition of another giv. If so, it is likely
4212 dead and should not be used to eliminate a biv. */
4213 for (v = bl->giv; v; v = v->next_iv)
4214 {
4215 if (v->same && v->same->ignore)
4216 v->ignore = 1;
4217
4218 if (v->ignore)
4219 continue;
4220
4221 if (v->giv_type == DEST_REG
4222 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4223 {
4224 struct induction *v1;
4225
4226 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4227 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4228 v->maybe_dead = 1;
4229 }
4230
4231 /* Update expression if this was combined, in case other giv was
4232 replaced. */
4233 if (v->same)
4234 v->new_reg = replace_rtx (v->new_reg,
4235 v->same->dest_reg, v->same->new_reg);
4236
4237 if (v->giv_type == DEST_ADDR)
4238 /* Store reduced reg as the address in the memref where we found
4239 this giv. */
4240 validate_change (v->insn, v->location, v->new_reg, 0);
4241 else if (v->replaceable)
4242 {
4243 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4244
4245 #if 0
4246 /* I can no longer duplicate the original problem. Perhaps
4247 this is unnecessary now? */
4248
4249 /* Replaceable; it isn't strictly necessary to delete the old
4250 insn and emit a new one, because v->dest_reg is now dead.
4251
4252 However, especially when unrolling loops, the special
4253 handling for (set REG0 REG1) in the second cse pass may
4254 make v->dest_reg live again. To avoid this problem, emit
4255 an insn to set the original giv reg from the reduced giv.
4256 We can not delete the original insn, since it may be part
4257 of a LIBCALL, and the code in flow that eliminates dead
4258 libcalls will fail if it is deleted. */
4259 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4260 v->insn);
4261 #endif
4262 }
4263 else
4264 {
4265 /* Not replaceable; emit an insn to set the original giv reg from
4266 the reduced giv, same as above. */
4267 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4268 v->insn);
4269 }
4270
4271 /* When a loop is reversed, givs which depend on the reversed
4272 biv, and which are live outside the loop, must be set to their
4273 correct final value. This insn is only needed if the giv is
4274 not replaceable. The correct final value is the same as the
4275 value that the giv starts the reversed loop with. */
4276 if (bl->reversed && ! v->replaceable)
4277 emit_iv_add_mult (bl->initial_value, v->mult_val,
4278 v->add_val, v->dest_reg, end_insert_before);
4279 else if (v->final_value)
4280 {
4281 rtx insert_before;
4282
4283 /* If the loop has multiple exits, emit the insn before the
4284 loop to ensure that it will always be executed no matter
4285 how the loop exits. Otherwise, emit the insn after the loop,
4286 since this is slightly more efficient. */
4287 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4288 insert_before = loop_start;
4289 else
4290 insert_before = end_insert_before;
4291 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
4292 insert_before);
4293
4294 #if 0
4295 /* If the insn to set the final value of the giv was emitted
4296 before the loop, then we must delete the insn inside the loop
4297 that sets it. If this is a LIBCALL, then we must delete
4298 every insn in the libcall. Note, however, that
4299 final_giv_value will only succeed when there are multiple
4300 exits if the giv is dead at each exit, hence it does not
4301 matter that the original insn remains because it is dead
4302 anyway. */
4303 /* Delete the insn inside the loop that sets the giv since
4304 the giv is now set before (or after) the loop. */
4305 delete_insn (v->insn);
4306 #endif
4307 }
4308
4309 if (loop_dump_stream)
4310 {
4311 fprintf (loop_dump_stream, "giv at %d reduced to ",
4312 INSN_UID (v->insn));
4313 print_rtl (loop_dump_stream, v->new_reg);
4314 fprintf (loop_dump_stream, "\n");
4315 }
4316 }
4317
4318 /* All the givs based on the biv bl have been reduced if they
4319 merit it. */
4320
4321 /* For each giv not marked as maybe dead that has been combined with a
4322 second giv, clear any "maybe dead" mark on that second giv.
4323 v->new_reg will either be or refer to the register of the giv it
4324 combined with.
4325
4326 Doing this clearing avoids problems in biv elimination where a
4327 giv's new_reg is a complex value that can't be put in the insn but
4328 the giv combined with (with a reg as new_reg) is marked maybe_dead.
4329 Since the register will be used in either case, we'd prefer it be
4330 used from the simpler giv. */
4331
4332 for (v = bl->giv; v; v = v->next_iv)
4333 if (! v->maybe_dead && v->same)
4334 v->same->maybe_dead = 0;
4335
4336 /* Try to eliminate the biv, if it is a candidate.
4337 This won't work if ! all_reduced,
4338 since the givs we planned to use might not have been reduced.
4339
4340 We have to be careful that we didn't initially think we could eliminate
4341 this biv because of a giv that we now think may be dead and shouldn't
4342 be used as a biv replacement.
4343
4344 Also, there is the possibility that we may have a giv that looks
4345 like it can be used to eliminate a biv, but the resulting insn
4346 isn't valid. This can happen, for example, on the 88k, where a
4347 JUMP_INSN can compare a register only with zero. Attempts to
4348 replace it with a compare with a constant will fail.
4349
4350 Note that in cases where this call fails, we may have replaced some
4351 of the occurrences of the biv with a giv, but no harm was done in
4352 doing so in the rare cases where it can occur. */
4353
4354 if (all_reduced == 1 && bl->eliminable
4355 && maybe_eliminate_biv (bl, loop_start, end, 1,
4356 threshold, insn_count))
4357 {
4359 /* ?? If we created a new test to bypass the loop entirely,
4360 or otherwise drop straight in, based on this test, then
4361 we might want to rewrite it also. This way some later
4362 pass has more hope of removing the initialization of this
4363 biv entirely. */
4364
4365 /* If final_value != 0, then the biv may be used after loop end
4366 and we must emit an insn to set it just in case.
4367
4368 Reversed bivs already have an insn after the loop setting their
4369 value, so we don't need another one. We can't calculate the
4370 proper final value for such a biv here anyways. */
4371 if (final_value != 0 && ! bl->reversed)
4372 {
4373 rtx insert_before;
4374
4375 /* If the loop has multiple exits, emit the insn before the
4376 loop to ensure that it will always be executed no matter
4377 how the loop exits. Otherwise, emit the insn after the
4378 loop, since this is slightly more efficient. */
4379 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4380 insert_before = loop_start;
4381 else
4382 insert_before = end_insert_before;
4383
4384 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
4385 insert_before);
4386 }
4387
4388 #if 0
4389 /* Delete all of the instructions inside the loop which set
4390 the biv, as they are all dead. It is safe to delete them,
4391 because an insn setting a biv will never be part of a libcall. */
4392 /* However, deleting them will invalidate the regno_last_uid info,
4393 so keeping them around is more convenient. Final_biv_value
4394 will only succeed when there are multiple exits if the biv
4395 is dead at each exit, hence it does not matter that the original
4396 insn remains, because it is dead anyway. */
4397 for (v = bl->biv; v; v = v->next_iv)
4398 delete_insn (v->insn);
4399 #endif
4400
4401 if (loop_dump_stream)
4402 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4403 bl->regno);
4404 }
4405 }
4406
4407 /* Go through all the instructions in the loop, making all the
4408 register substitutions scheduled in REG_MAP. */
4409
4410 for (p = loop_start; p != end; p = NEXT_INSN (p))
4411 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4412 || GET_CODE (p) == CALL_INSN)
4413 {
4414 replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
4415 replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
4416 INSN_CODE (p) = -1;
4417 }
4418
4419 /* Unroll loops from within strength reduction so that we can use the
4420 induction variable information that strength_reduce has already
4421 collected. */
4422
4423 if (unroll_p)
4424 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
4425
4426 #ifdef HAIFA
4427 /* Instrument the loop with a BCT insn. */
4428 #ifdef HAVE_decrement_and_branch_on_count
4429 if (HAVE_decrement_and_branch_on_count)
4430 insert_bct (loop_start, loop_end);
4431 #endif
4432 #endif /* HAIFA */
4433
4434 if (loop_dump_stream)
4435 fprintf (loop_dump_stream, "\n");
4436 }
4437 \f
4438 /* Return 1 if X is a valid source for an initial value (or as value being
4439 compared against in an initial test).
4440
4441 X must be either a register or constant and must not be clobbered between
4442 the current insn and the start of the loop.
4443
4444 INSN is the insn containing X. */
4445
4446 static int
4447 valid_initial_value_p (x, insn, call_seen, loop_start)
4448 rtx x;
4449 rtx insn;
4450 int call_seen;
4451 rtx loop_start;
4452 {
4453 if (CONSTANT_P (x))
4454 return 1;
4455
4456 /* Only consider pseudos we know about initialized in insns whose luids
4457 we know. */
4458 if (GET_CODE (x) != REG
4459 || REGNO (x) >= max_reg_before_loop)
4460 return 0;
4461
4462 /* Don't use a call-clobbered register across a call which clobbers it. On
4463 some machines, don't use any hard registers at all. */
4464 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4465 && (SMALL_REGISTER_CLASSES
4466 || (call_used_regs[REGNO (x)] && call_seen)))
4467 return 0;
4468
4469 /* Don't use registers that have been clobbered before the start of the
4470 loop. */
4471 if (reg_set_between_p (x, insn, loop_start))
4472 return 0;
4473
4474 return 1;
4475 }
4476 \f
4477 /* Scan X for memory refs and check each memory address
4478 as a possible giv. INSN is the insn whose pattern X comes from.
4479 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4480 every loop iteration. */
4481
4482 static void
4483 find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
4484 rtx x;
4485 rtx insn;
4486 int not_every_iteration;
4487 rtx loop_start, loop_end;
4488 {
4489 register int i, j;
4490 register enum rtx_code code;
4491 register char *fmt;
4492
4493 if (x == 0)
4494 return;
4495
4496 code = GET_CODE (x);
4497 switch (code)
4498 {
4499 case REG:
4500 case CONST_INT:
4501 case CONST:
4502 case CONST_DOUBLE:
4503 case SYMBOL_REF:
4504 case LABEL_REF:
4505 case PC:
4506 case CC0:
4507 case ADDR_VEC:
4508 case ADDR_DIFF_VEC:
4509 case USE:
4510 case CLOBBER:
4511 return;
4512
4513 case MEM:
4514 {
4515 rtx src_reg;
4516 rtx add_val;
4517 rtx mult_val;
4518 int benefit;
4519
4520 benefit = general_induction_var (XEXP (x, 0),
4521 &src_reg, &add_val, &mult_val);
4522
4523 /* Don't make a DEST_ADDR giv with mult_val == 1 && add_val == 0.
4524 Such a giv isn't useful. */
4525 if (benefit > 0 && (mult_val != const1_rtx || add_val != const0_rtx))
4526 {
4527 /* Found one; record it. */
4528 struct induction *v
4529 = (struct induction *) oballoc (sizeof (struct induction));
4530
4531 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
4532 add_val, benefit, DEST_ADDR, not_every_iteration,
4533 &XEXP (x, 0), loop_start, loop_end);
4534
4535 v->mem_mode = GET_MODE (x);
4536 }
4537 }
4538 return;
4539
4540 default:
4541 break;
4542 }
4543
4544 /* Recursively scan the subexpressions for other mem refs. */
4545
4546 fmt = GET_RTX_FORMAT (code);
4547 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4548 if (fmt[i] == 'e')
4549 find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
4550 loop_end);
4551 else if (fmt[i] == 'E')
4552 for (j = 0; j < XVECLEN (x, i); j++)
4553 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
4554 loop_start, loop_end);
4555 }
4556 \f
4557 /* Fill in the data about one biv update.
4558 V is the `struct induction' in which we record the biv. (It is
4559 allocated by the caller, with alloca.)
4560 INSN is the insn that sets it.
4561 DEST_REG is the biv's reg.
4562
4563 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4564 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4565 being set to INC_VAL.
4566
4567 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4568 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4569 can be executed more than once per iteration. If MAYBE_MULTIPLE
4570 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4571 executed exactly once per iteration. */
4572
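/* Illustrative examples of the two biv update forms recorded here:

	i = i + 4	=> mult_val == const1_rtx, inc_val == 4
	i = 9		=> mult_val == const0_rtx, inc_val == 9

   Only the first form marks the iv_class as incremented. */
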
4573 static void
4574 record_biv (v, insn, dest_reg, inc_val, mult_val,
4575 not_every_iteration, maybe_multiple)
4576 struct induction *v;
4577 rtx insn;
4578 rtx dest_reg;
4579 rtx inc_val;
4580 rtx mult_val;
4581 int not_every_iteration;
4582 int maybe_multiple;
4583 {
4584 struct iv_class *bl;
4585
4586 v->insn = insn;
4587 v->src_reg = dest_reg;
4588 v->dest_reg = dest_reg;
4589 v->mult_val = mult_val;
4590 v->add_val = inc_val;
4591 v->mode = GET_MODE (dest_reg);
4592 v->always_computable = ! not_every_iteration;
4593 v->always_executed = ! not_every_iteration;
4594 v->maybe_multiple = maybe_multiple;
4595
4596 /* Add this to the reg's iv_class, creating a class
4597 if this is the first incrementation of the reg. */
4598
4599 bl = reg_biv_class[REGNO (dest_reg)];
4600 if (bl == 0)
4601 {
4602 /* Create and initialize new iv_class. */
4603
4604 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
4605
4606 bl->regno = REGNO (dest_reg);
4607 bl->biv = 0;
4608 bl->giv = 0;
4609 bl->biv_count = 0;
4610 bl->giv_count = 0;
4611
4612 /* Set initial value to the reg itself. */
4613 bl->initial_value = dest_reg;
4614 /* We haven't seen the initializing insn yet. */
4615 bl->init_insn = 0;
4616 bl->init_set = 0;
4617 bl->initial_test = 0;
4618 bl->incremented = 0;
4619 bl->eliminable = 0;
4620 bl->nonneg = 0;
4621 bl->reversed = 0;
4622 bl->total_benefit = 0;
4623
4624 /* Add this class to loop_iv_list. */
4625 bl->next = loop_iv_list;
4626 loop_iv_list = bl;
4627
4628 /* Put it in the array of biv register classes. */
4629 reg_biv_class[REGNO (dest_reg)] = bl;
4630 }
4631
4632 /* Update IV_CLASS entry for this biv. */
4633 v->next_iv = bl->biv;
4634 bl->biv = v;
4635 bl->biv_count++;
4636 if (mult_val == const1_rtx)
4637 bl->incremented = 1;
4638
4639 if (loop_dump_stream)
4640 {
4641 fprintf (loop_dump_stream,
4642 "Insn %d: possible biv, reg %d,",
4643 INSN_UID (insn), REGNO (dest_reg));
4644 if (GET_CODE (inc_val) == CONST_INT)
4645 {
4646 fprintf (loop_dump_stream, " const =");
4647 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val));
4648 fputc ('\n', loop_dump_stream);
4649 }
4650 else
4651 {
4652 fprintf (loop_dump_stream, " const = ");
4653 print_rtl (loop_dump_stream, inc_val);
4654 fprintf (loop_dump_stream, "\n");
4655 }
4656 }
4657 }
4658 \f
4659 /* Fill in the data about one giv.
4660 V is the `struct induction' in which we record the giv. (It is
4661 allocated by the caller, with alloca.)
4662 INSN is the insn that sets it.
4663 BENEFIT estimates the savings from deleting this insn.
4664 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4665 into a register or is used as a memory address.
4666
4667 SRC_REG is the biv reg which the giv is computed from.
4668 DEST_REG is the giv's reg (if the giv is stored in a reg).
4669 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4670 LOCATION points to the place where this giv's value appears in INSN. */
4671
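/* Illustrative examples of the two giv types recorded here: for
   `q = 4*i + base' computed into a register, the register holding
   `q' is a DEST_REG giv; for a memory reference `*(base + 4*i)',
   the address itself is a DEST_ADDR giv and LOCATION points at the
   address inside the MEM. */
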
4672 static void
4673 record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
4674 type, not_every_iteration, location, loop_start, loop_end)
4675 struct induction *v;
4676 rtx insn;
4677 rtx src_reg;
4678 rtx dest_reg;
4679 rtx mult_val, add_val;
4680 int benefit;
4681 enum g_types type;
4682 int not_every_iteration;
4683 rtx *location;
4684 rtx loop_start, loop_end;
4685 {
4686 struct induction *b;
4687 struct iv_class *bl;
4688 rtx set = single_set (insn);
4689
4690 v->insn = insn;
4691 v->src_reg = src_reg;
4692 v->giv_type = type;
4693 v->dest_reg = dest_reg;
4694 v->mult_val = mult_val;
4695 v->add_val = add_val;
4696 v->benefit = benefit;
4697 v->location = location;
4698 v->cant_derive = 0;
4699 v->combined_with = 0;
4700 v->maybe_multiple = 0;
4701 v->maybe_dead = 0;
4702 v->derive_adjustment = 0;
4703 v->same = 0;
4704 v->ignore = 0;
4705 v->new_reg = 0;
4706 v->final_value = 0;
4707 v->same_insn = 0;
4708 v->auto_inc_opt = 0;
4709 v->unrolled = 0;
4710 v->shared = 0;
4711
4712 /* The v->always_computable field is used in update_giv_derive, to
4713 determine whether a giv can be used to derive another giv. For a
4714 DEST_REG giv, INSN computes a new value for the giv, so its value
4715 isn't computable if INSN isn't executed every iteration.
4716 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4717 it does not compute a new value. Hence the value is always computable
4718 regardless of whether INSN is executed each iteration. */
4719
4720 if (type == DEST_ADDR)
4721 v->always_computable = 1;
4722 else
4723 v->always_computable = ! not_every_iteration;
4724
4725 v->always_executed = ! not_every_iteration;
4726
4727 if (type == DEST_ADDR)
4728 {
4729 v->mode = GET_MODE (*location);
4730 v->lifetime = 1;
4731 v->times_used = 1;
4732 }
4733 else /* type == DEST_REG */
4734 {
4735 v->mode = GET_MODE (SET_DEST (set));
4736
4737 v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
4738 - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
4739
4740 v->times_used = n_times_used[REGNO (dest_reg)];
4741
4742 /* If the lifetime is zero, it means that this register is
4743 really a dead store. So mark this as a giv that can be
4744 ignored. This will not prevent the biv from being eliminated. */
4745 if (v->lifetime == 0)
4746 v->ignore = 1;
4747
4748 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
4749 reg_iv_info[REGNO (dest_reg)] = v;
4750 }
4751
4752 /* Add the giv to the class of givs computed from one biv. */
4753
4754 bl = reg_biv_class[REGNO (src_reg)];
4755 if (bl)
4756 {
4757 v->next_iv = bl->giv;
4758 bl->giv = v;
4759 /* Don't count DEST_ADDR. This is supposed to count the number of
4760 insns that calculate givs. */
4761 if (type == DEST_REG)
4762 bl->giv_count++;
4763 bl->total_benefit += benefit;
4764 }
4765 else
4766 /* Fatal error, biv missing for this giv? */
4767 abort ();
4768
4769 if (type == DEST_ADDR)
4770 v->replaceable = 1;
4771 else
4772 {
4773 /* The giv can be replaced outright by the reduced register only if all
4774 of the following conditions are true:
4775 - the insn that sets the giv is always executed on any iteration
4776 on which the giv is used at all
4777 (there are two ways to deduce this:
4778 either the insn is executed on every iteration,
4779 or all uses follow that insn in the same basic block),
4780 - the giv is not used outside the loop
4781 - no assignments to the biv occur during the giv's lifetime. */
4782
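/* Sketch of a failure case (illustrative):

	g = 4*i;	<- giv defined
	i = i + 1;	<- biv updated
	... use g ...	<- giv still live

   The biv update falls inside the giv's lifetime, so substituting
   the reduced register for `g' at the use would see the
   post-increment value; the checks below mark such a giv not
   replaceable. */
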
4783 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
4784 /* Previous line always fails if INSN was moved by loop opt. */
4785 && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
4786 && (! not_every_iteration
4787 || last_use_this_basic_block (dest_reg, insn)))
4788 {
4789 /* Now check that there are no assignments to the biv within the
4790 giv's lifetime. This requires two separate checks. */
4791
4792 /* Check each biv update, and fail if any are between the first
4793 and last use of the giv.
4794
4795 If this loop contains an inner loop that was unrolled, then
4796 the insn modifying the biv may have been emitted by the loop
4797 unrolling code, and hence does not have a valid luid. Just
4798 mark the biv as not replaceable in this case. It is not very
4799 useful as a biv, because it is used in two different loops.
4800 It is very unlikely that we would be able to optimize the giv
4801 using this biv anyway. */
4802
4803 v->replaceable = 1;
4804 for (b = bl->biv; b; b = b->next_iv)
4805 {
4806 if (INSN_UID (b->insn) >= max_uid_for_loop
4807 || ((uid_luid[INSN_UID (b->insn)]
4808 >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
4809 && (uid_luid[INSN_UID (b->insn)]
4810 <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
4811 {
4812 v->replaceable = 0;
4813 v->not_replaceable = 1;
4814 break;
4815 }
4816 }
4817
4818 /* If there are any backwards branches that go from after the
4819 biv update to before it, then this giv is not replaceable. */
4820 if (v->replaceable)
4821 for (b = bl->biv; b; b = b->next_iv)
4822 if (back_branch_in_range_p (b->insn, loop_start, loop_end))
4823 {
4824 v->replaceable = 0;
4825 v->not_replaceable = 1;
4826 break;
4827 }
4828 }
4829 else
4830 {
4831 /* May still be replaceable, we don't have enough info here to
4832 decide. */
4833 v->replaceable = 0;
4834 v->not_replaceable = 0;
4835 }
4836 }
4837
4838 if (loop_dump_stream)
4839 {
4840 if (type == DEST_REG)
4841 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
4842 INSN_UID (insn), REGNO (dest_reg));
4843 else
4844 fprintf (loop_dump_stream, "Insn %d: dest address",
4845 INSN_UID (insn));
4846
4847 fprintf (loop_dump_stream, " src reg %d benefit %d",
4848 REGNO (src_reg), v->benefit);
4849 fprintf (loop_dump_stream, " used %d lifetime %d",
4850 v->times_used, v->lifetime);
4851
4852 if (v->replaceable)
4853 fprintf (loop_dump_stream, " replaceable");
4854
4855 if (GET_CODE (mult_val) == CONST_INT)
4856 {
4857 fprintf (loop_dump_stream, " mult ");
4858 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val));
4859 }
4860 else
4861 {
4862 fprintf (loop_dump_stream, " mult ");
4863 print_rtl (loop_dump_stream, mult_val);
4864 }
4865
4866 if (GET_CODE (add_val) == CONST_INT)
4867 {
4868 fprintf (loop_dump_stream, " add ");
4869 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val));
4870 }
4871 else
4872 {
4873 fprintf (loop_dump_stream, " add ");
4874 print_rtl (loop_dump_stream, add_val);
4875 }
4876 
4877 fprintf (loop_dump_stream, "\n");
4878 }
4879 }
4882
4883
4884 /* All this does is determine whether a giv can be made replaceable because
4885 its final value can be calculated. This code can not be part of record_giv
4886 above, because final_giv_value requires that the number of loop iterations
4887 be known, and that can not be accurately calculated until after all givs
4888 have been identified. */
4889
4890 static void
4891 check_final_value (v, loop_start, loop_end)
4892 struct induction *v;
4893 rtx loop_start, loop_end;
4894 {
4895 struct iv_class *bl;
4896 rtx final_value = 0;
4897
4898 bl = reg_biv_class[REGNO (v->src_reg)];
4899
4900 /* DEST_ADDR givs will never reach here, because they are always marked
4901 replaceable above in record_giv. */
4902
4903 /* The giv can be replaced outright by the reduced register only if all
4904 of the following conditions are true:
4905 - the insn that sets the giv is always executed on any iteration
4906 on which the giv is used at all
4907 (there are two ways to deduce this:
4908 either the insn is executed on every iteration,
4909 or all uses follow that insn in the same basic block),
4910 - its final value can be calculated (this condition is different
4911 than the one above in record_giv)
4912 - no assignments to the biv occur during the giv's lifetime. */
4913
4914 #if 0
4915 /* This is only called now when replaceable is known to be false. */
4916 /* Clear replaceable, so that it won't confuse final_giv_value. */
4917 v->replaceable = 0;
4918 #endif
4919
4920 if ((final_value = final_giv_value (v, loop_start, loop_end))
4921 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
4922 {
4923 int biv_increment_seen = 0;
4924 rtx p = v->insn;
4925 rtx last_giv_use;
4926
4927 v->replaceable = 1;
4928
4929 /* When trying to determine whether or not a biv increment occurs
4930 during the lifetime of the giv, we can ignore uses of the variable
4931 outside the loop because final_value is true. Hence we can not
4932 use regno_last_uid and regno_first_uid as above in record_giv. */
4933
4934 /* Search the loop to determine whether any assignments to the
4935 biv occur during the giv's lifetime. Start with the insn
4936 that sets the giv, and search around the loop until we come
4937 back to that insn again.
4938
4939 Also fail if there is a jump within the giv's lifetime that jumps
4940 to somewhere outside the lifetime but still within the loop. This
4941 catches spaghetti code where the execution order is not linear, and
4942 hence the above test fails. Here we assume that the giv lifetime
4943 does not extend from one iteration of the loop to the next, so as
4944 to make the test easier. Since the lifetime isn't known yet,
4945 this requires two loops. See also record_giv above. */
4946
4947 last_giv_use = v->insn;
4948
4949 while (1)
4950 {
4951 p = NEXT_INSN (p);
4952 if (p == loop_end)
4953 p = NEXT_INSN (loop_start);
4954 if (p == v->insn)
4955 break;
4956
4957 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4958 || GET_CODE (p) == CALL_INSN)
4959 {
4960 if (biv_increment_seen)
4961 {
4962 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4963 {
4964 v->replaceable = 0;
4965 v->not_replaceable = 1;
4966 break;
4967 }
4968 }
4969 else if (reg_set_p (v->src_reg, PATTERN (p)))
4970 biv_increment_seen = 1;
4971 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4972 last_giv_use = p;
4973 }
4974 }
4975
4976 /* Now that the lifetime of the giv is known, check for branches
4977 from within the lifetime to outside the lifetime if it is still
4978 replaceable. */
4979
4980 if (v->replaceable)
4981 {
4982 p = v->insn;
4983 while (1)
4984 {
4985 p = NEXT_INSN (p);
4986 if (p == loop_end)
4987 p = NEXT_INSN (loop_start);
4988 if (p == last_giv_use)
4989 break;
4990
4991 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
4992 && LABEL_NAME (JUMP_LABEL (p))
4993 && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
4994 || (INSN_UID (v->insn) >= max_uid_for_loop)
4995 || (INSN_UID (last_giv_use) >= max_uid_for_loop)
4996 || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
4997 && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
4998 || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
4999 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
5000 {
5001 v->replaceable = 0;
5002 v->not_replaceable = 1;
5003
5004 if (loop_dump_stream)
5005 fprintf (loop_dump_stream,
5006 "Found branch outside giv lifetime.\n");
5007
5008 break;
5009 }
5010 }
5011 }
5012
5013 /* If it is replaceable, then save the final value. */
5014 if (v->replaceable)
5015 v->final_value = final_value;
5016 }
5017
5018 if (loop_dump_stream && v->replaceable)
5019 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5020 INSN_UID (v->insn), REGNO (v->dest_reg));
5021 }
5022 \f
5023 /* Update the status of whether a giv can derive other givs.
5024
5025 We need to do something special if there is or may be an update to the biv
5026 between the time the giv is defined and the time it is used to derive
5027 another giv.
5028
5029 In addition, a giv that is only conditionally set is not allowed to
5030 derive another giv once a label has been passed.
5031
5032 The cases we look at are when a label or an update to a biv is passed. */
5033
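/* Worked example (illustrative): let giv g = b*m + a be computed,
   then let the biv update `b = b + d' execute before g is used to
   derive another giv. In terms of the updated b, g equals
   b*m + a - d*m, so the product d*m (biv->add_val times
   giv->mult_val below) is recorded in derive_adjustment so that
   later derivations can compensate. */
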
5034 static void
5035 update_giv_derive (p)
5036 rtx p;
5037 {
5038 struct iv_class *bl;
5039 struct induction *biv, *giv;
5040 rtx tem;
5041 int dummy;
5042
5043 /* Search all IV classes, then all bivs, and finally all givs.
5044
5045 There are three cases we are concerned with. First we have the situation
5046 of a giv that is only updated conditionally. In that case, it may not
5047 derive any givs after a label is passed.
5048
5049 The second case is when a biv update occurs, or may occur, after the
5050 definition of a giv. For certain biv updates (see below) that are
5051 known to occur between the giv definition and use, we can adjust the
5052 giv definition. For others, or when the biv update is conditional,
5053 we must prevent the giv from deriving any other givs. There are two
5054 sub-cases within this case.
5055
5056 If this is a label, we are concerned with any biv update that is done
5057 conditionally, since it may be done after the giv is defined followed by
5058 a branch here (actually, we need to pass both a jump and a label, but
5059 this extra tracking doesn't seem worth it).
5060
5061 If this is a jump, we are concerned about any biv update that may be
5062 executed multiple times. We are actually only concerned about
5063 backward jumps, but it is probably not worth performing the test
5064 on the jump again here.
5065
5066 If this is a biv update, we must adjust the giv status to show that a
5067 subsequent biv update was performed. If this adjustment cannot be done,
5068 the giv cannot derive further givs. */
5069
5070 for (bl = loop_iv_list; bl; bl = bl->next)
5071 for (biv = bl->biv; biv; biv = biv->next_iv)
5072 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5073 || biv->insn == p)
5074 {
5075 for (giv = bl->giv; giv; giv = giv->next_iv)
5076 {
5077 /* If cant_derive is already true, there is no point in
5078 checking all of these conditions again. */
5079 if (giv->cant_derive)
5080 continue;
5081
5082 /* If this giv is conditionally set and we have passed a label,
5083 it cannot derive anything. */
5084 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5085 giv->cant_derive = 1;
5086
5087 /* Skip givs that have mult_val == 0, since
5088 they are really invariants. Also skip those that are
5089 replaceable, since we know their lifetime doesn't contain
5090 any biv update. */
5091 else if (giv->mult_val == const0_rtx || giv->replaceable)
5092 continue;
5093
5094 /* The only way we can allow this giv to derive another
5095 is if this is a biv increment and we can form the product
5096 of biv->add_val and giv->mult_val. In this case, we will
5097 be able to compute a compensation. */
5098 else if (biv->insn == p)
5099 {
5100 tem = 0;
5101
5102 if (biv->mult_val == const1_rtx)
5103 tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
5104 biv->add_val,
5105 giv->mult_val),
5106 &dummy);
5107
5108 if (tem && giv->derive_adjustment)
5109 tem = simplify_giv_expr (gen_rtx_PLUS (giv->mode, tem,
5110 giv->derive_adjustment),
5111 &dummy);
5112 if (tem)
5113 giv->derive_adjustment = tem;
5114 else
5115 giv->cant_derive = 1;
5116 }
5117 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5118 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5119 giv->cant_derive = 1;
5120 }
5121 }
5122 }
5123 \f
5124 /* Check whether an insn is an increment legitimate for a basic induction var.
5125 X is the source of insn P, or a part of it.
5126 MODE is the mode in which X should be interpreted.
5127
5128 DEST_REG is the putative biv, also the destination of the insn.
5129 We accept patterns of these forms:
5130 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5131 REG = INVARIANT + REG
5132
5133 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5134 and store the additive term into *INC_VAL.
5135
5136 If X is an assignment of an invariant into DEST_REG, we set
5137 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5138
5139 We also want to detect a BIV when it corresponds to a variable
5140 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5141 of the variable may be a PLUS that adds a SUBREG of that variable to
5142 an invariant and then sign- or zero-extends the result of the PLUS
5143 into the variable.
5144
5145 Most GIVs in such cases will be in the promoted mode, since that is
5146 probably the natural computation mode (and almost certainly the mode
5147 used for addresses) on the machine. So we view the pseudo-reg containing
5148 the variable as the BIV, as if it were simply incremented.
5149
5150 Note that treating the entire pseudo as a BIV will result in making
5151 simple increments to any GIVs based on it. However, if the variable
5152 overflows in its declared mode but not its promoted mode, the result will
5153 be incorrect. This is acceptable if the variable is signed, since
5154 overflows in such cases are undefined, but not if it is unsigned, since
5155 those overflows are defined. So we only check for SIGN_EXTEND and
5156 not ZERO_EXTEND.
5157
5158 If we cannot find a biv, we return 0. */
5159
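/* Rough RTL sketch of the promoted-variable case described above
   (illustrative; the exact form depends on the target):

	(set (reg:SI 30)
	     (sign_extend:SI (plus:HI (subreg:HI (reg:SI 30) 0)
				      (const_int 1))))

   Here (reg:SI 30) holds a promoted HImode variable; we treat the
   whole pseudo as the biv, with an increment of 1. */
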
5160 static int
5161 basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
5162 register rtx x;
5163 enum machine_mode mode;
5164 rtx dest_reg;
5165 rtx p;
5166 rtx *inc_val;
5167 rtx *mult_val;
5168 {
5169 register enum rtx_code code;
5170 rtx arg;
5171 rtx insn, set = 0;
5172
5173 code = GET_CODE (x);
5174 switch (code)
5175 {
5176 case PLUS:
5177 if (XEXP (x, 0) == dest_reg
5178 || (GET_CODE (XEXP (x, 0)) == SUBREG
5179 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5180 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5181 arg = XEXP (x, 1);
5182 else if (XEXP (x, 1) == dest_reg
5183 || (GET_CODE (XEXP (x, 1)) == SUBREG
5184 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5185 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5186 arg = XEXP (x, 0);
5187 else
5188 return 0;
5189
5190 if (invariant_p (arg) != 1)
5191 return 0;
5192
5193 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5194 *mult_val = const1_rtx;
5195 return 1;
5196
5197 case SUBREG:
5198 /* If this is a SUBREG for a promoted variable, check the inner
5199 value. */
5200 if (SUBREG_PROMOTED_VAR_P (x))
5201 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
5202 dest_reg, p, inc_val, mult_val);
5203 return 0;
5204
5205 case REG:
5206 /* If this register is assigned in the previous insn, look at its
5207 source, but don't go outside the loop or past a label. */
5208
5209 for (insn = PREV_INSN (p);
5210 (insn && GET_CODE (insn) == NOTE
5211 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5212 insn = PREV_INSN (insn))
5213 ;
5214
5215 if (insn)
5216 set = single_set (insn);
5217
5218 if (set != 0
5219 && (SET_DEST (set) == x
5220 || (GET_CODE (SET_DEST (set)) == SUBREG
5221 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
5222 <= UNITS_PER_WORD)
5223 && SUBREG_REG (SET_DEST (set)) == x)))
5224 return basic_induction_var (SET_SRC (set),
5225 (GET_MODE (SET_SRC (set)) == VOIDmode
5226 ? GET_MODE (x)
5227 : GET_MODE (SET_SRC (set))),
5228 dest_reg, insn,
5229 inc_val, mult_val);
5230 /* ... fall through ... */
5231
5232 /* Can accept constant setting of biv only when inside innermost loop.
5233 Otherwise, a biv of an inner loop may be incorrectly recognized
5234 as a biv of the outer loop,
5235 causing code to be moved INTO the inner loop. */
5236 case MEM:
5237 if (invariant_p (x) != 1)
5238 return 0;
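/* ... fall through ... */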
5239 case CONST_INT:
5240 case SYMBOL_REF:
5241 case CONST:
5242 if (loops_enclosed == 1)
5243 {
5244 /* Possible bug here? Perhaps we don't know the mode of X. */
5245 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
5246 *mult_val = const0_rtx;
5247 return 1;
5248 }
5249 else
5250 return 0;
5251
5252 case SIGN_EXTEND:
5253 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5254 dest_reg, p, inc_val, mult_val);
5255 case ASHIFTRT:
5256 /* Similar, since this can be a sign extension. */
5257 for (insn = PREV_INSN (p);
5258 (insn && GET_CODE (insn) == NOTE
5259 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5260 insn = PREV_INSN (insn))
5261 ;
5262
5263 if (insn)
5264 set = single_set (insn);
5265
5266 if (set && SET_DEST (set) == XEXP (x, 0)
5267 && GET_CODE (XEXP (x, 1)) == CONST_INT
5268 && INTVAL (XEXP (x, 1)) >= 0
5269 && GET_CODE (SET_SRC (set)) == ASHIFT
5270 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
5271 return basic_induction_var (XEXP (SET_SRC (set), 0),
5272 GET_MODE (XEXP (x, 0)),
5273 dest_reg, insn, inc_val, mult_val);
5274 return 0;
5275
5276 default:
5277 return 0;
5278 }
5279 }
5280 \f
5281 /* A general induction variable (giv) is any quantity that is a linear
5282 function of a basic induction variable,
5283 i.e. giv = biv * mult_val + add_val.
5284 The coefficients can be any loop invariant quantity.
5285 A giv need not be computed directly from the biv;
5286 it can be computed by way of other givs. */
5287
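/* For example, if I is a biv stepping by 1 and A is loop-invariant,
   the address computation A + I * 4 is a giv with
   mult_val == (const_int 4) and add_val == A.  */
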
5288 /* Determine whether X computes a giv.
5289 If it does, return a nonzero value
5290 which is the benefit from eliminating the computation of X;
5291 set *SRC_REG to the register of the biv that it is computed from;
5292 set *ADD_VAL and *MULT_VAL to the coefficients,
5293 such that the value of X is biv * mult + add. */
5294
5295 static int
5296 general_induction_var (x, src_reg, add_val, mult_val)
5297 rtx x;
5298 rtx *src_reg;
5299 rtx *add_val;
5300 rtx *mult_val;
5301 {
5302 rtx orig_x = x;
5303 int benefit = 0;
5304 char *storage;
5305
5306 /* If this is an invariant, forget it, it isn't a giv. */
5307 if (invariant_p (x) == 1)
5308 return 0;
5309
5310 /* See if the expression could be a giv and get its form.
5311 Mark our place on the obstack in case we don't find a giv. */
5312 storage = (char *) oballoc (0);
5313 x = simplify_giv_expr (x, &benefit);
5314 if (x == 0)
5315 {
5316 obfree (storage);
5317 return 0;
5318 }
5319
5320 switch (GET_CODE (x))
5321 {
5322 case USE:
5323 case CONST_INT:
5324 /* Since this is now an invariant and wasn't before, it must be a giv
5325 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5326 with. */
5327 *src_reg = loop_iv_list->biv->dest_reg;
5328 *mult_val = const0_rtx;
5329 *add_val = x;
5330 break;
5331
5332 case REG:
5333 /* This is equivalent to a BIV. */
5334 *src_reg = x;
5335 *mult_val = const1_rtx;
5336 *add_val = const0_rtx;
5337 break;
5338
5339 case PLUS:
5340 /* Either (plus (biv) (invar)) or
5341 (plus (mult (biv) (invar_1)) (invar_2)). */
5342 if (GET_CODE (XEXP (x, 0)) == MULT)
5343 {
5344 *src_reg = XEXP (XEXP (x, 0), 0);
5345 *mult_val = XEXP (XEXP (x, 0), 1);
5346 }
5347 else
5348 {
5349 *src_reg = XEXP (x, 0);
5350 *mult_val = const1_rtx;
5351 }
5352 *add_val = XEXP (x, 1);
5353 break;
5354
5355 case MULT:
5356 /* ADD_VAL is zero. */
5357 *src_reg = XEXP (x, 0);
5358 *mult_val = XEXP (x, 1);
5359 *add_val = const0_rtx;
5360 break;
5361
5362 default:
5363 abort ();
5364 }
5365
5366 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
5367 one unless they are CONST_INT). */
5368 if (GET_CODE (*add_val) == USE)
5369 *add_val = XEXP (*add_val, 0);
5370 if (GET_CODE (*mult_val) == USE)
5371 *mult_val = XEXP (*mult_val, 0);
5372
5373 benefit += rtx_cost (orig_x, SET);
5374
5375 /* Always return some benefit if this is a giv so it will be detected
5376 as such. This allows elimination of bivs that might otherwise
5377 not be eliminated. */
5378 return benefit == 0 ? 1 : benefit;
5379 }
5380 \f
5381 /* Given an expression, X, try to form it as a linear function of a biv.
5382 We will canonicalize it to be of the form
5383 (plus (mult (BIV) (invar_1))
5384 (invar_2))
5385 with possible degeneracies.
5386
5387 The invariant expressions must each be of a form that can be used as a
5388 machine operand. We surround them with a USE rtx (a hack, but localized
5389 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5390 routine; it is the caller's responsibility to strip them.
5391
5392 If no such canonicalization is possible (i.e., two bivs are used, or an
5393 expression is seen that is neither invariant nor a biv or giv), this
5394 routine returns 0.
5395
5396 For a non-zero return, the result will have a code of CONST_INT, USE,
5397 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5398
5399 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
5400
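/* A worked example (a sketch; the register number is arbitrary):
   with the biv in (reg 64) and a loop-invariant symbol A, the input

       (plus (reg 64) (plus (reg 64) (symbol_ref A)))

   canonicalizes to

       (plus (mult (reg 64) (const_int 2))
             (use (symbol_ref A)))

   with the invariant wrapped in a USE as described above.  */
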
5401 static rtx
5402 simplify_giv_expr (x, benefit)
5403 rtx x;
5404 int *benefit;
5405 {
5406 enum machine_mode mode = GET_MODE (x);
5407 rtx arg0, arg1;
5408 rtx tem;
5409
5410 /* If this is not an integer mode, or if we cannot do arithmetic in this
5411 mode, this can't be a giv. */
5412 if (mode != VOIDmode
5413 && (GET_MODE_CLASS (mode) != MODE_INT
5414 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5415 return 0;
5416
5417 switch (GET_CODE (x))
5418 {
5419 case PLUS:
5420 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5421 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5422 if (arg0 == 0 || arg1 == 0)
5423 return 0;
5424
5425 /* Put constant last, CONST_INT last if both constant. */
5426 if ((GET_CODE (arg0) == USE
5427 || GET_CODE (arg0) == CONST_INT)
5428 && GET_CODE (arg1) != CONST_INT)
5429 tem = arg0, arg0 = arg1, arg1 = tem;
5430
5431 /* Handle addition of zero, then addition of an invariant. */
5432 if (arg1 == const0_rtx)
5433 return arg0;
5434 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5435 switch (GET_CODE (arg0))
5436 {
5437 case CONST_INT:
5438 case USE:
5439 /* Both invariant. Only valid if sum is machine operand.
5440 First strip off possible USE on the operands. */
5441 if (GET_CODE (arg0) == USE)
5442 arg0 = XEXP (arg0, 0);
5443
5444 if (GET_CODE (arg1) == USE)
5445 arg1 = XEXP (arg1, 0);
5446
5447 tem = 0;
5448 if (CONSTANT_P (arg0) && GET_CODE (arg1) == CONST_INT)
5449 {
5450 tem = plus_constant (arg0, INTVAL (arg1));
5451 if (GET_CODE (tem) != CONST_INT)
5452 tem = gen_rtx_USE (mode, tem);
5453 }
5454 else
5455 {
5456 /* Adding two invariants must result in an invariant,
5457 so enclose addition operation inside a USE and
5458 return it. */
5459 tem = gen_rtx_USE (mode, gen_rtx_PLUS (mode, arg0, arg1));
5460 }
5461
5462 return tem;
5463
5464 case REG:
5465 case MULT:
5466 /* biv + invar or mult + invar. Return sum. */
5467 return gen_rtx_PLUS (mode, arg0, arg1);
5468
5469 case PLUS:
5470 /* (a + invar_1) + invar_2. Associate. */
5471 return simplify_giv_expr (gen_rtx_PLUS (mode,
5472 XEXP (arg0, 0),
5473 gen_rtx_PLUS (mode,
5474 XEXP (arg0, 1), arg1)),
5475 benefit);
5476
5477 default:
5478 abort ();
5479 }
5480
5481 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5482 MULT to reduce cases. */
5483 if (GET_CODE (arg0) == REG)
5484 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
5485 if (GET_CODE (arg1) == REG)
5486 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
5487
5488 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5489 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5490 Recurse to associate the second PLUS. */
5491 if (GET_CODE (arg1) == MULT)
5492 tem = arg0, arg0 = arg1, arg1 = tem;
5493
5494 if (GET_CODE (arg1) == PLUS)
5495 return simplify_giv_expr (gen_rtx_PLUS (mode,
5496 gen_rtx_PLUS (mode, arg0,
5497 XEXP (arg1, 0)),
5498 XEXP (arg1, 1)),
5499 benefit);
5500
5501 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5502 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5503 abort ();
5504
5505 if (XEXP (arg0, 0) != XEXP (arg1, 0))
5506 return 0;
5507
5508 return simplify_giv_expr (gen_rtx_MULT (mode,
5509 XEXP (arg0, 0),
5510 gen_rtx_PLUS (mode,
5511 XEXP (arg0, 1),
5512 XEXP (arg1, 1))),
5513 benefit);
5514
5515 case MINUS:
5516 /* Handle "a - b" as "a + b * (-1)". */
5517 return simplify_giv_expr (gen_rtx_PLUS (mode,
5518 XEXP (x, 0),
5519 gen_rtx_MULT (mode, XEXP (x, 1),
5520 constm1_rtx)),
5521 benefit);
5522
5523 case MULT:
5524 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5525 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5526 if (arg0 == 0 || arg1 == 0)
5527 return 0;
5528
5529 /* Put constant last, CONST_INT last if both constant. */
5530 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5531 && GET_CODE (arg1) != CONST_INT)
5532 tem = arg0, arg0 = arg1, arg1 = tem;
5533
5534 /* If second argument is not now constant, not giv. */
5535 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5536 return 0;
5537
5538 /* Handle multiply by 0 or 1. */
5539 if (arg1 == const0_rtx)
5540 return const0_rtx;
5541
5542 else if (arg1 == const1_rtx)
5543 return arg0;
5544
5545 switch (GET_CODE (arg0))
5546 {
5547 case REG:
5548 /* biv * invar. Done. */
5549 return gen_rtx_MULT (mode, arg0, arg1);
5550
5551 case CONST_INT:
5552 /* Product of two constants. */
5553 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5554
5555 case USE:
5556 /* invar * invar. Not giv. */
5557 return 0;
5558
5559 case MULT:
5560 /* (a * invar_1) * invar_2. Associate. */
5561 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (arg0, 0),
5562 gen_rtx_MULT (mode,
5563 XEXP (arg0, 1),
5564 arg1)),
5565 benefit);
5566
5567 case PLUS:
5568 /* (a + invar_1) * invar_2. Distribute. */
5569 return simplify_giv_expr (gen_rtx_PLUS (mode,
5570 gen_rtx_MULT (mode,
5571 XEXP (arg0, 0),
5572 arg1),
5573 gen_rtx_MULT (mode,
5574 XEXP (arg0, 1),
5575 arg1)),
5576 benefit);
5577
5578 default:
5579 abort ();
5580 }
5581
5582 case ASHIFT:
5583 /* Shift by constant is multiply by power of two. */
5584 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5585 return 0;
5586
5587 return simplify_giv_expr (gen_rtx_MULT (mode,
5588 XEXP (x, 0),
5589 GEN_INT ((HOST_WIDE_INT) 1
5590 << INTVAL (XEXP (x, 1)))),
5591 benefit);
5592
5593 case NEG:
5594 /* "-a" is "a * (-1)" */
5595 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
5596 benefit);
5597
5598 case NOT:
5599 /* "~a" is "-a - 1". Silly, but easy. */
5600 return simplify_giv_expr (gen_rtx_MINUS (mode,
5601 gen_rtx_NEG (mode, XEXP (x, 0)),
5602 const1_rtx),
5603 benefit);
5604
5605 case USE:
5606 /* Already in proper form for invariant. */
5607 return x;
5608
5609 case REG:
5610 /* If this is a new register, we can't deal with it. */
5611 if (REGNO (x) >= max_reg_before_loop)
5612 return 0;
5613
5614 /* Check for biv or giv. */
5615 switch (reg_iv_type[REGNO (x)])
5616 {
5617 case BASIC_INDUCT:
5618 return x;
5619 case GENERAL_INDUCT:
5620 {
5621 struct induction *v = reg_iv_info[REGNO (x)];
5622
5623 /* Form expression from giv and add benefit. Ensure this giv
5624 can derive another and subtract any needed adjustment if so. */
5625 *benefit += v->benefit;
5626 if (v->cant_derive)
5627 return 0;
5628
5629 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, v->src_reg,
5630 v->mult_val),
5631 v->add_val);
5632 if (v->derive_adjustment)
5633 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
5634 return simplify_giv_expr (tem, benefit);
5635 }
5636
5637 default:
5638 break;
5639 }
5640
5641 /* Fall through to general case. */
5642 default:
5643 /* If invariant, return as USE (unless CONST_INT).
5644 Otherwise, not giv. */
5645 if (GET_CODE (x) == USE)
5646 x = XEXP (x, 0);
5647
5648 if (invariant_p (x) == 1)
5649 {
5650 if (GET_CODE (x) == CONST_INT)
5651 return x;
5652 else
5653 return gen_rtx_USE (mode, x);
5654 }
5655 else
5656 return 0;
5657 }
5658 }
5659 \f
5660 /* Help detect a giv that is calculated by several consecutive insns;
5661 for example,
5662 giv = biv * M
5663 giv = giv + A
5664 The caller has already identified the first insn P as having a giv as dest;
5665 we check that all other insns that set the same register follow
5666 immediately after P, that they alter nothing else,
5667 and that the result of the last is still a giv.
5668
5669 The value is 0 if the reg set in P is not really a giv.
5670 Otherwise, the value is the amount gained by eliminating
5671 all the consecutive insns that compute the value.
5672
5673 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
5674 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
5675
5676 The coefficients of the ultimate giv value are stored in
5677 *MULT_VAL and *ADD_VAL. */
5678
5679 static int
5680 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
5681 add_val, mult_val)
5682 int first_benefit;
5683 rtx p;
5684 rtx src_reg;
5685 rtx dest_reg;
5686 rtx *add_val;
5687 rtx *mult_val;
5688 {
5689 int count;
5690 enum rtx_code code;
5691 int benefit;
5692 rtx temp;
5693 rtx set;
5694
5695 /* Indicate that this is a giv so that we can update the value produced in
5696 each insn of the multi-insn sequence.
5697
5698 This induction structure will be used only by the call to
5699 general_induction_var below, so we can allocate it on our stack.
5700 If this is a giv, our caller will replace the induct var entry with
5701 a new induction structure. */
5702 struct induction *v
5703 = (struct induction *) alloca (sizeof (struct induction));
5704 v->src_reg = src_reg;
5705 v->mult_val = *mult_val;
5706 v->add_val = *add_val;
5707 v->benefit = first_benefit;
5708 v->cant_derive = 0;
5709 v->derive_adjustment = 0;
5710
5711 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
5712 reg_iv_info[REGNO (dest_reg)] = v;
5713
5714 count = n_times_set[REGNO (dest_reg)] - 1;
5715
5716 while (count > 0)
5717 {
5718 p = NEXT_INSN (p);
5719 code = GET_CODE (p);
5720
5721 /* If libcall, skip to end of call sequence. */
5722 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
5723 p = XEXP (temp, 0);
5724
5725 if (code == INSN
5726 && (set = single_set (p))
5727 && GET_CODE (SET_DEST (set)) == REG
5728 && SET_DEST (set) == dest_reg
5729 && ((benefit = general_induction_var (SET_SRC (set), &src_reg,
5730 add_val, mult_val))
5731 /* Giv created by equivalent expression. */
5732 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
5733 && (benefit = general_induction_var (XEXP (temp, 0), &src_reg,
5734 add_val, mult_val))))
5735 && src_reg == v->src_reg)
5736 {
5737 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5738 benefit += libcall_benefit (p);
5739
5740 count--;
5741 v->mult_val = *mult_val;
5742 v->add_val = *add_val;
5743 v->benefit = benefit;
5744 }
5745 else if (code != NOTE)
5746 {
5747 /* Allow insns that set something other than this giv to a
5748 constant. Such insns are needed on machines which cannot
5749 include long constants and should not disqualify a giv. */
5750 if (code == INSN
5751 && (set = single_set (p))
5752 && SET_DEST (set) != dest_reg
5753 && CONSTANT_P (SET_SRC (set)))
5754 continue;
5755
5756 reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
5757 return 0;
5758 }
5759 }
5760
5761 return v->benefit;
5762 }
5763 \f
5764 /* Return an rtx, if any, that expresses giv G2 as a function of the register
5765 represented by G1. If no such expression can be found, or it is clear that
5766 it cannot possibly be a valid address, 0 is returned.
5767
5768 To perform the computation, we note that
5769 G1 = a * v + b and
5770 G2 = c * v + d
5771 where `v' is the biv.
5772
5773 So G2 = (c/a) * G1 + (d - b*c/a) */
5774
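/* Worked example: for G1 = 4 * v + 8 and G2 = 8 * v + 4 we have
   c/a = 2 and d - b*c/a = 4 - 8*2 = -12, so G2 = 2 * G1 - 12.  */
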
5775 #ifdef ADDRESS_COST
5776 static rtx
5777 express_from (g1, g2)
5778 struct induction *g1, *g2;
5779 {
5780 rtx mult, add;
5781
5782 /* The value that G1 will be multiplied by must be a constant integer. Also,
5783 the only chance we have of getting a valid address is if b*c/a (see above
5784 for notation) is also an integer. */
5785 if (GET_CODE (g1->mult_val) != CONST_INT
5786 || GET_CODE (g2->mult_val) != CONST_INT
5787 || GET_CODE (g1->add_val) != CONST_INT
5788 || g1->mult_val == const0_rtx
5789 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
5790 return 0;
5791
5792 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
5793 add = plus_constant (g2->add_val, - INTVAL (g1->add_val) * INTVAL (mult));
5794
5795 /* Form simplified final result. */
5796 if (mult == const0_rtx)
5797 return add;
5798 else if (mult == const1_rtx)
5799 mult = g1->dest_reg;
5800 else
5801 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
5802
5803 if (add == const0_rtx)
5804 return mult;
5805 else
5806 return gen_rtx_PLUS (g2->mode, mult, add);
5807 }
5808 #endif
5809 \f
5810 /* Return 1 if giv G2 can be combined with G1. This means that G2 can use
5811 (either directly or via an address expression) a register used to represent
5812 G1. Set g2->new_reg to a representation of G1 (normally just
5813 g1->dest_reg). */
5814
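/* For example (a sketch): two DEST_ADDR givs computing I*4 and
   I*4 + 8 differ only in their add_val, so express_from yields
   G1 + 8; if that is a valid and sufficiently cheap address,
   G2 is rewritten in terms of G1.  */
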
5815 static int
5816 combine_givs_p (g1, g2)
5817 struct induction *g1, *g2;
5818 {
5819 rtx tem;
5820
5821 /* If these givs are identical, they can be combined. */
5822 if (rtx_equal_p (g1->mult_val, g2->mult_val)
5823 && rtx_equal_p (g1->add_val, g2->add_val))
5824 {
5825 g2->new_reg = g1->dest_reg;
5826 return 1;
5827 }
5828
5829 #ifdef ADDRESS_COST
5830 /* If G2 can be expressed as a function of G1 and that function is valid
5831 as an address and no more expensive than using a register for G2,
5832 the expression of G2 in terms of G1 can be used. */
5833 if (g2->giv_type == DEST_ADDR
5834 && (tem = express_from (g1, g2)) != 0
5835 && memory_address_p (g2->mem_mode, tem)
5836 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location))
5837 {
5838 g2->new_reg = tem;
5839 return 1;
5840 }
5841 #endif
5842
5843 return 0;
5844 }
5845 \f
5846 #ifdef GIV_SORT_CRITERION
5847 /* Compare two givs and sort the most desirable one for combinations first.
5848 This is used only in one qsort call below. */
5849
5850 static int
5851 giv_sort (x, y)
5852 struct induction **x, **y;
5853 {
5854 GIV_SORT_CRITERION (*x, *y);
5855
5856 return 0;
5857 }
5858 #endif
5859
5860 /* Check all pairs of givs for iv_class BL and see if any can be combined with
5861 any other. If so, point SAME to the giv combined with and set NEW_REG to
5862 be an expression (in terms of the other giv's DEST_REG) equivalent to the
5863 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
5864
5865 static void
5866 combine_givs (bl)
5867 struct iv_class *bl;
5868 {
5869 struct induction *g1, *g2, **giv_array;
5870 int i, j, giv_count, pass;
5871
5872 /* Count givs, because bl->giv_count is incorrect here. */
5873 giv_count = 0;
5874 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5875 giv_count++;
5876
5877 giv_array
5878 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
5879 i = 0;
5880 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5881 giv_array[i++] = g1;
5882
5883 #ifdef GIV_SORT_CRITERION
5884 /* Sort the givs if GIV_SORT_CRITERION is defined.
5885 This is usually defined for processors which lack
5886 negative register offsets, so that more givs may be combined. */
5887
5888 if (loop_dump_stream)
5889 fprintf (loop_dump_stream, "%d givs counted, sorting...\n", giv_count);
5890
5891 qsort (giv_array, giv_count, sizeof (struct induction *), giv_sort);
5892 #endif
5893
5894 for (i = 0; i < giv_count; i++)
5895 {
5896 g1 = giv_array[i];
5897 for (pass = 0; pass <= 1; pass++)
5898 for (j = 0; j < giv_count; j++)
5899 {
5900 g2 = giv_array[j];
5901 if (g1 != g2
5902 /* First try to combine with replaceable givs, then all givs. */
5903 && (g1->replaceable || pass == 1)
5904 /* If either has already been combined or is to be ignored, can't
5905 combine. */
5906 && ! g1->ignore && ! g2->ignore && ! g1->same && ! g2->same
5907 /* If something has been based on G2, G2 cannot itself be based
5908 on something else. */
5909 && ! g2->combined_with
5910 && combine_givs_p (g1, g2))
5911 {
5912 /* g2->new_reg set by `combine_givs_p' */
5913 g2->same = g1;
5914 g1->combined_with = 1;
5915
5916 /* If one of these givs is a DEST_REG that was only used
5917 once, by the other giv, this is actually a single use.
5918 The DEST_REG has the correct cost, while the other giv
5919 counts the REG use too often. */
5920 if (g2->giv_type == DEST_REG
5921 && n_times_used[REGNO (g2->dest_reg)] == 1
5922 && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
5923 g1->benefit = g2->benefit;
5924 else if (g1->giv_type != DEST_REG
5925 || n_times_used[REGNO (g1->dest_reg)] != 1
5926 || ! reg_mentioned_p (g1->dest_reg,
5927 PATTERN (g2->insn)))
5928 {
5929 g1->benefit += g2->benefit;
5930 g1->times_used += g2->times_used;
5931 }
5932 /* ??? The new final_[bg]iv_value code does a much better job
5933 of finding replaceable giv's, and hence this code may no
5934 longer be necessary. */
5935 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
5936 g1->benefit -= copy_cost;
5937 g1->lifetime += g2->lifetime;
5938
5939 if (loop_dump_stream)
5940 fprintf (loop_dump_stream, "giv at %d combined with giv at %d\n",
5941 INSN_UID (g2->insn), INSN_UID (g1->insn));
5942 }
5943 }
5944 }
5945 }
5946 \f
5947 /* Emit code before INSERT_BEFORE to set REG = B * M + A. */
5948
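/* Schematic use (names as in strength reduction elsewhere in this
   file): to give a reduced giv V its starting value before the loop,
   one would write

       emit_iv_add_mult (bl->initial_value, v->mult_val, v->add_val,
                         v->new_reg, loop_start);

   which emits insns computing
   new_reg = initial_value * mult_val + add_val just before the loop
   start.  */
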
5949 void
5950 emit_iv_add_mult (b, m, a, reg, insert_before)
5951 rtx b; /* initial value of basic induction variable */
5952 rtx m; /* multiplicative constant */
5953 rtx a; /* additive constant */
5954 rtx reg; /* destination register */
5955 rtx insert_before;
5956 {
5957 rtx seq;
5958 rtx result;
5959
5960 /* Prevent unexpected sharing of these rtx. */
5961 a = copy_rtx (a);
5962 b = copy_rtx (b);
5963
5964 /* Increase the lifetime of any invariants moved further in code. */
5965 update_reg_last_use (a, insert_before);
5966 update_reg_last_use (b, insert_before);
5967 update_reg_last_use (m, insert_before);
5968
5969 start_sequence ();
5970 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
5971 if (reg != result)
5972 emit_move_insn (reg, result);
5973 seq = gen_sequence ();
5974 end_sequence ();
5975
5976 emit_insn_before (seq, insert_before);
5977
5978 record_base_value (REGNO (reg), b);
5979 }
5980 \f
5981 /* Test whether A * B can be computed without
5982 an actual multiply insn. Value is 1 if so. */
5983
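/* For instance, a multiply by the constant 5 usually expands to a
   shift and an add (x * 5 == (x << 2) + x), which is short enough to
   count as cheap; a multiply by a non-constant always needs a real
   multiply insn, so we return 0 for it immediately.  */
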
5984 static int
5985 product_cheap_p (a, b)
5986 rtx a;
5987 rtx b;
5988 {
5989 int i;
5990 rtx tmp;
5991 struct obstack *old_rtl_obstack = rtl_obstack;
5992 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
5993 int win = 1;
5994
5995 /* If only one is constant, make it B. */
5996 if (GET_CODE (a) == CONST_INT)
5997 tmp = a, a = b, b = tmp;
5998
5999 /* If first constant, both constant, so don't need multiply. */
6000 if (GET_CODE (a) == CONST_INT)
6001 return 1;
6002
6003 /* If second not constant, neither is constant, so would need multiply. */
6004 if (GET_CODE (b) != CONST_INT)
6005 return 0;
6006
6007 /* One operand is constant, so we might not need a multiply insn. Generate
6008 the code for the multiply and see if a call, a multiply insn, or a long
6009 sequence of insns is generated. */
6010
6011 rtl_obstack = &temp_obstack;
6012 start_sequence ();
6013 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
6014 tmp = gen_sequence ();
6015 end_sequence ();
6016
6017 if (GET_CODE (tmp) == SEQUENCE)
6018 {
6019 if (XVEC (tmp, 0) == 0)
6020 win = 1;
6021 else if (XVECLEN (tmp, 0) > 3)
6022 win = 0;
6023 else
6024 for (i = 0; i < XVECLEN (tmp, 0); i++)
6025 {
6026 rtx insn = XVECEXP (tmp, 0, i);
6027
6028 if (GET_CODE (insn) != INSN
6029 || (GET_CODE (PATTERN (insn)) == SET
6030 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
6031 || (GET_CODE (PATTERN (insn)) == PARALLEL
6032 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
6033 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
6034 {
6035 win = 0;
6036 break;
6037 }
6038 }
6039 }
6040 else if (GET_CODE (tmp) == SET
6041 && GET_CODE (SET_SRC (tmp)) == MULT)
6042 win = 0;
6043 else if (GET_CODE (tmp) == PARALLEL
6044 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
6045 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
6046 win = 0;
6047
6048 /* Free any storage we obtained in generating this multiply and restore rtl
6049 allocation to its normal obstack. */
6050 obstack_free (&temp_obstack, storage);
6051 rtl_obstack = old_rtl_obstack;
6052
6053 return win;
6054 }
6055 \f
6056 /* Check to see if loop can be terminated by a "decrement and branch until
6057 zero" instruction. If so, add a REG_NONNEG note to the branch insn.
6058 Also try reversing an increment loop to a decrement loop
6059 to see if the optimization can be performed.
6060 Value is nonzero if optimization was performed. */
6061
6062 /* This is useful even if the architecture doesn't have such an insn,
6063 because it might change a loop which increments from 0 to n to a loop
6064 which decrements from n to 0. A loop that decrements to zero is usually
6065 faster than one that increments from zero. */
6066
6067 /* ??? This could be rewritten to use some of the loop unrolling procedures,
6068 such as approx_final_value, biv_total_increment, loop_iterations, and
6069 final_[bg]iv_value. */
6070
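/* Source-level sketch of the transformation: a loop such as

       for (i = 0; i < 10; i++)
         s += a[i];

   cannot be reversed, since I is also used to index A, but

       for (i = 0; i < 10; i++)
         s += k;

   whose counter is used only for counting can become

       for (i = 9; i >= 0; i--)
         s += k;

   so that a decrement-and-branch instruction can close the loop.  */
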
6071 static int
6072 check_dbra_loop (loop_end, insn_count, loop_start)
6073 rtx loop_end;
6074 int insn_count;
6075 rtx loop_start;
6076 {
6077 struct iv_class *bl;
6078 rtx reg;
6079 rtx jump_label;
6080 rtx final_value;
6081 rtx start_value;
6082 rtx new_add_val;
6083 rtx comparison;
6084 rtx before_comparison;
6085 rtx p;
6086
6087 /* If last insn is a conditional branch, and the insn before tests a
6088 register value, try to optimize it. Otherwise, we can't do anything. */
6089
6090 comparison = get_condition_for_loop (PREV_INSN (loop_end));
6091 if (comparison == 0)
6092 return 0;
6093
6094 /* Check all of the bivs to see if the compare uses one of them.
6095 Skip biv's set more than once because we can't guarantee that
6096 it will be zero on the last iteration. Also skip if the biv is
6097 used between its update and the test insn. */
6098
6099 for (bl = loop_iv_list; bl; bl = bl->next)
6100 {
6101 if (bl->biv_count == 1
6102 && bl->biv->dest_reg == XEXP (comparison, 0)
6103 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
6104 PREV_INSN (PREV_INSN (loop_end))))
6105 break;
6106 }
6107
6108 if (! bl)
6109 return 0;
6110
6111 /* Look for the case where the basic induction variable is always
6112 nonnegative, and equals zero on the last iteration.
6113 In this case, add a reg_note REG_NONNEG, which allows the
6114 m68k DBRA instruction to be used. */
6115
6116 if (((GET_CODE (comparison) == GT
6117 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6118 && INTVAL (XEXP (comparison, 1)) == -1)
6119 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
6120 && GET_CODE (bl->biv->add_val) == CONST_INT
6121 && INTVAL (bl->biv->add_val) < 0)
6122 {
6123 /* Initial value must be greater than 0,
6124 init_val % -dec_value == 0 to ensure that it equals zero on
6125 the last iteration */
6126
6127 if (GET_CODE (bl->initial_value) == CONST_INT
6128 && INTVAL (bl->initial_value) > 0
6129 && (INTVAL (bl->initial_value)
6130 % (-INTVAL (bl->biv->add_val))) == 0)
6131 {
6132 /* register always nonnegative, add REG_NOTE to branch */
6133 REG_NOTES (PREV_INSN (loop_end))
6134 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6135 REG_NOTES (PREV_INSN (loop_end)));
6136 bl->nonneg = 1;
6137
6138 return 1;
6139 }
6140
6141 /* If the decrement is 1 and the value was tested as >= 0 before
6142 the loop, then we can safely optimize. */
6143 for (p = loop_start; p; p = PREV_INSN (p))
6144 {
6145 if (GET_CODE (p) == CODE_LABEL)
6146 break;
6147 if (GET_CODE (p) != JUMP_INSN)
6148 continue;
6149
6150 before_comparison = get_condition_for_loop (p);
6151 if (before_comparison
6152 && XEXP (before_comparison, 0) == bl->biv->dest_reg
6153 && GET_CODE (before_comparison) == LT
6154 && XEXP (before_comparison, 1) == const0_rtx
6155 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
6156 && INTVAL (bl->biv->add_val) == -1)
6157 {
6158 REG_NOTES (PREV_INSN (loop_end))
6159 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6160 REG_NOTES (PREV_INSN (loop_end)));
6161 bl->nonneg = 1;
6162
6163 return 1;
6164 }
6165 }
6166 }
6167 else if (num_mem_sets <= 1)
6168 {
6169 /* Try to change inc to dec, so can apply above optimization. */
6170 /* Can do this if:
6171 all registers modified are induction variables or invariant,
6172 all memory references have non-overlapping addresses
6173 (obviously true if only one write)
6174 allow 2 insns for the compare/jump at the end of the loop. */
6175 /* Also, we must avoid any instructions which use both the reversed
6176 biv and another biv. Such instructions will fail if the loop is
6177 reversed. We meet this condition by requiring that either
6178 no_use_except_counting is true, or else that there is only
6179 one biv. */
6180 int num_nonfixed_reads = 0;
6181 /* 1 if the iteration var is used only to count iterations. */
6182 int no_use_except_counting = 0;
6183 /* 1 if the loop has no memory store, or it has a single memory store
6184 which is reversible. */
6185 int reversible_mem_store = 1;
6186
6187 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6188 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6189 num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
6190
6191 if (bl->giv_count == 0
6192 && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
6193 {
6194 rtx bivreg = regno_reg_rtx[bl->regno];
6195
6196 /* If there are no givs for this biv, and the only exit is the
6197 fall through at the end of the loop, then
6198 see if perhaps there are no uses except to count. */
6199 no_use_except_counting = 1;
6200 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6201 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6202 {
6203 rtx set = single_set (p);
6204
6205 if (set && GET_CODE (SET_DEST (set)) == REG
6206 && REGNO (SET_DEST (set)) == bl->regno)
6207 /* An insn that sets the biv is okay. */
6208 ;
6209 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
6210 || p == prev_nonnote_insn (loop_end))
6211 /* Don't bother about the end test. */
6212 ;
6213 else if (reg_mentioned_p (bivreg, PATTERN (p)))
6214 /* Any other use of the biv is no good. */
6215 {
6216 no_use_except_counting = 0;
6217 break;
6218 }
6219 }
6220 }
6221
6222 /* If the loop has a single store, and the destination address is
6223 invariant, then we can't reverse the loop, because this address
6224 might then have the wrong value at loop exit.
6225 This would work if the source were invariant also; however, in that
6226 case, the insn should have been moved out of the loop. */
6227
6228 if (num_mem_sets == 1)
6229 reversible_mem_store
6230 = (! unknown_address_altered
6231 && ! invariant_p (XEXP (loop_store_mems[0], 0)));
6232
6233 /* This code only acts for innermost loops. Also it simplifies
6234 the memory address check by only reversing loops with
6235 zero or one memory access.
6236 Two memory accesses could involve parts of the same array,
6237 and that can't be reversed. */
6238
6239 if (num_nonfixed_reads <= 1
6240 && !loop_has_call
6241 && !loop_has_volatile
6242 && reversible_mem_store
6243 && (no_use_except_counting
6244 || ((bl->giv_count + bl->biv_count + num_mem_sets
6245 + num_movables + 2 == insn_count)
6246 && (bl == loop_iv_list && bl->next == 0))))
6247 {
6248 rtx tem;
6249
6250 /* Loop can be reversed. */
6251 if (loop_dump_stream)
6252 fprintf (loop_dump_stream, "Can reverse loop\n");
6253
6254 /* Now check other conditions:
6255
6256 The increment must be a constant, as must the initial value,
6257 and the comparison code must be LT.
6258
6259 This test can probably be improved since +/- 1 in the constant
6260 can be obtained by changing LT to LE and vice versa; this is
6261 confusing. */
6262
6263 if (comparison
6264 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6265 /* LE gets turned into LT */
6266 && GET_CODE (comparison) == LT
6267 && GET_CODE (bl->initial_value) == CONST_INT)
6268 {
6269 HOST_WIDE_INT add_val, comparison_val;
6270 rtx initial_value;
6271
6272 add_val = INTVAL (bl->biv->add_val);
6273 comparison_val = INTVAL (XEXP (comparison, 1));
6274 initial_value = bl->initial_value;
6275
6276 /* Normalize the initial value if it is an integer and
6277 has no other use except as a counter. This will allow
6278 a few more loops to be reversed. */
6279 if (no_use_except_counting
6280 && GET_CODE (initial_value) == CONST_INT)
6281 {
6282 comparison_val = comparison_val - INTVAL (bl->initial_value);
6283 initial_value = const0_rtx;
6284 }
6285
6286 /* If the initial value is not zero, or if the comparison
6287 value is not an exact multiple of the increment, then we
6288 cannot reverse this loop. */
6289 if (initial_value != const0_rtx
6290 || (comparison_val % add_val) != 0)
6291 return 0;
6292
6293 /* Reset these in case we normalized the initial value
6294 and comparison value above. */
6295 bl->initial_value = initial_value;
6296 XEXP (comparison, 1) = GEN_INT (comparison_val);
6297
6298 /* Register will always be nonnegative, with value
6299 0 on last iteration if loop reversed */
6300
6301 /* Save some info needed to produce the new insns. */
6302 reg = bl->biv->dest_reg;
6303 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
6304 if (jump_label == pc_rtx)
6305 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
6306 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
6307
6308 final_value = XEXP (comparison, 1);
6309 start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
6310 - INTVAL (bl->biv->add_val));
6311
6312 /* Initialize biv to start_value before loop start.
6313 The old initializing insn will be deleted as a
6314 dead store by flow.c. */
6315 emit_insn_before (gen_move_insn (reg, start_value), loop_start);
6316
6317 /* Add insn to decrement register, and delete insn
6318 that incremented the register. */
6319 p = emit_insn_before (gen_add2_insn (reg, new_add_val),
6320 bl->biv->insn);
6321 delete_insn (bl->biv->insn);
6322
6323 /* Update biv info to reflect its new status. */
6324 bl->biv->insn = p;
6325 bl->initial_value = start_value;
6326 bl->biv->add_val = new_add_val;
6327
6328 /* Inc LABEL_NUSES so that delete_insn will
6329 not delete the label. */
6330 LABEL_NUSES (XEXP (jump_label, 0)) ++;
6331
6332 /* Emit an insn after the end of the loop to set the biv's
6333 proper exit value if it is used anywhere outside the loop. */
6334 if ((REGNO_LAST_UID (bl->regno)
6335 != INSN_UID (PREV_INSN (PREV_INSN (loop_end))))
6336 || ! bl->init_insn
6337 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
6338 emit_insn_after (gen_move_insn (reg, final_value),
6339 loop_end);
6340
6341 /* Delete compare/branch at end of loop. */
6342 delete_insn (PREV_INSN (loop_end));
6343 delete_insn (PREV_INSN (loop_end));
6344
6345 /* Add new compare/branch insn at end of loop. */
6346 start_sequence ();
6347 emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
6348 GET_MODE (reg), 0, 0);
6349 emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
6350 tem = gen_sequence ();
6351 end_sequence ();
6352 emit_jump_insn_before (tem, loop_end);
6353
6354 for (tem = PREV_INSN (loop_end);
6355 tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
6356 ;
6357 if (tem)
6358 {
6359 JUMP_LABEL (tem) = XEXP (jump_label, 0);
6360
6361 /* Increment of LABEL_NUSES done above. */
6362 /* Register is now always nonnegative,
6363 so add REG_NONNEG note to the branch. */
6364 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6365 REG_NOTES (tem));
6366 }
6367
6368 bl->nonneg = 1;
6369
6370 /* Mark that this biv has been reversed. Each giv which depends
6371 on this biv, and which is also live past the end of the loop,
6372 will have to be fixed up. */
6373
6374 bl->reversed = 1;
6375
6376 if (loop_dump_stream)
6377 fprintf (loop_dump_stream,
6378 "Reversed loop and added reg_nonneg\n");
6379
6380 return 1;
6381 }
6382 }
6383 }
6384
6385 return 0;
6386 }
6387 \f
6388 /* Verify whether the biv BL appears to be eliminable,
6389 based on the insns in the loop that refer to it.
6390 LOOP_START is the first insn of the loop, and END is the end insn.
6391
6392 If ELIMINATE_P is non-zero, actually do the elimination.
6393
6394 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
6395 determine whether invariant insns should be placed inside or at the
6396 start of the loop. */
6397
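/* For example (a sketch): if biv I is used only in the exit test
   I < N and in givs such as P = BASE + I*4, then after the givs are
   reduced the exit test can be rewritten against a reduced giv
   (compare P with BASE + N*4), and I need never be computed.  */
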
6398 static int
6399 maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
6400 struct iv_class *bl;
6401 rtx loop_start;
6402 rtx end;
6403 int eliminate_p;
6404 int threshold, insn_count;
6405 {
6406 rtx reg = bl->biv->dest_reg;
6407 rtx p;
6408
6409 /* Scan all insns in the loop, stopping if we find one that uses the
6410 biv in a way that we cannot eliminate. */
6411
6412 for (p = loop_start; p != end; p = NEXT_INSN (p))
6413 {
6414 enum rtx_code code = GET_CODE (p);
6415 rtx where = threshold >= insn_count ? loop_start : p;
6416
6417 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
6418 && reg_mentioned_p (reg, PATTERN (p))
6419 && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
6420 {
6421 if (loop_dump_stream)
6422 fprintf (loop_dump_stream,
6423 "Cannot eliminate biv %d: biv used in insn %d.\n",
6424 bl->regno, INSN_UID (p));
6425 break;
6426 }
6427 }
6428
6429 if (p == end)
6430 {
6431 if (loop_dump_stream)
6432 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
6433 bl->regno, eliminate_p ? "was" : "can be");
6434 return 1;
6435 }
6436
6437 return 0;
6438 }
6439 \f
6440 /* If BL appears in X (part of the pattern of INSN), see if we can
6441 eliminate its use. If so, return 1. If not, return 0.
6442
6443 If BIV does not appear in X, return 1.
6444
6445 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
6446 where extra insns should be added. Depending on how many items have been
6447 moved out of the loop, it will either be before INSN or at the start of
6448 the loop. */
6449
6450 static int
6451 maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
6452 rtx x, insn;
6453 struct iv_class *bl;
6454 int eliminate_p;
6455 rtx where;
6456 {
6457 enum rtx_code code = GET_CODE (x);
6458 rtx reg = bl->biv->dest_reg;
6459 enum machine_mode mode = GET_MODE (reg);
6460 struct induction *v;
6461 rtx arg, tem;
6462 #ifdef HAVE_cc0
6463 rtx new;
6464 #endif
6465 int arg_operand;
6466 char *fmt;
6467 int i, j;
6468
6469 switch (code)
6470 {
6471 case REG:
6472 /* If we haven't already been able to do something with this BIV,
6473 we can't eliminate it. */
6474 if (x == reg)
6475 return 0;
6476 return 1;
6477
6478 case SET:
6479 /* If this sets the BIV, it is not a problem. */
6480 if (SET_DEST (x) == reg)
6481 return 1;
6482
6483 /* If this is an insn that defines a giv, it is also ok because
6484 it will go away when the giv is reduced. */
6485 for (v = bl->giv; v; v = v->next_iv)
6486 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
6487 return 1;
6488
6489 #ifdef HAVE_cc0
6490 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
6491 {
6492 /* Can replace with any giv that was reduced and
6493 that has (MULT_VAL != 0) and (ADD_VAL == 0).
6494 Require a constant for MULT_VAL, so we know it's nonzero.
6495 ??? We disable this optimization to avoid potential
6496 overflows. */
6497
6498 for (v = bl->giv; v; v = v->next_iv)
6499 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6500 && v->add_val == const0_rtx
6501 && ! v->ignore && ! v->maybe_dead && v->always_computable
6502 && v->mode == mode
6503 && 0)
6504 {
6505 /* If the giv V had the auto-inc address optimization applied
6506 to it, and INSN occurs between the giv insn and the biv
6507 insn, then we must adjust the value used here.
6508 This is rare, so we don't bother to do so. */
6509 if (v->auto_inc_opt
6510 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6511 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6512 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6513 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6514 continue;
6515
6516 if (! eliminate_p)
6517 return 1;
6518
6519 /* If the giv has the opposite direction of change,
6520 then reverse the comparison. */
6521 if (INTVAL (v->mult_val) < 0)
6522 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
6523 const0_rtx, v->new_reg);
6524 else
6525 new = v->new_reg;
6526
6527 /* We can probably test that giv's reduced reg. */
6528 if (validate_change (insn, &SET_SRC (x), new, 0))
6529 return 1;
6530 }
6531
6532 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
6533 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
6534 Require a constant for MULT_VAL, so we know it's nonzero.
6535 ??? Do this only if ADD_VAL is a pointer to avoid a potential
6536 overflow problem. */
6537
6538 for (v = bl->giv; v; v = v->next_iv)
6539 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6540 && ! v->ignore && ! v->maybe_dead && v->always_computable
6541 && v->mode == mode
6542 && (GET_CODE (v->add_val) == SYMBOL_REF
6543 || GET_CODE (v->add_val) == LABEL_REF
6544 || GET_CODE (v->add_val) == CONST
6545 || (GET_CODE (v->add_val) == REG
6546 && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
6547 {
6548 /* If the giv V had the auto-inc address optimization applied
6549 to it, and INSN occurs between the giv insn and the biv
6550 insn, then we must adjust the value used here.
6551 This is rare, so we don't bother to do so. */
6552 if (v->auto_inc_opt
6553 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6554 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6555 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6556 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6557 continue;
6558
6559 if (! eliminate_p)
6560 return 1;
6561
6562 /* If the giv has the opposite direction of change,
6563 then reverse the comparison. */
6564 if (INTVAL (v->mult_val) < 0)
6565 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
6566 v->new_reg);
6567 else
6568 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
6569 copy_rtx (v->add_val));
6570
6571 /* Replace biv with the giv's reduced register. */
6572 update_reg_last_use (v->add_val, insn);
6573 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6574 return 1;
6575
6576 /* Insn doesn't support that constant or invariant. Copy it
6577 into a register (it will be a loop invariant.) */
6578 tem = gen_reg_rtx (GET_MODE (v->new_reg));
6579
6580 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
6581 where);
6582
6583 /* Substitute the new register for its invariant value in
6584 the compare expression. */
6585 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
6586 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6587 return 1;
6588 }
6589 }
6590 #endif
6591 break;
6592
6593 case COMPARE:
6594 case EQ: case NE:
6595 case GT: case GE: case GTU: case GEU:
6596 case LT: case LE: case LTU: case LEU:
6597 /* See if either argument is the biv. */
6598 if (XEXP (x, 0) == reg)
6599 arg = XEXP (x, 1), arg_operand = 1;
6600 else if (XEXP (x, 1) == reg)
6601 arg = XEXP (x, 0), arg_operand = 0;
6602 else
6603 break;
6604
6605 if (CONSTANT_P (arg))
6606 {
6607 /* First try to replace with any giv that has constant positive
6608 mult_val and constant add_val. We might be able to support
6609 negative mult_val, but it seems complex to do it in general. */
6610
6611 for (v = bl->giv; v; v = v->next_iv)
6612 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6613 && (GET_CODE (v->add_val) == SYMBOL_REF
6614 || GET_CODE (v->add_val) == LABEL_REF
6615 || GET_CODE (v->add_val) == CONST
6616 || (GET_CODE (v->add_val) == REG
6617 && REGNO_POINTER_FLAG (REGNO (v->add_val))))
6618 && ! v->ignore && ! v->maybe_dead && v->always_computable
6619 && v->mode == mode)
6620 {
6621 /* If the giv V had the auto-inc address optimization applied
6622 to it, and INSN occurs between the giv insn and the biv
6623 insn, then we must adjust the value used here.
6624 This is rare, so we don't bother to do so. */
6625 if (v->auto_inc_opt
6626 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6627 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6628 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6629 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6630 continue;
6631
6632 if (! eliminate_p)
6633 return 1;
6634
6635 /* Replace biv with the giv's reduced reg. */
6636 XEXP (x, 1-arg_operand) = v->new_reg;
6637
6638 /* If all constants are actually constant integers and
6639 the derived constant can be directly placed in the COMPARE,
6640 do so. */
6641 if (GET_CODE (arg) == CONST_INT
6642 && GET_CODE (v->mult_val) == CONST_INT
6643 && GET_CODE (v->add_val) == CONST_INT
6644 && validate_change (insn, &XEXP (x, arg_operand),
6645 GEN_INT (INTVAL (arg)
6646 * INTVAL (v->mult_val)
6647 + INTVAL (v->add_val)), 0))
6648 return 1;
6649
6650 /* Otherwise, load it into a register. */
6651 tem = gen_reg_rtx (mode);
6652 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6653 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
6654 return 1;
6655
6656 /* If that failed, put back the change we made above. */
6657 XEXP (x, 1-arg_operand) = reg;
6658 }
6659
6660 /* Look for giv with positive constant mult_val and nonconst add_val.
6661 Insert insns to calculate new compare value.
6662 ??? Turn this off due to possible overflow. */
6663
6664 for (v = bl->giv; v; v = v->next_iv)
6665 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6666 && ! v->ignore && ! v->maybe_dead && v->always_computable
6667 && v->mode == mode
6668 && 0)
6669 {
6670 rtx tem;
6671
6672 /* If the giv V had the auto-inc address optimization applied
6673 to it, and INSN occurs between the giv insn and the biv
6674 insn, then we must adjust the value used here.
6675 This is rare, so we don't bother to do so. */
6676 if (v->auto_inc_opt
6677 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6678 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6679 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6680 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6681 continue;
6682
6683 if (! eliminate_p)
6684 return 1;
6685
6686 tem = gen_reg_rtx (mode);
6687
6688 /* Replace biv with giv's reduced register. */
6689 validate_change (insn, &XEXP (x, 1 - arg_operand),
6690 v->new_reg, 1);
6691
6692 /* Compute value to compare against. */
6693 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6694 /* Use it in this insn. */
6695 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6696 if (apply_change_group ())
6697 return 1;
6698 }
6699 }
6700 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
6701 {
6702 if (invariant_p (arg) == 1)
6703 {
6704 /* Look for giv with constant positive mult_val and nonconst
6705 add_val. Insert insns to compute new compare value.
6706 ??? Turn this off due to possible overflow. */
6707
6708 for (v = bl->giv; v; v = v->next_iv)
6709 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6710 && ! v->ignore && ! v->maybe_dead && v->always_computable
6711 && v->mode == mode
6712 && 0)
6713 {
6714 rtx tem;
6715
6716 /* If the giv V had the auto-inc address optimization applied
6717 to it, and INSN occurs between the giv insn and the biv
6718 insn, then we must adjust the value used here.
6719 This is rare, so we don't bother to do so. */
6720 if (v->auto_inc_opt
6721 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6722 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6723 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6724 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6725 continue;
6726
6727 if (! eliminate_p)
6728 return 1;
6729
6730 tem = gen_reg_rtx (mode);
6731
6732 /* Replace biv with giv's reduced register. */
6733 validate_change (insn, &XEXP (x, 1 - arg_operand),
6734 v->new_reg, 1);
6735
6736 /* Compute value to compare against. */
6737 emit_iv_add_mult (arg, v->mult_val, v->add_val,
6738 tem, where);
6739 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6740 if (apply_change_group ())
6741 return 1;
6742 }
6743 }
6744
6745 /* This code has problems. Basically, when deciding whether we
6746 will eliminate BL, you can't know whether a particular giv
6747 of ARG will be reduced. If it isn't going to be reduced,
6748 we can't eliminate BL. We can try forcing it to be reduced,
6749 but that can generate poor code.
6750
6751 The problem is that the benefit of reducing TV, below, should
6752 be increased if BL can actually be eliminated, but this means
6753 we might have to do a topological sort of the order in which
6754 we try to process bivs. It doesn't seem worthwhile to do
6755 this sort of thing now. */
6756
6757 #if 0
6758 /* Otherwise the reg compared with had better be a biv. */
6759 if (GET_CODE (arg) != REG
6760 || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
6761 return 0;
6762
6763 /* Look for a pair of givs, one for each biv,
6764 with identical coefficients. */
6765 for (v = bl->giv; v; v = v->next_iv)
6766 {
6767 struct induction *tv;
6768
6769 if (v->ignore || v->maybe_dead || v->mode != mode)
6770 continue;
6771
6772 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
6773 if (! tv->ignore && ! tv->maybe_dead
6774 && rtx_equal_p (tv->mult_val, v->mult_val)
6775 && rtx_equal_p (tv->add_val, v->add_val)
6776 && tv->mode == mode)
6777 {
6778 /* If the giv V had the auto-inc address optimization applied
6779 to it, and INSN occurs between the giv insn and the biv
6780 insn, then we must adjust the value used here.
6781 This is rare, so we don't bother to do so. */
6782 if (v->auto_inc_opt
6783 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6784 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6785 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6786 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6787 continue;
6788
6789 if (! eliminate_p)
6790 return 1;
6791
6792 /* Replace biv with its giv's reduced reg. */
6793 XEXP (x, 1-arg_operand) = v->new_reg;
6794 /* Replace other operand with the other giv's
6795 reduced reg. */
6796 XEXP (x, arg_operand) = tv->new_reg;
6797 return 1;
6798 }
6799 }
6800 #endif
6801 }
6802
6803 /* If we get here, the biv can't be eliminated. */
6804 return 0;
6805
6806 case MEM:
6807 /* If this address is a DEST_ADDR giv, it doesn't matter if the
6808 biv is used in it, since it will be replaced. */
6809 for (v = bl->giv; v; v = v->next_iv)
6810 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
6811 return 1;
6812 break;
6813
6814 default:
6815 break;
6816 }
6817
6818 /* See if any subexpression fails elimination. */
6819 fmt = GET_RTX_FORMAT (code);
6820 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6821 {
6822 switch (fmt[i])
6823 {
6824 case 'e':
6825 if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
6826 eliminate_p, where))
6827 return 0;
6828 break;
6829
6830 case 'E':
6831 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6832 if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
6833 eliminate_p, where))
6834 return 0;
6835 break;
6836 }
6837 }
6838
6839 return 1;
6840 }
6841 \f
6842 /* Return nonzero if the last use of REG
6843 is in an insn following INSN in the same basic block. */
6844
6845 static int
6846 last_use_this_basic_block (reg, insn)
6847 rtx reg;
6848 rtx insn;
6849 {
6850 rtx n;
6851 for (n = insn;
6852 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
6853 n = NEXT_INSN (n))
6854 {
6855 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
6856 return 1;
6857 }
6858 return 0;
6859 }
6860 \f
6861 /* Called via `note_stores' to record the initial value of a biv. Here we
6862 just record the location of the set and process it later. */
6863
6864 static void
6865 record_initial (dest, set)
6866 rtx dest;
6867 rtx set;
6868 {
6869 struct iv_class *bl;
6870
6871 if (GET_CODE (dest) != REG
6872 || REGNO (dest) >= max_reg_before_loop
6873 || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
6874 return;
6875
6876 bl = reg_biv_class[REGNO (dest)];
6877
6878 /* If this is the first set found, record it. */
6879 if (bl->init_insn == 0)
6880 {
6881 bl->init_insn = note_insn;
6882 bl->init_set = set;
6883 }
6884 }
6885 \f
6886 /* If any of the registers in X are "old" and currently have a last use earlier
6887 than INSN, update them to have a last use of INSN. Their actual last use
6888 will be the previous insn but it will not have a valid uid_luid so we can't
6889 use it. */
6890
6891 static void
6892 update_reg_last_use (x, insn)
6893 rtx x;
6894 rtx insn;
6895 {
6896 /* Check for the case where INSN does not have a valid luid. In this case,
6897 there is no need to modify the regno_last_uid, as this can only happen
6898 when code is inserted after the loop_end to set a pseudo's final value,
6899 and hence this insn will never be the last use of x. */
6900 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
6901 && INSN_UID (insn) < max_uid_for_loop
6902 && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
6903 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
6904 else
6905 {
6906 register int i, j;
6907 register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
6908 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6909 {
6910 if (fmt[i] == 'e')
6911 update_reg_last_use (XEXP (x, i), insn);
6912 else if (fmt[i] == 'E')
6913 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6914 update_reg_last_use (XVECEXP (x, i, j), insn);
6915 }
6916 }
6917 }
6918 \f
6919 /* Given a jump insn JUMP, return the condition that will cause it to branch
6920 to its JUMP_LABEL. If the condition cannot be understood, or is an
6921 inequality floating-point comparison which needs to be reversed, 0 will
6922 be returned.
6923
6924 If EARLIEST is non-zero, it is a pointer to a place where the earliest
6925 insn used in locating the condition was found. If a replacement test
6926 of the condition is desired, it should be placed in front of that
6927 insn and we will be sure that the inputs are still valid.
6928
6929 The condition will be returned in a canonical form to simplify testing by
6930 callers. Specifically:
6931
6932 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
6933 (2) Both operands will be machine operands; (cc0) will have been replaced.
6934 (3) If an operand is a constant, it will be the second operand.
6935 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
6936 for GE, GEU, and LEU. */
6937
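/* For example (the register number is illustrative), a branch taken
   when (le (reg 70) (const_int 4)) holds is returned as
   (lt (reg 70) (const_int 5)), per rule (4) above.  */
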
6938 rtx
6939 get_condition (jump, earliest)
6940 rtx jump;
6941 rtx *earliest;
6942 {
6943 enum rtx_code code;
6944 rtx prev = jump;
6945 rtx set;
6946 rtx tem;
6947 rtx op0, op1;
6948 int reverse_code = 0;
6949 int did_reverse_condition = 0;
6950
6951 /* If this is not a standard conditional jump, we can't parse it. */
6952 if (GET_CODE (jump) != JUMP_INSN
6953 || ! condjump_p (jump) || simplejump_p (jump))
6954 return 0;
6955
6956 code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
6957 op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
6958 op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
6959
6960 if (earliest)
6961 *earliest = jump;
6962
6963 /* If this branches to JUMP_LABEL when the condition is false, reverse
6964 the condition. */
6965 if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
6966 && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
6967 code = reverse_condition (code), did_reverse_condition ^= 1;
6968
6969 /* If we are comparing a register with zero, see if the register is set
6970         in the previous insn to a COMPARE or a comparison operation.  Perform
6971         the same tests (as a function of STORE_FLAG_VALUE) that
6972         find_comparison_args in cse.c performs.  */
6973
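  /* For example (hypothetical insns, for illustration only): if the jump
     tests (ne (reg 70) (const_int 0)) and the previous insn is
     (set (reg 70) (lt (reg 65) (reg 66))), the loop below replaces the
     condition with (lt (reg 65) (reg 66)) itself.  */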
6974 while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
6975 {
6976 /* Set non-zero when we find something of interest. */
6977 rtx x = 0;
6978
6979 #ifdef HAVE_cc0
6980 /* If comparison with cc0, import actual comparison from compare
6981 insn. */
6982 if (op0 == cc0_rtx)
6983 {
6984 if ((prev = prev_nonnote_insn (prev)) == 0
6985 || GET_CODE (prev) != INSN
6986 || (set = single_set (prev)) == 0
6987 || SET_DEST (set) != cc0_rtx)
6988 return 0;
6989
6990 op0 = SET_SRC (set);
6991 op1 = CONST0_RTX (GET_MODE (op0));
6992 if (earliest)
6993 *earliest = prev;
6994 }
6995 #endif
6996
6997 /* If this is a COMPARE, pick up the two things being compared. */
6998 if (GET_CODE (op0) == COMPARE)
6999 {
7000 op1 = XEXP (op0, 1);
7001 op0 = XEXP (op0, 0);
7002 continue;
7003 }
7004 else if (GET_CODE (op0) != REG)
7005 break;
7006
7007 /* Go back to the previous insn. Stop if it is not an INSN. We also
7008 stop if it isn't a single set or if it has a REG_INC note because
7009 we don't want to bother dealing with it. */
7010
7011 if ((prev = prev_nonnote_insn (prev)) == 0
7012 || GET_CODE (prev) != INSN
7013 || FIND_REG_INC_NOTE (prev, 0)
7014 || (set = single_set (prev)) == 0)
7015 break;
7016
7017 /* If this is setting OP0, get what it sets it to if it looks
7018 relevant. */
7019 if (rtx_equal_p (SET_DEST (set), op0))
7020 {
7021 enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
7022
7023 if ((GET_CODE (SET_SRC (set)) == COMPARE
7024 || (((code == NE
7025 || (code == LT
7026 && GET_MODE_CLASS (inner_mode) == MODE_INT
7027 && (GET_MODE_BITSIZE (inner_mode)
7028 <= HOST_BITS_PER_WIDE_INT)
7029 && (STORE_FLAG_VALUE
7030 & ((HOST_WIDE_INT) 1
7031 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7032 #ifdef FLOAT_STORE_FLAG_VALUE
7033 || (code == LT
7034 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7035 && FLOAT_STORE_FLAG_VALUE < 0)
7036 #endif
7037 ))
7038 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')))
7039 x = SET_SRC (set);
7040 else if (((code == EQ
7041 || (code == GE
7042 && (GET_MODE_BITSIZE (inner_mode)
7043 <= HOST_BITS_PER_WIDE_INT)
7044 && GET_MODE_CLASS (inner_mode) == MODE_INT
7045 && (STORE_FLAG_VALUE
7046 & ((HOST_WIDE_INT) 1
7047 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7048 #ifdef FLOAT_STORE_FLAG_VALUE
7049 || (code == GE
7050 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7051 && FLOAT_STORE_FLAG_VALUE < 0)
7052 #endif
7053 ))
7054 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')
7055 {
7056 /* We might have reversed a LT to get a GE here. But this wasn't
7057 actually the comparison of data, so we don't flag that we
7058 have had to reverse the condition. */
7059 did_reverse_condition ^= 1;
7060 reverse_code = 1;
7061 x = SET_SRC (set);
7062 }
7063 else
7064 break;
7065 }
7066
7067 else if (reg_set_p (op0, prev))
7068 /* If this sets OP0, but not directly, we have to give up. */
7069 break;
7070
7071 if (x)
7072 {
7073 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
7074 code = GET_CODE (x);
7075 if (reverse_code)
7076 {
7077 code = reverse_condition (code);
7078 did_reverse_condition ^= 1;
7079 reverse_code = 0;
7080 }
7081
7082 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
7083 if (earliest)
7084 *earliest = prev;
7085 }
7086 }
7087
7088   /* If the constant is first, put it last.  */
7089 if (CONSTANT_P (op0))
7090 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
7091
7092 /* If OP0 is the result of a comparison, we weren't able to find what
7093 was really being compared, so fail. */
7094 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
7095 return 0;
7096
7097 /* Canonicalize any ordered comparison with integers involving equality
7098 if we can do computations in the relevant mode and we do not
7099 overflow. */
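  /* For instance (illustrative values): in QImode, (le x 5) becomes
     (lt x 6), but (le x 127) is left alone since 128 is not representable,
     and (leu x 255) is kept because 255 is already the mode mask.  */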
7100
7101 if (GET_CODE (op1) == CONST_INT
7102 && GET_MODE (op0) != VOIDmode
7103 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
7104 {
7105 HOST_WIDE_INT const_val = INTVAL (op1);
7106 unsigned HOST_WIDE_INT uconst_val = const_val;
7107 unsigned HOST_WIDE_INT max_val
7108 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
7109
7110 switch (code)
7111 {
7112 case LE:
7113 if (const_val != max_val >> 1)
7114 code = LT, op1 = GEN_INT (const_val + 1);
7115 break;
7116
7117 /* When cross-compiling, const_val might be sign-extended from
7118 	 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
7119 case GE:
7120 if ((const_val & max_val)
7121 != (((HOST_WIDE_INT) 1
7122 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
7123 code = GT, op1 = GEN_INT (const_val - 1);
7124 break;
7125
7126 case LEU:
7127 if (uconst_val < max_val)
7128 code = LTU, op1 = GEN_INT (uconst_val + 1);
7129 break;
7130
7131 case GEU:
7132 if (uconst_val != 0)
7133 code = GTU, op1 = GEN_INT (uconst_val - 1);
7134 break;
7135
7136 default:
7137 break;
7138 }
7139 }
7140
7141 /* If this was floating-point and we reversed anything other than an
7142 EQ or NE, return zero. */
7143 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
7144 && did_reverse_condition && code != NE && code != EQ
7145 && ! flag_fast_math
7146 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
7147 return 0;
7148
7149 #ifdef HAVE_cc0
7150 /* Never return CC0; return zero instead. */
7151 if (op0 == cc0_rtx)
7152 return 0;
7153 #endif
7154
7155 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
7156 }
7157
7158 /* Similar to the routine above, except that we also put an invariant last
7159 unless both operands are invariants. */
7160
7161 rtx
7162 get_condition_for_loop (x)
7163 rtx x;
7164 {
7165 rtx comparison = get_condition (x, NULL_PTR);
7166
7167 if (comparison == 0
7168 || ! invariant_p (XEXP (comparison, 0))
7169 || invariant_p (XEXP (comparison, 1)))
7170 return comparison;
7171
7172 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
7173 XEXP (comparison, 1), XEXP (comparison, 0));
7174 }
7175
7176 #ifdef HAIFA
7177 /* Analyze a loop in order to instrument it with the use of a count register.
7178    LOOP_START and LOOP_END are the first and last insns of the loop.
7179 This function works in cooperation with insert_bct ().
7180 loop_can_insert_bct[loop_num] is set according to whether the optimization
7181 is applicable to the loop. When it is applicable, the following variables
7182 are also set:
7183 loop_start_value[loop_num]
7184 loop_comparison_value[loop_num]
7185 loop_increment[loop_num]
7186 loop_comparison_code[loop_num] */
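
/* As a concrete illustration (hypothetical source, for exposition only):
   for a loop such as "for (i = 0; i < n; i += 4) ...", a successful
   analysis records loop_start_value = 0, loop_comparison_value = n,
   loop_increment = 4 and loop_comparison_code = LT for this loop.  */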
7187
7188 #ifdef HAVE_decrement_and_branch_on_count
7189 static void
7190 analyze_loop_iterations (loop_start, loop_end)
7191 rtx loop_start, loop_end;
7192 {
7193 rtx comparison, comparison_value;
7194 rtx iteration_var, initial_value, increment;
7195 enum rtx_code comparison_code;
7196
7197 rtx last_loop_insn;
7198 rtx insn;
7199 int i;
7200
7201   /* Mode of the loop variable.  */
7202 enum machine_mode original_mode;
7203
7204 /* find the number of the loop */
7205 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7206
7207   /* We change our mind only when we are sure that the loop will be
       instrumented.  */
7208 loop_can_insert_bct[loop_num] = 0;
7209
7210   /* Is the optimization suppressed?  */
7211   if (! flag_branch_on_count_reg)
7212 return;
7213
7214   /* Make sure that the count register is not already in use.  */
7215 if (loop_used_count_register[loop_num]){
7216 if (loop_dump_stream)
7217 fprintf (loop_dump_stream,
7218 "analyze_loop_iterations %d: BCT instrumentation failed: count register already in use\n",
7219 loop_num);
7220 return;
7221 }
7222
7223 /* make sure that the function has no indirect jumps. */
7224 if (indirect_jump_in_function){
7225 if (loop_dump_stream)
7226 fprintf (loop_dump_stream,
7227 "analyze_loop_iterations %d: BCT instrumentation failed: indirect jump in function\n",
7228 loop_num);
7229 return;
7230 }
7231
7232 /* make sure that the last loop insn is a conditional jump */
7233 last_loop_insn = PREV_INSN (loop_end);
7234 if (GET_CODE (last_loop_insn) != JUMP_INSN || !condjump_p (last_loop_insn)) {
7235 if (loop_dump_stream)
7236 fprintf (loop_dump_stream,
7237 "analyze_loop_iterations %d: BCT instrumentation failed: invalid jump at loop end\n",
7238 loop_num);
7239 return;
7240 }
7241
7242 /* First find the iteration variable. If the last insn is a conditional
7243 branch, and the insn preceding it tests a register value, make that
7244 register the iteration variable. */
7245
7246 /* We used to use prev_nonnote_insn here, but that fails because it might
7247 accidentally get the branch for a contained loop if the branch for this
7248 loop was deleted. We can only trust branches immediately before the
7249 loop_end. */
7250
7251 comparison = get_condition_for_loop (last_loop_insn);
7252   /* ??? get_condition may swap the positions of the induction variable and
7253      the invariant operand when it canonicalizes the comparison.  */
7254
7255 if (comparison == 0) {
7256 if (loop_dump_stream)
7257 fprintf (loop_dump_stream,
7258 "analyze_loop_iterations %d: BCT instrumentation failed: comparison not found\n",
7259 loop_num);
7260 return;
7261 }
7262
7263 comparison_code = GET_CODE (comparison);
7264 iteration_var = XEXP (comparison, 0);
7265 comparison_value = XEXP (comparison, 1);
7266
7267 original_mode = GET_MODE (iteration_var);
7268 if (GET_MODE_CLASS (original_mode) != MODE_INT
7269 || GET_MODE_SIZE (original_mode) != UNITS_PER_WORD) {
7270 if (loop_dump_stream)
7271 fprintf (loop_dump_stream,
7272 "analyze_loop_iterations %d: BCT Instrumentation failed: loop variable not integer\n",
7273 loop_num);
7274 return;
7275 }
7276
7277 /* get info about loop bounds and increment */
7278 iteration_info (iteration_var, &initial_value, &increment,
7279 loop_start, loop_end);
7280
7281 /* make sure that all required loop data were found */
7282 if (!(initial_value && increment && comparison_value
7283 && invariant_p (comparison_value) && invariant_p (increment)
7284 && ! indirect_jump_in_function))
7285 {
7286 if (loop_dump_stream) {
7287 fprintf (loop_dump_stream,
7288 "analyze_loop_iterations %d: BCT instrumentation failed because of wrong loop: ", loop_num);
7289 if (!(initial_value && increment && comparison_value)) {
7290 fprintf (loop_dump_stream, "\tbounds not available: ");
7291 if ( ! initial_value )
7292 fprintf (loop_dump_stream, "initial ");
7293 if ( ! increment )
7294 fprintf (loop_dump_stream, "increment ");
7295 if ( ! comparison_value )
7296 fprintf (loop_dump_stream, "comparison ");
7297 fprintf (loop_dump_stream, "\n");
7298 }
7299 if (!invariant_p (comparison_value) || !invariant_p (increment))
7300 fprintf (loop_dump_stream, "\tloop bounds not invariant\n");
7301 }
7302 return;
7303 }
7304
7305 /* make sure that the increment is constant */
7306 if (GET_CODE (increment) != CONST_INT) {
7307 if (loop_dump_stream)
7308 fprintf (loop_dump_stream,
7309 "analyze_loop_iterations %d: instrumentation failed: not arithmetic loop\n",
7310 loop_num);
7311 return;
7312 }
7313
7314   /* Make sure that the loop contains neither a function call nor a
7315      tablejump (the count register might be altered by a called function,
7316      and might be used for the table branch).  */
7317 for (insn = loop_start; insn && insn != loop_end; insn = NEXT_INSN (insn)) {
7318 if (GET_CODE (insn) == CALL_INSN){
7319 if (loop_dump_stream)
7320 fprintf (loop_dump_stream,
7321 "analyze_loop_iterations %d: BCT instrumentation failed: function call in the loop\n",
7322 loop_num);
7323 return;
7324 }
7325
7326 if (GET_CODE (insn) == JUMP_INSN
7327 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
7328 || GET_CODE (PATTERN (insn)) == ADDR_VEC)){
7329 if (loop_dump_stream)
7330 fprintf (loop_dump_stream,
7331 "analyze_loop_iterations %d: BCT instrumentation failed: computed branch in the loop\n",
7332 loop_num);
7333 return;
7334 }
7335 }
7336
7337 /* At this point, we are sure that the loop can be instrumented with BCT.
7338      Some of the loops, however, will still not be instrumented; the final
7339      decision is made by insert_bct ().  */
7340 if (loop_dump_stream)
7341 fprintf (loop_dump_stream,
7342 "analyze_loop_iterations: loop (luid =%d) can be BCT instrumented.\n",
7343 loop_num);
7344
7345   /* Mark this loop and all enclosing loops as unable to use the count
     register.  */
7346   /* ??? In fact, since insert_bct may decide not to instrument this loop,
7347      marking here may prevent instrumenting an enclosing loop that could
7348      actually be instrumented.  But since this is rare, it is safer to mark
7349      here in case the order of calling (analyze/insert)_bct is ever changed.  */
7350   for (i = loop_num; i != -1; i = loop_outer_loop[i])
7351 loop_used_count_register[i] = 1;
7352
7353 /* Set data structures which will be used by the instrumentation phase */
7354 loop_start_value[loop_num] = initial_value;
7355 loop_comparison_value[loop_num] = comparison_value;
7356 loop_increment[loop_num] = increment;
7357 loop_comparison_code[loop_num] = comparison_code;
7358 loop_can_insert_bct[loop_num] = 1;
7359 }
7360
7361
7362 /* Instrument a loop for insertion of the BCT instruction.  We distinguish
7363    between loops with compile-time bounds and loops with run-time bounds.
7364    The loop behavior is analyzed according to the following characteristics/variables:
7365 ; Input variables:
7366 ; comparison-value: the value to which the iteration counter is compared.
7367 ; initial-value: iteration-counter initial value.
7368 ; increment: iteration-counter increment.
7369 ; Computed variables:
7370 ; increment-direction: the sign of the increment.
7371    ; compare-direction: '1' for LT, LE, '-1' for GT, GE (and their unsigned variants), '0' for NE.
7372 ; range-direction: sign (comparison-value - initial-value)
7373 We give up on the following cases:
7374 ; loop variable overflow.
7375 ; run-time loop bounds with comparison code NE.
7376 */
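
/* For example (hypothetical values): a loop with initial-value 0,
   increment +4 and exit test "i <= 100" has increment-direction = 1,
   compare-direction = 1 and add_iteration = 1, so it is accepted, and
   the iteration formula below yields (100 - 0 + (4 - 1) + 1) / 4 = 26
   iterations.  */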
7377
7378 static void
7379 insert_bct (loop_start, loop_end)
7380 rtx loop_start, loop_end;
7381 {
7382 rtx initial_value, comparison_value, increment;
7383 enum rtx_code comparison_code;
7384
7385 int increment_direction, compare_direction;
7386 int unsigned_p = 0;
7387
7388   /* If the loop condition is <= or >=, the number of iterations
7389      is one more than the range between the bounds of the loop.  */
7390 int add_iteration = 0;
7391
7392   /* The only machine mode we work with is SImode, the machine's
7393      word-sized integer mode.  */
7394 enum machine_mode loop_var_mode = SImode;
7395
7396 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7397
7398 /* get loop-variables. No need to check that these are valid - already
7399 checked in analyze_loop_iterations (). */
7400 comparison_code = loop_comparison_code[loop_num];
7401 initial_value = loop_start_value[loop_num];
7402 comparison_value = loop_comparison_value[loop_num];
7403 increment = loop_increment[loop_num];
7404
7405 /* check analyze_loop_iterations decision for this loop. */
7406 if (! loop_can_insert_bct[loop_num]){
7407 if (loop_dump_stream)
7408 fprintf (loop_dump_stream,
7409 "insert_bct: [%d] - was decided not to instrument by analyze_loop_iterations ()\n",
7410 loop_num);
7411 return;
7412 }
7413
7414   /* It's impossible to instrument a completely unrolled loop.  */
7415 if (loop_unroll_factor [loop_num] == -1)
7416 return;
7417
7418   /* Make sure that the last loop insn is a conditional jump.
7419 This check is repeated from analyze_loop_iterations (),
7420 because unrolling might have changed that. */
7421 if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
7422 || !condjump_p (PREV_INSN (loop_end))) {
7423 if (loop_dump_stream)
7424 fprintf (loop_dump_stream,
7425 "insert_bct: not instrumenting BCT because of invalid branch\n");
7426 return;
7427 }
7428
7429   /* Fix the increment in case the loop was unrolled.  */
7430 if (loop_unroll_factor [loop_num] > 1)
7431 increment = GEN_INT ( INTVAL (increment) * loop_unroll_factor [loop_num] );
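  /* E.g. (illustrative numbers) if the loop body was unrolled four times,
     an original increment of 1 is seen here as 4, so the iteration count
     computed below stays correct.  */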
7432
7433 /* determine properties and directions of the loop */
7434   increment_direction = (INTVAL (increment) > 0) ? 1 : -1;
7435   switch (comparison_code) {
7436 case LEU:
7437 unsigned_p = 1;
7438 /* fallthrough */
7439 case LE:
7440 compare_direction = 1;
7441 add_iteration = 1;
7442 break;
7443 case GEU:
7444 unsigned_p = 1;
7445 /* fallthrough */
7446 case GE:
7447 compare_direction = -1;
7448 add_iteration = 1;
7449 break;
7450 case EQ:
7451 /* in this case we cannot know the number of iterations */
7452 if (loop_dump_stream)
7453 fprintf (loop_dump_stream,
7454 "insert_bct: %d: loop cannot be instrumented: == in condition\n",
7455 loop_num);
7456 return;
7457 case LTU:
7458 unsigned_p = 1;
7459 /* fallthrough */
7460 case LT:
7461 compare_direction = 1;
7462 break;
7463 case GTU:
7464 unsigned_p = 1;
7465 /* fallthrough */
7466 case GT:
7467 compare_direction = -1;
7468 break;
7469 case NE:
7470 compare_direction = 0;
7471 break;
7472 default:
7473 abort ();
7474 }
7475
7476
7477 /* make sure that the loop does not end by an overflow */
7478 if (compare_direction != increment_direction) {
7479 if (loop_dump_stream)
7480 fprintf (loop_dump_stream,
7481 "insert_bct: %d: loop cannot be instrumented: terminated by overflow\n",
7482 loop_num);
7483 return;
7484 }
7485
7486 /* try to instrument the loop. */
7487
7488 /* Handle the simpler case, where the bounds are known at compile time. */
7489 if (GET_CODE (initial_value) == CONST_INT && GET_CODE (comparison_value) == CONST_INT)
7490 {
7491 int n_iterations;
7492 int increment_value_abs = INTVAL (increment) * increment_direction;
7493
7494 /* check the relation between compare-val and initial-val */
7495 int difference = INTVAL (comparison_value) - INTVAL (initial_value);
7496 int range_direction = (difference > 0) ? 1 : -1;
7497
7498 /* make sure the loop executes enough iterations to gain from BCT */
7499 if (difference > -3 && difference < 3) {
7500 if (loop_dump_stream)
7501 fprintf (loop_dump_stream,
7502 "insert_bct: loop %d not BCT instrumented: too small iteration count.\n",
7503 loop_num);
7504 return;
7505 }
7506
7507 /* make sure that the loop executes at least once */
7508 if ((range_direction == 1 && compare_direction == -1)
7509 || (range_direction == -1 && compare_direction == 1))
7510 {
7511 if (loop_dump_stream)
7512 fprintf (loop_dump_stream,
7513 "insert_bct: loop %d: does not iterate even once. Not instrumenting.\n",
7514 loop_num);
7515 return;
7516 }
7517
7518       /* Make sure that the loop does not end by an overflow (with compile-time
7519 	 bounds we must have an additional check for overflow, because here
7520 	 we also support the comparison code NE).  */
7521 if (comparison_code == NE
7522 && increment_direction != range_direction) {
7523 if (loop_dump_stream)
7524 fprintf (loop_dump_stream,
7525 "insert_bct (compile time bounds): %d: loop not instrumented: terminated by overflow\n",
7526 loop_num);
7527 return;
7528 }
7529
7530 /* Determine the number of iterations by:
7531 ;
7532 ; compare-val - initial-val + (increment -1) + additional-iteration
7533 ; num_iterations = -----------------------------------------------------------------
7534 ; increment
7535 */
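      /* Worked instance (hypothetical numbers): with initial-val = 0,
	 compare-val = 10, increment = 3 and a '<' test, difference = 10 and
	 add_iteration = 0, giving (10 + (3 - 1) + 0) / 3 = 4 iterations,
	 matching i = 0, 3, 6, 9.  */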
7536 difference = (range_direction > 0) ? difference : -difference;
7537 #if 0
7538 fprintf (stderr, "difference is: %d\n", difference); /* @*/
7539 fprintf (stderr, "increment_value_abs is: %d\n", increment_value_abs); /* @*/
7540 fprintf (stderr, "add_iteration is: %d\n", add_iteration); /* @*/
7541 fprintf (stderr, "INTVAL (comparison_value) is: %d\n", INTVAL (comparison_value)); /* @*/
7542 fprintf (stderr, "INTVAL (initial_value) is: %d\n", INTVAL (initial_value)); /* @*/
7543 #endif
7544
7545 if (increment_value_abs == 0) {
7546 fprintf (stderr, "insert_bct: error: increment == 0 !!!\n");
7547 abort ();
7548 }
7549 n_iterations = (difference + increment_value_abs - 1 + add_iteration)
7550 / increment_value_abs;
7551
7552 #if 0
7553 fprintf (stderr, "number of iterations is: %d\n", n_iterations); /* @*/
7554 #endif
7555 instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));
7556
7557 /* Done with this loop. */
7558 return;
7559 }
7560
7561   /* Handle the more complex case, where the bounds are NOT known at compile time.
7562      In this case we generate a run-time calculation of the number of iterations.  */
7563
7564   /* With run-time bounds, if the comparison is of the form '!=' we give up.  */
7565 if (comparison_code == NE) {
7566 if (loop_dump_stream)
7567 fprintf (loop_dump_stream,
7568 "insert_bct: fail for loop %d: runtime bounds with != comparison\n",
7569 loop_num);
7570 return;
7571 }
7572
7573 else {
7574     /* We rely on the existence of a run-time guard to ensure that the
7575 loop executes at least once. */
7576 rtx sequence;
7577 rtx iterations_num_reg;
7578
7579 int increment_value_abs = INTVAL (increment) * increment_direction;
7580
7581     /* Make sure that the increment is a power of two; otherwise an
7582        (expensive) divide would be needed.  */
7583 if (exact_log2 (increment_value_abs) == -1)
7584 {
7585 if (loop_dump_stream)
7586 fprintf (loop_dump_stream,
7587 "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
7588 return;
7589 }
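      /* exact_log2 returns the base-2 log for an exact power of two and -1
	 otherwise: e.g. an increment of 8 passes (log2 = 3), while an
	 increment of 6 is rejected above (illustrative values).  */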
7590
7591 /* compute the number of iterations */
7592 start_sequence ();
7593 {
7594 rtx temp_reg;
7595
7596 /* Again, the number of iterations is calculated by:
7597 ;
7598 ; compare-val - initial-val + (increment -1) + additional-iteration
7599 ; num_iterations = -----------------------------------------------------------------
7600 ; increment
7601 */
7602 /* ??? Do we have to call copy_rtx here before passing rtx to
7603 expand_binop? */
7604 if (compare_direction > 0) {
7605 	/* <, <=: the loop variable is increasing.  */
7606 temp_reg = expand_binop (loop_var_mode, sub_optab, comparison_value,
7607 initial_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
7608 }
7609 else {
7610 temp_reg = expand_binop (loop_var_mode, sub_optab, initial_value,
7611 comparison_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
7612 }
7613
7614 if (increment_value_abs - 1 + add_iteration != 0)
7615 temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
7616 GEN_INT (increment_value_abs - 1 + add_iteration),
7617 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7618
7619 if (increment_value_abs != 1)
7620 {
7621 /* ??? This will generate an expensive divide instruction for
7622 most targets. The original authors apparently expected this
7623 to be a shift, since they test for power-of-2 divisors above,
7624 but just naively generating a divide instruction will not give
7625 a shift. It happens to work for the PowerPC target because
7626 the rs6000.md file has a divide pattern that emits shifts.
7627 It will probably not work for any other target. */
7628 iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
7629 temp_reg,
7630 GEN_INT (increment_value_abs),
7631 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7632 }
7633 else
7634 iterations_num_reg = temp_reg;
7635 }
7636 sequence = gen_sequence ();
7637 end_sequence ();
7638 emit_insn_before (sequence, loop_start);
7639 instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
7640 }
7641 }
7642
7643 /* Instrument the loop by inserting a BCT in it.  This is done in the following way:
7644    1. A new register is created and assigned the hard register number of the count
7645       register.
7646    2. In the head of the loop the new variable is initialized to the value passed in
7647       the loop_num_iterations parameter.
7648    3. At the end of the loop, a comparison of the register with 0 is generated.
7649       The created comparison follows the pattern defined for the
7650       decrement_and_branch_on_count insn, so this insn will be recognized in the
7651       assembly generation phase.
7652    4. The compare&branch on the old variable is deleted.  So, if the loop variable was
7653       not used elsewhere, it will be eliminated by data-flow analysis.  */
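
/* Schematically (an illustrative sketch, not the generated RTL):

     count = n_iterations;
   loop:
     body;
     if (--count != 0) goto loop;

   where "count" lives in COUNT_REGISTER_REGNUM.  */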
7654
7655 static void
7656 instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
7657 rtx loop_start, loop_end;
7658 rtx loop_num_iterations;
7659 {
7660 rtx temp_reg1, temp_reg2;
7661 rtx start_label;
7662
7663 rtx sequence;
7664 enum machine_mode loop_var_mode = SImode;
7665
7666 if (HAVE_decrement_and_branch_on_count)
7667 {
7668 if (loop_dump_stream)
7669 fprintf (loop_dump_stream, "Loop: Inserting BCT\n");
7670
7671       /* Eliminate the check on the old variable: delete both the conditional
	 branch and the test insn that precedes it.  */
7672 delete_insn (PREV_INSN (loop_end));
7673 delete_insn (PREV_INSN (loop_end));
7674
7675 /* insert the label which will delimit the start of the loop */
7676 start_label = gen_label_rtx ();
7677 emit_label_after (start_label, loop_start);
7678
7679 /* insert initialization of the count register into the loop header */
7680 start_sequence ();
7681 temp_reg1 = gen_reg_rtx (loop_var_mode);
7682 emit_insn (gen_move_insn (temp_reg1, loop_num_iterations));
7683
7684       /* This will be the count register.  */
7685 temp_reg2 = gen_rtx_REG (loop_var_mode, COUNT_REGISTER_REGNUM);
7686       /* We have to move the value into the count register via a GPR,
7687 	 because the rtx pointed to by loop_num_iterations could contain
7688 	 an expression which cannot be moved into the count register.  */
7689 emit_insn (gen_move_insn (temp_reg2, temp_reg1));
7690
7691 sequence = gen_sequence ();
7692 end_sequence ();
7693 emit_insn_after (sequence, loop_start);
7694
7695       /* Insert a new comparison on the count register instead of the
7696 	 old one, generating the needed BCT pattern (that will be
7697 	 later recognized by the assembly generation phase).  */
7698 emit_jump_insn_before (gen_decrement_and_branch_on_count (temp_reg2, start_label),
7699 loop_end);
7700 LABEL_NUSES (start_label)++;
7701 }
7702
7703 }
7704 #endif /* HAVE_decrement_and_branch_on_count */
7705
7706 #endif /* HAIFA */
7707
7708 /* Scan the function and determine whether it has indirect (computed) jumps.
7709
7710 This is taken mostly from flow.c; similar code exists elsewhere
7711 in the compiler. It may be useful to put this into rtlanal.c. */
7712 static int
7713 indirect_jump_in_function_p (start)
7714 rtx start;
7715 {
7716 rtx insn;
7717
7718 for (insn = start; insn; insn = NEXT_INSN (insn))
7719 if (computed_jump_p (insn))
7720 return 1;
7721
7722 return 0;
7723 }