/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 88, 89, 91-7, 1998 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */


/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value, and changes these to zero the
   entire register once before the loop and merely copy the low part
   within the loop.

   Most of the complexity is in heuristics to decide when it is worth
   while to do these things.  */
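
/* Illustrative sketch (not part of the original sources): given a
   hypothetical source loop

	for (i = 0; i < n; i++)
	  a[i] = x * y + i;

   moving invariants turns the multiplication x * y into a single
   computation in front of the loop, and strength reduction replaces
   the multiply hidden in the a[i] address (base + i * 4, assuming
   4-byte elements) with a pointer bumped on each iteration:

	t = x * y;
	p = &a[0];
	for (i = 0; i < n; i++, p++)
	  *p = t + i;  */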

#include "config.h"
#include <stdio.h>
#include "rtl.h"
#include "obstack.h"
#include "expr.h"
#include "insn-config.h"
#include "insn-flags.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "except.h"

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but always increase monotonically.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;
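
/* For instance (hypothetical numbers): if the insns of a loop carry
   luids 10..25, a jump whose target label has luid 30 must leave the
   loop no matter how the uids happen to be ordered; unlike uids,
   luids can safely be compared with < and > to ask "before or
   after?".  */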

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

int *uid_loop_num;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;

/* For each loop, gives the containing loop number, -1 if none.  */

int *loop_outer_loop;

#ifdef HAIFA
/* The main output of analyze_loop_iterations is placed here.  */

int *loop_can_insert_bct;

/* For each loop, records whether any of its inner loops has used the
   count register.  */

int *loop_used_count_register;

/* Loop parameters for arithmetic loops.  These loops have a loop variable
   which is initialized to loop_start_value, incremented in each iteration
   by "loop_increment".  At the end of the iteration the loop variable is
   compared to the loop_comparison_value (using loop_comparison_code).  */

rtx *loop_increment;
rtx *loop_comparison_value;
rtx *loop_start_value;
enum rtx_code *loop_comparison_code;
#endif  /* HAIFA */

/* For each loop, keep track of its unrolling factor.
   Potential values:
      0: unrolled
      1: not unrolled.
     -1: completely unrolled
     >0: holds the exact unroll factor.  */
int *loop_unroll_factor;

/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;

/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include return
   instructions on this list.  */

rtx *loop_number_exit_labels;

/* Indexed by loop number, counts the number of LABEL_REFs on
   loop_number_exit_labels for this loop and all loops nested inside it.  */

int *loop_number_exit_count;

/* Holds the number of loop iterations.  It is zero if the number could not be
   calculated.  Must be unsigned since the number of iterations can
   be as high as 2^wordsize-1.  For loops with a wider iterator, this number
   will be zero if the number of loop iterations is too large for an
   unsigned integer to hold.  */

unsigned HOST_WIDE_INT loop_n_iterations;

/* Nonzero if there is a subroutine call in the current loop.  */

static int loop_has_call;

/* Nonzero if there is a volatile memory reference in the current
   loop.  */

static int loop_has_volatile;

/* loop_continue is the NOTE_INSN_LOOP_CONT of the current loop.
   A continue statement will generate a branch to
   NEXT_INSN (loop_continue).  */

static rtx loop_continue;

/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static int *n_times_set;

/* Original value of n_times_set; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static int *n_times_used;

/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */

static char *may_not_optimize;

/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;

/* Array of MEMs that are stored in this loop.  If there are too many to fit
   here, we just turn on unknown_address_altered.  */

#define NUM_STORES 30
static rtx loop_store_mems[NUM_STORES];

/* Index of first available slot in above array.  */
static int loop_store_mems_idx;

/* Nonzero if we don't know what MEMs were changed in the current loop.
   This happens if the loop contains a call (in which case `loop_has_call'
   will also be set) or if we store into more than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Number of loops contained within the current one, including itself.  */
static int loops_enclosed;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;

/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

extern char *oballoc ();
\f
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;                     /* A movable insn */
  rtx set_src;                  /* The expression this reg is set from.  */
  rtx set_dest;                 /* The destination of this SET.  */
  rtx dependencies;             /* When INSN is a libcall, this is an EXPR_LIST
                                   of any registers used within the LIBCALL.  */
  int consec;                   /* Number of consecutive following insns
                                   that must be moved with this one.  */
  int regno;                    /* The register it sets */
  short lifetime;               /* lifetime of that register;
                                   may be adjusted when matching movables
                                   that load the same value are found.  */
  short savings;                /* Number of insns we can move for this reg,
                                   including other movables that force this
                                   or match this one.  */
  unsigned int cond : 1;        /* 1 if only conditionally movable */
  unsigned int force : 1;       /* 1 means MUST move this insn */
  unsigned int global : 1;      /* 1 means reg is live outside this loop */
                /* If PARTIAL is 1, GLOBAL means something different:
                   that the reg is live outside the range from where it is set
                   to the following label.  */
  unsigned int done : 1;        /* 1 inhibits further processing of this */

  unsigned int partial : 1;     /* 1 means this reg is used for zero-extending.
                                   In particular, moving it does not make it
                                   invariant.  */
  unsigned int move_insn : 1;   /* 1 means that we call emit_move_insn to
                                   load SRC, rather than copying INSN.  */
  unsigned int is_equiv : 1;    /* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;   /* Nonzero means it is a mode for a low part
                                   that we should avoid changing when clearing
                                   the rest of the reg.  */
  struct movable *match;        /* First entry for same value */
  struct movable *forces;       /* An insn that must be moved if this is */
  struct movable *next;
};
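
/* Illustrative sketch (hypothetical values): for a loop containing the
   single insn "(set (reg 105) (mult (reg 100) (reg 101)))", where regs
   100 and 101 are invariant, scan_loop would build roughly
   { insn = <that insn>, set_src = (mult ...), set_dest = (reg 105),
     regno = 105, consec = 0, cond = 0,
     global = 0 or 1 depending on whether reg 105 is used after the loop }.
   move_movables then weighs `savings' and `lifetime' against the
   loop's insn count to decide whether hoisting pays.  */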

FILE *loop_dump_stream;

/* Forward declarations.  */

static void find_and_verify_loops ();
static void mark_loop_jump ();
static void prescan_loop ();
static int reg_in_basic_block_p ();
static int consec_sets_invariant_p ();
static rtx libcall_other_reg ();
static int labels_in_range_p ();
static void count_loop_regs_set ();
static void note_addr_stored ();
static int loop_reg_used_before_p ();
static void scan_loop ();
#if 0
static void replace_call_address ();
#endif
static rtx skip_consec_insns ();
static int libcall_benefit ();
static void ignore_some_movables ();
static void force_movables ();
static void combine_movables ();
static int rtx_equal_for_loop_p ();
static void move_movables ();
static void strength_reduce ();
static int valid_initial_value_p ();
static void find_mem_givs ();
static void record_biv ();
static void check_final_value ();
static void record_giv ();
static void update_giv_derive ();
static int basic_induction_var ();
static rtx simplify_giv_expr ();
static int general_induction_var ();
static int consec_sets_giv ();
static int check_dbra_loop ();
static rtx express_from ();
static int combine_givs_p ();
static void combine_givs ();
static int product_cheap_p ();
static int maybe_eliminate_biv ();
static int maybe_eliminate_biv_1 ();
static int last_use_this_basic_block ();
static void record_initial ();
static void update_reg_last_use ();

#ifdef HAIFA
/* This is extern from unroll.c */
void iteration_info ();

/* Two main functions for implementing bct:
   the first to be called before loop unrolling, the second after.  */
static void analyze_loop_iterations ();
static void insert_bct ();

/* Auxiliary function that inserts the bct pattern into the loop.  */
static void instrument_loop_bct ();
#endif  /* HAIFA */

/* Indirect_jump_in_function is computed once per function.  */
int indirect_jump_in_function = 0;
static int indirect_jump_in_function_p ();

\f
/* Relative gain of eliminating various kinds of operations.  */
int add_cost;
#if 0
int shift_cost;
int mult_cost;
#endif

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
int copy_cost;

void
init_loop ()
{
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  copy_cost = 2 * 2;

  /* Free the objects we just allocated.  */
  obfree (free_point);

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
}
\f
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile, unroll_p)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int unroll_p;
{
  register rtx insn;
  register int i;
  rtx last_insn;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();
  init_alias_analysis ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
          && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
        max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
  bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));

  /* Allocate tables for recording each loop.  We set each entry, so they need
     not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));

  /* This is initialized by the unrolling code, so we clear it just in
     case we are not performing loop unrolling.  */
  loop_unroll_factor = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_unroll_factor, max_loop_num * sizeof (int));

#ifdef HAIFA
  /* Allocate for BCT optimization.  */
  loop_can_insert_bct = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_can_insert_bct, max_loop_num * sizeof (int));

  loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));

  loop_increment = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_comparison_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_start_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  bzero ((char *) loop_increment, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_comparison_value, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_start_value, max_loop_num * sizeof (rtx));

  loop_comparison_code
    = (enum rtx_code *) alloca (max_loop_num * sizeof (enum rtx_code));
  bzero ((char *) loop_comparison_code, max_loop_num * sizeof (enum rtx_code));
#endif  /* HAIFA */

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* See if we went too far.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();

  /* Compute the mapping from uids to luids.
     LUIDs are numbers assigned to insns, like uids,
     except that luids increase monotonically through the code.
     Don't assign luids to line-number NOTEs, so that the distance in luids
     between two insns is not affected by -g.  */

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      last_insn = insn;
      if (GET_CODE (insn) != NOTE
          || NOTE_LINE_NUMBER (insn) <= 0)
        uid_luid[INSN_UID (insn)] = ++i;
      else
        /* Give a line number note the same luid as preceding insn.  */
        uid_luid[INSN_UID (insn)] = i;
    }

  max_luid = i + 1;

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
        break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];
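
  /* Hypothetical illustration of the back-fill above: if uids 7 and 8
     belonged to insns that cross-jumping deleted, uid_luid might read

	uid_luid[6] = 40, uid_luid[7] = 0,  uid_luid[8] = 0,  uid_luid[9] = 41

     before this pass and

	uid_luid[6] = 40, uid_luid[7] = 40, uid_luid[8] = 40, uid_luid[9] = 41

     after it, so stale REGNO_FIRST_UID/REGNO_LAST_UID entries still map
     to roughly the right place in the code.  */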

  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Determine if the function has an indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
                 max_reg_num (), unroll_p);

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original one
     to one mapping will remain.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    unroll_block_trees ();
}
\f
/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (loop_start, end, nregs, unroll_p)
     rtx loop_start, end;
     int nregs;
     int unroll_p;
{
  register int i;
  register rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  rtx loop_top = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  rtx scan_start;
  /* Number of insns in the loop.  */
  int insn_count;
  int in_libcall = 0;
  int tem;
  rtx temp;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* If we have calls, contains the insn in which a register was used
     if it was used exactly once; contains const0_rtx if it was used more
     than once.  */
  rtx *reg_single_usage = 0;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;

  n_times_set = (int *) alloca (nregs * sizeof (int));
  n_times_used = (int *) alloca (nregs * sizeof (int));
  may_not_optimize = (char *) alloca (nregs);

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
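
  /* For example (hypothetical layout): "while (a < b) a++;" is
     typically emitted rotated, as

	LOOP_BEG
	  jump L2		<- loop_entry_jump; scan_start becomes L2
	L1: a++			<- loop_top
	L2: if (a < b) jump L1
	LOOP_END

     whereas a loop entered at the top simply falls through to its
     first label, and scan_start stays there.  */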

  for (p = NEXT_INSN (loop_start);
       p != end
         && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
         && (GET_CODE (p) != NOTE
             || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
                 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  scan_start = p;

  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end);
  threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be an unconditional jump (and not a RETURN).  */
      if (simplejump_p (p)
          && JUMP_LABEL (p) != 0
          /* Check to see whether the jump actually
             jumps out of the loop (meaning it's no loop).
             This case can happen for things like
             do {..} while (0).  If this label was generated previously
             by loop, we can't tell anything about it and have to reject
             the loop.  */
          && INSN_UID (JUMP_LABEL (p)) < max_uid_for_loop
          && INSN_LUID (JUMP_LABEL (p)) >= INSN_LUID (loop_start)
          && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (end))
        {
          loop_top = next_label (scan_start);
          scan_start = JUMP_LABEL (p);
        }
    }

  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
                 INSN_UID (loop_start), INSN_UID (end));
      return;
    }

  /* Count number of times each reg is set during this loop.
     Set may_not_optimize[I] if it is not safe to move out
     the setting of register I.  If this loop has calls, set
     reg_single_usage[I].  */

  bzero ((char *) n_times_set, nregs * sizeof (int));
  bzero (may_not_optimize, nregs);

  if (loop_has_call)
    {
      reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
      bzero ((char *) reg_single_usage, nregs * sizeof (rtx));
    }

  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
                       may_not_optimize, reg_single_usage, &insn_count, nregs);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    may_not_optimize[i] = 1, n_times_set[i] = 1;
  bcopy ((char *) n_times_set, (char *) n_times_used, nregs * sizeof (int));

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
               INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_continue)
        fprintf (loop_dump_stream, "Continue at insn %d.\n",
                 INSN_UID (loop_continue));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set n_times_set negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

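  /* E.g. (hypothetical): in

	while (n--)
	  {
	    if (d == 0)
	      break;
	    q = x / d;
	  }

     the division follows a conditional jump, so MAYBE_NEVER is set by
     the time we reach it; hoisting "x / d" in front of the loop could
     introduce a divide-by-zero trap that the original code never
     executed.  That is why may_trap_p insns are only moved when they
     are certain to be reached.  */
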
  p = scan_start;
  while (1)
    {
      p = NEXT_INSN (p);
      /* At end of a straight-in loop, we are done.
         At end of a loop entered at the bottom, scan the top.  */
      if (p == scan_start)
        break;
      if (p == end)
        {
          if (loop_top != 0)
            p = loop_top;
          else
            break;
          if (p == scan_start)
            break;
        }

      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
          && find_reg_note (p, REG_LIBCALL, NULL_RTX))
        in_libcall = 1;
      else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
               && find_reg_note (p, REG_RETVAL, NULL_RTX))
        in_libcall = 0;

      if (GET_CODE (p) == INSN
          && (set = single_set (p))
          && GET_CODE (SET_DEST (set)) == REG
          && ! may_not_optimize[REGNO (SET_DEST (set))])
        {
          int tem1 = 0;
          int tem2 = 0;
          int move_insn = 0;
          rtx src = SET_SRC (set);
          rtx dependencies = 0;

          /* Figure out what to use as a source of this insn.  If a REG_EQUIV
             note is given or if a REG_EQUAL note with a constant operand is
             specified, use it as the source and mark that we should move
             this insn by calling emit_move_insn rather than duplicating the
             insn.

             Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
             is present.  */
          temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
          if (temp)
            src = XEXP (temp, 0), move_insn = 1;
          else
            {
              temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
              if (temp && CONSTANT_P (XEXP (temp, 0)))
                src = XEXP (temp, 0), move_insn = 1;
              if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
                {
                  src = XEXP (temp, 0);
                  /* A libcall block can use regs that don't appear in
                     the equivalent expression.  To move the libcall,
                     we must move those regs too.  */
                  dependencies = libcall_other_reg (p, src);
                }
            }

          /* Don't try to optimize a register that was made
             by loop-optimization for an inner loop.
             We don't know its life-span, so we can't compute the benefit.  */
          if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
            ;
          /* In order to move a register, we need to have one of three cases:
             (1) it is used only in the same basic block as the set
             (2) it is not a user variable and it is not used in the
                 exit test (this can cause the variable to be used
                 before it is set just like a user-variable).
             (3) the set is guaranteed to be executed once the loop starts,
                 and the reg is not used until after that.  */
          else if (! ((! maybe_never
                       && ! loop_reg_used_before_p (set, p, loop_start,
                                                    scan_start, end))
                      || (! REG_USERVAR_P (SET_DEST (set))
                          && ! REG_LOOP_TEST_P (SET_DEST (set)))
                      || reg_in_basic_block_p (p, SET_DEST (set))))
            ;
          else if ((tem = invariant_p (src))
                   && (dependencies == 0
                       || (tem2 = invariant_p (dependencies)) != 0)
                   && (n_times_set[REGNO (SET_DEST (set))] == 1
                       || (tem1
                           = consec_sets_invariant_p (SET_DEST (set),
                                                      n_times_set[REGNO (SET_DEST (set))],
                                                      p)))
                   /* If the insn can cause a trap (such as divide by zero),
                      can't move it unless it's guaranteed to be executed
                      once loop is entered.  Even a function call might
                      prevent the trap insn from being reached
                      (since it might exit!)  */
                   && ! ((maybe_never || call_passed)
                         && may_trap_p (src)))
            {
              register struct movable *m;
              register int regno = REGNO (SET_DEST (set));

              /* A potential lossage is where we have a case where two insns
                 can be combined as long as they are both in the loop, but
                 we move one of them outside the loop.  For large loops,
                 this can lose.  The most common case of this is the address
                 of a function being called.

                 Therefore, if this register is marked as being used exactly
                 once if we are in a loop with calls (a "large loop"), see if
                 we can replace the usage of this register with the source
                 of this SET.  If we can, delete this insn.

                 Don't do this if P has a REG_RETVAL note or if we have
                 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

              if (reg_single_usage && reg_single_usage[regno] != 0
                  && reg_single_usage[regno] != const0_rtx
                  && REGNO_FIRST_UID (regno) == INSN_UID (p)
                  && (REGNO_LAST_UID (regno)
                      == INSN_UID (reg_single_usage[regno]))
                  && n_times_set[REGNO (SET_DEST (set))] == 1
                  && ! side_effects_p (SET_SRC (set))
                  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
                  && (! SMALL_REGISTER_CLASSES
                      || (! (GET_CODE (SET_SRC (set)) == REG
                             && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
                  /* This test is not redundant; SET_SRC (set) might be
                     a call-clobbered register and the life of REGNO
                     might span a call.  */
                  && ! modified_between_p (SET_SRC (set), p,
                                           reg_single_usage[regno])
                  && no_labels_between_p (p, reg_single_usage[regno])
                  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
                                           reg_single_usage[regno]))
                {
                  /* Replace any usage in a REG_EQUAL note.  Must copy the
                     new source, so that we don't get rtx sharing between the
                     SET_SOURCE and REG_NOTES of insn p.  */
                  REG_NOTES (reg_single_usage[regno])
                    = replace_rtx (REG_NOTES (reg_single_usage[regno]),
                                   SET_DEST (set), copy_rtx (SET_SRC (set)));

                  PUT_CODE (p, NOTE);
                  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
                  NOTE_SOURCE_FILE (p) = 0;
                  n_times_set[regno] = 0;
                  continue;
                }

              m = (struct movable *) alloca (sizeof (struct movable));
              m->next = 0;
              m->insn = p;
              m->set_src = src;
              m->dependencies = dependencies;
              m->set_dest = SET_DEST (set);
              m->force = 0;
              m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
              m->done = 0;
              m->forces = 0;
              m->partial = 0;
              m->move_insn = move_insn;
              m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
              m->savemode = VOIDmode;
              m->regno = regno;
              /* Set M->cond if either invariant_p or consec_sets_invariant_p
                 returned 2 (only conditionally invariant).  */
              m->cond = ((tem | tem1 | tem2) > 1);
              m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
                           || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
              m->match = 0;
              m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                             - uid_luid[REGNO_FIRST_UID (regno)]);
              m->savings = n_times_used[regno];
              if (find_reg_note (p, REG_RETVAL, NULL_RTX))
                m->savings += libcall_benefit (p);
              n_times_set[regno] = move_insn ? -2 : -1;
              /* Add M to the end of the chain MOVABLES.  */
              if (movables == 0)
                movables = m;
              else
                last_movable->next = m;
              last_movable = m;

              if (m->consec > 0)
                {
                  /* Skip this insn, not checking REG_LIBCALL notes.  */
                  p = next_nonnote_insn (p);
                  /* Skip the consecutive insns, if there are any.  */
                  p = skip_consec_insns (p, m->consec);
                  /* Back up to the last insn of the consecutive group.  */
                  p = prev_nonnote_insn (p);

                  /* We must now reset m->move_insn, m->is_equiv, and possibly
                     m->set_src to correspond to the effects of all the
                     insns.  */
                  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
                  if (temp)
                    m->set_src = XEXP (temp, 0), m->move_insn = 1;
                  else
                    {
                      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
                      if (temp && CONSTANT_P (XEXP (temp, 0)))
                        m->set_src = XEXP (temp, 0), m->move_insn = 1;
                      else
                        m->move_insn = 0;
                    }
                  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
                }
            }
          /* If this register is always set within a STRICT_LOW_PART
             or set to zero, then its high bytes are constant.
             So clear them outside the loop and within the loop
             just load the low bytes.
             We must check that the machine has an instruction to do so.
             Also, if the value loaded into the register
             depends on the same register, this cannot be done.  */
          else if (SET_SRC (set) == const0_rtx
                   && GET_CODE (NEXT_INSN (p)) == INSN
                   && (set1 = single_set (NEXT_INSN (p)))
                   && GET_CODE (set1) == SET
                   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
                   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
                   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
                       == SET_DEST (set))
                   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
            {
              register int regno = REGNO (SET_DEST (set));
              if (n_times_set[regno] == 2)
                {
                  register struct movable *m;
                  m = (struct movable *) alloca (sizeof (struct movable));
                  m->next = 0;
                  m->insn = p;
                  m->set_dest = SET_DEST (set);
                  m->dependencies = 0;
                  m->force = 0;
                  m->consec = 0;
                  m->done = 0;
                  m->forces = 0;
                  m->move_insn = 0;
                  m->partial = 1;
                  /* If the insn may not be executed on some cycles,
                     we can't clear the whole reg; clear just high part.
                     Not even if the reg is used only within this loop.
                     Consider this:
                     while (1)
                       while (s != t) {
                         if (foo ()) x = *s;
                         use (x);
                       }
                     Clearing x before the inner loop could clobber a value
                     being saved from the last time around the outer loop.
                     However, if the reg is not used outside this loop
                     and all uses of the register are in the same
                     basic block as the store, there is no problem.

                     If this insn was made by loop, we don't know its
                     INSN_LUID and hence must make a conservative
                     assumption.  */
                  m->global = (INSN_UID (p) >= max_uid_for_loop
                               || (uid_luid[REGNO_LAST_UID (regno)]
                                   > INSN_LUID (end))
                               || (uid_luid[REGNO_FIRST_UID (regno)]
                                   < INSN_LUID (p))
                               || (labels_in_range_p
                                   (p, uid_luid[REGNO_FIRST_UID (regno)])));
                  if (maybe_never && m->global)
                    m->savemode = GET_MODE (SET_SRC (set1));
                  else
                    m->savemode = VOIDmode;
                  m->regno = regno;
                  m->cond = 0;
                  m->match = 0;
                  m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                                 - uid_luid[REGNO_FIRST_UID (regno)]);
                  m->savings = 1;
                  n_times_set[regno] = -1;
                  /* Add M to the end of the chain MOVABLES.  */
                  if (movables == 0)
                    movables = m;
                  else
                    last_movable->next = m;
                  last_movable = m;
                }
            }
        }
      /* Past a call insn, we get to insns which might not be executed
         because the call might exit.  This matters for insns that trap.
         Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
         so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
        call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
         can't count on whether or how many times they will be
         executed during each iteration.  Therefore, we can
         only move out sets of trivial variables
         (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
               /* If we enter the loop in the middle, and scan around to the
                  beginning, don't set maybe_never for that.  This must be an
                  unconditional jump, otherwise the code at the top of the
                  loop might never be executed.  Unconditional jumps are
                  followed by a barrier and then the loop end.  */
               && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
                     && NEXT_INSN (NEXT_INSN (p)) == end
                     && simplejump_p (p)))
        maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
        {
          /* At the virtual top of a converted loop, insns are again known to
             be executed: logically, the loop begins here even though the exit
             code has been duplicated.  */
          if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
            maybe_never = call_passed = 0;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
            loop_depth++;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
            loop_depth--;
        }
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads
     leads when it dies right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in n_times_set for each reg that is moved.  */

  move_movables (movables, threshold,
                 insn_count, loop_start, end, nregs);

  /* Now candidates that still are negative are those not moved.
     Change n_times_set to indicate that those are not actually invariant.  */
  for (i = 0; i < nregs; i++)
    if (n_times_set[i] < 0)
      n_times_set[i] = n_times_used[i];

  if (flag_strength_reduce)
    strength_reduce (scan_start, end, loop_top,
                     insn_count, loop_start, end, unroll_p);
}
\f
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
          && ! reg_mentioned_p (in_this, not_in_this))
        *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
        {
        case 'E':
          for (j = 0; j < XVECLEN (in_this, i); j++)
            record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
          break;

        case 'e':
          record_excess_regs (XEXP (in_this, i), not_in_this, output);
          break;
        }
    }
}
\f
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

static rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
          || GET_CODE (p) == CALL_INSN)
        record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
\f
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
        {
        case NOTE:
          break;

        case INSN:
        case CALL_INSN:
          /* Ordinary insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          break;

        case JUMP_INSN:
          /* Jump insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          /* Otherwise, it's the end of the basic block, so we lose.  */
          return 0;

        case CODE_LABEL:
        case BARRIER:
          /* It's the end of the basic block, so we lose.  */
          return 0;

        default:
          break;
        }
    }

  /* The "last use" doesn't follow the "first use"??  */
  abort ();
}
\f
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
        benefit += 10;          /* Assume at least this many insns in a library
                                   routine.  */
      else if (GET_CODE (insn) == INSN
               && GET_CODE (PATTERN (insn)) != USE
               && GET_CODE (PATTERN (insn)) != CLOBBER)
        benefit++;
    }

  return benefit;
}
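
/* Worked example (not part of the original sources): a libcall block
   that sets up two argument moves and contains one CALL_INSN yields a
   benefit of 2 + 10 = 12; each real non-USE/non-CLOBBER insn counts 1
   and the call itself counts 10.  */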
\f
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
         be an insn here.  */
      if (GET_CODE (insn) != NOTE
          && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
        insn = XEXP (temp, 0);

      do insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}

/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
        {
          rtx insn;
          /* Check for earlier movables inside that range,
             and mark them invalid.  We cannot use LUIDs here because
             insns created by loop.c for prior loops don't have LUIDs.
             Rather than reject all such insns from movables, we just
             explicitly check each insn in the libcall (since invariant
             libcalls aren't that common).  */
          for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1->insn == insn)
                m1->done = 1;
        }
    }
}

/* For each movable insn, see if the reg that it loads
   leads when it dies right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

static void
force_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;
  for (m1 = movables; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
        int regno = m1->regno;
        for (m = m1->next; m; m = m->next)
          /* ??? Could this be a bug?  What if CSE caused the
             register of M1 to be used after this insn?
             Since CSE does not update regno_last_uid,
             this insn M->insn might not be where it dies.
             But very likely this doesn't matter; what matters is
             that M's reg is computed from M1's reg.  */
          if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
              && !m->done)
            break;
        if (m != 0 && m->set_src == m1->set_dest
            /* If m->consec, m->set_src isn't valid.  */
            && m->consec == 0)
          m = 0;

        /* Increase the priority of moving the first insn
           since it permits the second to be moved as well.  */
        if (m != 0)
          {
            m->forces = m1;
            m1->lifetime += m->lifetime;
            m1->savings += m->savings;
          }
      }
}
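
/* Sketch (hypothetical regs): if insn A is
   "(set (reg 90) (const_int 64))" and reg 90 dies in insn B,
   "(set (reg 91) (mult (reg 90) (reg 88)))", where B is only
   conditionally movable, then B "forces" A: B can be moved only if A
   is.  A is therefore credited with B's lifetime and savings, raising
   the priority of moving A.  */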
\f
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, nregs)
     struct movable *movables;
     int nregs;
{
  register struct movable *m;
  char *matched_regs = (char *) alloca (nregs);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables; m; m = m->next)
    if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
      {
        register struct movable *m1;
        int regno = m->regno;

        bzero (matched_regs, nregs);
        matched_regs[regno] = 1;

        /* We want later insns to match the first one.  Don't make the first
           one match any later ones.  So start this loop at m->next.  */
        for (m1 = m->next; m1; m1 = m1->next)
          if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
              /* A reg used outside the loop mustn't be eliminated.  */
              && !m1->global
              /* A reg used for zero-extending mustn't be eliminated.  */
              && !m1->partial
              && (matched_regs[m1->regno]
                  ||
                  (
                   /* Can combine regs with different modes loaded from the
                      same constant only if the modes are the same or
                      if both are integer modes with M wider or the same
                      width as M1.  The check for integer is redundant, but
                      safe, since the only case of differing destination
                      modes with equal sources is when both sources are
                      VOIDmode, i.e., CONST_INT.  */
                   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
                    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
                        && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
                        && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
                            >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
                   /* See if the source of M1 says it matches M.  */
                   && ((GET_CODE (m1->set_src) == REG
                        && matched_regs[REGNO (m1->set_src)])
                       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
                                                movables))))
              && ((m->dependencies == m1->dependencies)
                  || rtx_equal_p (m->dependencies, m1->dependencies)))
            {
              m->lifetime += m1->lifetime;
              m->savings += m1->savings;
              m1->done = 1;
              m1->match = m;
              matched_regs[m1->regno] = 1;
            }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
         Don't combine any that are used outside this loop.  */
      for (m = movables; m; m = m->next)
        if (m->partial && ! m->global
            && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
          {
            register struct movable *m1;
            int first = uid_luid[REGNO_FIRST_UID (m->regno)];
            int last = uid_luid[REGNO_LAST_UID (m->regno)];

            if (m0 == 0)
              {
                /* First one: don't check for overlap, just record it.  */
                m0 = m;
                continue;
              }

            /* Make sure they extend to the same mode.
               (Almost always true.)  */
            if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
              continue;

            /* We already have one: check for overlap with those
               already combined together.  */
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1 == m0 || (m1->partial && m1->match == m0))
                if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
                       || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
                  goto overlap;

            /* No overlap: we can combine this with the others.  */
            m0->lifetime += m->lifetime;
            m0->savings += m->savings;
            m->done = 1;
            m->match = m0;

          overlap: ;
          }
    }
}
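
/* Illustration (hypothetical): if the loop loads the constant 100 into
   reg 96 and, in an unrelated insn, into reg 97, both loads are
   movable and equal, so reg 97's movable gets match pointing at reg
   96's and is not moved separately; move_movables will instead map
   reg 97 to reg 96 throughout, leaving a single hoisted load.  */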
\f
/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  int xn = REGNO (x);
  int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
          && ((mx->match == my->match && mx->match != 0)
              || mx->match == my
              || mx == my->match));
}

/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */

static int
rtx_equal_for_loop_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  register int i;
  register int j;
  register struct movable *m;
  register enum rtx_code code;
  register char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
      && CONSTANT_P (y))
    {
      for (m = movables; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (x)
            && rtx_equal_p (m->set_src, y))
          return 1;
    }
  else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
           && CONSTANT_P (x))
    {
      for (m = movables; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (y)
            && rtx_equal_p (m->set_src, x))
          return 1;
    }

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'w':
          if (XWINT (x, i) != XWINT (y, i))
            return 0;
          break;

        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return 0;
          break;

        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return 0;

          /* And the corresponding elements must match.  */
          for (j = 0; j < XVECLEN (x, i); j++)
            if (rtx_equal_for_loop_p (XVECEXP (x, i, j),
                                      XVECEXP (y, i, j), movables) == 0)
              return 0;
          break;

        case 'e':
          if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
            return 0;
          break;

        case 's':
          if (strcmp (XSTR (x, i), XSTR (y, i)))
            return 0;
          break;

        case 'u':
          /* These are just backpointers, so they don't matter.  */
          break;

        case '0':
          break;

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's,
             except for within LABEL_REFs and SYMBOL_REFs.  */
        default:
          abort ();
        }
    }
  return 1;
}
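
/* Example (hypothetical regs): with movables for regs 96 and 97 that
   match each other, "(plus:SI (reg 96) (const_int 4))" compares equal
   to "(plus:SI (reg 97) (const_int 4))" here even though rtx_equal_p
   would say no; and a "(reg 98)" whose movable has n_times_set == -2
   and set_src (const_int 9) compares equal to "(const_int 9)"
   itself.  */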
\f
/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use that reference.  */

static void
add_label_notes (x, insns)
     rtx x;
     rtx insns;
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  char *fmt;
  rtx insn;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      rtx next = next_real_insn (XEXP (x, 0));

      /* Don't record labels that refer to dispatch tables.
         This is not necessary, since the tablejump references the same label.
         And if we did record them, flow.c would make worse code.  */
      if (next == 0
          || ! (GET_CODE (next) == JUMP_INSN
                && (GET_CODE (PATTERN (next)) == ADDR_VEC
                    || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)))
        {
          for (insn = insns; insn; insn = NEXT_INSN (insn))
            if (reg_mentioned_p (XEXP (x, 0), insn))
              REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
                                                    REG_NOTES (insn));
        }
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        add_label_notes (XEXP (x, i), insns);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          add_label_notes (XVECEXP (x, i, j), insns);
    }
}
1613 \f
1614 /* Scan MOVABLES, and move the insns that deserve to be moved.
1615 If two matching movables are combined, replace one reg with the
1616 other throughout. */
1617
1618 static void
1619 move_movables (movables, threshold, insn_count, loop_start, end, nregs)
1620 struct movable *movables;
1621 int threshold;
1622 int insn_count;
1623 rtx loop_start;
1624 rtx end;
1625 int nregs;
1626 {
1627 rtx new_start = 0;
1628 register struct movable *m;
1629 register rtx p;
1630 /* Map of pseudo-register replacements to handle combining
1631 when we move several insns that load the same value
1632 into different pseudo-registers. */
1633 rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
1634 char *already_moved = (char *) alloca (nregs);
1635
1636 bzero (already_moved, nregs);
1637 bzero ((char *) reg_map, nregs * sizeof (rtx));
1638
1639 num_movables = 0;
1640
1641 for (m = movables; m; m = m->next)
1642 {
1643 /* Describe this movable insn. */
1644
1645 if (loop_dump_stream)
1646 {
1647 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1648 INSN_UID (m->insn), m->regno, m->lifetime);
1649 if (m->consec > 0)
1650 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1651 if (m->cond)
1652 fprintf (loop_dump_stream, "cond ");
1653 if (m->force)
1654 fprintf (loop_dump_stream, "force ");
1655 if (m->global)
1656 fprintf (loop_dump_stream, "global ");
1657 if (m->done)
1658 fprintf (loop_dump_stream, "done ");
1659 if (m->move_insn)
1660 fprintf (loop_dump_stream, "move-insn ");
1661 if (m->match)
1662 fprintf (loop_dump_stream, "matches %d ",
1663 INSN_UID (m->match->insn));
1664 if (m->forces)
1665 fprintf (loop_dump_stream, "forces %d ",
1666 INSN_UID (m->forces->insn));
1667 }
1668
1669 /* Count movables. Value used in heuristics in strength_reduce. */
1670 num_movables++;
1671
1672 /* Ignore the insn if it's already done (it matched something else).
1673 Otherwise, see if it is now safe to move. */
1674
1675 if (!m->done
1676 && (! m->cond
1677 || (1 == invariant_p (m->set_src)
1678 && (m->dependencies == 0
1679 || 1 == invariant_p (m->dependencies))
1680 && (m->consec == 0
1681 || 1 == consec_sets_invariant_p (m->set_dest,
1682 m->consec + 1,
1683 m->insn))))
1684 && (! m->forces || m->forces->done))
1685 {
1686 register int regno;
1687 register rtx p;
1688 int savings = m->savings;
1689
1690 /* We have an insn that is safe to move.
1691 Compute its desirability. */
1692
1693 p = m->insn;
1694 regno = m->regno;
1695
1696 if (loop_dump_stream)
1697 fprintf (loop_dump_stream, "savings %d ", savings);
1698
1699 if (moved_once[regno])
1700 {
1701 insn_count *= 2;
1702
1703 if (loop_dump_stream)
1704 fprintf (loop_dump_stream, "halved since already moved ");
1705 }
1706
1707 /* An insn MUST be moved if we already moved something else
1708 which is safe only if this one is moved too: that is,
1709 if already_moved[REGNO] is nonzero. */
1710
1711 /* An insn is desirable to move if the new lifetime of the
1712 register is no more than THRESHOLD times the old lifetime.
1713 If it's not desirable, it means the loop is so big
1714 that moving won't speed things up much,
1715 and it is liable to make register usage worse. */
1716
1717 /* It is also desirable to move if it can be moved at no
1718 extra cost because something else was already moved. */
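/* For instance, with a THRESHOLD of 6, a movable whose SAVINGS is 2
and whose register LIFETIME is 10 scores 6 * 2 * 10 == 120, so it
is moved whenever the loop has at most 120 insns (INSN_COUNT having
been doubled above if the register was already moved out of another
loop). */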
1719
1720 if (already_moved[regno]
1721 || flag_move_all_movables
1722 || (threshold * savings * m->lifetime) >= insn_count
1723 || (m->forces && m->forces->done
1724 && n_times_used[m->forces->regno] == 1))
1725 {
1726 int count;
1727 register struct movable *m1;
1728 rtx first;
1729
1730 /* Now move the insns that set the reg. */
1731
1732 if (m->partial && m->match)
1733 {
1734 rtx newpat, i1;
1735 rtx r1, r2;
1736 /* Find the end of this chain of matching regs.
1737 Thus, we load each reg in the chain from that one reg.
1738 And that reg is loaded with 0 directly,
1739 since it has ->match == 0. */
1740 for (m1 = m; m1->match; m1 = m1->match);
1741 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1742 SET_DEST (PATTERN (m1->insn)));
1743 i1 = emit_insn_before (newpat, loop_start);
1744
1745 /* Mark the moved, invariant reg as being allowed to
1746 share a hard reg with the other matching invariant. */
1747 REG_NOTES (i1) = REG_NOTES (m->insn);
1748 r1 = SET_DEST (PATTERN (m->insn));
1749 r2 = SET_DEST (PATTERN (m1->insn));
1750 regs_may_share
1751 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1752 gen_rtx_EXPR_LIST (VOIDmode, r2,
1753 regs_may_share));
1754 delete_insn (m->insn);
1755
1756 if (new_start == 0)
1757 new_start = i1;
1758
1759 if (loop_dump_stream)
1760 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1761 }
1762 /* If we are to re-generate the item being moved with a
1763 new move insn, first delete what we have and then emit
1764 the move insn before the loop. */
1765 else if (m->move_insn)
1766 {
1767 rtx i1, temp;
1768
1769 for (count = m->consec; count >= 0; count--)
1770 {
1771 /* If this is the first insn of a library call sequence,
1772 skip to the end. */
1773 if (GET_CODE (p) != NOTE
1774 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1775 p = XEXP (temp, 0);
1776
1777 /* If this is the last insn of a libcall sequence, then
1778 delete every insn in the sequence except the last.
1779 The last insn is handled in the normal manner. */
1780 if (GET_CODE (p) != NOTE
1781 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1782 {
1783 temp = XEXP (temp, 0);
1784 while (temp != p)
1785 temp = delete_insn (temp);
1786 }
1787
1788 p = delete_insn (p);
1789 while (p && GET_CODE (p) == NOTE)
1790 p = NEXT_INSN (p);
1791 }
1792
1793 start_sequence ();
1794 emit_move_insn (m->set_dest, m->set_src);
1795 temp = get_insns ();
1796 end_sequence ();
1797
1798 add_label_notes (m->set_src, temp);
1799
1800 i1 = emit_insns_before (temp, loop_start);
1801 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1802 REG_NOTES (i1)
1803 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1804 m->set_src, REG_NOTES (i1));
1805
1806 if (loop_dump_stream)
1807 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1808
1809 /* The more regs we move, the less we like moving them. */
1810 threshold -= 3;
1811 }
1812 else
1813 {
1814 for (count = m->consec; count >= 0; count--)
1815 {
1816 rtx i1, temp;
1817
1818 /* If first insn of libcall sequence, skip to end. */
1819 /* Do this at start of loop, since p is guaranteed to
1820 be an insn here. */
1821 if (GET_CODE (p) != NOTE
1822 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1823 p = XEXP (temp, 0);
1824
1825 /* If last insn of libcall sequence, move all
1826 insns except the last before the loop. The last
1827 insn is handled in the normal manner. */
1828 if (GET_CODE (p) != NOTE
1829 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1830 {
1831 rtx fn_address = 0;
1832 rtx fn_reg = 0;
1833 rtx fn_address_insn = 0;
1834
1835 first = 0;
1836 for (temp = XEXP (temp, 0); temp != p;
1837 temp = NEXT_INSN (temp))
1838 {
1839 rtx body;
1840 rtx n;
1841 rtx next;
1842
1843 if (GET_CODE (temp) == NOTE)
1844 continue;
1845
1846 body = PATTERN (temp);
1847
1848 /* Find the next insn after TEMP,
1849 not counting USE or NOTE insns. */
1850 for (next = NEXT_INSN (temp); next != p;
1851 next = NEXT_INSN (next))
1852 if (! (GET_CODE (next) == INSN
1853 && GET_CODE (PATTERN (next)) == USE)
1854 && GET_CODE (next) != NOTE)
1855 break;
1856
1857 /* If that is the call, this may be the insn
1858 that loads the function address.
1859
1860 Extract the function address from the insn
1861 that loads it into a register.
1862 If this insn was cse'd, we get incorrect code.
1863
1864 So emit a new move insn that copies the
1865 function address into the register that the
1866 call insn will use. flow.c will delete any
1867 redundant stores that we have created. */
1868 if (GET_CODE (next) == CALL_INSN
1869 && GET_CODE (body) == SET
1870 && GET_CODE (SET_DEST (body)) == REG
1871 && (n = find_reg_note (temp, REG_EQUAL,
1872 NULL_RTX)))
1873 {
1874 fn_reg = SET_SRC (body);
1875 if (GET_CODE (fn_reg) != REG)
1876 fn_reg = SET_DEST (body);
1877 fn_address = XEXP (n, 0);
1878 fn_address_insn = temp;
1879 }
1880 /* We have the call insn.
1881 If it uses the register we suspect it might,
1882 load it with the correct address directly. */
1883 if (GET_CODE (temp) == CALL_INSN
1884 && fn_address != 0
1885 && reg_referenced_p (fn_reg, body))
1886 emit_insn_after (gen_move_insn (fn_reg,
1887 fn_address),
1888 fn_address_insn);
1889
1890 if (GET_CODE (temp) == CALL_INSN)
1891 {
1892 i1 = emit_call_insn_before (body, loop_start);
1893 /* Because the USAGE information potentially
1894 contains objects other than hard registers
1895 we need to copy it. */
1896 if (CALL_INSN_FUNCTION_USAGE (temp))
1897 CALL_INSN_FUNCTION_USAGE (i1)
1898 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
1899 }
1900 else
1901 i1 = emit_insn_before (body, loop_start);
1902 if (first == 0)
1903 first = i1;
1904 if (temp == fn_address_insn)
1905 fn_address_insn = i1;
1906 REG_NOTES (i1) = REG_NOTES (temp);
1907 delete_insn (temp);
1908 }
1909 }
1910 if (m->savemode != VOIDmode)
1911 {
1912 /* P sets REG to zero; but we should clear only
1913 the bits that are not covered by the mode
1914 m->savemode. */
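/* For example, if m->savemode is QImode the mask computed below is
(1 << 8) - 1 == 0xff, so the AND clears every bit above the low
byte. */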
1915 rtx reg = m->set_dest;
1916 rtx sequence;
1917 rtx tem;
1918
1919 start_sequence ();
1920 tem = expand_binop
1921 (GET_MODE (reg), and_optab, reg,
1922 GEN_INT ((((HOST_WIDE_INT) 1
1923 << GET_MODE_BITSIZE (m->savemode)))
1924 - 1),
1925 reg, 1, OPTAB_LIB_WIDEN);
1926 if (tem == 0)
1927 abort ();
1928 if (tem != reg)
1929 emit_move_insn (reg, tem);
1930 sequence = gen_sequence ();
1931 end_sequence ();
1932 i1 = emit_insn_before (sequence, loop_start);
1933 }
1934 else if (GET_CODE (p) == CALL_INSN)
1935 {
1936 i1 = emit_call_insn_before (PATTERN (p), loop_start);
1937 /* Because the USAGE information potentially
1938 contains objects other than hard registers
1939 we need to copy it. */
1940 if (CALL_INSN_FUNCTION_USAGE (p))
1941 CALL_INSN_FUNCTION_USAGE (i1)
1942 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
1943 }
1944 else
1945 i1 = emit_insn_before (PATTERN (p), loop_start);
1946
1947 REG_NOTES (i1) = REG_NOTES (p);
1948
1949 /* If there is a REG_EQUAL note present whose value is
1950 not loop invariant, then delete it, since it may
1951 cause problems with later optimization passes.
1952 It is possible for cse to create such notes
1953 like this as a result of record_jump_cond. */
1954
1955 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
1956 && ! invariant_p (XEXP (temp, 0)))
1957 remove_note (i1, temp);
1958
1959 if (new_start == 0)
1960 new_start = i1;
1961
1962 if (loop_dump_stream)
1963 fprintf (loop_dump_stream, " moved to %d",
1964 INSN_UID (i1));
1965
1966 #if 0
1967 /* This isn't needed because REG_NOTES is copied
1968 below; moreover it would be wrong, since P might be a PARALLEL. */
1969 if (REG_NOTES (i1) == 0
1970 && ! m->partial /* But not if it's a zero-extend clr. */
1971 && ! m->global /* and not if used outside the loop
1972 (since it might get set outside). */
1973 && CONSTANT_P (SET_SRC (PATTERN (p))))
1974 REG_NOTES (i1)
1975 = gen_rtx_EXPR_LIST (REG_EQUAL,
1976 SET_SRC (PATTERN (p)),
1977 REG_NOTES (i1));
1978 #endif
1979
1980 /* If library call, now fix the REG_NOTES that contain
1981 insn pointers, namely REG_LIBCALL on FIRST
1982 and REG_RETVAL on I1. */
1983 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)) != 0)
1984 {
1985 XEXP (temp, 0) = first;
1986 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
1987 XEXP (temp, 0) = i1;
1988 }
1989
1990 delete_insn (p);
1991 do p = NEXT_INSN (p);
1992 while (p && GET_CODE (p) == NOTE);
1993 }
1994
1995 /* The more regs we move, the less we like moving them. */
1996 threshold -= 3;
1997 }
1998
1999 /* Any other movable that loads the same register
2000 MUST be moved. */
2001 already_moved[regno] = 1;
2002
2003 /* This reg has been moved out of one loop. */
2004 moved_once[regno] = 1;
2005
2006 /* The reg set here is now invariant. */
2007 if (! m->partial)
2008 n_times_set[regno] = 0;
2009
2010 m->done = 1;
2011
2012 /* Change the length-of-life info for the register
2013 to say it lives at least the full length of this loop.
2014 This will help guide optimizations in outer loops. */
2015
2016 if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
2017 /* This is the old insn before all the moved insns.
2018 We can't use the moved insn because it is out of range
2019 in uid_luid. Only the old insns have luids. */
2020 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2021 if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
2022 REGNO_LAST_UID (regno) = INSN_UID (end);
2023
2024 /* Combine with this moved insn any other matching movables. */
2025
2026 if (! m->partial)
2027 for (m1 = movables; m1; m1 = m1->next)
2028 if (m1->match == m)
2029 {
2030 rtx temp;
2031
2032 /* Schedule the reg loaded by M1
2033 for replacement so that it shares the reg of M.
2034 If the modes differ (only possible in restricted
2035 circumstances), make a SUBREG. */
2036 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2037 reg_map[m1->regno] = m->set_dest;
2038 else
2039 reg_map[m1->regno]
2040 = gen_lowpart_common (GET_MODE (m1->set_dest),
2041 m->set_dest);
2042
2043 /* Get rid of the matching insn
2044 and prevent further processing of it. */
2045 m1->done = 1;
2046
2047 /* If library call, delete all insns except the last, which
2048 is deleted below. */
2049 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2050 NULL_RTX)) != 0)
2051 {
2052 for (temp = XEXP (temp, 0); temp != m1->insn;
2053 temp = NEXT_INSN (temp))
2054 delete_insn (temp);
2055 }
2056 delete_insn (m1->insn);
2057
2058 /* Any other movable that loads the same register
2059 MUST be moved. */
2060 already_moved[m1->regno] = 1;
2061
2062 /* The reg merged here is now invariant,
2063 if the reg it matches is invariant. */
2064 if (! m->partial)
2065 n_times_set[m1->regno] = 0;
2066 }
2067 }
2068 else if (loop_dump_stream)
2069 fprintf (loop_dump_stream, "not desirable");
2070 }
2071 else if (loop_dump_stream && !m->match)
2072 fprintf (loop_dump_stream, "not safe");
2073
2074 if (loop_dump_stream)
2075 fprintf (loop_dump_stream, "\n");
2076 }
2077
2078 if (new_start == 0)
2079 new_start = loop_start;
2080
2081 /* Go through all the instructions in the loop, making
2082 all the register substitutions scheduled in REG_MAP. */
2083 for (p = new_start; p != end; p = NEXT_INSN (p))
2084 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2085 || GET_CODE (p) == CALL_INSN)
2086 {
2087 replace_regs (PATTERN (p), reg_map, nregs, 0);
2088 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2089 INSN_CODE (p) = -1;
2090 }
2091 }
2092 \f
2093 #if 0
2094 /* Scan X and replace the address of any MEM in it with ADDR.
2095 REG is the address that MEM should have before the replacement. */
2096
2097 static void
2098 replace_call_address (x, reg, addr)
2099 rtx x, reg, addr;
2100 {
2101 register enum rtx_code code;
2102 register int i;
2103 register char *fmt;
2104
2105 if (x == 0)
2106 return;
2107 code = GET_CODE (x);
2108 switch (code)
2109 {
2110 case PC:
2111 case CC0:
2112 case CONST_INT:
2113 case CONST_DOUBLE:
2114 case CONST:
2115 case SYMBOL_REF:
2116 case LABEL_REF:
2117 case REG:
2118 return;
2119
2120 case SET:
2121 /* Short cut for very common case. */
2122 replace_call_address (XEXP (x, 1), reg, addr);
2123 return;
2124
2125 case CALL:
2126 /* Short cut for very common case. */
2127 replace_call_address (XEXP (x, 0), reg, addr);
2128 return;
2129
2130 case MEM:
2131 /* If this MEM uses a reg other than the one we expected,
2132 something is wrong. */
2133 if (XEXP (x, 0) != reg)
2134 abort ();
2135 XEXP (x, 0) = addr;
2136 return;
2137
2138 default:
2139 break;
2140 }
2141
2142 fmt = GET_RTX_FORMAT (code);
2143 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2144 {
2145 if (fmt[i] == 'e')
2146 replace_call_address (XEXP (x, i), reg, addr);
2147 if (fmt[i] == 'E')
2148 {
2149 register int j;
2150 for (j = 0; j < XVECLEN (x, i); j++)
2151 replace_call_address (XVECEXP (x, i, j), reg, addr);
2152 }
2153 }
2154 }
2155 #endif
2156 \f
2157 /* Return the number of memory refs to addresses that vary
2158 in the rtx X. */
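/* For example, (mem (reg 100)) counts as one such read if reg 100 is
not known to be loop invariant, while a (mem (symbol_ref ...)) whose
address never varies contributes zero. */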
2159
2160 static int
2161 count_nonfixed_reads (x)
2162 rtx x;
2163 {
2164 register enum rtx_code code;
2165 register int i;
2166 register char *fmt;
2167 int value;
2168
2169 if (x == 0)
2170 return 0;
2171
2172 code = GET_CODE (x);
2173 switch (code)
2174 {
2175 case PC:
2176 case CC0:
2177 case CONST_INT:
2178 case CONST_DOUBLE:
2179 case CONST:
2180 case SYMBOL_REF:
2181 case LABEL_REF:
2182 case REG:
2183 return 0;
2184
2185 case MEM:
2186 return ((invariant_p (XEXP (x, 0)) != 1)
2187 + count_nonfixed_reads (XEXP (x, 0)));
2188
2189 default:
2190 break;
2191 }
2192
2193 value = 0;
2194 fmt = GET_RTX_FORMAT (code);
2195 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2196 {
2197 if (fmt[i] == 'e')
2198 value += count_nonfixed_reads (XEXP (x, i));
2199 if (fmt[i] == 'E')
2200 {
2201 register int j;
2202 for (j = 0; j < XVECLEN (x, i); j++)
2203 value += count_nonfixed_reads (XVECEXP (x, i, j));
2204 }
2205 }
2206 return value;
2207 }
2208
2209 \f
2210 #if 0
2211 /* P is an instruction that sets a register to the result of a ZERO_EXTEND.
2212 Replace it with an instruction to load just the low bytes
2213 if the machine supports such an instruction,
2214 and insert above LOOP_START an instruction to clear the register. */
2215
2216 static void
2217 constant_high_bytes (p, loop_start)
2218 rtx p, loop_start;
2219 {
2220 register rtx new;
2221 register int insn_code_number;
2222
2223 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2224 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2225
2226 new = gen_rtx_SET (VOIDmode,
2227 gen_rtx_STRICT_LOW_PART (VOIDmode,
2228 gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2229 SET_DEST (PATTERN (p)),
2230 0)),
2231 XEXP (SET_SRC (PATTERN (p)), 0));
2232 insn_code_number = recog (new, p);
2233
2234 if (insn_code_number)
2235 {
2236 register int i;
2237
2238 /* Clear destination register before the loop. */
2239 emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
2240 const0_rtx),
2241 loop_start);
2242
2243 /* Inside the loop, just load the low part. */
2244 PATTERN (p) = new;
2245 }
2246 }
2247 #endif
2248 \f
2249 /* Scan a loop setting the variables `unknown_address_altered',
2250 `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
2251 and `loop_has_volatile'.
2252 Also, fill in the array `loop_store_mems'. */
2253
2254 static void
2255 prescan_loop (start, end)
2256 rtx start, end;
2257 {
2258 register int level = 1;
2259 register rtx insn;
2260
2261 unknown_address_altered = 0;
2262 loop_has_call = 0;
2263 loop_has_volatile = 0;
2264 loop_store_mems_idx = 0;
2265
2266 num_mem_sets = 0;
2267 loops_enclosed = 1;
2268 loop_continue = 0;
2269
2270 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2271 insn = NEXT_INSN (insn))
2272 {
2273 if (GET_CODE (insn) == NOTE)
2274 {
2275 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2276 {
2277 ++level;
2278 /* Count number of loops contained in this one. */
2279 loops_enclosed++;
2280 }
2281 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2282 {
2283 --level;
2284 if (level == 0)
2285 {
2286 end = insn;
2287 break;
2288 }
2289 }
2290 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2291 {
2292 if (level == 1)
2293 loop_continue = insn;
2294 }
2295 }
2296 else if (GET_CODE (insn) == CALL_INSN)
2297 {
2298 if (! CONST_CALL_P (insn))
2299 unknown_address_altered = 1;
2300 loop_has_call = 1;
2301 }
2302 else
2303 {
2304 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2305 {
2306 if (volatile_refs_p (PATTERN (insn)))
2307 loop_has_volatile = 1;
2308
2309 note_stores (PATTERN (insn), note_addr_stored);
2310 }
2311 }
2312 }
2313 }
2314 \f
2315 /* Scan the function looking for loops. Record the start and end of each loop.
2316 Also mark as invalid loops any loops that contain a setjmp or are branched
2317 to from outside the loop. */
2318
2319 static void
2320 find_and_verify_loops (f)
2321 rtx f;
2322 {
2323 rtx insn, label;
2324 int current_loop = -1;
2325 int next_loop = -1;
2326 int loop;
2327
2328 /* If there are jumps to undefined labels,
2329 treat them as jumps out of any/all loops.
2330 This also avoids writing past end of tables when there are no loops. */
2331 uid_loop_num[0] = -1;
2332
2333 /* Find boundaries of loops, mark which loops are contained within
2334 loops, and invalidate loops that have setjmp. */
2335
2336 for (insn = f; insn; insn = NEXT_INSN (insn))
2337 {
2338 if (GET_CODE (insn) == NOTE)
2339 switch (NOTE_LINE_NUMBER (insn))
2340 {
2341 case NOTE_INSN_LOOP_BEG:
2342 loop_number_loop_starts[++next_loop] = insn;
2343 loop_number_loop_ends[next_loop] = 0;
2344 loop_outer_loop[next_loop] = current_loop;
2345 loop_invalid[next_loop] = 0;
2346 loop_number_exit_labels[next_loop] = 0;
2347 loop_number_exit_count[next_loop] = 0;
2348 current_loop = next_loop;
2349 break;
2350
2351 case NOTE_INSN_SETJMP:
2352 /* In this case, we must invalidate our current loop and any
2353 enclosing loop. */
2354 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2355 {
2356 loop_invalid[loop] = 1;
2357 if (loop_dump_stream)
2358 fprintf (loop_dump_stream,
2359 "\nLoop at %d ignored due to setjmp.\n",
2360 INSN_UID (loop_number_loop_starts[loop]));
2361 }
2362 break;
2363
2364 case NOTE_INSN_LOOP_END:
2365 if (current_loop == -1)
2366 abort ();
2367
2368 loop_number_loop_ends[current_loop] = insn;
2369 current_loop = loop_outer_loop[current_loop];
2370 break;
2371
2372 default:
2373 break;
2374 }
2375
2376 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2377 enclosing loop, but this doesn't matter. */
2378 uid_loop_num[INSN_UID (insn)] = current_loop;
2379 }
2380
2381 /* Any loop containing a label used in an initializer must be invalidated,
2382 because it can be jumped into from anywhere. */
2383
2384 for (label = forced_labels; label; label = XEXP (label, 1))
2385 {
2386 int loop_num;
2387
2388 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2389 loop_num != -1;
2390 loop_num = loop_outer_loop[loop_num])
2391 loop_invalid[loop_num] = 1;
2392 }
2393
2394 /* Any loop containing a label used for an exception handler must be
2395 invalidated, because it can be jumped into from anywhere. */
2396
2397 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2398 {
2399 int loop_num;
2400
2401 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2402 loop_num != -1;
2403 loop_num = loop_outer_loop[loop_num])
2404 loop_invalid[loop_num] = 1;
2405 }
2406
2407 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2408 loop that it is not contained within, that loop is marked invalid.
2409 If any INSN or CALL_INSN uses a label's address, then the loop containing
2410 that label is marked invalid, because it could be jumped into from
2411 anywhere.
2412
2413 Also look for blocks of code ending in an unconditional branch that
2414 exits the loop. If such a block is surrounded by a conditional
2415 branch around the block, move the block elsewhere (see below) and
2416 invert the jump to point to the code block. This may eliminate a
2417 label in our loop and will simplify processing by both us and a
2418 possible second cse pass. */
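/* As an illustration (the opcodes and labels are only a sketch):

beq L1 ; conditional branch around the block
jmp loop_exit ; unconditional exit from the loop
L1:

The `beq' is inverted to a `bne' aimed at a new label placed on the
`jmp loop_exit' block, and that block is moved after a BARRIER
outside the loop. */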
2419
2420 for (insn = f; insn; insn = NEXT_INSN (insn))
2421 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2422 {
2423 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2424
2425 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2426 {
2427 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2428 if (note)
2429 {
2430 int loop_num;
2431
2432 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2433 loop_num != -1;
2434 loop_num = loop_outer_loop[loop_num])
2435 loop_invalid[loop_num] = 1;
2436 }
2437 }
2438
2439 if (GET_CODE (insn) != JUMP_INSN)
2440 continue;
2441
2442 mark_loop_jump (PATTERN (insn), this_loop_num);
2443
2444 /* See if this is an unconditional branch outside the loop. */
2445 if (this_loop_num != -1
2446 && (GET_CODE (PATTERN (insn)) == RETURN
2447 || (simplejump_p (insn)
2448 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2449 != this_loop_num)))
2450 && get_max_uid () < max_uid_for_loop)
2451 {
2452 rtx p;
2453 rtx our_next = next_real_insn (insn);
2454 int dest_loop;
2455 int outer_loop = -1;
2456
2457 /* Go backwards until we reach the start of the loop, a label,
2458 or a JUMP_INSN. */
2459 for (p = PREV_INSN (insn);
2460 GET_CODE (p) != CODE_LABEL
2461 && ! (GET_CODE (p) == NOTE
2462 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2463 && GET_CODE (p) != JUMP_INSN;
2464 p = PREV_INSN (p))
2465 ;
2466
2467 /* Check for the case where we have a jump to an inner nested
2468 loop, and do not perform the optimization in that case. */
2469
2470 if (JUMP_LABEL (insn))
2471 {
2472 dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
2473 if (dest_loop != -1)
2474 {
2475 for (outer_loop = dest_loop; outer_loop != -1;
2476 outer_loop = loop_outer_loop[outer_loop])
2477 if (outer_loop == this_loop_num)
2478 break;
2479 }
2480 }
2481
2482 /* Make sure that the target of P is within the current loop. */
2483
2484 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2485 && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
2486 outer_loop = this_loop_num;
2487
2488 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2489 we have a block of code to try to move.
2490
2491 We look backward and then forward from the target of INSN
2492 to find a BARRIER at the same loop depth as the target.
2493 If we find such a BARRIER, we make a new label for the start
2494 of the block, invert the jump in P and point it to that label,
2495 and move the block of code to the spot we found. */
2496
2497 if (outer_loop == -1
2498 && GET_CODE (p) == JUMP_INSN
2499 && JUMP_LABEL (p) != 0
2500 /* Just ignore jumps to labels that were never emitted.
2501 These always indicate compilation errors. */
2502 && INSN_UID (JUMP_LABEL (p)) != 0
2503 && condjump_p (p)
2504 && ! simplejump_p (p)
2505 && next_real_insn (JUMP_LABEL (p)) == our_next)
2506 {
2507 rtx target
2508 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2509 int target_loop_num = uid_loop_num[INSN_UID (target)];
2510 rtx loc;
2511
2512 for (loc = target; loc; loc = PREV_INSN (loc))
2513 if (GET_CODE (loc) == BARRIER
2514 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2515 break;
2516
2517 if (loc == 0)
2518 for (loc = target; loc; loc = NEXT_INSN (loc))
2519 if (GET_CODE (loc) == BARRIER
2520 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2521 break;
2522
2523 if (loc)
2524 {
2525 rtx cond_label = JUMP_LABEL (p);
2526 rtx new_label = get_label_after (p);
2527
2528 /* Ensure our label doesn't go away. */
2529 LABEL_NUSES (cond_label)++;
2530
2531 /* Verify that uid_loop_num is large enough and that
2532 we can invert P. */
2533 if (invert_jump (p, new_label))
2534 {
2535 rtx q, r;
2536
2537 /* If no suitable BARRIER was found, create a suitable
2538 one before TARGET. Since TARGET is a fall through
2539 path, we'll need to insert a jump around our block
2540 and add a BARRIER before TARGET.
2541
2542 This creates an extra unconditional jump outside
2543 the loop. However, the benefits of removing rarely
2544 executed instructions from inside the loop usually
2545 outweigh the cost of the extra unconditional jump
2546 outside the loop. */
2547 if (loc == 0)
2548 {
2549 rtx temp;
2550
2551 temp = gen_jump (JUMP_LABEL (insn));
2552 temp = emit_jump_insn_before (temp, target);
2553 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2554 LABEL_NUSES (JUMP_LABEL (insn))++;
2555 loc = emit_barrier_before (target);
2556 }
2557
2558 /* Include the BARRIER after INSN and copy the
2559 block after LOC. */
2560 new_label = squeeze_notes (new_label, NEXT_INSN (insn));
2561 reorder_insns (new_label, NEXT_INSN (insn), loc);
2562
2563 /* All those insns are now in TARGET_LOOP_NUM. */
2564 for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
2565 q = NEXT_INSN (q))
2566 uid_loop_num[INSN_UID (q)] = target_loop_num;
2567
2568 /* The label jumped to by INSN is no longer a loop exit.
2569 Unless INSN does not have a label (e.g., it is a
2570 RETURN insn), search loop_number_exit_labels to find
2571 its label_ref, and remove it. Also turn off
2572 LABEL_OUTSIDE_LOOP_P bit. */
2573 if (JUMP_LABEL (insn))
2574 {
2575 int loop_num;
2576
2577 for (q = 0,
2578 r = loop_number_exit_labels[this_loop_num];
2579 r; q = r, r = LABEL_NEXTREF (r))
2580 if (XEXP (r, 0) == JUMP_LABEL (insn))
2581 {
2582 LABEL_OUTSIDE_LOOP_P (r) = 0;
2583 if (q)
2584 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2585 else
2586 loop_number_exit_labels[this_loop_num]
2587 = LABEL_NEXTREF (r);
2588 break;
2589 }
2590
2591 for (loop_num = this_loop_num;
2592 loop_num != -1 && loop_num != target_loop_num;
2593 loop_num = loop_outer_loop[loop_num])
2594 loop_number_exit_count[loop_num]--;
2595
2596 /* If we didn't find it, then something is wrong. */
2597 if (! r)
2598 abort ();
2599 }
2600
2601 /* P is now a jump outside the loop, so it must be put
2602 in loop_number_exit_labels, and marked as such.
2603 The easiest way to do this is to just call
2604 mark_loop_jump again for P. */
2605 mark_loop_jump (PATTERN (p), this_loop_num);
2606
2607 /* If INSN now jumps to the insn after it,
2608 delete INSN. */
2609 if (JUMP_LABEL (insn) != 0
2610 && (next_real_insn (JUMP_LABEL (insn))
2611 == next_real_insn (insn)))
2612 delete_insn (insn);
2613 }
2614
2615 /* Continue the loop after where the conditional
2616 branch used to jump, since the only branch insn
2617 in the block (if it still remains) is an inter-loop
2618 branch and hence needs no processing. */
2619 insn = NEXT_INSN (cond_label);
2620
2621 if (--LABEL_NUSES (cond_label) == 0)
2622 delete_insn (cond_label);
2623
2624 /* This loop will be continued with NEXT_INSN (insn). */
2625 insn = PREV_INSN (insn);
2626 }
2627 }
2628 }
2629 }
2630 }
2631
2632 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2633 loops it is contained in, mark the target loop invalid.
2634
2635 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2636
2637 static void
2638 mark_loop_jump (x, loop_num)
2639 rtx x;
2640 int loop_num;
2641 {
2642 int dest_loop;
2643 int outer_loop;
2644 int i;
2645
2646 switch (GET_CODE (x))
2647 {
2648 case PC:
2649 case USE:
2650 case CLOBBER:
2651 case REG:
2652 case MEM:
2653 case CONST_INT:
2654 case CONST_DOUBLE:
2655 case RETURN:
2656 return;
2657
2658 case CONST:
2659 /* There could be a label reference in here. */
2660 mark_loop_jump (XEXP (x, 0), loop_num);
2661 return;
2662
2663 case PLUS:
2664 case MINUS:
2665 case MULT:
2666 mark_loop_jump (XEXP (x, 0), loop_num);
2667 mark_loop_jump (XEXP (x, 1), loop_num);
2668 return;
2669
2670 case SIGN_EXTEND:
2671 case ZERO_EXTEND:
2672 mark_loop_jump (XEXP (x, 0), loop_num);
2673 return;
2674
2675 case LABEL_REF:
2676 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
2677
2678 /* Link together all labels that branch outside the loop. This
2679 is used by final_[bg]iv_value and the loop unrolling code. Also
2680 mark this LABEL_REF so we know that this branch should predict
2681 false. */
2682
2683 /* A check to make sure the label is not in an inner nested loop,
2684 since this does not count as a loop exit. */
2685 if (dest_loop != -1)
2686 {
2687 for (outer_loop = dest_loop; outer_loop != -1;
2688 outer_loop = loop_outer_loop[outer_loop])
2689 if (outer_loop == loop_num)
2690 break;
2691 }
2692 else
2693 outer_loop = -1;
2694
2695 if (loop_num != -1 && outer_loop == -1)
2696 {
2697 LABEL_OUTSIDE_LOOP_P (x) = 1;
2698 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2699 loop_number_exit_labels[loop_num] = x;
2700
2701 for (outer_loop = loop_num;
2702 outer_loop != -1 && outer_loop != dest_loop;
2703 outer_loop = loop_outer_loop[outer_loop])
2704 loop_number_exit_count[outer_loop]++;
2705 }
2706
2707 /* If this is inside a loop, but not in the current loop or one enclosed
2708 by it, it invalidates at least one loop. */
2709
2710 if (dest_loop == -1)
2711 return;
2712
2713 /* We must invalidate every nested loop containing the target of this
2714 label, except those that also contain the jump insn. */
2715
2716 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
2717 {
2718 /* Stop when we reach a loop that also contains the jump insn. */
2719 for (outer_loop = loop_num; outer_loop != -1;
2720 outer_loop = loop_outer_loop[outer_loop])
2721 if (dest_loop == outer_loop)
2722 return;
2723
2724 /* If we get here, we know we need to invalidate a loop. */
2725 if (loop_dump_stream && ! loop_invalid[dest_loop])
2726 fprintf (loop_dump_stream,
2727 "\nLoop at %d ignored due to multiple entry points.\n",
2728 INSN_UID (loop_number_loop_starts[dest_loop]));
2729
2730 loop_invalid[dest_loop] = 1;
2731 }
2732 return;
2733
2734 case SET:
2735 /* If this is not setting pc, ignore. */
2736 if (SET_DEST (x) == pc_rtx)
2737 mark_loop_jump (SET_SRC (x), loop_num);
2738 return;
2739
2740 case IF_THEN_ELSE:
2741 mark_loop_jump (XEXP (x, 1), loop_num);
2742 mark_loop_jump (XEXP (x, 2), loop_num);
2743 return;
2744
2745 case PARALLEL:
2746 case ADDR_VEC:
2747 for (i = 0; i < XVECLEN (x, 0); i++)
2748 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
2749 return;
2750
2751 case ADDR_DIFF_VEC:
2752 for (i = 0; i < XVECLEN (x, 1); i++)
2753 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
2754 return;
2755
2756 default:
2757 /* Treat anything else (such as a symbol_ref)
2758 as a branch out of this loop, but not into any loop. */
2759
2760 if (loop_num != -1)
2761 {
2762 #ifdef HAIFA
2763 LABEL_OUTSIDE_LOOP_P (x) = 1;
2764 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2765 #endif /* HAIFA */
2766
2767 loop_number_exit_labels[loop_num] = x;
2768
2769 for (outer_loop = loop_num; outer_loop != -1;
2770 outer_loop = loop_outer_loop[outer_loop])
2771 loop_number_exit_count[outer_loop]++;
2772 }
2773 return;
2774 }
2775 }
2776 \f
2777 /* Return nonzero if there is a label in the range from
2778 insn INSN to and including the insn whose luid is END.
2779 INSN must have an assigned luid (i.e., it must not have
2780 been previously created by loop.c). */
2781
2782 static int
2783 labels_in_range_p (insn, end)
2784 rtx insn;
2785 int end;
2786 {
2787 while (insn && INSN_LUID (insn) <= end)
2788 {
2789 if (GET_CODE (insn) == CODE_LABEL)
2790 return 1;
2791 insn = NEXT_INSN (insn);
2792 }
2793
2794 return 0;
2795 }
2796
2797 /* Record that a memory reference X is being set. */
2798
2799 static void
2800 note_addr_stored (x)
2801 rtx x;
2802 {
2803 register int i;
2804
2805 if (x == 0 || GET_CODE (x) != MEM)
2806 return;
2807
2808 /* Count number of memory writes.
2809 This affects heuristics in strength_reduce. */
2810 num_mem_sets++;
2811
2812 /* BLKmode MEM means all memory is clobbered. */
2813 if (GET_MODE (x) == BLKmode)
2814 unknown_address_altered = 1;
2815
2816 if (unknown_address_altered)
2817 return;
2818
2819 for (i = 0; i < loop_store_mems_idx; i++)
2820 if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
2821 && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
2822 {
2823 /* We are storing at the same address as previously noted. Save the
2824 wider reference. */
2825 if (GET_MODE_SIZE (GET_MODE (x))
2826 > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
2827 loop_store_mems[i] = x;
2828 break;
2829 }
2830
2831 if (i == NUM_STORES)
2832 unknown_address_altered = 1;
2833
2834 else if (i == loop_store_mems_idx)
2835 loop_store_mems[loop_store_mems_idx++] = x;
2836 }
2837 \f
2838 /* Return nonzero if the rtx X is invariant over the current loop.
2839
2840 The value is 2 if we refer to something only conditionally invariant.
2841
2842 If `unknown_address_altered' is nonzero, no memory ref is invariant.
2843 Otherwise, a memory ref is invariant if it does not conflict with
2844 anything stored in `loop_store_mems'. */
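/* For example, a reg whose only set within the loop has itself been
judged safe to move gets a negative n_times_set entry (see
scan_loop), so we return 2 for it: it is invariant provided that
set is in fact moved out. */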
2845
2846 int
2847 invariant_p (x)
2848 register rtx x;
2849 {
2850 register int i;
2851 register enum rtx_code code;
2852 register char *fmt;
2853 int conditional = 0;
2854
2855 if (x == 0)
2856 return 1;
2857 code = GET_CODE (x);
2858 switch (code)
2859 {
2860 case CONST_INT:
2861 case CONST_DOUBLE:
2862 case SYMBOL_REF:
2863 case CONST:
2864 return 1;
2865
2866 case LABEL_REF:
2867 /* A LABEL_REF is normally invariant, however, if we are unrolling
2868 loops, and this label is inside the loop, then it isn't invariant.
2869 This is because each unrolled copy of the loop body will have
2870 a copy of this label. If this was invariant, then an insn loading
2871 the address of this label into a register might get moved outside
2872 the loop, and then each loop body would end up using the same label.
2873
2874 We don't know the loop bounds here though, so just fail for all
2875 labels. */
2876 if (flag_unroll_loops)
2877 return 0;
2878 else
2879 return 1;
2880
2881 case PC:
2882 case CC0:
2883 case UNSPEC_VOLATILE:
2884 return 0;
2885
2886 case REG:
2887 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
2888 since the reg might be set by initialization within the loop. */
2889
2890 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
2891 || x == arg_pointer_rtx)
2892 && ! current_function_has_nonlocal_goto)
2893 return 1;
2894
2895 if (loop_has_call
2896 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
2897 return 0;
2898
2899 if (n_times_set[REGNO (x)] < 0)
2900 return 2;
2901
2902 return n_times_set[REGNO (x)] == 0;
2903
2904 case MEM:
2905 /* Volatile memory references must be rejected. Do this before
2906 checking for read-only items, so that volatile read-only items
2907 will be rejected also. */
2908 if (MEM_VOLATILE_P (x))
2909 return 0;
2910
2911 /* Read-only items (such as constants in a constant pool) are
2912 invariant if their address is. */
2913 if (RTX_UNCHANGING_P (x))
2914 break;
2915
2916 /* If we filled the table (or had a subroutine call), any location
2917 in memory could have been clobbered. */
2918 if (unknown_address_altered)
2919 return 0;
2920
2921 /* See if there is any dependence between a store and this load. */
2922 for (i = loop_store_mems_idx - 1; i >= 0; i--)
2923 if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
2924 return 0;
2925
2926 /* It's not invalidated by a store in memory
2927 but we must still verify the address is invariant. */
2928 break;
2929
2930 case ASM_OPERANDS:
2931 /* Don't mess with insns declared volatile. */
2932 if (MEM_VOLATILE_P (x))
2933 return 0;
2934 break;
2935
2936 default:
2937 break;
2938 }
2939
2940 fmt = GET_RTX_FORMAT (code);
2941 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2942 {
2943 if (fmt[i] == 'e')
2944 {
2945 int tem = invariant_p (XEXP (x, i));
2946 if (tem == 0)
2947 return 0;
2948 if (tem == 2)
2949 conditional = 1;
2950 }
2951 else if (fmt[i] == 'E')
2952 {
2953 register int j;
2954 for (j = 0; j < XVECLEN (x, i); j++)
2955 {
2956 int tem = invariant_p (XVECEXP (x, i, j));
2957 if (tem == 0)
2958 return 0;
2959 if (tem == 2)
2960 conditional = 1;
2961 }
2962
2963 }
2964 }
2965
2966 return 1 + conditional;
2967 }
2968
2969 \f
2970 /* Return nonzero if all the insns in the loop that set REG
2971 are INSN and the immediately following insns,
2972 and if each of those insns sets REG in an invariant way
2973 (not counting uses of REG in them).
2974
2975 The value is greater than 1 if some of these insns are only conditionally invariant.
2976
2977 We assume that INSN itself is the first set of REG
2978 and that its source is invariant. */
2979
2980 static int
2981 consec_sets_invariant_p (reg, n_sets, insn)
2982 int n_sets;
2983 rtx reg, insn;
2984 {
2985 register rtx p = insn;
2986 register int regno = REGNO (reg);
2987 rtx temp;
2988 /* Number of sets we have to insist on finding after INSN. */
2989 int count = n_sets - 1;
2990 int old = n_times_set[regno];
2991 int value = 0;
2992 int this;
2993
2994 /* If N_SETS hit the limit, we can't rely on its value. */
2995 if (n_sets == 127)
2996 return 0;
2997
2998 n_times_set[regno] = 0;
2999
3000 while (count > 0)
3001 {
3002 register enum rtx_code code;
3003 rtx set;
3004
3005 p = NEXT_INSN (p);
3006 code = GET_CODE (p);
3007
3008 /* If library call, skip to end of it. */
3009 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3010 p = XEXP (temp, 0);
3011
3012 this = 0;
3013 if (code == INSN
3014 && (set = single_set (p))
3015 && GET_CODE (SET_DEST (set)) == REG
3016 && REGNO (SET_DEST (set)) == regno)
3017 {
3018 this = invariant_p (SET_SRC (set));
3019 if (this != 0)
3020 value |= this;
3021 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)) != 0)
3022 {
3023 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3024 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3025 notes are OK. */
3026 this = (CONSTANT_P (XEXP (temp, 0))
3027 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3028 && invariant_p (XEXP (temp, 0))));
3029 if (this != 0)
3030 value |= this;
3031 }
3032 }
3033 if (this != 0)
3034 count--;
3035 else if (code != NOTE)
3036 {
3037 n_times_set[regno] = old;
3038 return 0;
3039 }
3040 }
3041
3042 n_times_set[regno] = old;
3043 /* If invariant_p ever returned 2, bit 1 of VALUE is set, so we return 3 (not 1); callers only test for the value 1. */
3044 return 1 + (value & 2);
3045 }
3046
3047 #if 0
3048 /* I don't think this condition is sufficient to allow INSN
3049 to be moved, so we no longer test it. */
3050
3051 /* Return 1 if all insns in the basic block of INSN and following INSN
3052 that set REG are invariant according to TABLE. */
3053
3054 static int
3055 all_sets_invariant_p (reg, insn, table)
3056 rtx reg, insn;
3057 short *table;
3058 {
3059 register rtx p = insn;
3060 register int regno = REGNO (reg);
3061
3062 while (1)
3063 {
3064 register enum rtx_code code;
3065 p = NEXT_INSN (p);
3066 code = GET_CODE (p);
3067 if (code == CODE_LABEL || code == JUMP_INSN)
3068 return 1;
3069 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3070 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3071 && REGNO (SET_DEST (PATTERN (p))) == regno)
3072 {
3073 if (!invariant_p (SET_SRC (PATTERN (p)), table))
3074 return 0;
3075 }
3076 }
3077 }
3078 #endif /* 0 */
3079 \f
3080 /* Look at all uses (not sets) of registers in X. For each, if it is
3081 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3082 a different insn, set USAGE[REGNO] to const0_rtx. */
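/* Thus, when the scan is complete, USAGE[R] is 0 if reg R was never
used, the using insn if it was used exactly once, and const0_rtx if
it was used in more than one insn. */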
3083
3084 static void
3085 find_single_use_in_loop (insn, x, usage)
3086 rtx insn;
3087 rtx x;
3088 rtx *usage;
3089 {
3090 enum rtx_code code = GET_CODE (x);
3091 char *fmt = GET_RTX_FORMAT (code);
3092 int i, j;
3093
3094 if (code == REG)
3095 usage[REGNO (x)]
3096 = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
3097 ? const0_rtx : insn;
3098
3099 else if (code == SET)
3100 {
3101 /* Don't count SET_DEST if it is a REG; otherwise count things
3102 in SET_DEST because if a register is partially modified, it won't
3103 show up as a potential movable so we don't care how USAGE is set
3104 for it. */
3105 if (GET_CODE (SET_DEST (x)) != REG)
3106 find_single_use_in_loop (insn, SET_DEST (x), usage);
3107 find_single_use_in_loop (insn, SET_SRC (x), usage);
3108 }
3109 else
3110 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3111 {
3112 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3113 find_single_use_in_loop (insn, XEXP (x, i), usage);
3114 else if (fmt[i] == 'E')
3115 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3116 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
3117 }
3118 }
3119 \f
3120 /* Increment N_TIMES_SET at the index of each register
3121 that is modified by an insn between FROM and TO.
3122 If the value of an element of N_TIMES_SET becomes 127 or more,
3123 stop incrementing it, to avoid overflow.
3124
3125 Store in SINGLE_USAGE[I] the single insn in which register I is
3126 used, if it is only used once. Otherwise, it is set to 0 (for no
3127 uses) or const0_rtx for more than one use. This parameter may be zero,
3128 in which case this processing is not done.
3129
3130 Store in *COUNT_PTR the number of actual instruction
3131 in the loop. We use this to decide what is worth moving out. */
3132
3133 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
3134 In that case, it is the insn that last set reg n. */
3135
3136 static void
3137 count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
3138 register rtx from, to;
3139 char *may_not_move;
3140 rtx *single_usage;
3141 int *count_ptr;
3142 int nregs;
3143 {
3144 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
3145 register rtx insn;
3146 register int count = 0;
3147 register rtx dest;
3148
3149 bzero ((char *) last_set, nregs * sizeof (rtx));
3150 for (insn = from; insn != to; insn = NEXT_INSN (insn))
3151 {
3152 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3153 {
3154 ++count;
3155
3156 /* If requested, record registers that have exactly one use. */
3157 if (single_usage)
3158 {
3159 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
3160
3161 /* Include uses in REG_EQUAL notes. */
3162 if (REG_NOTES (insn))
3163 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
3164 }
3165
3166 if (GET_CODE (PATTERN (insn)) == CLOBBER
3167 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
3168 /* Don't move a reg that has an explicit clobber.
3169 We might do so sometimes, but it's not worth the pain. */
3170 may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;
3171
3172 if (GET_CODE (PATTERN (insn)) == SET
3173 || GET_CODE (PATTERN (insn)) == CLOBBER)
3174 {
3175 dest = SET_DEST (PATTERN (insn));
3176 while (GET_CODE (dest) == SUBREG
3177 || GET_CODE (dest) == ZERO_EXTRACT
3178 || GET_CODE (dest) == SIGN_EXTRACT
3179 || GET_CODE (dest) == STRICT_LOW_PART)
3180 dest = XEXP (dest, 0);
3181 if (GET_CODE (dest) == REG)
3182 {
3183 register int regno = REGNO (dest);
3184 /* If this is the first setting of this reg
3185 in current basic block, and it was set before,
3186 it must be set in two basic blocks, so it cannot
3187 be moved out of the loop. */
3188 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3189 may_not_move[regno] = 1;
3190 /* If this is not first setting in current basic block,
3191 see if reg was used in between previous one and this.
3192 If so, neither one can be moved. */
3193 if (last_set[regno] != 0
3194 && reg_used_between_p (dest, last_set[regno], insn))
3195 may_not_move[regno] = 1;
3196 if (n_times_set[regno] < 127)
3197 ++n_times_set[regno];
3198 last_set[regno] = insn;
3199 }
3200 }
3201 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3202 {
3203 register int i;
3204 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3205 {
3206 register rtx x = XVECEXP (PATTERN (insn), 0, i);
3207 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3208 /* Don't move a reg that has an explicit clobber.
3209 It's not worth the pain to try to do it correctly. */
3210 may_not_move[REGNO (XEXP (x, 0))] = 1;
3211
3212 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3213 {
3214 dest = SET_DEST (x);
3215 while (GET_CODE (dest) == SUBREG
3216 || GET_CODE (dest) == ZERO_EXTRACT
3217 || GET_CODE (dest) == SIGN_EXTRACT
3218 || GET_CODE (dest) == STRICT_LOW_PART)
3219 dest = XEXP (dest, 0);
3220 if (GET_CODE (dest) == REG)
3221 {
3222 register int regno = REGNO (dest);
3223 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3224 may_not_move[regno] = 1;
3225 if (last_set[regno] != 0
3226 && reg_used_between_p (dest, last_set[regno], insn))
3227 may_not_move[regno] = 1;
3228 if (n_times_set[regno] < 127)
3229 ++n_times_set[regno];
3230 last_set[regno] = insn;
3231 }
3232 }
3233 }
3234 }
3235 }
3236
3237 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3238 bzero ((char *) last_set, nregs * sizeof (rtx));
3239 }
3240 *count_ptr = count;
3241 }
3242 \f
3243 /* Given a loop that is bounded by LOOP_START and LOOP_END
3244 and that is entered at SCAN_START,
3245 return 1 if the register set in SET contained in insn INSN is used by
3246 any insn that precedes INSN in cyclic order starting
3247 from the loop entry point.
3248
3249 We don't want to use INSN_LUID here because if we restrict INSN to those
3250 that have a valid INSN_LUID, it means we cannot move an invariant out
3251 from an inner loop past two loops. */
3252
3253 static int
3254 loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3255 rtx set, insn, loop_start, scan_start, loop_end;
3256 {
3257 rtx reg = SET_DEST (set);
3258 rtx p;
3259
3260 /* Scan forward checking for register usage. If we hit INSN, we
3261 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3262 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3263 {
3264 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3265 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3266 return 1;
3267
3268 if (p == loop_end)
3269 p = loop_start;
3270 }
3271
3272 return 0;
3273 }
3274 \f
3275 /* A "basic induction variable" or biv is a pseudo reg that is set
3276 (within this loop) only by incrementing or decrementing it. */
3277 /* A "general induction variable" or giv is a pseudo reg whose
3278 value is a linear function of a biv. */
3279
3280 /* Bivs are recognized by `basic_induction_var';
3281 Givs by `general_induction_var'. */
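/* For example, in the C loop

for (i = 0; i < n; i++)
x[i] = 0;

the counter `i' is a biv, and the address of `x[i]' is a giv: with
4-byte elements it equals the base address of `x' plus 4*i, i.e. a
linear function of the biv with mult_val 4 and add_val the base
address. */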
3282
3283 /* Indexed by register number, indicates whether or not register is an
3284 induction variable, and if so what type. */
3285
3286 enum iv_mode *reg_iv_type;
3287
3288 /* Indexed by register number, contains pointer to `struct induction'
3289 if register is an induction variable. This holds general info for
3290 all induction variables. */
3291
3292 struct induction **reg_iv_info;
3293
3294 /* Indexed by register number, contains pointer to `struct iv_class'
3295 if register is a basic induction variable. This holds info describing
3296 the class (a related group) of induction variables that the biv belongs
3297 to. */
3298
3299 struct iv_class **reg_biv_class;
3300
3301 /* The head of a list which links together (via the next field)
3302 every iv class for the current loop. */
3303
3304 struct iv_class *loop_iv_list;
3305
3306 /* Communication with routines called via `note_stores'. */
3307
3308 static rtx note_insn;
3309
3310 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3311
3312 static rtx addr_placeholder;
3313
3314 /* ??? Unfinished optimizations, and possible future optimizations,
3315 for the strength reduction code. */
3316
3317 /* ??? There is one more optimization you might be interested in doing: to
3318 allocate pseudo registers for frequently-accessed memory locations.
3319 If the same memory location is referenced each time around, it might
3320 be possible to copy it into a register before and out after.
3321 This is especially useful when the memory location is a variable which
3322 is in a stack slot because somewhere its address is taken. If the
3323 loop doesn't contain a function call and the variable isn't volatile,
3324 it is safe to keep the value in a register for the duration of the
3325 loop. One tricky thing is that the copying of the value back from the
3326 register has to be done on all exits from the loop. You need to check that
3327 all the exits from the loop go to the same place. */
3328
3329 /* ??? The interaction of biv elimination, and recognition of 'constant'
3330 bivs, may cause problems. */
3331
3332 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3333 performance problems.
3334
3335 Perhaps don't eliminate things that can be combined with an addressing
3336 mode. Find all givs that have the same biv, mult_val, and add_val;
3337 then for each giv, check to see if its only use dies in a following
3338 memory address. If so, generate a new memory address and check to see
3339 if it is valid. If it is valid, then store the modified memory address,
3340 otherwise, mark the giv as not done so that it will get its own iv. */
3341
3342 /* ??? Could try to optimize branches when it is known that a biv is always
3343 positive. */
3344
3345 /* ??? When replacing a biv in a compare insn, we should replace it with the
3346 closest giv so that an optimized branch can still be recognized by the combiner,
3347 e.g. the VAX acb insn. */
3348
3349 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3350 was rerun in loop_optimize whenever a register was added or moved.
3351 Also, some of the optimizations could be a little less conservative. */
3352 \f
3353 /* Perform strength reduction and induction variable elimination. */
3354
3355 /* Pseudo registers created during this function will be beyond the last
3356 valid index in several tables including n_times_set and regno_last_uid.
3357 This does not cause a problem here, because the added registers cannot be
3358 givs outside of their loop, and hence will never be reconsidered.
3359 But scan_loop must check regnos to make sure they are in bounds. */
3360
3361 static void
3362 strength_reduce (scan_start, end, loop_top, insn_count,
3363 loop_start, loop_end, unroll_p)
3364 rtx scan_start;
3365 rtx end;
3366 rtx loop_top;
3367 int insn_count;
3368 rtx loop_start;
3369 rtx loop_end;
3370 int unroll_p;
3371 {
3372 rtx p;
3373 rtx set;
3374 rtx inc_val;
3375 rtx mult_val;
3376 rtx dest_reg;
3377 /* This is 1 if current insn is not executed at least once for every loop
3378 iteration. */
3379 int not_every_iteration = 0;
3380 /* This is 1 if current insn may be executed more than once for every
3381 loop iteration. */
3382 int maybe_multiple = 0;
3383 /* Temporary list pointers for traversing loop_iv_list. */
3384 struct iv_class *bl, **backbl;
3385 /* Ratio of extra register life span we can justify
3386 for saving an instruction. More if loop doesn't call subroutines
3387 since in that case saving an insn makes more difference
3388 and more registers are available. */
3389 /* ??? could set this to last value of threshold in move_movables */
3390 int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
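/* E.g., on a hypothetical machine with 29 non-fixed registers this is
2 * (3 + 29) == 64 when the loop contains no calls, and half that
when it does. */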
3391 /* Map of pseudo-register replacements. */
3392 rtx *reg_map;
3393 int call_seen;
3394 rtx test;
3395 rtx end_insert_before;
3396 int loop_depth = 0;
3397
3398 reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
3399 * sizeof (enum iv_mode));
3400 bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode));
3401 reg_iv_info = (struct induction **)
3402 alloca (max_reg_before_loop * sizeof (struct induction *));
3403 bzero ((char *) reg_iv_info, (max_reg_before_loop
3404 * sizeof (struct induction *)));
3405 reg_biv_class = (struct iv_class **)
3406 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3407 bzero ((char *) reg_biv_class, (max_reg_before_loop
3408 * sizeof (struct iv_class *)));
3409
3410 loop_iv_list = 0;
3411 addr_placeholder = gen_reg_rtx (Pmode);
3412
3413 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3414 must be put before this insn, so that they will appear in the right
3415 order (i.e. loop order).
3416
3417 If loop_end is the end of the current function, then emit a
3418 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3419 dummy note insn. */
3420 if (NEXT_INSN (loop_end) != 0)
3421 end_insert_before = NEXT_INSN (loop_end);
3422 else
3423 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3424
3425 /* Scan through loop to find all possible bivs. */
3426
3427 p = scan_start;
3428 while (1)
3429 {
3430 p = NEXT_INSN (p);
3431 /* At end of a straight-in loop, we are done.
3432 At end of a loop entered at the bottom, scan the top. */
3433 if (p == scan_start)
3434 break;
3435 if (p == end)
3436 {
3437 if (loop_top != 0)
3438 p = loop_top;
3439 else
3440 break;
3441 if (p == scan_start)
3442 break;
3443 }
3444
3445 if (GET_CODE (p) == INSN
3446 && (set = single_set (p))
3447 && GET_CODE (SET_DEST (set)) == REG)
3448 {
3449 dest_reg = SET_DEST (set);
3450 if (REGNO (dest_reg) < max_reg_before_loop
3451 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3452 && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
3453 {
3454 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3455 dest_reg, p, &inc_val, &mult_val))
3456 {
3457 /* It is a possible basic induction variable.
3458 Create and initialize an induction structure for it. */
3459
3460 struct induction *v
3461 = (struct induction *) alloca (sizeof (struct induction));
3462
3463 record_biv (v, p, dest_reg, inc_val, mult_val,
3464 not_every_iteration, maybe_multiple);
3465 reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
3466 }
3467 else if (REGNO (dest_reg) < max_reg_before_loop)
3468 reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
3469 }
3470 }
3471
3472 /* Past CODE_LABEL, we get to insns that may be executed multiple
3473 times. The only way we can be sure that they can't is if every
3474 jump insn between here and the end of the loop either
3475 returns, exits the loop, is a forward jump, or is a jump
3476 to the loop start. */
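/* For example, a branch from later in the loop body back to such a
label would make the insns that follow the label execute more than
once per iteration. */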
3477
3478 if (GET_CODE (p) == CODE_LABEL)
3479 {
3480 rtx insn = p;
3481
3482 maybe_multiple = 0;
3483
3484 while (1)
3485 {
3486 insn = NEXT_INSN (insn);
3487 if (insn == scan_start)
3488 break;
3489 if (insn == end)
3490 {
3491 if (loop_top != 0)
3492 insn = loop_top;
3493 else
3494 break;
3495 if (insn == scan_start)
3496 break;
3497 }
3498
3499 if (GET_CODE (insn) == JUMP_INSN
3500 && GET_CODE (PATTERN (insn)) != RETURN
3501 && (! condjump_p (insn)
3502 || (JUMP_LABEL (insn) != 0
3503 && JUMP_LABEL (insn) != scan_start
3504 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
3505 || INSN_UID (insn) >= max_uid_for_loop
3506 || (INSN_LUID (JUMP_LABEL (insn))
3507 < INSN_LUID (insn))))))
3508 {
3509 maybe_multiple = 1;
3510 break;
3511 }
3512 }
3513 }
3514
3515 /* Past a jump, we get to insns for which we can't count
3516 on whether they will be executed during each iteration. */
3517 /* This code appears twice in strength_reduce. There is also similar
3518 code in scan_loop. */
3519 if (GET_CODE (p) == JUMP_INSN
3520 /* If we enter the loop in the middle, and scan around to the
3521 beginning, don't set not_every_iteration for that.
3522 This can be any kind of jump, since we want to know if insns
3523 will be executed if the loop is executed. */
3524 && ! (JUMP_LABEL (p) == loop_top
3525 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3526 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3527 {
3528 rtx label = 0;
3529
3530 /* If this is a jump outside the loop, then it also doesn't
3531 matter. Check to see if the target of this branch is on the
3532 loop_number_exit_labels list. */
3533
3534 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3535 label;
3536 label = LABEL_NEXTREF (label))
3537 if (XEXP (label, 0) == JUMP_LABEL (p))
3538 break;
3539
3540 if (! label)
3541 not_every_iteration = 1;
3542 }
3543
3544 else if (GET_CODE (p) == NOTE)
3545 {
3546 /* At the virtual top of a converted loop, insns are again known to
3547 be executed each iteration: logically, the loop begins here
3548 even though the exit code has been duplicated. */
3549 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3550 not_every_iteration = 0;
3551 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3552 loop_depth++;
3553 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3554 loop_depth--;
3555 }
3556
3557 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3558 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3559 or not an insn is known to be executed each iteration of the
3560 loop, whether or not any iterations are known to occur.
3561
3562 Therefore, if we have just passed a label and have no more labels
3563 between here and the test insn of the loop, we know these insns
3564 will be executed each iteration. */
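/* Hedged example (not in the original comment): after a conditional
   jump such as the one generated for `if (c) x++;', the insns on the
   conditional path have NOT_EVERY_ITERATION set; it is cleared again at
   the first label that has no further labels before the loop's test.  */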
3565
3566 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3567 && no_labels_between_p (p, loop_end))
3568 not_every_iteration = 0;
3569 }
3570
3571 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3572 Make a sanity check against n_times_set. */
3573 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3574 {
3575 if (reg_iv_type[bl->regno] != BASIC_INDUCT
3576 /* Above happens if register modified by subreg, etc. */
3577 /* Make sure it is not recognized as a basic induction var: */
3578 || n_times_set[bl->regno] != bl->biv_count
3579 /* If never incremented, it is an invariant that we decided not to
3580 move. So leave it alone. */
3581 || ! bl->incremented)
3582 {
3583 if (loop_dump_stream)
3584 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3585 bl->regno,
3586 (reg_iv_type[bl->regno] != BASIC_INDUCT
3587 ? "not induction variable"
3588 : (! bl->incremented ? "never incremented"
3589 : "count error")));
3590
3591 reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
3592 *backbl = bl->next;
3593 }
3594 else
3595 {
3596 backbl = &bl->next;
3597
3598 if (loop_dump_stream)
3599 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3600 }
3601 }
3602
3603 /* Exit if there are no bivs. */
3604 if (! loop_iv_list)
3605 {
3606 /* Can still unroll the loop anyway, but indicate that there is no
3607 strength reduction info available. */
3608 if (unroll_p)
3609 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);
3610
3611 return;
3612 }
3613
3614 /* Find initial value for each biv by searching backwards from loop_start,
3615 halting at first label. Also record any test condition. */
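/* For example (a hedged sketch, not from the original comment): if the
   insn just before the loop is `(set (reg i) (const_int 0))', the
   backward scan below finds it via note_stores and record_initial,
   giving the biv `i' an initial value of zero.  */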
3616
3617 call_seen = 0;
3618 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3619 {
3620 note_insn = p;
3621
3622 if (GET_CODE (p) == CALL_INSN)
3623 call_seen = 1;
3624
3625 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3626 || GET_CODE (p) == CALL_INSN)
3627 note_stores (PATTERN (p), record_initial);
3628
3629 /* Record any test of a biv that branches around the loop if there is no
3630 store between it and the start of the loop. We only care about tests with
3631 constants and registers and only certain of those. */
3632 if (GET_CODE (p) == JUMP_INSN
3633 && JUMP_LABEL (p) != 0
3634 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
3635 && (test = get_condition_for_loop (p)) != 0
3636 && GET_CODE (XEXP (test, 0)) == REG
3637 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3638 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
3639 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
3640 && bl->init_insn == 0)
3641 {
3642 /* If an NE test, we have an initial value: the branch around the
loop is taken only when the biv differs from the tested value, so
the biv must equal that value when the loop is entered. */
3643 if (GET_CODE (test) == NE)
3644 {
3645 bl->init_insn = p;
3646 bl->init_set = gen_rtx_SET (VOIDmode,
3647 XEXP (test, 0), XEXP (test, 1));
3648 }
3649 else
3650 bl->initial_test = test;
3651 }
3652 }
3653
3654 /* Look at each biv and see if we can say anything better about its
3655 initial value from any initializing insns set up above. (This is done
3656 in two passes to avoid missing SETs in a PARALLEL.) */
3657 for (bl = loop_iv_list; bl; bl = bl->next)
3658 {
3659 rtx src;
3660 rtx note;
3661
3662 if (! bl->init_insn)
3663 continue;
3664
3665 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3666 is a constant, use that constant as the initial value. */
3667 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3668 && CONSTANT_P (XEXP (note, 0)))
3669 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3670 && CONSTANT_P (XEXP (note, 0))))
3671 src = XEXP (note, 0);
3672 else
3673 src = SET_SRC (bl->init_set);
3674
3675 if (loop_dump_stream)
3676 fprintf (loop_dump_stream,
3677 "Biv %d initialized at insn %d: initial value ",
3678 bl->regno, INSN_UID (bl->init_insn));
3679
3680 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3681 || GET_MODE (src) == VOIDmode)
3682 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
3683 {
3684 bl->initial_value = src;
3685
3686 if (loop_dump_stream)
3687 {
3688 if (GET_CODE (src) == CONST_INT)
3689 {
3690 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
3691 fputc ('\n', loop_dump_stream);
3692 }
3693 else
3694 {
3695 print_rtl (loop_dump_stream, src);
3696 fprintf (loop_dump_stream, "\n");
3697 }
3698 }
3699 }
3700 else
3701 {
3702 /* The biv's initial value is not a simple move,
3703 so leave its initial value as the register itself. */
3704
3705 if (loop_dump_stream)
3706 fprintf (loop_dump_stream, "is complex\n");
3707 }
3708 }
3709
3710 /* Search the loop for general induction variables. */
3711
3712 /* A register is a giv if: it is only set once, it is a function of a
3713 biv and a constant (or invariant), and it is not a biv. */
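/* For example (illustrative only): given a biv `i', an insn computing
   `x = 4*i + 10' makes the register holding `x' a giv with mult_val 4
   and add_val 10.  */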
3714
3715 not_every_iteration = 0;
3716 loop_depth = 0;
3717 p = scan_start;
3718 while (1)
3719 {
3720 p = NEXT_INSN (p);
3721 /* At end of a straight-in loop, we are done.
3722 At end of a loop entered at the bottom, scan the top. */
3723 if (p == scan_start)
3724 break;
3725 if (p == end)
3726 {
3727 if (loop_top != 0)
3728 p = loop_top;
3729 else
3730 break;
3731 if (p == scan_start)
3732 break;
3733 }
3734
3735 /* Look for a general induction variable in a register. */
3736 if (GET_CODE (p) == INSN
3737 && (set = single_set (p))
3738 && GET_CODE (SET_DEST (set)) == REG
3739 && ! may_not_optimize[REGNO (SET_DEST (set))])
3740 {
3741 rtx src_reg;
3742 rtx add_val;
3743 rtx mult_val;
3744 int benefit;
3745 rtx regnote = 0;
3746
3747 dest_reg = SET_DEST (set);
3748 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
3749 continue;
3750
3751 if (/* SET_SRC is a giv. */
3752 ((benefit = general_induction_var (SET_SRC (set),
3753 &src_reg, &add_val,
3754 &mult_val))
3755 /* Equivalent expression is a giv. */
3756 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
3757 && (benefit = general_induction_var (XEXP (regnote, 0),
3758 &src_reg,
3759 &add_val, &mult_val))))
3760 /* Don't try to handle any regs made by loop optimization.
3761 We have nothing on them in regno_first_uid, etc. */
3762 && REGNO (dest_reg) < max_reg_before_loop
3763 /* Don't recognize a BASIC_INDUCT_VAR here. */
3764 && dest_reg != src_reg
3765 /* This must be the only place where the register is set. */
3766 && (n_times_set[REGNO (dest_reg)] == 1
3767 /* or all sets must be consecutive and make a giv. */
3768 || (benefit = consec_sets_giv (benefit, p,
3769 src_reg, dest_reg,
3770 &add_val, &mult_val))))
3771 {
3772 int count;
3773 struct induction *v
3774 = (struct induction *) alloca (sizeof (struct induction));
3775 rtx temp;
3776
3777 /* If this is a library call, increase benefit. */
3778 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
3779 benefit += libcall_benefit (p);
3780
3781 /* Skip the consecutive insns, if there are any. */
3782 for (count = n_times_set[REGNO (dest_reg)] - 1;
3783 count > 0; count--)
3784 {
3785 /* If first insn of libcall sequence, skip to end.
3786 Do this at start of loop, since INSN is guaranteed to
3787 be an insn here. */
3788 if (GET_CODE (p) != NOTE
3789 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3790 p = XEXP (temp, 0);
3791
3792 do p = NEXT_INSN (p);
3793 while (GET_CODE (p) == NOTE);
3794 }
3795
3796 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
3797 DEST_REG, not_every_iteration, NULL_PTR, loop_start,
3798 loop_end);
3799
3800 }
3801 }
3802
3803 #ifndef DONT_REDUCE_ADDR
3804 /* Look for givs which are memory addresses. */
3805 /* This resulted in worse code on a VAX 8600. I wonder if it
3806 still does. */
3807 if (GET_CODE (p) == INSN)
3808 find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
3809 loop_end);
3810 #endif
3811
3812 /* Update the status of whether giv can derive other givs. This can
3813 change when we pass a label or an insn that updates a biv. */
3814 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3815 || GET_CODE (p) == CODE_LABEL)
3816 update_giv_derive (p);
3817
3818 /* Past a jump, we get to insns for which we can't count
3819 on whether they will be executed during each iteration. */
3820 /* This code appears twice in strength_reduce. There is also similar
3821 code in scan_loop. */
3822 if (GET_CODE (p) == JUMP_INSN
3823 /* If we enter the loop in the middle, and scan around to the
3824 beginning, don't set not_every_iteration for that.
3825 This can be any kind of jump, since we want to know if insns
3826 will be executed if the loop is executed. */
3827 && ! (JUMP_LABEL (p) == loop_top
3828 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3829 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3830 {
3831 rtx label = 0;
3832
3833 /* If this is a jump outside the loop, then it also doesn't
3834 matter. Check to see if the target of this branch is on the
3835 loop_number_exit_labels list. */
3836
3837 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3838 label;
3839 label = LABEL_NEXTREF (label))
3840 if (XEXP (label, 0) == JUMP_LABEL (p))
3841 break;
3842
3843 if (! label)
3844 not_every_iteration = 1;
3845 }
3846
3847 else if (GET_CODE (p) == NOTE)
3848 {
3849 /* At the virtual top of a converted loop, insns are again known to
3850 be executed each iteration: logically, the loop begins here
3851 even though the exit code has been duplicated. */
3852 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3853 not_every_iteration = 0;
3854 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3855 loop_depth++;
3856 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3857 loop_depth--;
3858 }
3859
3860 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3861 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3862 or not an insn is known to be executed each iteration of the
3863 loop, whether or not any iterations are known to occur.
3864
3865 Therefore, if we have just passed a label and have no more labels
3866 between here and the test insn of the loop, we know these insns
3867 will be executed each iteration. */
3868
3869 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3870 && no_labels_between_p (p, loop_end))
3871 not_every_iteration = 0;
3872 }
3873
3874 /* Try to calculate and save the number of loop iterations. This is
3875 set to zero if the actual number can not be calculated. This must
3876 be called after all giv's have been identified, since otherwise it may
3877 fail if the iteration variable is a giv. */
3878
3879 loop_n_iterations = loop_iterations (loop_start, loop_end);
3880
3881 /* Now for each giv for which we still don't know whether or not it is
3882 replaceable, check to see if it is replaceable because its final value
3883 can be calculated. This must be done after loop_iterations is called,
3884 so that final_giv_value will work correctly. */
3885
3886 for (bl = loop_iv_list; bl; bl = bl->next)
3887 {
3888 struct induction *v;
3889
3890 for (v = bl->giv; v; v = v->next_iv)
3891 if (! v->replaceable && ! v->not_replaceable)
3892 check_final_value (v, loop_start, loop_end);
3893 }
3894
3895 /* Try to prove that the loop counter variable (if any) is always
3896 nonnegative; if so, record that fact with a REG_NONNEG note
3897 so that "decrement and branch until zero" insn can be used. */
3898 check_dbra_loop (loop_end, insn_count, loop_start);
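/* An illustrative sketch of that transformation (not from the original
   comment): a loop counting `i' up from 0 to N can be reversed to count
   down from N to 0, so that the exit test becomes a single
   decrement-and-branch-until-zero instruction; the REG_NONNEG note
   records that the counter never becomes negative.  */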
3899
3900 #ifdef HAIFA
3901 /* Record loop variables relevant for BCT optimization before unrolling
3902 the loop. Unrolling may update part of this information, and the
3903 correct data will be used for generating the BCT. */
3904 #ifdef HAVE_decrement_and_branch_on_count
3905 if (HAVE_decrement_and_branch_on_count)
3906 analyze_loop_iterations (loop_start, loop_end);
3907 #endif
3908 #endif /* HAIFA */
3909
3910 /* Create reg_map to hold substitutions for replaceable giv regs. */
3911 reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
3912 bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
3913
3914 /* Examine each iv class for feasibility of strength reduction/induction
3915 variable elimination. */
3916
3917 for (bl = loop_iv_list; bl; bl = bl->next)
3918 {
3919 struct induction *v;
3920 int benefit;
3921 int all_reduced;
3922 rtx final_value = 0;
3923
3924 /* Test whether it will be possible to eliminate this biv
3925 provided all givs are reduced. This is possible if either
3926 the reg is not used outside the loop, or we can compute
3927 what its final value will be.
3928
3929 For architectures with a decrement_and_branch_until_zero insn,
3930 don't do this if we put a REG_NONNEG note on the endtest for
3931 this biv. */
3932
3933 /* Compare against bl->init_insn rather than loop_start.
3934 We aren't concerned with any uses of the biv between
3935 init_insn and loop_start since these won't be affected
3936 by the value of the biv elsewhere in the function, so
3937 long as init_insn doesn't use the biv itself.
3938 March 14, 1989 -- self@bayes.arc.nasa.gov */
3939
3940 if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
3941 && bl->init_insn
3942 && INSN_UID (bl->init_insn) < max_uid_for_loop
3943 && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
3944 #ifdef HAVE_decrement_and_branch_until_zero
3945 && ! bl->nonneg
3946 #endif
3947 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3948 || ((final_value = final_biv_value (bl, loop_start, loop_end))
3949 #ifdef HAVE_decrement_and_branch_until_zero
3950 && ! bl->nonneg
3951 #endif
3952 ))
3953 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
3954 threshold, insn_count);
3955 else
3956 {
3957 if (loop_dump_stream)
3958 {
3959 fprintf (loop_dump_stream,
3960 "Cannot eliminate biv %d.\n",
3961 bl->regno);
3962 fprintf (loop_dump_stream,
3963 "First use: insn %d, last use: insn %d.\n",
3964 REGNO_FIRST_UID (bl->regno),
3965 REGNO_LAST_UID (bl->regno));
3966 }
3967 }
3968
3969 /* Combine all giv's for this iv_class. */
3970 combine_givs (bl);
3971
3972 /* This will be true at the end, if all givs which depend on this
3973 biv have been strength reduced.
3974 We can't (currently) eliminate the biv unless this is so. */
3975 all_reduced = 1;
3976
3977 /* Check each giv in this class to see if we will benefit by reducing
3978 it. Skip giv's combined with others. */
3979 for (v = bl->giv; v; v = v->next_iv)
3980 {
3981 struct induction *tv;
3982
3983 if (v->ignore || v->same)
3984 continue;
3985
3986 benefit = v->benefit;
3987
3988 /* Reduce benefit if not replaceable, since we will insert
3989 a move-insn to replace the insn that calculates this giv.
3990 Don't do this unless the giv is a user variable, since it
3991 will often be marked non-replaceable because of the duplication
3992 of the exit code outside the loop. In such a case, the copies
3993 we insert are dead and will be deleted. So they don't have
3994 a cost. Similar situations exist. */
3995 /* ??? The new final_[bg]iv_value code does a much better job
3996 of finding replaceable giv's, and hence this code may no longer
3997 be necessary. */
3998 if (! v->replaceable && ! bl->eliminable
3999 && REG_USERVAR_P (v->dest_reg))
4000 benefit -= copy_cost;
4001
4002 /* Decrease the benefit to count the add-insns that we will
4003 insert to increment the reduced reg for the giv. */
4004 benefit -= add_cost * bl->biv_count;
4005
4006 /* Decide whether to strength-reduce this giv or to leave the code
4007 unchanged (recompute it from the biv each time it is used).
4008 This decision can be made independently for each giv. */
4009
4010 #ifdef AUTO_INC_DEC
4011 /* Attempt to guess whether autoincrement will handle some of the
4012 new add insns; if so, increase BENEFIT (undo the subtraction of
4013 add_cost that was done above). */
4014 if (v->giv_type == DEST_ADDR
4015 && GET_CODE (v->mult_val) == CONST_INT)
4016 {
4017 #if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
4018 if (INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4019 benefit += add_cost * bl->biv_count;
4020 #endif
4021 #if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
4022 if (-INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4023 benefit += add_cost * bl->biv_count;
4024 #endif
4025 }
4026 #endif
4027
4028 /* If an insn is not to be strength reduced, then set its ignore
4029 flag, and clear all_reduced. */
4030
4031 /* A giv that depends on a reversed biv must be reduced if it is
4032 used after the loop exit, otherwise, it would have the wrong
4033 value after the loop exit. To make it simple, just reduce all
4034 of such giv's whether or not we know they are used after the loop
4035 exit. */
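/* Hedged illustration (not from the original comment): if the loop is
   reversed to count down, a giv such as `4*i' computed from the old
   upward-counting biv would hold the wrong value at exit unless it is
   reduced to a register of its own.  */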
4036
4037 if (! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
4038 && ! bl->reversed)
4039 {
4040 if (loop_dump_stream)
4041 fprintf (loop_dump_stream,
4042 "giv of insn %d not worth while, %d vs %d.\n",
4043 INSN_UID (v->insn),
4044 v->lifetime * threshold * benefit, insn_count);
4045 v->ignore = 1;
4046 all_reduced = 0;
4047 }
4048 else
4049 {
4050 /* Check that we can increment the reduced giv without a
4051 multiply insn. If not, reject it. */
4052
4053 for (tv = bl->biv; tv; tv = tv->next_iv)
4054 if (tv->mult_val == const1_rtx
4055 && ! product_cheap_p (tv->add_val, v->mult_val))
4056 {
4057 if (loop_dump_stream)
4058 fprintf (loop_dump_stream,
4059 "giv of insn %d: would need a multiply.\n",
4060 INSN_UID (v->insn));
4061 v->ignore = 1;
4062 all_reduced = 0;
4063 break;
4064 }
4065 }
4066 }
4067
4068 /* Reduce each giv that we decided to reduce. */
4069
4070 for (v = bl->giv; v; v = v->next_iv)
4071 {
4072 struct induction *tv;
4073 if (! v->ignore && v->same == 0)
4074 {
4075 int auto_inc_opt = 0;
4076
4077 v->new_reg = gen_reg_rtx (v->mode);
4078
4079 #ifdef AUTO_INC_DEC
4080 /* If the target has auto-increment addressing modes, and
4081 this is an address giv, then try to put the increment
4082 immediately after its use, so that flow can create an
4083 auto-increment addressing mode. */
4084 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4085 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4086 /* We don't handle reversed biv's because bl->biv->insn
4087 does not have a valid INSN_LUID. */
4088 && ! bl->reversed
4089 && v->always_executed && ! v->maybe_multiple)
4090 {
4091 /* If other giv's have been combined with this one, then
4092 this will work only if all uses of the other giv's occur
4093 before this giv's insn. This is difficult to check.
4094
4095 We simplify this by looking for the common case where
4096 there is one DEST_REG giv, and this giv's insn is the
4097 last use of the dest_reg of that DEST_REG giv. If
4098 the increment occurs after the address giv, then we can
4099 perform the optimization. (Otherwise, the increment
4100 would have to go before other_giv, and we would not be
4101 able to combine it with the address giv to get an
4102 auto-inc address.) */
4103 if (v->combined_with)
4104 {
4105 struct induction *other_giv = 0;
4106
4107 for (tv = bl->giv; tv; tv = tv->next_iv)
4108 if (tv->same == v)
4109 {
4110 if (other_giv)
4111 break;
4112 else
4113 other_giv = tv;
4114 }
4115 if (! tv && other_giv
4116 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4117 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4118 == INSN_UID (v->insn))
4119 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4120 auto_inc_opt = 1;
4121 }
4122 /* Check for the case where the increment is before the address
4123 giv. Do this test in "loop order". */
4124 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4125 && (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4126 || (INSN_LUID (bl->biv->insn)
4127 > INSN_LUID (scan_start))))
4128 || (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4129 && (INSN_LUID (scan_start)
4130 < INSN_LUID (bl->biv->insn))))
4131 auto_inc_opt = -1;
4132 else
4133 auto_inc_opt = 1;
4134
4135 #ifdef HAVE_cc0
4136 {
4137 rtx prev;
4138
4139 /* We can't put an insn immediately after one setting
4140 cc0, or immediately before one using cc0. */
4141 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4142 || (auto_inc_opt == -1
4143 && (prev = prev_nonnote_insn (v->insn)) != 0
4144 && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
4145 && sets_cc0_p (PATTERN (prev))))
4146 auto_inc_opt = 0;
4147 }
4148 #endif
4149
4150 if (auto_inc_opt)
4151 v->auto_inc_opt = 1;
4152 }
4153 #endif
4154
4155 /* For each place where the biv is incremented, add an insn
4156 to increment the new, reduced reg for the giv. */
4157 for (tv = bl->biv; tv; tv = tv->next_iv)
4158 {
4159 rtx insert_before;
4160
4161 if (! auto_inc_opt)
4162 insert_before = tv->insn;
4163 else if (auto_inc_opt == 1)
4164 insert_before = NEXT_INSN (v->insn);
4165 else
4166 insert_before = v->insn;
4167
4168 if (tv->mult_val == const1_rtx)
4169 emit_iv_add_mult (tv->add_val, v->mult_val,
4170 v->new_reg, v->new_reg, insert_before);
4171 else /* tv->mult_val == const0_rtx */
4172 /* A multiply is acceptable here
4173 since this is presumed to be seldom executed. */
4174 emit_iv_add_mult (tv->add_val, v->mult_val,
4175 v->add_val, v->new_reg, insert_before);
4176 }
4177
4178 /* Add code at loop start to initialize giv's reduced reg. */
4179
4180 emit_iv_add_mult (bl->initial_value, v->mult_val,
4181 v->add_val, v->new_reg, loop_start);
4182 }
4183 }
4184
4185 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4186 as not reduced.
4187
4188 For each giv register that can be reduced now: if replaceable,
4189 substitute reduced reg wherever the old giv occurs;
4190 else add new move insn "giv_reg = reduced_reg".
4191
4192 Also check for givs whose first use is their definition and whose
4193 last use is the definition of another giv. If so, it is likely
4194 dead and should not be used to eliminate a biv. */
4195 for (v = bl->giv; v; v = v->next_iv)
4196 {
4197 if (v->same && v->same->ignore)
4198 v->ignore = 1;
4199
4200 if (v->ignore)
4201 continue;
4202
4203 if (v->giv_type == DEST_REG
4204 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4205 {
4206 struct induction *v1;
4207
4208 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4209 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4210 v->maybe_dead = 1;
4211 }
4212
4213 /* Update expression if this was combined, in case other giv was
4214 replaced. */
4215 if (v->same)
4216 v->new_reg = replace_rtx (v->new_reg,
4217 v->same->dest_reg, v->same->new_reg);
4218
4219 if (v->giv_type == DEST_ADDR)
4220 /* Store reduced reg as the address in the memref where we found
4221 this giv. */
4222 validate_change (v->insn, v->location, v->new_reg, 0);
4223 else if (v->replaceable)
4224 {
4225 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4226
4227 #if 0
4228 /* I can no longer duplicate the original problem. Perhaps
4229 this is unnecessary now? */
4230
4231 /* Replaceable; it isn't strictly necessary to delete the old
4232 insn and emit a new one, because v->dest_reg is now dead.
4233
4234 However, especially when unrolling loops, the special
4235 handling for (set REG0 REG1) in the second cse pass may
4236 make v->dest_reg live again. To avoid this problem, emit
4237 an insn to set the original giv reg from the reduced giv.
4238 We can not delete the original insn, since it may be part
4239 of a LIBCALL, and the code in flow that eliminates dead
4240 libcalls will fail if it is deleted. */
4241 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4242 v->insn);
4243 #endif
4244 }
4245 else
4246 {
4247 /* Not replaceable; emit an insn to set the original giv reg from
4248 the reduced giv, same as above. */
4249 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4250 v->insn);
4251 }
4252
4253 /* When a loop is reversed, givs which depend on the reversed
4254 biv, and which are live outside the loop, must be set to their
4255 correct final value. This insn is only needed if the giv is
4256 not replaceable. The correct final value is the same as the
4257 value that the giv starts the reversed loop with. */
4258 if (bl->reversed && ! v->replaceable)
4259 emit_iv_add_mult (bl->initial_value, v->mult_val,
4260 v->add_val, v->dest_reg, end_insert_before);
4261 else if (v->final_value)
4262 {
4263 rtx insert_before;
4264
4265 /* If the loop has multiple exits, emit the insn before the
4266 loop to ensure that it will always be executed no matter
4267 how the loop exits. Otherwise, emit the insn after the loop,
4268 since this is slightly more efficient. */
4269 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4270 insert_before = loop_start;
4271 else
4272 insert_before = end_insert_before;
4273 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
4274 insert_before);
4275
4276 #if 0
4277 /* If the insn to set the final value of the giv was emitted
4278 before the loop, then we must delete the insn inside the loop
4279 that sets it. If this is a LIBCALL, then we must delete
4280 every insn in the libcall. Note, however, that
4281 final_giv_value will only succeed when there are multiple
4282 exits if the giv is dead at each exit, hence it does not
4283 matter that the original insn remains because it is dead
4284 anyway. */
4285 /* Delete the insn inside the loop that sets the giv since
4286 the giv is now set before (or after) the loop. */
4287 delete_insn (v->insn);
4288 #endif
4289 }
4290
4291 if (loop_dump_stream)
4292 {
4293 fprintf (loop_dump_stream, "giv at %d reduced to ",
4294 INSN_UID (v->insn));
4295 print_rtl (loop_dump_stream, v->new_reg);
4296 fprintf (loop_dump_stream, "\n");
4297 }
4298 }
4299
4300 /* All the givs based on the biv bl have been reduced if they
4301 merit it. */
4302
4303 /* For each giv not marked as maybe dead that has been combined with a
4304 second giv, clear any "maybe dead" mark on that second giv.
4305 v->new_reg will either be or refer to the register of the giv it
4306 combined with.
4307
4308 Doing this clearing avoids problems in biv elimination where a
4309 giv's new_reg is a complex value that can't be put in the insn but
4310 the giv combined with (with a reg as new_reg) is marked maybe_dead.
4311 Since the register will be used in either case, we'd prefer it be
4312 used from the simpler giv. */
4313
4314 for (v = bl->giv; v; v = v->next_iv)
4315 if (! v->maybe_dead && v->same)
4316 v->same->maybe_dead = 0;
4317
4318 /* Try to eliminate the biv, if it is a candidate.
4319 This won't work if ! all_reduced,
4320 since the givs we planned to use might not have been reduced.
4321
4322 We have to be careful that we didn't initially think we could eliminate
4323 this biv because of a giv that we now think may be dead and shouldn't
4324 be used as a biv replacement.
4325
4326 Also, there is the possibility that we may have a giv that looks
4327 like it can be used to eliminate a biv, but the resulting insn
4328 isn't valid. This can happen, for example, on the 88k, where a
4329 JUMP_INSN can compare a register only with zero. Attempts to
4330 replace it with a compare with a constant will fail.
4331
4332 Note that in cases where this call fails, we may have replaced some
4333 of the occurrences of the biv with a giv, but no harm was done in
4334 doing so in the rare cases where it can occur. */
4335
4336 if (all_reduced == 1 && bl->eliminable
4337 && maybe_eliminate_biv (bl, loop_start, end, 1,
4338 threshold, insn_count))
4340 {
4341 /* ?? If we created a new test to bypass the loop entirely,
4342 or otherwise drop straight in, based on this test, then
4343 we might want to rewrite it also. This way some later
4344 pass has more hope of removing the initialization of this
4345 biv entirely. */
4346
4347 /* If final_value != 0, then the biv may be used after loop end
4348 and we must emit an insn to set it just in case.
4349
4350 Reversed bivs already have an insn after the loop setting their
4351 value, so we don't need another one. We can't calculate the
4352 proper final value for such a biv here anyways. */
4353 if (final_value != 0 && ! bl->reversed)
4354 {
4355 rtx insert_before;
4356
4357 /* If the loop has multiple exits, emit the insn before the
4358 loop to ensure that it will always be executed no matter
4359 how the loop exits. Otherwise, emit the insn after the
4360 loop, since this is slightly more efficient. */
4361 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4362 insert_before = loop_start;
4363 else
4364 insert_before = end_insert_before;
4365
4366 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
4367 insert_before);
4368 }
4369
4370 #if 0
4371 /* Delete all of the instructions inside the loop which set
4372 the biv, as they are all dead. It is safe to delete them,
4373 because an insn setting a biv will never be part of a libcall. */
4374 /* However, deleting them will invalidate the regno_last_uid info,
4375 so keeping them around is more convenient. Final_biv_value
4376 will only succeed when there are multiple exits if the biv
4377 is dead at each exit, hence it does not matter that the original
4378 insn remains, because it is dead anyway. */
4379 for (v = bl->biv; v; v = v->next_iv)
4380 delete_insn (v->insn);
4381 #endif
4382
4383 if (loop_dump_stream)
4384 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4385 bl->regno);
4386 }
4387 }
4388
4389 /* Go through all the instructions in the loop, making all the
4390 register substitutions scheduled in REG_MAP. */
4391
4392 for (p = loop_start; p != end; p = NEXT_INSN (p))
4393 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4394 || GET_CODE (p) == CALL_INSN)
4395 {
4396 replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
4397 replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
4398 INSN_CODE (p) = -1;
4399 }
4400
4401 /* Unroll loops from within strength reduction so that we can use the
4402 induction variable information that strength_reduce has already
4403 collected. */
4404
4405 if (unroll_p)
4406 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
4407
4408 #ifdef HAIFA
4409 /* Instrument the loop with a BCT insn. */
4410 #ifdef HAVE_decrement_and_branch_on_count
4411 if (HAVE_decrement_and_branch_on_count)
4412 insert_bct (loop_start, loop_end);
4413 #endif
4414 #endif /* HAIFA */
4415
4416 if (loop_dump_stream)
4417 fprintf (loop_dump_stream, "\n");
4418 }
4419 \f
4420 /* Return 1 if X is a valid source for an initial value (or as value being
4421 compared against in an initial test).
4422
4423 X must be either a register or constant and must not be clobbered between
4424 the current insn and the start of the loop.
4425
4426 INSN is the insn containing X. */
4427
4428 static int
4429 valid_initial_value_p (x, insn, call_seen, loop_start)
4430 rtx x;
4431 rtx insn;
4432 int call_seen;
4433 rtx loop_start;
4434 {
4435 if (CONSTANT_P (x))
4436 return 1;
4437
4438 /* Only consider pseudos we know about initialized in insns whose luids
4439 we know. */
4440 if (GET_CODE (x) != REG
4441 || REGNO (x) >= max_reg_before_loop)
4442 return 0;
4443
4444 /* Don't use call-clobbered registers across a call which clobbers it. On
4445 some machines, don't use any hard registers at all. */
4446 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4447 && (SMALL_REGISTER_CLASSES
4448 || (call_used_regs[REGNO (x)] && call_seen)))
4449 return 0;
4450
4451 /* Don't use registers that have been clobbered before the start of the
4452 loop. */
4453 if (reg_set_between_p (x, insn, loop_start))
4454 return 0;
4455
4456 return 1;
4457 }
4458 \f
4459 /* Scan X for memory refs and check each memory address
4460 as a possible giv. INSN is the insn whose pattern X comes from.
4461 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4462 every loop iteration. */
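/* For example (an illustrative sketch, not part of the original
   comment): for a reference like `a[i]', an address such as
	(plus:SI (mult:SI (reg i) (const_int 4)) (reg a))
   inside a MEM is a giv of the biv `i' with mult_val 4 and add_val `a',
   even though no register ever holds the full address.  */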
4463
4464 static void
4465 find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
4466 rtx x;
4467 rtx insn;
4468 int not_every_iteration;
4469 rtx loop_start, loop_end;
4470 {
4471 register int i, j;
4472 register enum rtx_code code;
4473 register char *fmt;
4474
4475 if (x == 0)
4476 return;
4477
4478 code = GET_CODE (x);
4479 switch (code)
4480 {
4481 case REG:
4482 case CONST_INT:
4483 case CONST:
4484 case CONST_DOUBLE:
4485 case SYMBOL_REF:
4486 case LABEL_REF:
4487 case PC:
4488 case CC0:
4489 case ADDR_VEC:
4490 case ADDR_DIFF_VEC:
4491 case USE:
4492 case CLOBBER:
4493 return;
4494
4495 case MEM:
4496 {
4497 rtx src_reg;
4498 rtx add_val;
4499 rtx mult_val;
4500 int benefit;
4501
4502 benefit = general_induction_var (XEXP (x, 0),
4503 &src_reg, &add_val, &mult_val);
4504
4505 /* Don't make a DEST_ADDR giv with mult_val == 1 && add_val == 0.
4506 Such a giv isn't useful. */
4507 if (benefit > 0 && (mult_val != const1_rtx || add_val != const0_rtx))
4508 {
4509 /* Found one; record it. */
4510 struct induction *v
4511 = (struct induction *) oballoc (sizeof (struct induction));
4512
4513 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
4514 add_val, benefit, DEST_ADDR, not_every_iteration,
4515 &XEXP (x, 0), loop_start, loop_end);
4516
4517 v->mem_mode = GET_MODE (x);
4518 }
4519 }
4520 return;
4521
4522 default:
4523 break;
4524 }
4525
4526 /* Recursively scan the subexpressions for other mem refs. */
4527
4528 fmt = GET_RTX_FORMAT (code);
4529 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4530 if (fmt[i] == 'e')
4531 find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
4532 loop_end);
4533 else if (fmt[i] == 'E')
4534 for (j = 0; j < XVECLEN (x, i); j++)
4535 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
4536 loop_start, loop_end);
4537 }
4538 \f
4539 /* Fill in the data about one biv update.
4540 V is the `struct induction' in which we record the biv. (It is
4541 allocated by the caller, with alloca.)
4542 INSN is the insn that sets it.
4543 DEST_REG is the biv's reg.
4544
4545 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4546 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4547 being set to INC_VAL.
4548
4549 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4550 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4551 can be executed more than once per iteration. If MAYBE_MULTIPLE
4552 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4553 executed exactly once per iteration. */
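/* For example (an illustrative note, not from the original comment):
   an increment `i = i + 4' arrives here with MULT_VAL == const1_rtx and
   INC_VAL == (const_int 4); a plain assignment `i = 10' arrives with
   MULT_VAL == const0_rtx and INC_VAL == (const_int 10).  */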
4554
4555 static void
4556 record_biv (v, insn, dest_reg, inc_val, mult_val,
4557 not_every_iteration, maybe_multiple)
4558 struct induction *v;
4559 rtx insn;
4560 rtx dest_reg;
4561 rtx inc_val;
4562 rtx mult_val;
4563 int not_every_iteration;
4564 int maybe_multiple;
4565 {
4566 struct iv_class *bl;
4567
4568 v->insn = insn;
4569 v->src_reg = dest_reg;
4570 v->dest_reg = dest_reg;
4571 v->mult_val = mult_val;
4572 v->add_val = inc_val;
4573 v->mode = GET_MODE (dest_reg);
4574 v->always_computable = ! not_every_iteration;
4575 v->always_executed = ! not_every_iteration;
4576 v->maybe_multiple = maybe_multiple;
4577
4578 /* Add this to the reg's iv_class, creating a class
4579 if this is the first incrementation of the reg. */
4580
4581 bl = reg_biv_class[REGNO (dest_reg)];
4582 if (bl == 0)
4583 {
4584 /* Create and initialize new iv_class. */
4585
4586 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
4587
4588 bl->regno = REGNO (dest_reg);
4589 bl->biv = 0;
4590 bl->giv = 0;
4591 bl->biv_count = 0;
4592 bl->giv_count = 0;
4593
4594 /* Set initial value to the reg itself. */
4595 bl->initial_value = dest_reg;
4596 /* We haven't seen the initializing insn yet. */
4597 bl->init_insn = 0;
4598 bl->init_set = 0;
4599 bl->initial_test = 0;
4600 bl->incremented = 0;
4601 bl->eliminable = 0;
4602 bl->nonneg = 0;
4603 bl->reversed = 0;
4604 bl->total_benefit = 0;
4605
4606 /* Add this class to loop_iv_list. */
4607 bl->next = loop_iv_list;
4608 loop_iv_list = bl;
4609
4610 /* Put it in the array of biv register classes. */
4611 reg_biv_class[REGNO (dest_reg)] = bl;
4612 }
4613
4614 /* Update IV_CLASS entry for this biv. */
4615 v->next_iv = bl->biv;
4616 bl->biv = v;
4617 bl->biv_count++;
4618 if (mult_val == const1_rtx)
4619 bl->incremented = 1;
4620
4621 if (loop_dump_stream)
4622 {
4623 fprintf (loop_dump_stream,
4624 "Insn %d: possible biv, reg %d,",
4625 INSN_UID (insn), REGNO (dest_reg));
4626 if (GET_CODE (inc_val) == CONST_INT)
4627 {
4628 fprintf (loop_dump_stream, " const =");
4629 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val));
4630 fputc ('\n', loop_dump_stream);
4631 }
4632 else
4633 {
4634 fprintf (loop_dump_stream, " const = ");
4635 print_rtl (loop_dump_stream, inc_val);
4636 fprintf (loop_dump_stream, "\n");
4637 }
4638 }
4639 }
4640 \f
4641 /* Fill in the data about one giv.
4642 V is the `struct induction' in which we record the giv. (It is
4643 allocated by the caller, with alloca.)
4644 INSN is the insn that sets it.
4645 BENEFIT estimates the savings from deleting this insn.
4646 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4647 into a register or is used as a memory address.
4648
4649 SRC_REG is the biv reg which the giv is computed from.
4650 DEST_REG is the giv's reg (if the giv is stored in a reg).
4651 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4652 LOCATION points to the place where this giv's value appears in INSN. */
4653
4654 static void
4655 record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
4656 type, not_every_iteration, location, loop_start, loop_end)
4657 struct induction *v;
4658 rtx insn;
4659 rtx src_reg;
4660 rtx dest_reg;
4661 rtx mult_val, add_val;
4662 int benefit;
4663 enum g_types type;
4664 int not_every_iteration;
4665 rtx *location;
4666 rtx loop_start, loop_end;
4667 {
4668 struct induction *b;
4669 struct iv_class *bl;
4670 rtx set = single_set (insn);
4671
4672 v->insn = insn;
4673 v->src_reg = src_reg;
4674 v->giv_type = type;
4675 v->dest_reg = dest_reg;
4676 v->mult_val = mult_val;
4677 v->add_val = add_val;
4678 v->benefit = benefit;
4679 v->location = location;
4680 v->cant_derive = 0;
4681 v->combined_with = 0;
4682 v->maybe_multiple = 0;
4683 v->maybe_dead = 0;
4684 v->derive_adjustment = 0;
4685 v->same = 0;
4686 v->ignore = 0;
4687 v->new_reg = 0;
4688 v->final_value = 0;
4689 v->same_insn = 0;
4690 v->auto_inc_opt = 0;
4691 v->unrolled = 0;
4692 v->shared = 0;
4693
4694 /* The v->always_computable field is used in update_giv_derive, to
4695 determine whether a giv can be used to derive another giv. For a
4696 DEST_REG giv, INSN computes a new value for the giv, so its value
4697 isn't computable if INSN isn't executed every iteration.
4698 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4699 it does not compute a new value. Hence the value is always computable
4700 regardless of whether INSN is executed each iteration. */
4701
4702 if (type == DEST_ADDR)
4703 v->always_computable = 1;
4704 else
4705 v->always_computable = ! not_every_iteration;
4706
4707 v->always_executed = ! not_every_iteration;
4708
4709 if (type == DEST_ADDR)
4710 {
4711 v->mode = GET_MODE (*location);
4712 v->lifetime = 1;
4713 v->times_used = 1;
4714 }
4715 else /* type == DEST_REG */
4716 {
4717 v->mode = GET_MODE (SET_DEST (set));
4718
4719 v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
4720 - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
4721
4722 v->times_used = n_times_used[REGNO (dest_reg)];
4723
4724 /* If the lifetime is zero, it means that this register is
4725 really a dead store. So mark this as a giv that can be
4726 ignored. This will not prevent the biv from being eliminated. */
4727 if (v->lifetime == 0)
4728 v->ignore = 1;
4729
4730 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
4731 reg_iv_info[REGNO (dest_reg)] = v;
4732 }
4733
4734 /* Add the giv to the class of givs computed from one biv. */
4735
4736 bl = reg_biv_class[REGNO (src_reg)];
4737 if (bl)
4738 {
4739 v->next_iv = bl->giv;
4740 bl->giv = v;
4741 /* Don't count DEST_ADDR. This is supposed to count the number of
4742 insns that calculate givs. */
4743 if (type == DEST_REG)
4744 bl->giv_count++;
4745 bl->total_benefit += benefit;
4746 }
4747 else
4748 /* Fatal error, biv missing for this giv? */
4749 abort ();
4750
4751 if (type == DEST_ADDR)
4752 v->replaceable = 1;
4753 else
4754 {
4755 /* The giv can be replaced outright by the reduced register only if all
4756 of the following conditions are true:
4757 - the insn that sets the giv is always executed on any iteration
4758 on which the giv is used at all
4759 (there are two ways to deduce this:
4760 either the insn is executed on every iteration,
4761 or all uses follow that insn in the same basic block),
4762 - the giv is not used outside the loop
4763 - no assignments to the biv occur during the giv's lifetime. */
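/* Illustrative example (not from the original comment): in
   `for (i = 0; i < n; i++) { t = 4*i; a[t] = 0; }', the giv `t' is set
   and used before `i' is incremented again and is dead outside the
   loop, so every use of `t' can be rewritten to the reduced register
   outright.  */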
4764
4765 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
4766 /* Previous line always fails if INSN was moved by loop opt. */
4767 && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
4768 && (! not_every_iteration
4769 || last_use_this_basic_block (dest_reg, insn)))
4770 {
4771 /* Now check that there are no assignments to the biv within the
4772 giv's lifetime. This requires two separate checks. */
4773
4774 /* Check each biv update, and fail if any are between the first
4775 and last use of the giv.
4776
4777 If this loop contains an inner loop that was unrolled, then
4778 the insn modifying the biv may have been emitted by the loop
4779 unrolling code, and hence does not have a valid luid. Just
4780 mark the biv as not replaceable in this case. It is not very
4781 useful as a biv, because it is used in two different loops.
4782 It is very unlikely that we would be able to optimize the giv
4783 using this biv anyway. */
4784
4785 v->replaceable = 1;
4786 for (b = bl->biv; b; b = b->next_iv)
4787 {
4788 if (INSN_UID (b->insn) >= max_uid_for_loop
4789 || ((uid_luid[INSN_UID (b->insn)]
4790 >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
4791 && (uid_luid[INSN_UID (b->insn)]
4792 <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
4793 {
4794 v->replaceable = 0;
4795 v->not_replaceable = 1;
4796 break;
4797 }
4798 }
4799
4800 /* If there are any backwards branches that go from after the
4801 biv update to before it, then this giv is not replaceable. */
4802 if (v->replaceable)
4803 for (b = bl->biv; b; b = b->next_iv)
4804 if (back_branch_in_range_p (b->insn, loop_start, loop_end))
4805 {
4806 v->replaceable = 0;
4807 v->not_replaceable = 1;
4808 break;
4809 }
4810 }
4811 else
4812 {
4813 /* May still be replaceable, we don't have enough info here to
4814 decide. */
4815 v->replaceable = 0;
4816 v->not_replaceable = 0;
4817 }
4818 }
4819
4820 if (loop_dump_stream)
4821 {
4822 if (type == DEST_REG)
4823 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
4824 INSN_UID (insn), REGNO (dest_reg));
4825 else
4826 fprintf (loop_dump_stream, "Insn %d: dest address",
4827 INSN_UID (insn));
4828
4829 fprintf (loop_dump_stream, " src reg %d benefit %d",
4830 REGNO (src_reg), v->benefit);
4831 fprintf (loop_dump_stream, " used %d lifetime %d",
4832 v->times_used, v->lifetime);
4833
4834 if (v->replaceable)
4835 fprintf (loop_dump_stream, " replaceable");
4836
4837 if (GET_CODE (mult_val) == CONST_INT)
4838 {
4839 fprintf (loop_dump_stream, " mult ");
4840 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val));
4841 }
4842 else
4843 {
4844 fprintf (loop_dump_stream, " mult ");
4845 print_rtl (loop_dump_stream, mult_val);
4846 }
4847
4848 if (GET_CODE (add_val) == CONST_INT)
4849 {
4850 fprintf (loop_dump_stream, " add ");
4851 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val));
4852 }
4853 else
4854 {
4855 fprintf (loop_dump_stream, " add ");
4856 print_rtl (loop_dump_stream, add_val);
4857 }
4858 }
4859
4860 if (loop_dump_stream)
4861 fprintf (loop_dump_stream, "\n");
4862
4863 }
4864
4865
4866 /* All this does is determine whether a giv can be made replaceable because
4867 its final value can be calculated. This code can not be part of record_giv
4868 above, because final_giv_value requires that the number of loop iterations
4869 be known, and that can not be accurately calculated until after all givs
4870 have been identified. */
4871
4872 static void
4873 check_final_value (v, loop_start, loop_end)
4874 struct induction *v;
4875 rtx loop_start, loop_end;
4876 {
4877 struct iv_class *bl;
4878 rtx final_value = 0;
4879
4880 bl = reg_biv_class[REGNO (v->src_reg)];
4881
4882 /* DEST_ADDR givs will never reach here, because they are always marked
4883 replaceable above in record_giv. */
4884
4885 /* The giv can be replaced outright by the reduced register only if all
4886 of the following conditions are true:
4887 - the insn that sets the giv is always executed on any iteration
4888 on which the giv is used at all
4889 (there are two ways to deduce this:
4890 either the insn is executed on every iteration,
4891 or all uses follow that insn in the same basic block),
4892 - its final value can be calculated (this condition is different
4893 than the one above in record_giv)
4894 - no assignments to the biv occur during the giv's lifetime. */
4895
4896 #if 0
4897 /* This is only called now when replaceable is known to be false. */
4898 /* Clear replaceable, so that it won't confuse final_giv_value. */
4899 v->replaceable = 0;
4900 #endif
4901
4902 if ((final_value = final_giv_value (v, loop_start, loop_end))
4903 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
4904 {
4905 int biv_increment_seen = 0;
4906 rtx p = v->insn;
4907 rtx last_giv_use;
4908
4909 v->replaceable = 1;
4910
4911 /* When trying to determine whether or not a biv increment occurs
4912 during the lifetime of the giv, we can ignore uses of the variable
4913 outside the loop because final_value is true. Hence we can not
4914 use regno_last_uid and regno_first_uid as above in record_giv. */
4915
4916 /* Search the loop to determine whether any assignments to the
4917 biv occur during the giv's lifetime. Start with the insn
4918 that sets the giv, and search around the loop until we come
4919 back to that insn again.
4920
4921 Also fail if there is a jump within the giv's lifetime that jumps
4922 to somewhere outside the lifetime but still within the loop. This
4923 catches spaghetti code where the execution order is not linear, and
4924 hence the above test fails. Here we assume that the giv lifetime
4925 does not extend from one iteration of the loop to the next, so as
4926 to make the test easier. Since the lifetime isn't known yet,
4927 this requires two loops. See also record_giv above. */
4928
4929 last_giv_use = v->insn;
4930
4931 while (1)
4932 {
4933 p = NEXT_INSN (p);
4934 if (p == loop_end)
4935 p = NEXT_INSN (loop_start);
4936 if (p == v->insn)
4937 break;
4938
4939 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4940 || GET_CODE (p) == CALL_INSN)
4941 {
4942 if (biv_increment_seen)
4943 {
4944 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4945 {
4946 v->replaceable = 0;
4947 v->not_replaceable = 1;
4948 break;
4949 }
4950 }
4951 else if (reg_set_p (v->src_reg, PATTERN (p)))
4952 biv_increment_seen = 1;
4953 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4954 last_giv_use = p;
4955 }
4956 }
4957
4958 /* Now that the lifetime of the giv is known, check for branches
4959 from within the lifetime to outside the lifetime if it is still
4960 replaceable. */
4961
4962 if (v->replaceable)
4963 {
4964 p = v->insn;
4965 while (1)
4966 {
4967 p = NEXT_INSN (p);
4968 if (p == loop_end)
4969 p = NEXT_INSN (loop_start);
4970 if (p == last_giv_use)
4971 break;
4972
4973 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
4974 && LABEL_NAME (JUMP_LABEL (p))
4975 && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
4976 || (INSN_UID (v->insn) >= max_uid_for_loop)
4977 || (INSN_UID (last_giv_use) >= max_uid_for_loop)
4978 || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
4979 && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
4980 || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
4981 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
4982 {
4983 v->replaceable = 0;
4984 v->not_replaceable = 1;
4985
4986 if (loop_dump_stream)
4987 fprintf (loop_dump_stream,
4988 "Found branch outside giv lifetime.\n");
4989
4990 break;
4991 }
4992 }
4993 }
4994
4995 /* If it is replaceable, then save the final value. */
4996 if (v->replaceable)
4997 v->final_value = final_value;
4998 }
4999
5000 if (loop_dump_stream && v->replaceable)
5001 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5002 INSN_UID (v->insn), REGNO (v->dest_reg));
5003 }
5004 \f
5005 /* Update the status of whether a giv can derive other givs.
5006
5007 We need to do something special if there is or may be an update to the biv
5008 between the time the giv is defined and the time it is used to derive
5009 another giv.
5010
5011 In addition, a giv that is only conditionally set is not allowed to
5012 derive another giv once a label has been passed.
5013
5014 The cases we look at are when a label or an update to a biv is passed. */
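/* A hedged example of the adjustment performed below (not from the
   original comment): if giv G was computed as B * M + A and the biv
   update `B = B + C' then executes, the product C * M is recorded in
   G's derive_adjustment so that givs later derived from G can
   compensate for the update.  */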
5015
5016 static void
5017 update_giv_derive (p)
5018 rtx p;
5019 {
5020 struct iv_class *bl;
5021 struct induction *biv, *giv;
5022 rtx tem;
5023 int dummy;
5024
5025 /* Search all IV classes, then all bivs, and finally all givs.
5026
5027 There are three cases we are concerned with. First we have the situation
5028 of a giv that is only updated conditionally. In that case, it may not
5029 derive any givs after a label is passed.
5030
5031 The second case is when a biv update occurs, or may occur, after the
5032 definition of a giv. For certain biv updates (see below) that are
5033 known to occur between the giv definition and use, we can adjust the
5034 giv definition. For others, or when the biv update is conditional,
5035 we must prevent the giv from deriving any other givs. There are two
5036 sub-cases within this case.
5037
5038 If this is a label, we are concerned with any biv update that is done
5039 conditionally, since it may be done after the giv is defined followed by
5040 a branch here (actually, we need to pass both a jump and a label, but
5041 this extra tracking doesn't seem worth it).
5042
5043 If this is a jump, we are concerned about any biv update that may be
5044 executed multiple times. We are actually only concerned about
5045 backward jumps, but it is probably not worth performing the test
5046 on the jump again here.
5047
5048 If this is a biv update, we must adjust the giv status to show that a
5049 subsequent biv update was performed. If this adjustment cannot be done,
5050 the giv cannot derive further givs. */
5051
5052 for (bl = loop_iv_list; bl; bl = bl->next)
5053 for (biv = bl->biv; biv; biv = biv->next_iv)
5054 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5055 || biv->insn == p)
5056 {
5057 for (giv = bl->giv; giv; giv = giv->next_iv)
5058 {
5059 /* If cant_derive is already true, there is no point in
5060 checking all of these conditions again. */
5061 if (giv->cant_derive)
5062 continue;
5063
5064 /* If this giv is conditionally set and we have passed a label,
5065 it cannot derive anything. */
5066 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5067 giv->cant_derive = 1;
5068
5069 /* Skip givs that have mult_val == 0, since
5070 they are really invariants. Also skip those that are
5071 replaceable, since we know their lifetime doesn't contain
5072 any biv update. */
5073 else if (giv->mult_val == const0_rtx || giv->replaceable)
5074 continue;
5075
5076 /* The only way we can allow this giv to derive another
5077 is if this is a biv increment and we can form the product
5078 of biv->add_val and giv->mult_val. In this case, we will
5079 be able to compute a compensation. */
5080 else if (biv->insn == p)
5081 {
5082 tem = 0;
5083
5084 if (biv->mult_val == const1_rtx)
5085 tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
5086 biv->add_val,
5087 giv->mult_val),
5088 &dummy);
5089
5090 if (tem && giv->derive_adjustment)
5091 tem = simplify_giv_expr (gen_rtx_PLUS (giv->mode, tem,
5092 giv->derive_adjustment),
5093 &dummy);
5094 if (tem)
5095 giv->derive_adjustment = tem;
5096 else
5097 giv->cant_derive = 1;
5098 }
5099 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5100 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5101 giv->cant_derive = 1;
5102 }
5103 }
5104 }
5105 \f
5106 /* Check whether an insn is an increment legitimate for a basic induction var.
5107 X is the source of insn P, or a part of it.
5108 MODE is the mode in which X should be interpreted.
5109
5110 DEST_REG is the putative biv, also the destination of the insn.
5111 We accept patterns of these forms:
5112 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5113 REG = INVARIANT + REG
5114
5115 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5116 and store the additive term into *INC_VAL.
5117
5118 If X is an assignment of an invariant into DEST_REG, we set
5119 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5120
5121 We also want to detect a BIV when it corresponds to a variable
5122 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5123 of the variable may be a PLUS that adds a SUBREG of that variable to
5124 an invariant and then sign- or zero-extends the result of the PLUS
5125 into the variable.
5126
5127 Most GIVs in such cases will be in the promoted mode, since that is
5128 probably the natural computation mode (and almost certainly the mode
5129 used for addresses) on the machine. So we view the pseudo-reg containing
5130 the variable as the BIV, as if it were simply incremented.
5131
5132 Note that treating the entire pseudo as a BIV will result in making
5133 simple increments to any GIVs based on it. However, if the variable
5134 overflows in its declared mode but not its promoted mode, the result will
5135 be incorrect. This is acceptable if the variable is signed, since
5136 overflows in such cases are undefined, but not if it is unsigned, since
5137 those overflows are defined. So we only check for SIGN_EXTEND and
5138 not ZERO_EXTEND.
5139
5140 If we cannot find a biv, we return 0. */
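/* As a hedged illustration of the promoted-variable case (not from the
   original comment): an increment of a promoted HImode variable kept in
   an SImode pseudo may look like
	(set (reg:SI i)
	     (sign_extend:SI (plus:HI (subreg:HI (reg:SI i) 0)
				      (const_int 1))))
   and is still recognized as a simple increment of the biv `i'.  */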
5141
5142 static int
5143 basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
5144 register rtx x;
5145 enum machine_mode mode;
5146 rtx dest_reg;
5147 rtx p;
5148 rtx *inc_val;
5149 rtx *mult_val;
5150 {
5151 register enum rtx_code code;
5152 rtx arg;
5153 rtx insn, set = 0;
5154
5155 code = GET_CODE (x);
5156 switch (code)
5157 {
5158 case PLUS:
5159 if (XEXP (x, 0) == dest_reg
5160 || (GET_CODE (XEXP (x, 0)) == SUBREG
5161 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5162 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5163 arg = XEXP (x, 1);
5164 else if (XEXP (x, 1) == dest_reg
5165 || (GET_CODE (XEXP (x, 1)) == SUBREG
5166 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5167 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5168 arg = XEXP (x, 0);
5169 else
5170 return 0;
5171
5172 if (invariant_p (arg) != 1)
5173 return 0;
5174
5175 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5176 *mult_val = const1_rtx;
5177 return 1;
5178
5179 case SUBREG:
5180 /* If this is a SUBREG for a promoted variable, check the inner
5181 value. */
5182 if (SUBREG_PROMOTED_VAR_P (x))
5183 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
5184 dest_reg, p, inc_val, mult_val);
5185 return 0;
5186
5187 case REG:
5188 /* If this register is assigned in the previous insn, look at its
5189 source, but don't go outside the loop or past a label. */
5190
5191 for (insn = PREV_INSN (p);
5192 (insn && GET_CODE (insn) == NOTE
5193 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5194 insn = PREV_INSN (insn))
5195 ;
5196
5197 if (insn)
5198 set = single_set (insn);
5199
5200 if (set != 0
5201 && (SET_DEST (set) == x
5202 || (GET_CODE (SET_DEST (set)) == SUBREG
5203 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
5204 <= UNITS_PER_WORD)
5205 && SUBREG_REG (SET_DEST (set)) == x)))
5206 return basic_induction_var (SET_SRC (set),
5207 (GET_MODE (SET_SRC (set)) == VOIDmode
5208 ? GET_MODE (x)
5209 : GET_MODE (SET_SRC (set))),
5210 dest_reg, insn,
5211 inc_val, mult_val);
5212 /* ... fall through ... */
5213
5214 /* Can accept constant setting of biv only when inside innermost loop.
5215 Otherwise, a biv of an inner loop may be incorrectly recognized
5216 as a biv of the outer loop,
5217 causing code to be moved INTO the inner loop. */
5218 case MEM:
5219 if (invariant_p (x) != 1)
5220 return 0;
5221 case CONST_INT:
5222 case SYMBOL_REF:
5223 case CONST:
5224 if (loops_enclosed == 1)
5225 {
5226 /* Possible bug here? Perhaps we don't know the mode of X. */
5227 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
5228 *mult_val = const0_rtx;
5229 return 1;
5230 }
5231 else
5232 return 0;
5233
5234 case SIGN_EXTEND:
5235 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5236 dest_reg, p, inc_val, mult_val);
5237 case ASHIFTRT:
5238 /* Similar, since this can be a sign extension. */
5239 for (insn = PREV_INSN (p);
5240 (insn && GET_CODE (insn) == NOTE
5241 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5242 insn = PREV_INSN (insn))
5243 ;
5244
5245 if (insn)
5246 set = single_set (insn);
5247
5248 if (set && SET_DEST (set) == XEXP (x, 0)
5249 && GET_CODE (XEXP (x, 1)) == CONST_INT
5250 && INTVAL (XEXP (x, 1)) >= 0
5251 && GET_CODE (SET_SRC (set)) == ASHIFT
5252 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
5253 return basic_induction_var (XEXP (SET_SRC (set), 0),
5254 GET_MODE (XEXP (x, 0)),
5255 dest_reg, insn, inc_val, mult_val);
5256 return 0;
5257
5258 default:
5259 return 0;
5260 }
5261 }
5262 \f
5263 /* A general induction variable (giv) is any quantity that is a linear
5264 function of a basic induction variable,
5265 i.e. giv = biv * mult_val + add_val.
5266 The coefficients can be any loop invariant quantity.
5267 A giv need not be computed directly from the biv;
5268 it can be computed by way of other givs. */
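/* For example, if `i' is a biv stepped by 1 and `a' is an array of
   4-byte elements, the address computation for `a[i]' is the giv
       (plus (mult (reg i) (const_int 4)) (symbol_ref "a"))
   with mult_val 4 and add_val the address of `a'.  */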
5269
5270 /* Determine whether X computes a giv.
5271 If it does, return a nonzero value
5272 which is the benefit from eliminating the computation of X;
5273 set *SRC_REG to the register of the biv that it is computed from;
5274 set *ADD_VAL and *MULT_VAL to the coefficients,
5275 such that the value of X is biv * mult + add; */
5276
5277 static int
5278 general_induction_var (x, src_reg, add_val, mult_val)
5279 rtx x;
5280 rtx *src_reg;
5281 rtx *add_val;
5282 rtx *mult_val;
5283 {
5284 rtx orig_x = x;
5285 int benefit = 0;
5286 char *storage;
5287
5288 /* If this is an invariant, forget it, it isn't a giv. */
5289 if (invariant_p (x) == 1)
5290 return 0;
5291
5292 /* See if the expression could be a giv and get its form.
5293 Mark our place on the obstack in case we don't find a giv. */
5294 storage = (char *) oballoc (0);
5295 x = simplify_giv_expr (x, &benefit);
5296 if (x == 0)
5297 {
5298 obfree (storage);
5299 return 0;
5300 }
5301
5302 switch (GET_CODE (x))
5303 {
5304 case USE:
5305 case CONST_INT:
5306 /* Since this is now an invariant and wasn't before, it must be a giv
5307 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5308 with. */
5309 *src_reg = loop_iv_list->biv->dest_reg;
5310 *mult_val = const0_rtx;
5311 *add_val = x;
5312 break;
5313
5314 case REG:
5315 /* This is equivalent to a BIV. */
5316 *src_reg = x;
5317 *mult_val = const1_rtx;
5318 *add_val = const0_rtx;
5319 break;
5320
5321 case PLUS:
5322 /* Either (plus (biv) (invar)) or
5323 (plus (mult (biv) (invar_1)) (invar_2)). */
5324 if (GET_CODE (XEXP (x, 0)) == MULT)
5325 {
5326 *src_reg = XEXP (XEXP (x, 0), 0);
5327 *mult_val = XEXP (XEXP (x, 0), 1);
5328 }
5329 else
5330 {
5331 *src_reg = XEXP (x, 0);
5332 *mult_val = const1_rtx;
5333 }
5334 *add_val = XEXP (x, 1);
5335 break;
5336
5337 case MULT:
5338 /* ADD_VAL is zero. */
5339 *src_reg = XEXP (x, 0);
5340 *mult_val = XEXP (x, 1);
5341 *add_val = const0_rtx;
5342 break;
5343
5344 default:
5345 abort ();
5346 }
5347
5348 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
5349 one unless they are CONST_INT).  */
5350 if (GET_CODE (*add_val) == USE)
5351 *add_val = XEXP (*add_val, 0);
5352 if (GET_CODE (*mult_val) == USE)
5353 *mult_val = XEXP (*mult_val, 0);
5354
5355 benefit += rtx_cost (orig_x, SET);
5356
5357 /* Always return some benefit if this is a giv so it will be detected
5358 as such. This allows elimination of bivs that might otherwise
5359 not be eliminated. */
5360 return benefit == 0 ? 1 : benefit;
5361 }
5362 \f
5363 /* Given an expression, X, try to form it as a linear function of a biv.
5364 We will canonicalize it to be of the form
5365 (plus (mult (BIV) (invar_1))
5366 (invar_2))
5367 with possible degeneracies.
5368
5369 The invariant expressions must each be of a form that can be used as a
5370 machine operand. We surround them with a USE rtx (a hack, but localized
5371 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5372 routine; it is the caller's responsibility to strip them.
5373
5374 If no such canonicalization is possible (i.e., two biv's are used or an
5375 expression that is neither invariant nor a biv or giv), this routine
5376 returns 0.
5377
5378 For a non-zero return, the result will have a code of CONST_INT, USE,
5379 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5380
5381 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
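/* As a sketch of the canonicalization: b + (b << 1), i.e.
       (plus (reg b) (ashift (reg b) (const_int 1)))
   where (reg b) is a biv, simplifies to (mult (reg b) (const_int 3)).
   The ASHIFT is rewritten as a MULT by 2, the bare REG as a MULT by 1,
   and the two MULTs of the same biv are combined by distribution.  */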
5382
5383 static rtx
5384 simplify_giv_expr (x, benefit)
5385 rtx x;
5386 int *benefit;
5387 {
5388 enum machine_mode mode = GET_MODE (x);
5389 rtx arg0, arg1;
5390 rtx tem;
5391
5392 /* If this is not an integer mode, or if we cannot do arithmetic in this
5393 mode, this can't be a giv. */
5394 if (mode != VOIDmode
5395 && (GET_MODE_CLASS (mode) != MODE_INT
5396 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5397 return 0;
5398
5399 switch (GET_CODE (x))
5400 {
5401 case PLUS:
5402 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5403 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5404 if (arg0 == 0 || arg1 == 0)
5405 return 0;
5406
5407 /* Put constant last, CONST_INT last if both constant. */
5408 if ((GET_CODE (arg0) == USE
5409 || GET_CODE (arg0) == CONST_INT)
5410 && GET_CODE (arg1) != CONST_INT)
5411 tem = arg0, arg0 = arg1, arg1 = tem;
5412
5413 /* Handle addition of zero, then addition of an invariant. */
5414 if (arg1 == const0_rtx)
5415 return arg0;
5416 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5417 switch (GET_CODE (arg0))
5418 {
5419 case CONST_INT:
5420 case USE:
5421 /* Both invariant. Only valid if sum is machine operand.
5422 First strip off possible USE on the operands. */
5423 if (GET_CODE (arg0) == USE)
5424 arg0 = XEXP (arg0, 0);
5425
5426 if (GET_CODE (arg1) == USE)
5427 arg1 = XEXP (arg1, 0);
5428
5429 tem = 0;
5430 if (CONSTANT_P (arg0) && GET_CODE (arg1) == CONST_INT)
5431 {
5432 tem = plus_constant (arg0, INTVAL (arg1));
5433 if (GET_CODE (tem) != CONST_INT)
5434 tem = gen_rtx_USE (mode, tem);
5435 }
5436 else
5437 {
5438 /* Adding two invariants must result in an invariant,
5439 so enclose addition operation inside a USE and
5440 return it. */
5441 tem = gen_rtx_USE (mode, gen_rtx_PLUS (mode, arg0, arg1));
5442 }
5443
5444 return tem;
5445
5446 case REG:
5447 case MULT:
5448 /* biv + invar or mult + invar. Return sum. */
5449 return gen_rtx_PLUS (mode, arg0, arg1);
5450
5451 case PLUS:
5452 /* (a + invar_1) + invar_2. Associate. */
5453 return simplify_giv_expr (gen_rtx_PLUS (mode,
5454 XEXP (arg0, 0),
5455 gen_rtx_PLUS (mode,
5456 XEXP (arg0, 1), arg1)),
5457 benefit);
5458
5459 default:
5460 abort ();
5461 }
5462
5463 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5464 MULT to reduce cases. */
5465 if (GET_CODE (arg0) == REG)
5466 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
5467 if (GET_CODE (arg1) == REG)
5468 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
5469
5470 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5471 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5472 Recurse to associate the second PLUS. */
5473 if (GET_CODE (arg1) == MULT)
5474 tem = arg0, arg0 = arg1, arg1 = tem;
5475
5476 if (GET_CODE (arg1) == PLUS)
5477 return simplify_giv_expr (gen_rtx_PLUS (mode,
5478 gen_rtx_PLUS (mode, arg0,
5479 XEXP (arg1, 0)),
5480 XEXP (arg1, 1)),
5481 benefit);
5482
5483 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5484 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5485 abort ();
5486
5487 if (XEXP (arg0, 0) != XEXP (arg1, 0))
5488 return 0;
5489
5490 return simplify_giv_expr (gen_rtx_MULT (mode,
5491 XEXP (arg0, 0),
5492 gen_rtx_PLUS (mode,
5493 XEXP (arg0, 1),
5494 XEXP (arg1, 1))),
5495 benefit);
5496
5497 case MINUS:
5498 /* Handle "a - b" as "a + b * (-1)". */
5499 return simplify_giv_expr (gen_rtx_PLUS (mode,
5500 XEXP (x, 0),
5501 gen_rtx_MULT (mode, XEXP (x, 1),
5502 constm1_rtx)),
5503 benefit);
5504
5505 case MULT:
5506 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5507 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5508 if (arg0 == 0 || arg1 == 0)
5509 return 0;
5510
5511 /* Put constant last, CONST_INT last if both constant. */
5512 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5513 && GET_CODE (arg1) != CONST_INT)
5514 tem = arg0, arg0 = arg1, arg1 = tem;
5515
5516 /* If second argument is not now constant, not giv. */
5517 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5518 return 0;
5519
5520 /* Handle multiply by 0 or 1. */
5521 if (arg1 == const0_rtx)
5522 return const0_rtx;
5523
5524 else if (arg1 == const1_rtx)
5525 return arg0;
5526
5527 switch (GET_CODE (arg0))
5528 {
5529 case REG:
5530 /* biv * invar. Done. */
5531 return gen_rtx_MULT (mode, arg0, arg1);
5532
5533 case CONST_INT:
5534 /* Product of two constants. */
5535 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5536
5537 case USE:
5538 /* invar * invar. Not giv. */
5539 return 0;
5540
5541 case MULT:
5542 /* (a * invar_1) * invar_2. Associate. */
5543 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (arg0, 0),
5544 gen_rtx_MULT (mode,
5545 XEXP (arg0, 1),
5546 arg1)),
5547 benefit);
5548
5549 case PLUS:
5550 /* (a + invar_1) * invar_2. Distribute. */
5551 return simplify_giv_expr (gen_rtx_PLUS (mode,
5552 gen_rtx_MULT (mode,
5553 XEXP (arg0, 0),
5554 arg1),
5555 gen_rtx_MULT (mode,
5556 XEXP (arg0, 1),
5557 arg1)),
5558 benefit);
5559
5560 default:
5561 abort ();
5562 }
5563
5564 case ASHIFT:
5565 /* Shift by constant is multiply by power of two. */
5566 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5567 return 0;
5568
5569 return simplify_giv_expr (gen_rtx_MULT (mode,
5570 XEXP (x, 0),
5571 GEN_INT ((HOST_WIDE_INT) 1
5572 << INTVAL (XEXP (x, 1)))),
5573 benefit);
5574
5575 case NEG:
5576 /* "-a" is "a * (-1)" */
5577 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
5578 benefit);
5579
5580 case NOT:
5581 /* "~a" is "-a - 1". Silly, but easy. */
5582 return simplify_giv_expr (gen_rtx_MINUS (mode,
5583 gen_rtx_NEG (mode, XEXP (x, 0)),
5584 const1_rtx),
5585 benefit);
5586
5587 case USE:
5588 /* Already in proper form for invariant. */
5589 return x;
5590
5591 case REG:
5592 /* If this is a new register, we can't deal with it. */
5593 if (REGNO (x) >= max_reg_before_loop)
5594 return 0;
5595
5596 /* Check for biv or giv. */
5597 switch (reg_iv_type[REGNO (x)])
5598 {
5599 case BASIC_INDUCT:
5600 return x;
5601 case GENERAL_INDUCT:
5602 {
5603 struct induction *v = reg_iv_info[REGNO (x)];
5604
5605 /* Form expression from giv and add benefit. Ensure this giv
5606 can derive another and subtract any needed adjustment if so. */
5607 *benefit += v->benefit;
5608 if (v->cant_derive)
5609 return 0;
5610
5611 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, v->src_reg,
5612 v->mult_val),
5613 v->add_val);
5614 if (v->derive_adjustment)
5615 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
5616 return simplify_giv_expr (tem, benefit);
5617 }
5618
5619 default:
5620 break;
5621 }
5622
5623 /* Fall through to general case. */
5624 default:
5625 /* If invariant, return as USE (unless CONST_INT).
5626 Otherwise, not giv. */
5627 if (GET_CODE (x) == USE)
5628 x = XEXP (x, 0);
5629
5630 if (invariant_p (x) == 1)
5631 {
5632 if (GET_CODE (x) == CONST_INT)
5633 return x;
5634 else
5635 return gen_rtx_USE (mode, x);
5636 }
5637 else
5638 return 0;
5639 }
5640 }
5641 \f
5642 /* Help detect a giv that is calculated by several consecutive insns;
5643 for example,
5644 giv = biv * M
5645 giv = giv + A
5646 The caller has already identified the first insn P as having a giv as dest;
5647 we check that all other insns that set the same register follow
5648 immediately after P, that they alter nothing else,
5649 and that the result of the last is still a giv.
5650
5651 The value is 0 if the reg set in P is not really a giv.
5652 Otherwise, the value is the amount gained by eliminating
5653 all the consecutive insns that compute the value.
5654
5655 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
5656 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
5657
5658 The coefficients of the ultimate giv value are stored in
5659 *MULT_VAL and *ADD_VAL. */
5660
5661 static int
5662 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
5663 add_val, mult_val)
5664 int first_benefit;
5665 rtx p;
5666 rtx src_reg;
5667 rtx dest_reg;
5668 rtx *add_val;
5669 rtx *mult_val;
5670 {
5671 int count;
5672 enum rtx_code code;
5673 int benefit;
5674 rtx temp;
5675 rtx set;
5676
5677 /* Indicate that this is a giv so that we can update the value produced in
5678 each insn of the multi-insn sequence.
5679
5680 This induction structure will be used only by the call to
5681 general_induction_var below, so we can allocate it on our stack.
5682 If this is a giv, our caller will replace the induct var entry with
5683 a new induction structure. */
5684 struct induction *v
5685 = (struct induction *) alloca (sizeof (struct induction));
5686 v->src_reg = src_reg;
5687 v->mult_val = *mult_val;
5688 v->add_val = *add_val;
5689 v->benefit = first_benefit;
5690 v->cant_derive = 0;
5691 v->derive_adjustment = 0;
5692
5693 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
5694 reg_iv_info[REGNO (dest_reg)] = v;
5695
5696 count = n_times_set[REGNO (dest_reg)] - 1;
5697
5698 while (count > 0)
5699 {
5700 p = NEXT_INSN (p);
5701 code = GET_CODE (p);
5702
5703 /* If libcall, skip to end of call sequence. */
5704 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
5705 p = XEXP (temp, 0);
5706
5707 if (code == INSN
5708 && (set = single_set (p))
5709 && GET_CODE (SET_DEST (set)) == REG
5710 && SET_DEST (set) == dest_reg
5711 && ((benefit = general_induction_var (SET_SRC (set), &src_reg,
5712 add_val, mult_val))
5713 /* Giv created by equivalent expression. */
5714 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
5715 && (benefit = general_induction_var (XEXP (temp, 0), &src_reg,
5716 add_val, mult_val))))
5717 && src_reg == v->src_reg)
5718 {
5719 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5720 benefit += libcall_benefit (p);
5721
5722 count--;
5723 v->mult_val = *mult_val;
5724 v->add_val = *add_val;
5725 v->benefit = benefit;
5726 }
5727 else if (code != NOTE)
5728 {
5729 /* Allow insns that set something other than this giv to a
5730 constant. Such insns are needed on machines which cannot
5731 include long constants and should not disqualify a giv. */
5732 if (code == INSN
5733 && (set = single_set (p))
5734 && SET_DEST (set) != dest_reg
5735 && CONSTANT_P (SET_SRC (set)))
5736 continue;
5737
5738 reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
5739 return 0;
5740 }
5741 }
5742
5743 return v->benefit;
5744 }
5745 \f
5746 /* Return an rtx, if any, that expresses giv G2 as a function of the register
5747 represented by G1. If no such expression can be found, or it is clear that
5748 it cannot possibly be a valid address, 0 is returned.
5749
5750 To perform the computation, we note that
5751 G1 = a * v + b and
5752 G2 = c * v + d
5753 where `v' is the biv.
5754
5755 So G2 = (c/a) * G1 + (d - b*c/a) */
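/* Worked example (hypothetical values): if G1 = 2*v + 1 and
   G2 = 6*v + 7, then c/a = 6/2 = 3 and d - b*c/a = 7 - 1*3 = 4,
   so G2 = 3*G1 + 4.  */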
5756
5757 #ifdef ADDRESS_COST
5758 static rtx
5759 express_from (g1, g2)
5760 struct induction *g1, *g2;
5761 {
5762 rtx mult, add;
5763
5764 /* The value that G1 will be multiplied by must be a constant integer. Also,
5765 the only chance we have of getting a valid address is if b*c/a (see above
5766 for notation) is also an integer. */
5767 if (GET_CODE (g1->mult_val) != CONST_INT
5768 || GET_CODE (g2->mult_val) != CONST_INT
5769 || GET_CODE (g1->add_val) != CONST_INT
5770 || g1->mult_val == const0_rtx
5771 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
5772 return 0;
5773
5774 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
5775 add = plus_constant (g2->add_val, - INTVAL (g1->add_val) * INTVAL (mult));
5776
5777 /* Form simplified final result. */
5778 if (mult == const0_rtx)
5779 return add;
5780 else if (mult == const1_rtx)
5781 mult = g1->dest_reg;
5782 else
5783 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
5784
5785 if (add == const0_rtx)
5786 return mult;
5787 else
5788 return gen_rtx_PLUS (g2->mode, mult, add);
5789 }
5790 #endif
5791 \f
5792 /* Return 1 if giv G2 can be combined with G1. This means that G2 can use
5793 (either directly or via an address expression) a register used to represent
5794 G1. Set g2->new_reg to a representation of G1 (normally just
5795 g1->dest_reg). */
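/* For instance, if `p' is a pointer biv stepped by 4, the address givs
   for `p[0]' and `p[1]' (mult_val 1 each, add_vals 0 and 4) can be
   combined whenever (plus (reg) (const_int 4)) is a valid address that
   is no more expensive than the original one.  */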
5796
5797 static int
5798 combine_givs_p (g1, g2)
5799 struct induction *g1, *g2;
5800 {
5801 rtx tem;
5802
5803 /* If these givs are identical, they can be combined. */
5804 if (rtx_equal_p (g1->mult_val, g2->mult_val)
5805 && rtx_equal_p (g1->add_val, g2->add_val))
5806 {
5807 g2->new_reg = g1->dest_reg;
5808 return 1;
5809 }
5810
5811 #ifdef ADDRESS_COST
5812 /* If G2 can be expressed as a function of G1 and that function is valid
5813 as an address and no more expensive than using a register for G2,
5814 the expression of G2 in terms of G1 can be used. */
5815 if (g2->giv_type == DEST_ADDR
5816 && (tem = express_from (g1, g2)) != 0
5817 && memory_address_p (g2->mem_mode, tem)
5818 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location))
5819 {
5820 g2->new_reg = tem;
5821 return 1;
5822 }
5823 #endif
5824
5825 return 0;
5826 }
5827 \f
5828 #ifdef GIV_SORT_CRITERION
5829 /* Compare two givs and sort the most desirable one for combinations first.
5830 This is used only in one qsort call below. */
5831
5832 static int
5833 giv_sort (x, y)
5834 struct induction **x, **y;
5835 {
5836 GIV_SORT_CRITERION (*x, *y);
5837
5838 return 0;
5839 }
5840 #endif
5841
5842 /* Check all pairs of givs for iv_class BL and see if any can be combined with
5843 any other. If so, point SAME to the giv combined with and set NEW_REG to
5844 be an expression (in terms of the other giv's DEST_REG) equivalent to the
5845 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
5846
5847 static void
5848 combine_givs (bl)
5849 struct iv_class *bl;
5850 {
5851 struct induction *g1, *g2, **giv_array;
5852 int i, j, giv_count, pass;
5853
5854 /* Count givs, because bl->giv_count is incorrect here. */
5855 giv_count = 0;
5856 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5857 giv_count++;
5858
5859 giv_array
5860 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
5861 i = 0;
5862 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5863 giv_array[i++] = g1;
5864
5865 #ifdef GIV_SORT_CRITERION
5866 /* Sort the givs if GIV_SORT_CRITERION is defined.
5867 This is usually defined for processors which lack
5868 negative register offsets, so more givs may be combined. */
5869
5870 if (loop_dump_stream)
5871 fprintf (loop_dump_stream, "%d givs counted, sorting...\n", giv_count);
5872
5873 qsort (giv_array, giv_count, sizeof (struct induction *), giv_sort);
5874 #endif
5875
5876 for (i = 0; i < giv_count; i++)
5877 {
5878 g1 = giv_array[i];
5879 for (pass = 0; pass <= 1; pass++)
5880 for (j = 0; j < giv_count; j++)
5881 {
5882 g2 = giv_array[j];
5883 if (g1 != g2
5884 /* First try to combine with replaceable givs, then all givs. */
5885 && (g1->replaceable || pass == 1)
5886 /* If either has already been combined or is to be ignored, can't
5887 combine. */
5888 && ! g1->ignore && ! g2->ignore && ! g1->same && ! g2->same
5889 /* If something has been based on G2, G2 cannot itself be based
5890 on something else. */
5891 && ! g2->combined_with
5892 && combine_givs_p (g1, g2))
5893 {
5894 /* g2->new_reg set by `combine_givs_p' */
5895 g2->same = g1;
5896 g1->combined_with = 1;
5897
5898 /* If one of these givs is a DEST_REG that was only used
5899 once, by the other giv, this is actually a single use.
5900 The DEST_REG has the correct cost, while the other giv
5901 counts the REG use too often. */
5902 if (g2->giv_type == DEST_REG
5903 && n_times_used[REGNO (g2->dest_reg)] == 1
5904 && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
5905 g1->benefit = g2->benefit;
5906 else if (g1->giv_type != DEST_REG
5907 || n_times_used[REGNO (g1->dest_reg)] != 1
5908 || ! reg_mentioned_p (g1->dest_reg,
5909 PATTERN (g2->insn)))
5910 {
5911 g1->benefit += g2->benefit;
5912 g1->times_used += g2->times_used;
5913 }
5914 /* ??? The new final_[bg]iv_value code does a much better job
5915 of finding replaceable giv's, and hence this code may no
5916 longer be necessary. */
5917 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
5918 g1->benefit -= copy_cost;
5919 g1->lifetime += g2->lifetime;
5920
5921 if (loop_dump_stream)
5922 fprintf (loop_dump_stream, "giv at %d combined with giv at %d\n",
5923 INSN_UID (g2->insn), INSN_UID (g1->insn));
5924 }
5925 }
5926 }
5927 }
5928 \f
5929 /* Emit code before INSERT_BEFORE to set REG = B * M + A. */
5930
5931 void
5932 emit_iv_add_mult (b, m, a, reg, insert_before)
5933 rtx b; /* initial value of basic induction variable */
5934 rtx m; /* multiplicative constant */
5935 rtx a; /* additive constant */
5936 rtx reg; /* destination register */
5937 rtx insert_before;
5938 {
5939 rtx seq;
5940 rtx result;
5941
5942 /* Prevent unexpected sharing of these rtx. */
5943 a = copy_rtx (a);
5944 b = copy_rtx (b);
5945
5946 /* Increase the lifetime of any invariants moved further in code. */
5947 update_reg_last_use (a, insert_before);
5948 update_reg_last_use (b, insert_before);
5949 update_reg_last_use (m, insert_before);
5950
5951 start_sequence ();
5952 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
5953 if (reg != result)
5954 emit_move_insn (reg, result);
5955 seq = gen_sequence ();
5956 end_sequence ();
5957
5958 emit_insn_before (seq, insert_before);
5959
5960 record_base_value (REGNO (reg), b);
5961 }
5962 \f
5963 /* Test whether A * B can be computed without
5964 an actual multiply insn. Value is 1 if so. */
5965
5966 static int
5967 product_cheap_p (a, b)
5968 rtx a;
5969 rtx b;
5970 {
5971 int i;
5972 rtx tmp;
5973 struct obstack *old_rtl_obstack = rtl_obstack;
5974 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
5975 int win = 1;
5976
5977 /* If only one is constant, make it B. */
5978 if (GET_CODE (a) == CONST_INT)
5979 tmp = a, a = b, b = tmp;
5980
5981 /* If first constant, both constant, so don't need multiply. */
5982 if (GET_CODE (a) == CONST_INT)
5983 return 1;
5984
5985 /* If second not constant, neither is constant, so would need multiply. */
5986 if (GET_CODE (b) != CONST_INT)
5987 return 0;
5988
5989 /* One operand is constant, so might not need multiply insn. Generate the
5990 code for the multiply and see if a call or multiply, or long sequence
5991 of insns is generated. */
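/* For example, on many machines a multiply by 8 expands to a single
   shift insn, which is cheap, while a multiply by a large constant
   with many set bits is likely to expand to a real mult insn or a
   library call, which is not.  */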
5992
5993 rtl_obstack = &temp_obstack;
5994 start_sequence ();
5995 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
5996 tmp = gen_sequence ();
5997 end_sequence ();
5998
5999 if (GET_CODE (tmp) == SEQUENCE)
6000 {
6001 if (XVEC (tmp, 0) == 0)
6002 win = 1;
6003 else if (XVECLEN (tmp, 0) > 3)
6004 win = 0;
6005 else
6006 for (i = 0; i < XVECLEN (tmp, 0); i++)
6007 {
6008 rtx insn = XVECEXP (tmp, 0, i);
6009
6010 if (GET_CODE (insn) != INSN
6011 || (GET_CODE (PATTERN (insn)) == SET
6012 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
6013 || (GET_CODE (PATTERN (insn)) == PARALLEL
6014 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
6015 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
6016 {
6017 win = 0;
6018 break;
6019 }
6020 }
6021 }
6022 else if (GET_CODE (tmp) == SET
6023 && GET_CODE (SET_SRC (tmp)) == MULT)
6024 win = 0;
6025 else if (GET_CODE (tmp) == PARALLEL
6026 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
6027 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
6028 win = 0;
6029
6030 /* Free any storage we obtained in generating this multiply and restore rtl
6031 allocation to its normal obstack. */
6032 obstack_free (&temp_obstack, storage);
6033 rtl_obstack = old_rtl_obstack;
6034
6035 return win;
6036 }
6037 \f
6038 /* Check to see if loop can be terminated by a "decrement and branch until
6039 zero" instruction. If so, add a REG_NONNEG note to the branch insn.
6040 Also try reversing an increment loop to a decrement loop
6041 to see if the optimization can be performed.
6042 Value is nonzero if optimization was performed. */
6043
6044 /* This is useful even if the architecture doesn't have such an insn,
6045 because it might change a loop which increments from 0 to n into a loop
6046 which decrements from n to 0. A loop that decrements to zero is usually
6047 faster than one that increments from zero. */
6048
6049 /* ??? This could be rewritten to use some of the loop unrolling procedures,
6050 such as approx_final_value, biv_total_increment, loop_iterations, and
6051 final_[bg]iv_value. */
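/* For example (assuming the counter has no use except counting),
   a loop compiled from
       for (i = 0; i < 10; i++) ...
   may be rewritten as if it had been
       for (i = 9; i >= 0; i--) ...
   so that the exit test compares against zero and a
   decrement-and-branch insn can implement it.  */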
6052
6053 static int
6054 check_dbra_loop (loop_end, insn_count, loop_start)
6055 rtx loop_end;
6056 int insn_count;
6057 rtx loop_start;
6058 {
6059 struct iv_class *bl;
6060 rtx reg;
6061 rtx jump_label;
6062 rtx final_value;
6063 rtx start_value;
6064 rtx new_add_val;
6065 rtx comparison;
6066 rtx before_comparison;
6067 rtx p;
6068
6069 /* If last insn is a conditional branch, and the insn before tests a
6070 register value, try to optimize it. Otherwise, we can't do anything. */
6071
6072 comparison = get_condition_for_loop (PREV_INSN (loop_end));
6073 if (comparison == 0)
6074 return 0;
6075
6076 /* Check all of the bivs to see if the compare uses one of them.
6077 Skip biv's set more than once because we can't guarantee that
6078 it will be zero on the last iteration. Also skip if the biv is
6079 used between its update and the test insn. */
6080
6081 for (bl = loop_iv_list; bl; bl = bl->next)
6082 {
6083 if (bl->biv_count == 1
6084 && bl->biv->dest_reg == XEXP (comparison, 0)
6085 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
6086 PREV_INSN (PREV_INSN (loop_end))))
6087 break;
6088 }
6089
6090 if (! bl)
6091 return 0;
6092
6093 /* Look for the case where the basic induction variable is always
6094 nonnegative, and equals zero on the last iteration.
6095 In this case, add a reg_note REG_NONNEG, which allows the
6096 m68k DBRA instruction to be used. */
6097
6098 if (((GET_CODE (comparison) == GT
6099 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6100 && INTVAL (XEXP (comparison, 1)) == -1)
6101 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
6102 && GET_CODE (bl->biv->add_val) == CONST_INT
6103 && INTVAL (bl->biv->add_val) < 0)
6104 {
6105 /* Initial value must be greater than 0,
6106 init_val % -dec_value == 0 to ensure that it equals zero on
6107 the last iteration */
6108
6109 if (GET_CODE (bl->initial_value) == CONST_INT
6110 && INTVAL (bl->initial_value) > 0
6111 && (INTVAL (bl->initial_value)
6112 % (-INTVAL (bl->biv->add_val))) == 0)
6113 {
6114 /* register always nonnegative, add REG_NOTE to branch */
6115 REG_NOTES (PREV_INSN (loop_end))
6116 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6117 REG_NOTES (PREV_INSN (loop_end)));
6118 bl->nonneg = 1;
6119
6120 return 1;
6121 }
6122
6123 /* If the decrement is 1 and the value was tested as >= 0 before
6124 the loop, then we can safely optimize. */
6125 for (p = loop_start; p; p = PREV_INSN (p))
6126 {
6127 if (GET_CODE (p) == CODE_LABEL)
6128 break;
6129 if (GET_CODE (p) != JUMP_INSN)
6130 continue;
6131
6132 before_comparison = get_condition_for_loop (p);
6133 if (before_comparison
6134 && XEXP (before_comparison, 0) == bl->biv->dest_reg
6135 && GET_CODE (before_comparison) == LT
6136 && XEXP (before_comparison, 1) == const0_rtx
6137 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
6138 && INTVAL (bl->biv->add_val) == -1)
6139 {
6140 REG_NOTES (PREV_INSN (loop_end))
6141 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6142 REG_NOTES (PREV_INSN (loop_end)));
6143 bl->nonneg = 1;
6144
6145 return 1;
6146 }
6147 }
6148 }
6149 else if (num_mem_sets <= 1)
6150 {
6151 /* Try to change inc to dec, so can apply above optimization. */
6152 /* Can do this if:
6153 all registers modified are induction variables or invariant,
6154 all memory references have non-overlapping addresses
6155 (obviously true if only one write)
6156 allow 2 insns for the compare/jump at the end of the loop. */
6157 /* Also, we must avoid any instructions which use both the reversed
6158 biv and another biv. Such instructions will fail if the loop is
6159 reversed. We meet this condition by requiring that either
6160 no_use_except_counting is true, or else that there is only
6161 one biv. */
6162 int num_nonfixed_reads = 0;
6163 /* 1 if the iteration var is used only to count iterations. */
6164 int no_use_except_counting = 0;
6165 /* 1 if the loop has no memory store, or it has a single memory store
6166 which is reversible. */
6167 int reversible_mem_store = 1;
6168
6169 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6170 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6171 num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
6172
6173 if (bl->giv_count == 0
6174 && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
6175 {
6176 rtx bivreg = regno_reg_rtx[bl->regno];
6177
6178 /* If there are no givs for this biv, and the only exit is the
6179 fall through at the end of the loop, then
6180 see if perhaps there are no uses except to count. */
6181 no_use_except_counting = 1;
6182 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6183 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6184 {
6185 rtx set = single_set (p);
6186
6187 if (set && GET_CODE (SET_DEST (set)) == REG
6188 && REGNO (SET_DEST (set)) == bl->regno)
6189 /* An insn that sets the biv is okay. */
6190 ;
6191 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
6192 || p == prev_nonnote_insn (loop_end))
6193 /* Don't bother about the end test. */
6194 ;
6195 else if (reg_mentioned_p (bivreg, PATTERN (p)))
6196 /* Any other use of the biv is no good. */
6197 {
6198 no_use_except_counting = 0;
6199 break;
6200 }
6201 }
6202 }
6203
6204 /* If the loop has a single store, and the destination address is
6205 invariant, then we can't reverse the loop, because this address
6206 might then have the wrong value at loop exit.
6207 This would work if the source was invariant also, however, in that
6208 case, the insn should have been moved out of the loop. */
6209
6210 if (num_mem_sets == 1)
6211 reversible_mem_store
6212 = (! unknown_address_altered
6213 && ! invariant_p (XEXP (loop_store_mems[0], 0)));
6214
6215 /* This code only acts for innermost loops. Also it simplifies
6216 the memory address check by only reversing loops with
6217 zero or one memory access.
6218 Two memory accesses could involve parts of the same array,
6219 and that can't be reversed. */
6220
6221 if (num_nonfixed_reads <= 1
6222 && !loop_has_call
6223 && !loop_has_volatile
6224 && reversible_mem_store
6225 && (no_use_except_counting
6226 || ((bl->giv_count + bl->biv_count + num_mem_sets
6227 + num_movables + 2 == insn_count)
6228 && (bl == loop_iv_list && bl->next == 0))))
6229 {
6230 rtx tem;
6231
6232 /* Loop can be reversed. */
6233 if (loop_dump_stream)
6234 fprintf (loop_dump_stream, "Can reverse loop\n");
6235
6236 /* Now check other conditions:
6237
6238 The increment must be a constant, as must the initial value,
6239 and the comparison code must be LT.
6240
6241 This test can probably be improved since +/- 1 in the constant
6242 can be obtained by changing LT to LE and vice versa; this is
6243 confusing. */
6244
6245 if (comparison
6246 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6247 /* LE gets turned into LT */
6248 && GET_CODE (comparison) == LT
6249 && GET_CODE (bl->initial_value) == CONST_INT)
6250 {
6251 HOST_WIDE_INT add_val, comparison_val;
6252 rtx initial_value;
6253
6254 add_val = INTVAL (bl->biv->add_val);
6255 comparison_val = INTVAL (XEXP (comparison, 1));
6256 initial_value = bl->initial_value;
6257
6258 /* Normalize the initial value if it is an integer and
6259 has no other use except as a counter. This will allow
6260 a few more loops to be reversed. */
6261 if (no_use_except_counting
6262 && GET_CODE (initial_value) == CONST_INT)
6263 {
6264 comparison_val = comparison_val - INTVAL (bl->initial_value);
6265 initial_value = const0_rtx;
6266 }
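/* E.g. a biv with initial value 4 and increment 2, compared
   against 10, is treated as starting at 0 and compared against 6;
   since 6 % 2 == 0, the loop remains reversible.  */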
6267
6268 /* If the initial value is not zero, or if the comparison
6269 value is not an exact multiple of the increment, then we
6270 cannot reverse this loop. */
6271 if (initial_value != const0_rtx
6272 || (comparison_val % add_val) != 0)
6273 return 0;
6274
6275 /* Reset these in case we normalized the initial value
6276 and comparison value above. */
6277 bl->initial_value = initial_value;
6278 XEXP (comparison, 1) = GEN_INT (comparison_val);
6279
6280 /* Register will always be nonnegative, with value
6281 0 on last iteration if loop reversed */
6282
6283 /* Save some info needed to produce the new insns. */
6284 reg = bl->biv->dest_reg;
6285 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
6286 if (jump_label == pc_rtx)
6287 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
6288 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
6289
6290 final_value = XEXP (comparison, 1);
6291 start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
6292 - INTVAL (bl->biv->add_val));
6293
6294 /* Initialize biv to start_value before loop start.
6295 The old initializing insn will be deleted as a
6296 dead store by flow.c. */
6297 emit_insn_before (gen_move_insn (reg, start_value), loop_start);
6298
6299 /* Add insn to decrement register, and delete insn
6300 that incremented the register. */
6301 p = emit_insn_before (gen_add2_insn (reg, new_add_val),
6302 bl->biv->insn);
6303 delete_insn (bl->biv->insn);
6304
6305 /* Update biv info to reflect its new status. */
6306 bl->biv->insn = p;
6307 bl->initial_value = start_value;
6308 bl->biv->add_val = new_add_val;
6309
6310 /* Inc LABEL_NUSES so that delete_insn will
6311 not delete the label. */
6312 LABEL_NUSES (XEXP (jump_label, 0)) ++;
6313
6314 /* Emit an insn after the end of the loop to set the biv's
6315 proper exit value if it is used anywhere outside the loop. */
6316 if ((REGNO_LAST_UID (bl->regno)
6317 != INSN_UID (PREV_INSN (PREV_INSN (loop_end))))
6318 || ! bl->init_insn
6319 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
6320 emit_insn_after (gen_move_insn (reg, final_value),
6321 loop_end);
6322
6323 /* Delete compare/branch at end of loop. */
6324 delete_insn (PREV_INSN (loop_end));
6325 delete_insn (PREV_INSN (loop_end));
6326
6327 /* Add new compare/branch insn at end of loop. */
6328 start_sequence ();
6329 emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
6330 GET_MODE (reg), 0, 0);
6331 emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
6332 tem = gen_sequence ();
6333 end_sequence ();
6334 emit_jump_insn_before (tem, loop_end);
6335
6336 for (tem = PREV_INSN (loop_end);
6337 tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
6338 ;
6339 if (tem)
6340 {
6341 JUMP_LABEL (tem) = XEXP (jump_label, 0);
6342
6343 /* Increment of LABEL_NUSES done above. */
6344 /* Register is now always nonnegative,
6345 so add REG_NONNEG note to the branch. */
6346 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6347 REG_NOTES (tem));
6348 }
6349
6350 bl->nonneg = 1;
6351
6352 /* Mark that this biv has been reversed. Each giv which depends
6353 on this biv, and which is also live past the end of the loop
6354 will have to be fixed up. */
6355
6356 bl->reversed = 1;
6357
6358 if (loop_dump_stream)
6359 fprintf (loop_dump_stream,
6360 "Reversed loop and added reg_nonneg\n");
6361
6362 return 1;
6363 }
6364 }
6365 }
6366
6367 return 0;
6368 }
6369 \f
6370 /* Verify whether the biv BL appears to be eliminable,
6371 based on the insns in the loop that refer to it.
6372 LOOP_START is the first insn of the loop, and END is the end insn.
6373
6374 If ELIMINATE_P is non-zero, actually do the elimination.
6375
6376 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
6377 determine whether invariant insns should be placed inside or at the
6378 start of the loop. */
6379
6380 static int
6381 maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
6382 struct iv_class *bl;
6383 rtx loop_start;
6384 rtx end;
6385 int eliminate_p;
6386 int threshold, insn_count;
6387 {
6388 rtx reg = bl->biv->dest_reg;
6389 rtx p;
6390
6391 /* Scan all insns in the loop, stopping if we find one that uses the
6392 biv in a way that we cannot eliminate. */
6393
6394 for (p = loop_start; p != end; p = NEXT_INSN (p))
6395 {
6396 enum rtx_code code = GET_CODE (p);
6397 rtx where = threshold >= insn_count ? loop_start : p;
6398
6399 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
6400 && reg_mentioned_p (reg, PATTERN (p))
6401 && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
6402 {
6403 if (loop_dump_stream)
6404 fprintf (loop_dump_stream,
6405 "Cannot eliminate biv %d: biv used in insn %d.\n",
6406 bl->regno, INSN_UID (p));
6407 break;
6408 }
6409 }
6410
6411 if (p == end)
6412 {
6413 if (loop_dump_stream)
6414 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
6415 bl->regno, eliminate_p ? "was" : "can be");
6416 return 1;
6417 }
6418
6419 return 0;
6420 }
6421 \f
6422 /* If BL appears in X (part of the pattern of INSN), see if we can
6423 eliminate its use. If so, return 1. If not, return 0.
6424
6425 If BIV does not appear in X, return 1.
6426
6427 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
6428 where extra insns should be added. Depending on how many items have been
6429 moved out of the loop, it will either be before INSN or at the start of
6430 the loop. */
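/* A typical case (sketched; the constants are hypothetical): with a
   reduced giv p = 4*i + &a, the exit test comparing the biv `i'
   against (const_int 10) can be rewritten to compare p against a new
   loop invariant computed as 10*4 + &a, i.e. the address of a[10],
   removing the last use of `i'.  */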
6431
6432 static int
6433 maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
6434 rtx x, insn;
6435 struct iv_class *bl;
6436 int eliminate_p;
6437 rtx where;
6438 {
6439 enum rtx_code code = GET_CODE (x);
6440 rtx reg = bl->biv->dest_reg;
6441 enum machine_mode mode = GET_MODE (reg);
6442 struct induction *v;
6443 rtx arg, new, tem;
6444 int arg_operand;
6445 char *fmt;
6446 int i, j;
6447
6448 switch (code)
6449 {
6450 case REG:
6451 /* If we haven't already been able to do something with this BIV,
6452 we can't eliminate it. */
6453 if (x == reg)
6454 return 0;
6455 return 1;
6456
6457 case SET:
6458 /* If this sets the BIV, it is not a problem. */
6459 if (SET_DEST (x) == reg)
6460 return 1;
6461
6462 /* If this is an insn that defines a giv, it is also ok because
6463 it will go away when the giv is reduced. */
6464 for (v = bl->giv; v; v = v->next_iv)
6465 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
6466 return 1;
6467
6468 #ifdef HAVE_cc0
6469 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
6470 {
6471 /* Can replace with any giv that was reduced and
6472 that has (MULT_VAL != 0) and (ADD_VAL == 0).
6473 Require a constant for MULT_VAL, so we know it's nonzero.
6474 ??? We disable this optimization to avoid potential
6475 overflows. */
6476
6477 for (v = bl->giv; v; v = v->next_iv)
6478 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6479 && v->add_val == const0_rtx
6480 && ! v->ignore && ! v->maybe_dead && v->always_computable
6481 && v->mode == mode
6482 && 0)
6483 {
6484 /* If the giv V had the auto-inc address optimization applied
6485 to it, and INSN occurs between the giv insn and the biv
6486 insn, then we must adjust the value used here.
6487 This is rare, so we don't bother to do so. */
6488 if (v->auto_inc_opt
6489 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6490 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6491 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6492 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6493 continue;
6494
6495 if (! eliminate_p)
6496 return 1;
6497
6498 /* If the giv has the opposite direction of change,
6499 then reverse the comparison. */
6500 if (INTVAL (v->mult_val) < 0)
6501 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
6502 const0_rtx, v->new_reg);
6503 else
6504 new = v->new_reg;
6505
6506 /* We can probably test that giv's reduced reg. */
6507 if (validate_change (insn, &SET_SRC (x), new, 0))
6508 return 1;
6509 }
6510
6511 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
6512 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
6513 Require a constant for MULT_VAL, so we know it's nonzero.
6514 ??? Do this only if ADD_VAL is a pointer to avoid a potential
6515 overflow problem. */
6516
6517 for (v = bl->giv; v; v = v->next_iv)
6518 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6519 && ! v->ignore && ! v->maybe_dead && v->always_computable
6520 && v->mode == mode
6521 && (GET_CODE (v->add_val) == SYMBOL_REF
6522 || GET_CODE (v->add_val) == LABEL_REF
6523 || GET_CODE (v->add_val) == CONST
6524 || (GET_CODE (v->add_val) == REG
6525 && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
6526 {
6527 /* If the giv V had the auto-inc address optimization applied
6528 to it, and INSN occurs between the giv insn and the biv
6529 insn, then we must adjust the value used here.
6530 This is rare, so we don't bother to do so. */
6531 if (v->auto_inc_opt
6532 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6533 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6534 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6535 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6536 continue;
6537
6538 if (! eliminate_p)
6539 return 1;
6540
6541 /* If the giv has the opposite direction of change,
6542 then reverse the comparison. */
6543 if (INTVAL (v->mult_val) < 0)
6544 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
6545 v->new_reg);
6546 else
6547 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
6548 copy_rtx (v->add_val));
6549
6550 /* Replace biv with the giv's reduced register. */
6551 update_reg_last_use (v->add_val, insn);
6552 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6553 return 1;
6554
6555 /* Insn doesn't support that constant or invariant. Copy it
6556 into a register (it will be a loop invariant.) */
6557 tem = gen_reg_rtx (GET_MODE (v->new_reg));
6558
6559 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
6560 where);
6561
6562 /* Substitute the new register for its invariant value in
6563 the compare expression. */
6564 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
6565 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6566 return 1;
6567 }
6568 }
6569 #endif
6570 break;
6571
6572 case COMPARE:
6573 case EQ: case NE:
6574 case GT: case GE: case GTU: case GEU:
6575 case LT: case LE: case LTU: case LEU:
6576 /* See if either argument is the biv. */
6577 if (XEXP (x, 0) == reg)
6578 arg = XEXP (x, 1), arg_operand = 1;
6579 else if (XEXP (x, 1) == reg)
6580 arg = XEXP (x, 0), arg_operand = 0;
6581 else
6582 break;
6583
6584 if (CONSTANT_P (arg))
6585 {
6586 /* First try to replace with any giv that has constant positive
6587 mult_val and constant add_val. We might be able to support
6588 negative mult_val, but it seems complex to do it in general. */
6589
6590 for (v = bl->giv; v; v = v->next_iv)
6591 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6592 && (GET_CODE (v->add_val) == SYMBOL_REF
6593 || GET_CODE (v->add_val) == LABEL_REF
6594 || GET_CODE (v->add_val) == CONST
6595 || (GET_CODE (v->add_val) == REG
6596 && REGNO_POINTER_FLAG (REGNO (v->add_val))))
6597 && ! v->ignore && ! v->maybe_dead && v->always_computable
6598 && v->mode == mode)
6599 {
6600 /* If the giv V had the auto-inc address optimization applied
6601 to it, and INSN occurs between the giv insn and the biv
6602 insn, then we must adjust the value used here.
6603 This is rare, so we don't bother to do so. */
6604 if (v->auto_inc_opt
6605 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6606 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6607 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6608 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6609 continue;
6610
6611 if (! eliminate_p)
6612 return 1;
6613
6614 /* Replace biv with the giv's reduced reg. */
6615 XEXP (x, 1-arg_operand) = v->new_reg;
6616
6617 /* If all constants are actually constant integers and
6618 the derived constant can be directly placed in the COMPARE,
6619 do so. */
6620 if (GET_CODE (arg) == CONST_INT
6621 && GET_CODE (v->mult_val) == CONST_INT
6622 && GET_CODE (v->add_val) == CONST_INT
6623 && validate_change (insn, &XEXP (x, arg_operand),
6624 GEN_INT (INTVAL (arg)
6625 * INTVAL (v->mult_val)
6626 + INTVAL (v->add_val)), 0))
6627 return 1;
6628
6629 /* Otherwise, load it into a register. */
6630 tem = gen_reg_rtx (mode);
6631 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6632 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
6633 return 1;
6634
6635 /* If that failed, put back the change we made above. */
6636 XEXP (x, 1-arg_operand) = reg;
6637 }
6638
6639 /* Look for giv with positive constant mult_val and nonconst add_val.
6640 Insert insns to calculate new compare value.
6641 ??? Turn this off due to possible overflow. */
6642
6643 for (v = bl->giv; v; v = v->next_iv)
6644 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6645 && ! v->ignore && ! v->maybe_dead && v->always_computable
6646 && v->mode == mode
6647 && 0)
6648 {
6649 rtx tem;
6650
6651 /* If the giv V had the auto-inc address optimization applied
6652 to it, and INSN occurs between the giv insn and the biv
6653 insn, then we must adjust the value used here.
6654 This is rare, so we don't bother to do so. */
6655 if (v->auto_inc_opt
6656 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6657 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6658 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6659 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6660 continue;
6661
6662 if (! eliminate_p)
6663 return 1;
6664
6665 tem = gen_reg_rtx (mode);
6666
6667 /* Replace biv with giv's reduced register. */
6668 validate_change (insn, &XEXP (x, 1 - arg_operand),
6669 v->new_reg, 1);
6670
6671 /* Compute value to compare against. */
6672 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6673 /* Use it in this insn. */
6674 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6675 if (apply_change_group ())
6676 return 1;
6677 }
6678 }
6679 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
6680 {
6681 if (invariant_p (arg) == 1)
6682 {
6683 /* Look for giv with constant positive mult_val and nonconst
6684 add_val. Insert insns to compute new compare value.
6685 ??? Turn this off due to possible overflow. */
6686
6687 for (v = bl->giv; v; v = v->next_iv)
6688 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6689 && ! v->ignore && ! v->maybe_dead && v->always_computable
6690 && v->mode == mode
6691 && 0)
6692 {
6693 rtx tem;
6694
6695 /* If the giv V had the auto-inc address optimization applied
6696 to it, and INSN occurs between the giv insn and the biv
6697 insn, then we must adjust the value used here.
6698 This is rare, so we don't bother to do so. */
6699 if (v->auto_inc_opt
6700 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6701 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6702 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6703 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6704 continue;
6705
6706 if (! eliminate_p)
6707 return 1;
6708
6709 tem = gen_reg_rtx (mode);
6710
6711 /* Replace biv with giv's reduced register. */
6712 validate_change (insn, &XEXP (x, 1 - arg_operand),
6713 v->new_reg, 1);
6714
6715 /* Compute value to compare against. */
6716 emit_iv_add_mult (arg, v->mult_val, v->add_val,
6717 tem, where);
6718 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6719 if (apply_change_group ())
6720 return 1;
6721 }
6722 }
6723
6724 /* This code has problems. Basically, you can't know when
6725 seeing if we will eliminate BL, whether a particular giv
6726 of ARG will be reduced. If it isn't going to be reduced,
6727 we can't eliminate BL. We can try forcing it to be reduced,
6728 but that can generate poor code.
6729
6730 The problem is that the benefit of reducing TV, below, should
6731 be increased if BL can actually be eliminated, but this means
6732 we might have to do a topological sort of the order in which
6733 we try to process biv. It doesn't seem worthwhile to do
6734 this sort of thing now. */
6735
6736 #if 0
6737 /* Otherwise the reg compared with had better be a biv. */
6738 if (GET_CODE (arg) != REG
6739 || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
6740 return 0;
6741
6742 /* Look for a pair of givs, one for each biv,
6743 with identical coefficients. */
6744 for (v = bl->giv; v; v = v->next_iv)
6745 {
6746 struct induction *tv;
6747
6748 if (v->ignore || v->maybe_dead || v->mode != mode)
6749 continue;
6750
6751 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
6752 if (! tv->ignore && ! tv->maybe_dead
6753 && rtx_equal_p (tv->mult_val, v->mult_val)
6754 && rtx_equal_p (tv->add_val, v->add_val)
6755 && tv->mode == mode)
6756 {
6757 /* If the giv V had the auto-inc address optimization applied
6758 to it, and INSN occurs between the giv insn and the biv
6759 insn, then we must adjust the value used here.
6760 This is rare, so we don't bother to do so. */
6761 if (v->auto_inc_opt
6762 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6763 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6764 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6765 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6766 continue;
6767
6768 if (! eliminate_p)
6769 return 1;
6770
6771 /* Replace biv with its giv's reduced reg. */
6772 XEXP (x, 1-arg_operand) = v->new_reg;
6773 /* Replace other operand with the other giv's
6774 reduced reg. */
6775 XEXP (x, arg_operand) = tv->new_reg;
6776 return 1;
6777 }
6778 }
6779 #endif
6780 }
6781
6782 /* If we get here, the biv can't be eliminated. */
6783 return 0;
6784
6785 case MEM:
6786 /* If this address is a DEST_ADDR giv, it doesn't matter if the
6787 biv is used in it, since it will be replaced. */
6788 for (v = bl->giv; v; v = v->next_iv)
6789 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
6790 return 1;
6791 break;
6792
6793 default:
6794 break;
6795 }
6796
6797 /* See if any subexpression fails elimination. */
6798 fmt = GET_RTX_FORMAT (code);
6799 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6800 {
6801 switch (fmt[i])
6802 {
6803 case 'e':
6804 if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
6805 eliminate_p, where))
6806 return 0;
6807 break;
6808
6809 case 'E':
6810 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6811 if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
6812 eliminate_p, where))
6813 return 0;
6814 break;
6815 }
6816 }
6817
6818 return 1;
6819 }
6820 \f
6821 /* Return nonzero if the last use of REG
6822 is in an insn following INSN in the same basic block. */
6823
6824 static int
6825 last_use_this_basic_block (reg, insn)
6826 rtx reg;
6827 rtx insn;
6828 {
6829 rtx n;
6830 for (n = insn;
6831 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
6832 n = NEXT_INSN (n))
6833 {
6834 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
6835 return 1;
6836 }
6837 return 0;
6838 }
6839 \f
6840 /* Called via `note_stores' to record the initial value of a biv. Here we
6841 just record the location of the set and process it later. */
6842
6843 static void
6844 record_initial (dest, set)
6845 rtx dest;
6846 rtx set;
6847 {
6848 struct iv_class *bl;
6849
6850 if (GET_CODE (dest) != REG
6851 || REGNO (dest) >= max_reg_before_loop
6852 || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
6853 return;
6854
6855 bl = reg_biv_class[REGNO (dest)];
6856
6857 /* If this is the first set found, record it. */
6858 if (bl->init_insn == 0)
6859 {
6860 bl->init_insn = note_insn;
6861 bl->init_set = set;
6862 }
6863 }
6864 \f
6865 /* If any of the registers in X are "old" and currently have a last use earlier
6866 than INSN, update them to have a last use of INSN. Their actual last use
6867 will be the previous insn but it will not have a valid uid_luid so we can't
6868 use it. */
6869
6870 static void
6871 update_reg_last_use (x, insn)
6872 rtx x;
6873 rtx insn;
6874 {
6875 /* Check for the case where INSN does not have a valid luid. In this case,
6876 there is no need to modify the regno_last_uid, as this can only happen
6877 when code is inserted after the loop_end to set a pseudo's final value,
6878 and hence this insn will never be the last use of x. */
6879 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
6880 && INSN_UID (insn) < max_uid_for_loop
6881 && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
6882 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
6883 else
6884 {
6885 register int i, j;
6886 register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
6887 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6888 {
6889 if (fmt[i] == 'e')
6890 update_reg_last_use (XEXP (x, i), insn);
6891 else if (fmt[i] == 'E')
6892 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6893 update_reg_last_use (XVECEXP (x, i, j), insn);
6894 }
6895 }
6896 }
6897 \f
6898 /* Given a jump insn JUMP, return the condition that will cause it to branch
6899 to its JUMP_LABEL. If the condition cannot be understood, or is an
6900 inequality floating-point comparison which needs to be reversed, 0 will
6901 be returned.
6902
6903 If EARLIEST is non-zero, it is a pointer to a place where the earliest
6904 insn used in locating the condition was found. If a replacement test
6905 of the condition is desired, it should be placed in front of that
6906 insn and we will be sure that the inputs are still valid.
6907
6908 The condition will be returned in a canonical form to simplify testing by
6909 callers. Specifically:
6910
6911 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
6912 (2) Both operands will be machine operands; (cc0) will have been replaced.
6913 (3) If an operand is a constant, it will be the second operand.
6914 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
6915 for GE, GEU, and LEU. */
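/* For example, a jump conditioned on (le (reg:SI 70) (const_int 4))
   yields the canonicalized condition (lt (reg:SI 70) (const_int 5)).  */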
6916
6917 rtx
6918 get_condition (jump, earliest)
6919 rtx jump;
6920 rtx *earliest;
6921 {
6922 enum rtx_code code;
6923 rtx prev = jump;
6924 rtx set;
6925 rtx tem;
6926 rtx op0, op1;
6927 int reverse_code = 0;
6928 int did_reverse_condition = 0;
6929
6930 /* If this is not a standard conditional jump, we can't parse it. */
6931 if (GET_CODE (jump) != JUMP_INSN
6932 || ! condjump_p (jump) || simplejump_p (jump))
6933 return 0;
6934
6935 code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
6936 op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
6937 op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
6938
6939 if (earliest)
6940 *earliest = jump;
6941
6942 /* If this branches to JUMP_LABEL when the condition is false, reverse
6943 the condition. */
6944 if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
6945 && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
6946 code = reverse_condition (code), did_reverse_condition ^= 1;
6947
6948 /* If we are comparing a register with zero, see if the register is set
6949 in the previous insn to a COMPARE or a comparison operation. Perform
6950 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
6951 in cse.c */
6952
6953 while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
6954 {
6955 /* Set non-zero when we find something of interest. */
6956 rtx x = 0;
6957
6958 #ifdef HAVE_cc0
6959 /* If comparison with cc0, import actual comparison from compare
6960 insn. */
6961 if (op0 == cc0_rtx)
6962 {
6963 if ((prev = prev_nonnote_insn (prev)) == 0
6964 || GET_CODE (prev) != INSN
6965 || (set = single_set (prev)) == 0
6966 || SET_DEST (set) != cc0_rtx)
6967 return 0;
6968
6969 op0 = SET_SRC (set);
6970 op1 = CONST0_RTX (GET_MODE (op0));
6971 if (earliest)
6972 *earliest = prev;
6973 }
6974 #endif
6975
6976 /* If this is a COMPARE, pick up the two things being compared. */
6977 if (GET_CODE (op0) == COMPARE)
6978 {
6979 op1 = XEXP (op0, 1);
6980 op0 = XEXP (op0, 0);
6981 continue;
6982 }
6983 else if (GET_CODE (op0) != REG)
6984 break;
6985
6986 /* Go back to the previous insn. Stop if it is not an INSN. We also
6987 stop if it isn't a single set or if it has a REG_INC note because
6988 we don't want to bother dealing with it. */
6989
6990 if ((prev = prev_nonnote_insn (prev)) == 0
6991 || GET_CODE (prev) != INSN
6992 || FIND_REG_INC_NOTE (prev, 0)
6993 || (set = single_set (prev)) == 0)
6994 break;
6995
6996 /* If this is setting OP0, get what it sets it to if it looks
6997 relevant. */
6998 if (rtx_equal_p (SET_DEST (set), op0))
6999 {
7000 enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
7001
7002 if ((GET_CODE (SET_SRC (set)) == COMPARE
7003 || (((code == NE
7004 || (code == LT
7005 && GET_MODE_CLASS (inner_mode) == MODE_INT
7006 && (GET_MODE_BITSIZE (inner_mode)
7007 <= HOST_BITS_PER_WIDE_INT)
7008 && (STORE_FLAG_VALUE
7009 & ((HOST_WIDE_INT) 1
7010 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7011 #ifdef FLOAT_STORE_FLAG_VALUE
7012 || (code == LT
7013 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7014 && FLOAT_STORE_FLAG_VALUE < 0)
7015 #endif
7016 ))
7017 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')))
7018 x = SET_SRC (set);
7019 else if (((code == EQ
7020 || (code == GE
7021 && (GET_MODE_BITSIZE (inner_mode)
7022 <= HOST_BITS_PER_WIDE_INT)
7023 && GET_MODE_CLASS (inner_mode) == MODE_INT
7024 && (STORE_FLAG_VALUE
7025 & ((HOST_WIDE_INT) 1
7026 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7027 #ifdef FLOAT_STORE_FLAG_VALUE
7028 || (code == GE
7029 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7030 && FLOAT_STORE_FLAG_VALUE < 0)
7031 #endif
7032 ))
7033 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')
7034 {
7035 /* We might have reversed a LT to get a GE here. But this wasn't
7036 actually the comparison of data, so we don't flag that we
7037 have had to reverse the condition. */
7038 did_reverse_condition ^= 1;
7039 reverse_code = 1;
7040 x = SET_SRC (set);
7041 }
7042 else
7043 break;
7044 }
7045
7046 else if (reg_set_p (op0, prev))
7047 /* If this sets OP0, but not directly, we have to give up. */
7048 break;
7049
7050 if (x)
7051 {
7052 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
7053 code = GET_CODE (x);
7054 if (reverse_code)
7055 {
7056 code = reverse_condition (code);
7057 did_reverse_condition ^= 1;
7058 reverse_code = 0;
7059 }
7060
7061 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
7062 if (earliest)
7063 *earliest = prev;
7064 }
7065 }
7066
7067 /* If constant is first, put it last. */
7068 if (CONSTANT_P (op0))
7069 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
7070
7071 /* If OP0 is the result of a comparison, we weren't able to find what
7072 was really being compared, so fail. */
7073 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
7074 return 0;
7075
7076 /* Canonicalize any ordered comparison with integers involving equality
7077 if we can do computations in the relevant mode and we do not
7078 overflow. */
7079
7080 if (GET_CODE (op1) == CONST_INT
7081 && GET_MODE (op0) != VOIDmode
7082 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
7083 {
7084 HOST_WIDE_INT const_val = INTVAL (op1);
7085 unsigned HOST_WIDE_INT uconst_val = const_val;
7086 unsigned HOST_WIDE_INT max_val
7087 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
7088
7089 switch (code)
7090 {
7091 case LE:
7092 if (const_val != max_val >> 1)
7093 code = LT, op1 = GEN_INT (const_val + 1);
7094 break;
7095
7096 /* When cross-compiling, const_val might be sign-extended from
7097 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
7098 case GE:
7099 if ((const_val & max_val)
7100 != (((HOST_WIDE_INT) 1
7101 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
7102 code = GT, op1 = GEN_INT (const_val - 1);
7103 break;
7104
7105 case LEU:
7106 if (uconst_val < max_val)
7107 code = LTU, op1 = GEN_INT (uconst_val + 1);
7108 break;
7109
7110 case GEU:
7111 if (uconst_val != 0)
7112 code = GTU, op1 = GEN_INT (uconst_val - 1);
7113 break;
7114
7115 default:
7116 break;
7117 }
7118 }
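/* Illustrative examples (not from the original sources): in SImode,
   (le (reg) (const_int 7)) becomes (lt (reg) (const_int 8)), and
   (geu (reg) (const_int 7)) becomes (gtu (reg) (const_int 6)); but
   (le (reg) (const_int 2147483647)) is left alone, since const_val + 1
   would overflow the mode.  */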
7119
7120 /* If this was floating-point and we reversed anything other than an
7121 EQ or NE, return zero. */
7122 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
7123 && did_reverse_condition && code != NE && code != EQ
7124 && ! flag_fast_math
7125 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
7126 return 0;
7127
7128 #ifdef HAVE_cc0
7129 /* Never return CC0; return zero instead. */
7130 if (op0 == cc0_rtx)
7131 return 0;
7132 #endif
7133
7134 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
7135 }
7136
7137 /* Similar to the routine above, except that we also put an invariant last
7138 unless both operands are invariants.  */
7139
7140 rtx
7141 get_condition_for_loop (x)
7142 rtx x;
7143 {
7144 rtx comparison = get_condition (x, NULL_PTR);
7145
7146 if (comparison == 0
7147 || ! invariant_p (XEXP (comparison, 0))
7148 || invariant_p (XEXP (comparison, 1)))
7149 return comparison;
7150
7151 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
7152 XEXP (comparison, 1), XEXP (comparison, 0));
7153 }
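/* For instance (a sketch): if get_condition returns
   (gt (reg invariant) (reg biv)), this routine returns the equivalent
   (lt (reg biv) (reg invariant)), so callers may rely on any
   loop-invariant operand being the second one.  */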
7154
7155 #ifdef HAIFA
7156 /* Analyze a loop in order to instrument it with the use of a count register.
7157 loop_start and loop_end are the first and last insns of the loop.
7158 This function works in cooperation with insert_bct ().
7159 loop_can_insert_bct[loop_num] is set according to whether the optimization
7160 is applicable to the loop. When it is applicable, the following variables
7161 are also set:
7162 loop_start_value[loop_num]
7163 loop_comparison_value[loop_num]
7164 loop_increment[loop_num]
7165 loop_comparison_code[loop_num] */
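/* As an illustration (a hypothetical example, assuming n and the increment
   are loop invariant): for a source loop such as
       for (i = 0; i < n; i += 2) ...
   this function records loop_start_value = (const_int 0),
   loop_comparison_value = the rtx holding n, loop_increment = (const_int 2)
   and loop_comparison_code = LT.  */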
7166
7167 static void
7168 analyze_loop_iterations (loop_start, loop_end)
7169 rtx loop_start, loop_end;
7170 {
7171 rtx comparison, comparison_value;
7172 rtx iteration_var, initial_value, increment;
7173 enum rtx_code comparison_code;
7174
7175 rtx last_loop_insn;
7176 rtx insn;
7177 int i;
7178
7179 /* The mode of the loop variable.  */
7180 enum machine_mode original_mode;
7181
7182 /* find the number of the loop */
7183 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7184
7185 /* We change our mind only when we are sure that the loop will be instrumented.  */
7186 loop_can_insert_bct[loop_num] = 0;
7187
7188 /* Is the optimization suppressed?  */
7189 if (! flag_branch_on_count_reg)
7190 return;
7191
7192 /* make sure that count-reg is not in use */
7193 if (loop_used_count_register[loop_num]){
7194 if (loop_dump_stream)
7195 fprintf (loop_dump_stream,
7196 "analyze_loop_iterations %d: BCT instrumentation failed: count register already in use\n",
7197 loop_num);
7198 return;
7199 }
7200
7201 /* make sure that the function has no indirect jumps. */
7202 if (indirect_jump_in_function){
7203 if (loop_dump_stream)
7204 fprintf (loop_dump_stream,
7205 "analyze_loop_iterations %d: BCT instrumentation failed: indirect jump in function\n",
7206 loop_num);
7207 return;
7208 }
7209
7210 /* make sure that the last loop insn is a conditional jump */
7211 last_loop_insn = PREV_INSN (loop_end);
7212 if (GET_CODE (last_loop_insn) != JUMP_INSN || !condjump_p (last_loop_insn)) {
7213 if (loop_dump_stream)
7214 fprintf (loop_dump_stream,
7215 "analyze_loop_iterations %d: BCT instrumentation failed: invalid jump at loop end\n",
7216 loop_num);
7217 return;
7218 }
7219
7220 /* First find the iteration variable. If the last insn is a conditional
7221 branch, and the insn preceding it tests a register value, make that
7222 register the iteration variable. */
7223
7224 /* We used to use prev_nonnote_insn here, but that fails because it might
7225 accidentally get the branch for a contained loop if the branch for this
7226 loop was deleted. We can only trust branches immediately before the
7227 loop_end. */
7228
7229 comparison = get_condition_for_loop (last_loop_insn);
7230 /* ??? Note that get_condition may switch the positions of the induction
7231 variable and the invariant register when it canonicalizes the comparison.  */
7232
7233 if (comparison == 0) {
7234 if (loop_dump_stream)
7235 fprintf (loop_dump_stream,
7236 "analyze_loop_iterations %d: BCT instrumentation failed: comparison not found\n",
7237 loop_num);
7238 return;
7239 }
7240
7241 comparison_code = GET_CODE (comparison);
7242 iteration_var = XEXP (comparison, 0);
7243 comparison_value = XEXP (comparison, 1);
7244
7245 original_mode = GET_MODE (iteration_var);
7246 if (GET_MODE_CLASS (original_mode) != MODE_INT
7247 || GET_MODE_SIZE (original_mode) != UNITS_PER_WORD) {
7248 if (loop_dump_stream)
7249 fprintf (loop_dump_stream,
7250 "analyze_loop_iterations %d: BCT Instrumentation failed: loop variable not integer\n",
7251 loop_num);
7252 return;
7253 }
7254
7255 /* get info about loop bounds and increment */
7256 iteration_info (iteration_var, &initial_value, &increment,
7257 loop_start, loop_end);
7258
7259 /* make sure that all required loop data were found */
7260 if (!(initial_value && increment && comparison_value
7261 && invariant_p (comparison_value) && invariant_p (increment)
7262 && ! indirect_jump_in_function))
7263 {
7264 if (loop_dump_stream) {
7265 fprintf (loop_dump_stream,
7266 "analyze_loop_iterations %d: BCT instrumentation failed because of wrong loop: ", loop_num);
7267 if (!(initial_value && increment && comparison_value)) {
7268 fprintf (loop_dump_stream, "\tbounds not available: ");
7269 if ( ! initial_value )
7270 fprintf (loop_dump_stream, "initial ");
7271 if ( ! increment )
7272 fprintf (loop_dump_stream, "increment ");
7273 if ( ! comparison_value )
7274 fprintf (loop_dump_stream, "comparison ");
7275 fprintf (loop_dump_stream, "\n");
7276 }
7277 if (!invariant_p (comparison_value) || !invariant_p (increment))
7278 fprintf (loop_dump_stream, "\tloop bounds not invariant\n");
7279 }
7280 return;
7281 }
7282
7283 /* make sure that the increment is constant */
7284 if (GET_CODE (increment) != CONST_INT) {
7285 if (loop_dump_stream)
7286 fprintf (loop_dump_stream,
7287 "analyze_loop_iterations %d: instrumentation failed: not arithmetic loop\n",
7288 loop_num);
7289 return;
7290 }
7291
7292 /* Make sure that the loop contains neither a function call nor a jump
7293 on table (the count register might be altered by the called function,
7294 and might be used for a branch on table).  */
7295 for (insn = loop_start; insn && insn != loop_end; insn = NEXT_INSN (insn)) {
7296 if (GET_CODE (insn) == CALL_INSN){
7297 if (loop_dump_stream)
7298 fprintf (loop_dump_stream,
7299 "analyze_loop_iterations %d: BCT instrumentation failed: function call in the loop\n",
7300 loop_num);
7301 return;
7302 }
7303
7304 if (GET_CODE (insn) == JUMP_INSN
7305 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
7306 || GET_CODE (PATTERN (insn)) == ADDR_VEC)){
7307 if (loop_dump_stream)
7308 fprintf (loop_dump_stream,
7309 "analyze_loop_iterations %d: BCT instrumentation failed: computed branch in the loop\n",
7310 loop_num);
7311 return;
7312 }
7313 }
7314
7315 /* At this point, we are sure that the loop can be instrumented with BCT.
7316 Some loops, however, will not be instrumented; the final decision
7317 is made by insert_bct ().  */
7318 if (loop_dump_stream)
7319 fprintf (loop_dump_stream,
7320 "analyze_loop_iterations: loop (luid =%d) can be BCT instrumented.\n",
7321 loop_num);
7322
7323 /* Mark all enclosing loops as unable to use the count register.  */
7324 /* ???: In fact, since insert_bct may decide not to instrument this loop,
7325 marking here may prevent instrumenting an enclosing loop that could
7326 actually be instrumented. But since this is rare, it is safer to mark
7327 here in case the order of calling (analyze/insert)_bct would be changed. */
7328 for (i = loop_num; i != -1; i = loop_outer_loop[i])
7329 loop_used_count_register[i] = 1;
7330
7331 /* Set data structures which will be used by the instrumentation phase */
7332 loop_start_value[loop_num] = initial_value;
7333 loop_comparison_value[loop_num] = comparison_value;
7334 loop_increment[loop_num] = increment;
7335 loop_comparison_code[loop_num] = comparison_code;
7336 loop_can_insert_bct[loop_num] = 1;
7337 }
7338
7339
7340 /* Instrument loop for insertion of a BCT instruction. We distinguish between
7341 loops with compile-time bounds and those with run-time bounds. The loop
7342 behaviour is analyzed according to the following characteristics/variables:
7343 ; Input variables:
7344 ; comparison-value: the value to which the iteration counter is compared.
7345 ; initial-value: iteration-counter initial value.
7346 ; increment: iteration-counter increment.
7347 ; Computed variables:
7348 ; increment-direction: the sign of the increment.
7349 ; compare-direction: '1' for GT, GE, '-1' for LT, LE, '0' for NE.
7350 ; range-direction: sign (comparison-value - initial-value)
7351 We give up on the following cases:
7352 ; loop variable overflow.
7353 ; run-time loop bounds with comparison code NE.
7354 */
7355
7356 static void
7357 insert_bct (loop_start, loop_end)
7358 rtx loop_start, loop_end;
7359 {
7360 rtx initial_value, comparison_value, increment;
7361 enum rtx_code comparison_code;
7362
7363 int increment_direction, compare_direction;
7364 int unsigned_p = 0;
7365
7366 /* If the loop condition is <= or >=, the number of iterations
7367 is 1 more than the range of the bounds of the loop.  */
7368 int add_iteration = 0;
7369
7370 /* The only machine mode we work with is the machine's word-sized
7371 integer mode, assumed here to be SImode.  */
7372 enum machine_mode loop_var_mode = SImode;
7373
7374 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7375
7376 /* Get loop variables. No need to check that these are valid; they were
7377 already checked in analyze_loop_iterations ().  */
7378 comparison_code = loop_comparison_code[loop_num];
7379 initial_value = loop_start_value[loop_num];
7380 comparison_value = loop_comparison_value[loop_num];
7381 increment = loop_increment[loop_num];
7382
7383 /* check analyze_loop_iterations decision for this loop. */
7384 if (! loop_can_insert_bct[loop_num]){
7385 if (loop_dump_stream)
7386 fprintf (loop_dump_stream,
7387 "insert_bct: [%d] - was decided not to instrument by analyze_loop_iterations ()\n",
7388 loop_num);
7389 return;
7390 }
7391
7392 /* It's impossible to instrument a completely unrolled loop.  */
7393 if (loop_unroll_factor [loop_num] == -1)
7394 return;
7395
7396 /* Make sure that the last loop insn is a conditional jump.
7397 This check is repeated from analyze_loop_iterations (),
7398 because unrolling might have changed that. */
7399 if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
7400 || !condjump_p (PREV_INSN (loop_end))) {
7401 if (loop_dump_stream)
7402 fprintf (loop_dump_stream,
7403 "insert_bct: not instrumenting BCT because of invalid branch\n");
7404 return;
7405 }
7406
7407 /* fix increment in case loop was unrolled. */
7408 if (loop_unroll_factor [loop_num] > 1)
7409 increment = GEN_INT (INTVAL (increment) * loop_unroll_factor [loop_num]);
7410
7411 /* Determine the properties and directions of the loop.  */
7412 increment_direction = (INTVAL (increment) > 0) ? 1 : -1;
7413 switch (comparison_code) {
7414 case LEU:
7415 unsigned_p = 1;
7416 /* fallthrough */
7417 case LE:
7418 compare_direction = 1;
7419 add_iteration = 1;
7420 break;
7421 case GEU:
7422 unsigned_p = 1;
7423 /* fallthrough */
7424 case GE:
7425 compare_direction = -1;
7426 add_iteration = 1;
7427 break;
7428 case EQ:
7429 /* in this case we cannot know the number of iterations */
7430 if (loop_dump_stream)
7431 fprintf (loop_dump_stream,
7432 "insert_bct: %d: loop cannot be instrumented: == in condition\n",
7433 loop_num);
7434 return;
7435 case LTU:
7436 unsigned_p = 1;
7437 /* fallthrough */
7438 case LT:
7439 compare_direction = 1;
7440 break;
7441 case GTU:
7442 unsigned_p = 1;
7443 /* fallthrough */
7444 case GT:
7445 compare_direction = -1;
7446 break;
7447 case NE:
7448 compare_direction = 0;
7449 break;
7450 default:
7451 abort ();
7452 }
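/* For reference, the mapping established by the switch above:
   LE/LEU:  compare_direction =  1, add_iteration = 1
   GE/GEU:  compare_direction = -1, add_iteration = 1
   LT/LTU:  compare_direction =  1
   GT/GTU:  compare_direction = -1
   NE:      compare_direction =  0
   (unsigned_p is set for the unsigned variants; EQ was rejected above.)  */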
7453
7454
7455 /* Make sure that the loop does not end by an overflow (NE is checked below).  */
7456 if (compare_direction != 0 && compare_direction != increment_direction) {
7457 if (loop_dump_stream)
7458 fprintf (loop_dump_stream,
7459 "insert_bct: %d: loop cannot be instrumented: terminated by overflow\n",
7460 loop_num);
7461 return;
7462 }
7463
7464 /* try to instrument the loop. */
7465
7466 /* Handle the simpler case, where the bounds are known at compile time. */
7467 if (GET_CODE (initial_value) == CONST_INT && GET_CODE (comparison_value) == CONST_INT)
7468 {
7469 int n_iterations;
7470 int increment_value_abs = INTVAL (increment) * increment_direction;
7471
7472 /* check the relation between compare-val and initial-val */
7473 int difference = INTVAL (comparison_value) - INTVAL (initial_value);
7474 int range_direction = (difference > 0) ? 1 : -1;
7475
7476 /* make sure the loop executes enough iterations to gain from BCT */
7477 if (difference > -3 && difference < 3) {
7478 if (loop_dump_stream)
7479 fprintf (loop_dump_stream,
7480 "insert_bct: loop %d not BCT instrumented: too small iteration count.\n",
7481 loop_num);
7482 return;
7483 }
7484
7485 /* make sure that the loop executes at least once */
7486 if ((range_direction == 1 && compare_direction == -1)
7487 || (range_direction == -1 && compare_direction == 1))
7488 {
7489 if (loop_dump_stream)
7490 fprintf (loop_dump_stream,
7491 "insert_bct: loop %d: does not iterate even once. Not instrumenting.\n",
7492 loop_num);
7493 return;
7494 }
7495
7496 /* Make sure that the loop does not end by an overflow (with compile-time
7497 bounds we must have an additional check for overflow, because here
7498 we also support the compare code of 'NE').  */
7499 if (comparison_code == NE
7500 && increment_direction != range_direction) {
7501 if (loop_dump_stream)
7502 fprintf (loop_dump_stream,
7503 "insert_bct (compile time bounds): %d: loop not instrumented: terminated by overflow\n",
7504 loop_num);
7505 return;
7506 }
7507
7508 /* Determine the number of iterations by:
7509 ;
7510 ; compare-val - initial-val + (increment -1) + additional-iteration
7511 ; num_iterations = -----------------------------------------------------------------
7512 ; increment
7513 */
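/* A worked example (illustrative only): for a loop equivalent to
   for (i = 0; i < 10; i += 3), difference = 10, increment_value_abs = 3
   and add_iteration = 0, giving (10 + 3 - 1 + 0) / 3 = 4 iterations
   (i = 0, 3, 6, 9).  */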
7514 difference = (range_direction > 0) ? difference : -difference;
7515 #if 0
7516 fprintf (stderr, "difference is: %d\n", difference); /* @*/
7517 fprintf (stderr, "increment_value_abs is: %d\n", increment_value_abs); /* @*/
7518 fprintf (stderr, "add_iteration is: %d\n", add_iteration); /* @*/
7519 fprintf (stderr, "INTVAL (comparison_value) is: %d\n", INTVAL (comparison_value)); /* @*/
7520 fprintf (stderr, "INTVAL (initial_value) is: %d\n", INTVAL (initial_value)); /* @*/
7521 #endif
7522
7523 if (increment_value_abs == 0) {
7524 fprintf (stderr, "insert_bct: error: increment == 0 !!!\n");
7525 abort ();
7526 }
7527 n_iterations = (difference + increment_value_abs - 1 + add_iteration)
7528 / increment_value_abs;
7529
7530 #if 0
7531 fprintf (stderr, "number of iterations is: %d\n", n_iterations); /* @*/
7532 #endif
7533 instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));
7534
7535 /* Done with this loop. */
7536 return;
7537 }
7538
7539 /* Handle the more complex case, where the bounds are NOT known at compile
7540 time; in this case we generate a run-time calculation of the iteration count.  */
7541
7542 /* With run-time bounds, we give up if the comparison is of the form '!='.  */
7543 if (comparison_code == NE) {
7544 if (loop_dump_stream)
7545 fprintf (loop_dump_stream,
7546 "insert_bct: fail for loop %d: runtime bounds with != comparison\n",
7547 loop_num);
7548 return;
7549 }
7550
7551 else {
7552 /* We rely on the existence of a run-time guard to ensure that the
7553 loop executes at least once.  */
7554 rtx sequence;
7555 rtx iterations_num_reg;
7556
7557 int increment_value_abs = INTVAL (increment) * increment_direction;
7558
7559 /* Make sure that the increment is a power of two; otherwise an
7560 (expensive) divide is needed.  */
7561 if (exact_log2 (increment_value_abs) == -1)
7562 {
7563 if (loop_dump_stream)
7564 fprintf (loop_dump_stream,
7565 "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
7566 return;
7567 }
7568
7569 /* compute the number of iterations */
7570 start_sequence ();
7571 {
7572 rtx temp_reg;
7573
7574 /* Again, the number of iterations is calculated by:
7575 ;
7576 ; compare-val - initial-val + (increment -1) + additional-iteration
7577 ; num_iterations = -----------------------------------------------------------------
7578 ; increment
7579 */
7580 /* ??? Do we have to call copy_rtx here before passing rtx to
7581 expand_binop? */
7582 if (compare_direction > 0) {
7583 /* <, <= : the loop variable is increasing.  */
7584 temp_reg = expand_binop (loop_var_mode, sub_optab, comparison_value,
7585 initial_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
7586 }
7587 else {
7588 temp_reg = expand_binop (loop_var_mode, sub_optab, initial_value,
7589 comparison_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
7590 }
7591
7592 if (increment_value_abs - 1 + add_iteration != 0)
7593 temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
7594 GEN_INT (increment_value_abs - 1 + add_iteration),
7595 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7596
7597 if (increment_value_abs != 1)
7598 {
7599 /* ??? This will generate an expensive divide instruction for
7600 most targets. The original authors apparently expected this
7601 to be a shift, since they test for power-of-2 divisors above,
7602 but just naively generating a divide instruction will not give
7603 a shift. It happens to work for the PowerPC target because
7604 the rs6000.md file has a divide pattern that emits shifts.
7605 It will probably not work for any other target. */
7606 iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
7607 temp_reg,
7608 GEN_INT (increment_value_abs),
7609 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7610 }
7611 else
7612 iterations_num_reg = temp_reg;
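	  /* A possible alternative (a sketch only, deliberately not enabled):
	     since increment_value_abs was verified above to be a power of two,
	     and temp_reg is non-negative whenever the loop iterates at all,
	     the division above could be emitted as a logical shift instead,
	     avoiding the dependence on the target's divide pattern noted in
	     the ??? comment.  */
#if 0
	  iterations_num_reg
	    = expand_binop (loop_var_mode, lshr_optab, temp_reg,
			    GEN_INT (exact_log2 (increment_value_abs)),
			    NULL_RTX, 0, OPTAB_LIB_WIDEN);
#endif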
7613 }
7614 sequence = gen_sequence ();
7615 end_sequence ();
7616 emit_insn_before (sequence, loop_start);
7617 instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
7618 }
7619 }
7620
7621 /* Instrument loop by inserting a BCT in it. This is done in the following way:
7622 1. A new register is created and assigned the hard register number of the
7623 count register.
7624 2. In the head of the loop the new variable is initialized by the value
7625 passed in the loop_num_iterations parameter.
7626 3. At the end of the loop, a comparison of the register with 0 is generated.
7627 The created comparison follows the pattern defined for the
7628 decrement_and_branch_on_count insn, so this insn will be generated in the
7629 assembly generation phase.
7630 4. The compare&branch on the old variable is deleted. So, if the loop
7631 variable was not used elsewhere, it will be eliminated by data-flow analysis.  */
7632
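/* Schematically, the emitted RTL looks like this (an illustrative sketch
   only, with temp1 standing for the pseudo created below):

       (set (reg temp1) loop_num_iterations)              ;; after loop_start
       (set (reg:SI COUNT_REGISTER_REGNUM) (reg temp1))
     start_label:
       ... loop body ...
       (decrement_and_branch_on_count (reg:SI COUNT_REGISTER_REGNUM)
                                      start_label)        ;; replaces the old
                                                          ;; compare and branch
*/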
7633 static void
7634 instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
7635 rtx loop_start, loop_end;
7636 rtx loop_num_iterations;
7637 {
7638 rtx temp_reg1, temp_reg2;
7639 rtx start_label;
7640
7641 rtx sequence;
7642 enum machine_mode loop_var_mode = SImode;
7643
7644 #ifdef HAVE_decrement_and_branch_on_count
7645 if (HAVE_decrement_and_branch_on_count)
7646 {
7647 if (loop_dump_stream)
7648 fprintf (loop_dump_stream, "Loop: Inserting BCT\n");
7649
7650 /* Eliminate the check on the old variable: the conditional branch and the compare before it.  */
7651 delete_insn (PREV_INSN (loop_end));
7652 delete_insn (PREV_INSN (loop_end));
7653
7654 /* insert the label which will delimit the start of the loop */
7655 start_label = gen_label_rtx ();
7656 emit_label_after (start_label, loop_start);
7657
7658 /* insert initialization of the count register into the loop header */
7659 start_sequence ();
7660 temp_reg1 = gen_reg_rtx (loop_var_mode);
7661 emit_insn (gen_move_insn (temp_reg1, loop_num_iterations));
7662
7663 /* This will be the count register.  */
7664 temp_reg2 = gen_rtx_REG (loop_var_mode, COUNT_REGISTER_REGNUM);
7665 /* We have to move the value to the count register from a GPR
7666 because the rtx pointed to by loop_num_iterations could contain
7667 an expression which cannot be moved into the count register.  */
7668 emit_insn (gen_move_insn (temp_reg2, temp_reg1));
7669
7670 sequence = gen_sequence ();
7671 end_sequence ();
7672 emit_insn_after (sequence, loop_start);
7673
7674 /* Insert a new comparison on the count register instead of the
7675 old one, generating the needed BCT pattern (which will later be
7676 recognized by the assembly generation phase).  */
7677 emit_jump_insn_before (gen_decrement_and_branch_on_count (temp_reg2, start_label),
7678 loop_end);
7679 LABEL_NUSES (start_label)++;
7680 }
7681
7682 #endif /* HAVE_decrement_and_branch_on_count */
7683 }
7684 #endif /* HAIFA */
7685
7686 /* Scan the function and determine whether it has indirect (computed) jumps.
7687
7688 This is taken mostly from flow.c; similar code exists elsewhere
7689 in the compiler. It may be useful to put this into rtlanal.c. */
7690 static int
7691 indirect_jump_in_function_p (start)
7692 rtx start;
7693 {
7694 rtx insn;
7695
7696 for (insn = start; insn; insn = NEXT_INSN (insn))
7697 if (computed_jump_p (insn))
7698 return 1;
7699
7700 return 0;
7701 }