Oops, bump copyright year.
[gcc.git] / gcc / loop.c
1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 88, 89, 91-7, 1998 Free Software Foundation, Inc.
3
4 This file is part of GNU CC.
5
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
10
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
20
21
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
28
29 It also finds cases where
30 a register is set within the loop by zero-extending a narrower value
31 and changes these to zero the entire register once before the loop
32 and merely copy the low part within the loop.
33
34 Most of the complexity is in heuristics to decide when it is worth
35 while to do these things. */
36
37 #include "config.h"
38 #include <stdio.h>
39 #include "rtl.h"
40 #include "obstack.h"
41 #include "expr.h"
42 #include "insn-config.h"
43 #include "insn-flags.h"
44 #include "regs.h"
45 #include "hard-reg-set.h"
46 #include "recog.h"
47 #include "flags.h"
48 #include "real.h"
49 #include "loop.h"
50 #include "except.h"
51
52 /* Vector mapping INSN_UIDs to luids.
53 The luids are like uids but increase monotonically always.
54 We use them to see whether a jump comes from outside a given loop. */
55
56 int *uid_luid;
57
58 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
59 number the insn is contained in. */
60
61 int *uid_loop_num;
62
63 /* 1 + largest uid of any insn. */
64
65 int max_uid_for_loop;
66
67 /* 1 + luid of last insn. */
68
69 static int max_luid;
70
71 /* Number of loops detected in current function. Used as index to the
72 next few tables. */
73
74 static int max_loop_num;
75
76 /* Indexed by loop number, contains the first and last insn of each loop. */
77
78 static rtx *loop_number_loop_starts, *loop_number_loop_ends;
79
80 /* For each loop, gives the containing loop number, -1 if none. */
81
82 int *loop_outer_loop;
83
84 #ifdef HAIFA
85 /* The main output of analyze_loop_iterations is placed here */
86
87 int *loop_can_insert_bct;
88
89 /* For each loop, determines whether some of its inner loops has used
90 count register */
91
92 int *loop_used_count_register;
93
94 /* loop parameters for arithmetic loops. These loops have a loop variable
95 which is initialized to loop_start_value, incremented in each iteration
96 by "loop_increment". At the end of the iteration the loop variable is
97 compared to the loop_comparison_value (using loop_comparison_code). */
98
99 rtx *loop_increment;
100 rtx *loop_comparison_value;
101 rtx *loop_start_value;
102 enum rtx_code *loop_comparison_code;
103 #endif /* HAIFA */
104
105 /* For each loop, keep track of its unrolling factor.
106 Potential values:
107 0: unrolled
108 1: not unrolled.
109 -1: completely unrolled
110 >0: holds the unroll exact factor. */
111 int *loop_unroll_factor;
112
113 /* Indexed by loop number, contains a nonzero value if the "loop" isn't
114 really a loop (an insn outside the loop branches into it). */
115
116 static char *loop_invalid;
117
118 /* Indexed by loop number, links together all LABEL_REFs which refer to
119 code labels outside the loop. Used by routines that need to know all
120 loop exits, such as final_biv_value and final_giv_value.
121
122 This does not include loop exits due to return instructions. This is
123 because all bivs and givs are pseudos, and hence must be dead after a
124 return, so the presense of a return does not affect any of the
125 optimizations that use this info. It is simpler to just not include return
126 instructions on this list. */
127
128 rtx *loop_number_exit_labels;
129
130 /* Indexed by loop number, counts the number of LABEL_REFs on
131 loop_number_exit_labels for this loop and all loops nested inside it. */
132
133 int *loop_number_exit_count;
134
135 /* Holds the number of loop iterations. It is zero if the number could not be
136 calculated. Must be unsigned since the number of iterations can
137 be as high as 2^wordsize-1. For loops with a wider iterator, this number
138 will will be zero if the number of loop iterations is too large for an
139 unsigned integer to hold. */
140
141 unsigned HOST_WIDE_INT loop_n_iterations;
142
143 /* Nonzero if there is a subroutine call in the current loop. */
144
145 static int loop_has_call;
146
147 /* Nonzero if there is a volatile memory reference in the current
148 loop. */
149
150 static int loop_has_volatile;
151
152 /* Added loop_continue which is the NOTE_INSN_LOOP_CONT of the
153 current loop. A continue statement will generate a branch to
154 NEXT_INSN (loop_continue). */
155
156 static rtx loop_continue;
157
158 /* Indexed by register number, contains the number of times the reg
159 is set during the loop being scanned.
160 During code motion, a negative value indicates a reg that has been
161 made a candidate; in particular -2 means that it is an candidate that
162 we know is equal to a constant and -1 means that it is an candidate
163 not known equal to a constant.
164 After code motion, regs moved have 0 (which is accurate now)
165 while the failed candidates have the original number of times set.
166
167 Therefore, at all times, == 0 indicates an invariant register;
168 < 0 a conditionally invariant one. */
169
170 static int *n_times_set;
171
172 /* Original value of n_times_set; same except that this value
173 is not set negative for a reg whose sets have been made candidates
174 and not set to 0 for a reg that is moved. */
175
176 static int *n_times_used;
177
178 /* Index by register number, 1 indicates that the register
179 cannot be moved or strength reduced. */
180
181 static char *may_not_optimize;
182
183 /* Nonzero means reg N has already been moved out of one loop.
184 This reduces the desire to move it out of another. */
185
186 static char *moved_once;
187
188 /* Array of MEMs that are stored in this loop. If there are too many to fit
189 here, we just turn on unknown_address_altered. */
190
191 #define NUM_STORES 30
192 static rtx loop_store_mems[NUM_STORES];
193
194 /* Index of first available slot in above array. */
195 static int loop_store_mems_idx;
196
197 /* Nonzero if we don't know what MEMs were changed in the current loop.
198 This happens if the loop contains a call (in which case `loop_has_call'
199 will also be set) or if we store into more than NUM_STORES MEMs. */
200
201 static int unknown_address_altered;
202
203 /* Count of movable (i.e. invariant) instructions discovered in the loop. */
204 static int num_movables;
205
206 /* Count of memory write instructions discovered in the loop. */
207 static int num_mem_sets;
208
209 /* Number of loops contained within the current one, including itself. */
210 static int loops_enclosed;
211
212 /* Bound on pseudo register number before loop optimization.
213 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
214 int max_reg_before_loop;
215
216 /* This obstack is used in product_cheap_p to allocate its rtl. It
217 may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
218 If we used the same obstack that it did, we would be deallocating
219 that array. */
220
221 static struct obstack temp_obstack;
222
223 /* This is where the pointer to the obstack being used for RTL is stored. */
224
225 extern struct obstack *rtl_obstack;
226
227 #define obstack_chunk_alloc xmalloc
228 #define obstack_chunk_free free
229
230 extern char *oballoc ();
231 \f
232 /* During the analysis of a loop, a chain of `struct movable's
233 is made to record all the movable insns found.
234 Then the entire chain can be scanned to decide which to move. */
235
236 struct movable
237 {
238 rtx insn; /* A movable insn */
239 rtx set_src; /* The expression this reg is set from. */
240 rtx set_dest; /* The destination of this SET. */
241 rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
242 of any registers used within the LIBCALL. */
243 int consec; /* Number of consecutive following insns
244 that must be moved with this one. */
245 int regno; /* The register it sets */
246 short lifetime; /* lifetime of that register;
247 may be adjusted when matching movables
248 that load the same value are found. */
249 short savings; /* Number of insns we can move for this reg,
250 including other movables that force this
251 or match this one. */
252 unsigned int cond : 1; /* 1 if only conditionally movable */
253 unsigned int force : 1; /* 1 means MUST move this insn */
254 unsigned int global : 1; /* 1 means reg is live outside this loop */
255 /* If PARTIAL is 1, GLOBAL means something different:
256 that the reg is live outside the range from where it is set
257 to the following label. */
258 unsigned int done : 1; /* 1 inhibits further processing of this */
259
260 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
261 In particular, moving it does not make it
262 invariant. */
263 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
264 load SRC, rather than copying INSN. */
265 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
266 enum machine_mode savemode; /* Nonzero means it is a mode for a low part
267 that we should avoid changing when clearing
268 the rest of the reg. */
269 struct movable *match; /* First entry for same value */
270 struct movable *forces; /* An insn that must be moved if this is */
271 struct movable *next;
272 };
273
274 FILE *loop_dump_stream;
275
276 /* Forward declarations. */
277
278 static void find_and_verify_loops ();
279 static void mark_loop_jump ();
280 static void prescan_loop ();
281 static int reg_in_basic_block_p ();
282 static int consec_sets_invariant_p ();
283 static rtx libcall_other_reg ();
284 static int labels_in_range_p ();
285 static void count_loop_regs_set ();
286 static void note_addr_stored ();
287 static int loop_reg_used_before_p ();
288 static void scan_loop ();
289 #if 0
290 static void replace_call_address ();
291 #endif
292 static rtx skip_consec_insns ();
293 static int libcall_benefit ();
294 static void ignore_some_movables ();
295 static void force_movables ();
296 static void combine_movables ();
297 static int rtx_equal_for_loop_p ();
298 static void move_movables ();
299 static void strength_reduce ();
300 static int valid_initial_value_p ();
301 static void find_mem_givs ();
302 static void record_biv ();
303 static void check_final_value ();
304 static void record_giv ();
305 static void update_giv_derive ();
306 static int basic_induction_var ();
307 static rtx simplify_giv_expr ();
308 static int general_induction_var ();
309 static int consec_sets_giv ();
310 static int check_dbra_loop ();
311 static rtx express_from ();
312 static int combine_givs_p ();
313 static void combine_givs ();
314 static int product_cheap_p ();
315 static int maybe_eliminate_biv ();
316 static int maybe_eliminate_biv_1 ();
317 static int last_use_this_basic_block ();
318 static void record_initial ();
319 static void update_reg_last_use ();
320
321 #ifdef HAIFA
322 /* This is extern from unroll.c */
323 void iteration_info ();
324
325 /* Two main functions for implementing bct:
326 first - to be called before loop unrolling, and the second - after */
327 static void analyze_loop_iterations ();
328 static void insert_bct ();
329
330 /* Auxiliary function that inserts the bct pattern into the loop */
331 static void instrument_loop_bct ();
332 #endif /* HAIFA */
333
334 /* Indirect_jump_in_function is computed once per function. */
335 int indirect_jump_in_function = 0;
336 static int indirect_jump_in_function_p ();
337
338 \f
339 /* Relative gain of eliminating various kinds of operations. */
340 int add_cost;
341 #if 0
342 int shift_cost;
343 int mult_cost;
344 #endif
345
346 /* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
347 copy the value of the strength reduced giv to its original register. */
348 int copy_cost;
349
350 void
351 init_loop ()
352 {
353 char *free_point = (char *) oballoc (1);
354 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
355
356 add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);
357
358 /* We multiply by 2 to reconcile the difference in scale between
359 these two ways of computing costs. Otherwise the cost of a copy
360 will be far less than the cost of an add. */
361
362 copy_cost = 2 * 2;
363
364 /* Free the objects we just allocated. */
365 obfree (free_point);
366
367 /* Initialize the obstack used for rtl in product_cheap_p. */
368 gcc_obstack_init (&temp_obstack);
369 }
370 \f
371 /* Entry point of this file. Perform loop optimization
372 on the current function. F is the first insn of the function
373 and DUMPFILE is a stream for output of a trace of actions taken
374 (or 0 if none should be output). */
375
376 void
377 loop_optimize (f, dumpfile, unroll_p)
378 /* f is the first instruction of a chain of insns for one function */
379 rtx f;
380 FILE *dumpfile;
381 int unroll_p;
382 {
383 register rtx insn;
384 register int i;
385 rtx last_insn;
386
387 loop_dump_stream = dumpfile;
388
389 init_recog_no_volatile ();
390 init_alias_analysis ();
391
392 max_reg_before_loop = max_reg_num ();
393
394 moved_once = (char *) alloca (max_reg_before_loop);
395 bzero (moved_once, max_reg_before_loop);
396
397 regs_may_share = 0;
398
399 /* Count the number of loops. */
400
401 max_loop_num = 0;
402 for (insn = f; insn; insn = NEXT_INSN (insn))
403 {
404 if (GET_CODE (insn) == NOTE
405 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
406 max_loop_num++;
407 }
408
409 /* Don't waste time if no loops. */
410 if (max_loop_num == 0)
411 return;
412
413 /* Get size to use for tables indexed by uids.
414 Leave some space for labels allocated by find_and_verify_loops. */
415 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
416
417 uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
418 uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));
419
420 bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
421 bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));
422
423 /* Allocate tables for recording each loop. We set each entry, so they need
424 not be zeroed. */
425 loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
426 loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
427 loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
428 loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
429 loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
430 loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));
431
432 /* This is initialized by the unrolling code, so we go ahead
433 and clear them just in case we are not performing loop
434 unrolling. */
435 loop_unroll_factor = (int *) alloca (max_loop_num *sizeof (int));
436 bzero ((char *) loop_unroll_factor, max_loop_num * sizeof (int));
437
438 #ifdef HAIFA
439 /* Allocate for BCT optimization */
440 loop_can_insert_bct = (int *) alloca (max_loop_num * sizeof (int));
441 bzero ((char *) loop_can_insert_bct, max_loop_num * sizeof (int));
442
443 loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
444 bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));
445
446 loop_increment = (rtx *) alloca (max_loop_num * sizeof (rtx));
447 loop_comparison_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
448 loop_start_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
449 bzero ((char *) loop_increment, max_loop_num * sizeof (rtx));
450 bzero ((char *) loop_comparison_value, max_loop_num * sizeof (rtx));
451 bzero ((char *) loop_start_value, max_loop_num * sizeof (rtx));
452
453 loop_comparison_code
454 = (enum rtx_code *) alloca (max_loop_num * sizeof (enum rtx_code));
455 bzero ((char *) loop_comparison_code, max_loop_num * sizeof (enum rtx_code));
456 #endif /* HAIFA */
457
458 /* Find and process each loop.
459 First, find them, and record them in order of their beginnings. */
460 find_and_verify_loops (f);
461
462 /* Now find all register lifetimes. This must be done after
463 find_and_verify_loops, because it might reorder the insns in the
464 function. */
465 reg_scan (f, max_reg_num (), 1);
466
467 /* See if we went too far. */
468 if (get_max_uid () > max_uid_for_loop)
469 abort ();
470
471 /* Compute the mapping from uids to luids.
472 LUIDs are numbers assigned to insns, like uids,
473 except that luids increase monotonically through the code.
474 Don't assign luids to line-number NOTEs, so that the distance in luids
475 between two insns is not affected by -g. */
476
477 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
478 {
479 last_insn = insn;
480 if (GET_CODE (insn) != NOTE
481 || NOTE_LINE_NUMBER (insn) <= 0)
482 uid_luid[INSN_UID (insn)] = ++i;
483 else
484 /* Give a line number note the same luid as preceding insn. */
485 uid_luid[INSN_UID (insn)] = i;
486 }
487
488 max_luid = i + 1;
489
490 /* Don't leave gaps in uid_luid for insns that have been
491 deleted. It is possible that the first or last insn
492 using some register has been deleted by cross-jumping.
493 Make sure that uid_luid for that former insn's uid
494 points to the general area where that insn used to be. */
495 for (i = 0; i < max_uid_for_loop; i++)
496 {
497 uid_luid[0] = uid_luid[i];
498 if (uid_luid[0] != 0)
499 break;
500 }
501 for (i = 0; i < max_uid_for_loop; i++)
502 if (uid_luid[i] == 0)
503 uid_luid[i] = uid_luid[i - 1];
504
505 /* Create a mapping from loops to BLOCK tree nodes. */
506 if (unroll_p && write_symbols != NO_DEBUG)
507 find_loop_tree_blocks ();
508
509 /* Determine if the function has indirect jump. On some systems
510 this prevents low overhead loop instructions from being used. */
511 indirect_jump_in_function = indirect_jump_in_function_p (f);
512
513 /* Now scan the loops, last ones first, since this means inner ones are done
514 before outer ones. */
515 for (i = max_loop_num-1; i >= 0; i--)
516 if (! loop_invalid[i] && loop_number_loop_ends[i])
517 scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
518 max_reg_num (), unroll_p);
519
520 /* If debugging and unrolling loops, we must replicate the tree nodes
521 corresponding to the blocks inside the loop, so that the original one
522 to one mapping will remain. */
523 if (unroll_p && write_symbols != NO_DEBUG)
524 unroll_block_trees ();
525 }
526 \f
527 /* Optimize one loop whose start is LOOP_START and end is END.
528 LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
529 NOTE_INSN_LOOP_END. */
530
531 /* ??? Could also move memory writes out of loops if the destination address
532 is invariant, the source is invariant, the memory write is not volatile,
533 and if we can prove that no read inside the loop can read this address
534 before the write occurs. If there is a read of this address after the
535 write, then we can also mark the memory read as invariant. */
536
537 static void
538 scan_loop (loop_start, end, nregs, unroll_p)
539 rtx loop_start, end;
540 int nregs;
541 int unroll_p;
542 {
543 register int i;
544 register rtx p;
545 /* 1 if we are scanning insns that could be executed zero times. */
546 int maybe_never = 0;
547 /* 1 if we are scanning insns that might never be executed
548 due to a subroutine call which might exit before they are reached. */
549 int call_passed = 0;
550 /* For a rotated loop that is entered near the bottom,
551 this is the label at the top. Otherwise it is zero. */
552 rtx loop_top = 0;
553 /* Jump insn that enters the loop, or 0 if control drops in. */
554 rtx loop_entry_jump = 0;
555 /* Place in the loop where control enters. */
556 rtx scan_start;
557 /* Number of insns in the loop. */
558 int insn_count;
559 int in_libcall = 0;
560 int tem;
561 rtx temp;
562 /* The SET from an insn, if it is the only SET in the insn. */
563 rtx set, set1;
564 /* Chain describing insns movable in current loop. */
565 struct movable *movables = 0;
566 /* Last element in `movables' -- so we can add elements at the end. */
567 struct movable *last_movable = 0;
568 /* Ratio of extra register life span we can justify
569 for saving an instruction. More if loop doesn't call subroutines
570 since in that case saving an insn makes more difference
571 and more registers are available. */
572 int threshold;
573 /* If we have calls, contains the insn in which a register was used
574 if it was used exactly once; contains const0_rtx if it was used more
575 than once. */
576 rtx *reg_single_usage = 0;
577 /* Nonzero if we are scanning instructions in a sub-loop. */
578 int loop_depth = 0;
579
580 n_times_set = (int *) alloca (nregs * sizeof (int));
581 n_times_used = (int *) alloca (nregs * sizeof (int));
582 may_not_optimize = (char *) alloca (nregs);
583
584 /* Determine whether this loop starts with a jump down to a test at
585 the end. This will occur for a small number of loops with a test
586 that is too complex to duplicate in front of the loop.
587
588 We search for the first insn or label in the loop, skipping NOTEs.
589 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
590 (because we might have a loop executed only once that contains a
591 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
592 (in case we have a degenerate loop).
593
594 Note that if we mistakenly think that a loop is entered at the top
595 when, in fact, it is entered at the exit test, the only effect will be
596 slightly poorer optimization. Making the opposite error can generate
597 incorrect code. Since very few loops now start with a jump to the
598 exit test, the code here to detect that case is very conservative. */
599
600 for (p = NEXT_INSN (loop_start);
601 p != end
602 && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
603 && (GET_CODE (p) != NOTE
604 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
605 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
606 p = NEXT_INSN (p))
607 ;
608
609 scan_start = p;
610
611 /* Set up variables describing this loop. */
612 prescan_loop (loop_start, end);
613 threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
614
615 /* If loop has a jump before the first label,
616 the true entry is the target of that jump.
617 Start scan from there.
618 But record in LOOP_TOP the place where the end-test jumps
619 back to so we can scan that after the end of the loop. */
620 if (GET_CODE (p) == JUMP_INSN)
621 {
622 loop_entry_jump = p;
623
624 /* Loop entry must be unconditional jump (and not a RETURN) */
625 if (simplejump_p (p)
626 && JUMP_LABEL (p) != 0
627 /* Check to see whether the jump actually
628 jumps out of the loop (meaning it's no loop).
629 This case can happen for things like
630 do {..} while (0). If this label was generated previously
631 by loop, we can't tell anything about it and have to reject
632 the loop. */
633 && INSN_UID (JUMP_LABEL (p)) < max_uid_for_loop
634 && INSN_LUID (JUMP_LABEL (p)) >= INSN_LUID (loop_start)
635 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (end))
636 {
637 loop_top = next_label (scan_start);
638 scan_start = JUMP_LABEL (p);
639 }
640 }
641
642 /* If SCAN_START was an insn created by loop, we don't know its luid
643 as required by loop_reg_used_before_p. So skip such loops. (This
644 test may never be true, but it's best to play it safe.)
645
646 Also, skip loops where we do not start scanning at a label. This
647 test also rejects loops starting with a JUMP_INSN that failed the
648 test above. */
649
650 if (INSN_UID (scan_start) >= max_uid_for_loop
651 || GET_CODE (scan_start) != CODE_LABEL)
652 {
653 if (loop_dump_stream)
654 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
655 INSN_UID (loop_start), INSN_UID (end));
656 return;
657 }
658
659 /* Count number of times each reg is set during this loop.
660 Set may_not_optimize[I] if it is not safe to move out
661 the setting of register I. If this loop has calls, set
662 reg_single_usage[I]. */
663
664 bzero ((char *) n_times_set, nregs * sizeof (int));
665 bzero (may_not_optimize, nregs);
666
667 if (loop_has_call)
668 {
669 reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
670 bzero ((char *) reg_single_usage, nregs * sizeof (rtx));
671 }
672
673 count_loop_regs_set (loop_top ? loop_top : loop_start, end,
674 may_not_optimize, reg_single_usage, &insn_count, nregs);
675
676 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
677 may_not_optimize[i] = 1, n_times_set[i] = 1;
678 bcopy ((char *) n_times_set, (char *) n_times_used, nregs * sizeof (int));
679
680 if (loop_dump_stream)
681 {
682 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
683 INSN_UID (loop_start), INSN_UID (end), insn_count);
684 if (loop_continue)
685 fprintf (loop_dump_stream, "Continue at insn %d.\n",
686 INSN_UID (loop_continue));
687 }
688
689 /* Scan through the loop finding insns that are safe to move.
690 Set n_times_set negative for the reg being set, so that
691 this reg will be considered invariant for subsequent insns.
692 We consider whether subsequent insns use the reg
693 in deciding whether it is worth actually moving.
694
695 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
696 and therefore it is possible that the insns we are scanning
697 would never be executed. At such times, we must make sure
698 that it is safe to execute the insn once instead of zero times.
699 When MAYBE_NEVER is 0, all insns will be executed at least once
700 so that is not a problem. */
701
702 p = scan_start;
703 while (1)
704 {
705 p = NEXT_INSN (p);
706 /* At end of a straight-in loop, we are done.
707 At end of a loop entered at the bottom, scan the top. */
708 if (p == scan_start)
709 break;
710 if (p == end)
711 {
712 if (loop_top != 0)
713 p = loop_top;
714 else
715 break;
716 if (p == scan_start)
717 break;
718 }
719
720 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
721 && find_reg_note (p, REG_LIBCALL, NULL_RTX))
722 in_libcall = 1;
723 else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
724 && find_reg_note (p, REG_RETVAL, NULL_RTX))
725 in_libcall = 0;
726
727 if (GET_CODE (p) == INSN
728 && (set = single_set (p))
729 && GET_CODE (SET_DEST (set)) == REG
730 && ! may_not_optimize[REGNO (SET_DEST (set))])
731 {
732 int tem1 = 0;
733 int tem2 = 0;
734 int move_insn = 0;
735 rtx src = SET_SRC (set);
736 rtx dependencies = 0;
737
738 /* Figure out what to use as a source of this insn. If a REG_EQUIV
739 note is given or if a REG_EQUAL note with a constant operand is
740 specified, use it as the source and mark that we should move
741 this insn by calling emit_move_insn rather that duplicating the
742 insn.
743
744 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
745 is present. */
746 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
747 if (temp)
748 src = XEXP (temp, 0), move_insn = 1;
749 else
750 {
751 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
752 if (temp && CONSTANT_P (XEXP (temp, 0)))
753 src = XEXP (temp, 0), move_insn = 1;
754 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
755 {
756 src = XEXP (temp, 0);
757 /* A libcall block can use regs that don't appear in
758 the equivalent expression. To move the libcall,
759 we must move those regs too. */
760 dependencies = libcall_other_reg (p, src);
761 }
762 }
763
764 /* Don't try to optimize a register that was made
765 by loop-optimization for an inner loop.
766 We don't know its life-span, so we can't compute the benefit. */
767 if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
768 ;
769 /* In order to move a register, we need to have one of three cases:
770 (1) it is used only in the same basic block as the set
771 (2) it is not a user variable and it is not used in the
772 exit test (this can cause the variable to be used
773 before it is set just like a user-variable).
774 (3) the set is guaranteed to be executed once the loop starts,
775 and the reg is not used until after that. */
776 else if (! ((! maybe_never
777 && ! loop_reg_used_before_p (set, p, loop_start,
778 scan_start, end))
779 || (! REG_USERVAR_P (SET_DEST (set))
780 && ! REG_LOOP_TEST_P (SET_DEST (set)))
781 || reg_in_basic_block_p (p, SET_DEST (set))))
782 ;
783 else if ((tem = invariant_p (src))
784 && (dependencies == 0
785 || (tem2 = invariant_p (dependencies)) != 0)
786 && (n_times_set[REGNO (SET_DEST (set))] == 1
787 || (tem1
788 = consec_sets_invariant_p (SET_DEST (set),
789 n_times_set[REGNO (SET_DEST (set))],
790 p)))
791 /* If the insn can cause a trap (such as divide by zero),
792 can't move it unless it's guaranteed to be executed
793 once loop is entered. Even a function call might
794 prevent the trap insn from being reached
795 (since it might exit!) */
796 && ! ((maybe_never || call_passed)
797 && may_trap_p (src)))
798 {
799 register struct movable *m;
800 register int regno = REGNO (SET_DEST (set));
801
802 /* A potential lossage is where we have a case where two insns
803 can be combined as long as they are both in the loop, but
804 we move one of them outside the loop. For large loops,
805 this can lose. The most common case of this is the address
806 of a function being called.
807
808 Therefore, if this register is marked as being used exactly
809 once if we are in a loop with calls (a "large loop"), see if
810 we can replace the usage of this register with the source
811 of this SET. If we can, delete this insn.
812
813 Don't do this if P has a REG_RETVAL note or if we have
814 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
815
816 if (reg_single_usage && reg_single_usage[regno] != 0
817 && reg_single_usage[regno] != const0_rtx
818 && REGNO_FIRST_UID (regno) == INSN_UID (p)
819 && (REGNO_LAST_UID (regno)
820 == INSN_UID (reg_single_usage[regno]))
821 && n_times_set[REGNO (SET_DEST (set))] == 1
822 && ! side_effects_p (SET_SRC (set))
823 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
824 && (! SMALL_REGISTER_CLASSES
825 || (! (GET_CODE (SET_SRC (set)) == REG
826 && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
827 /* This test is not redundant; SET_SRC (set) might be
828 a call-clobbered register and the life of REGNO
829 might span a call. */
830 && ! modified_between_p (SET_SRC (set), p,
831 reg_single_usage[regno])
832 && no_labels_between_p (p, reg_single_usage[regno])
833 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
834 reg_single_usage[regno]))
835 {
836 /* Replace any usage in a REG_EQUAL note. Must copy the
837 new source, so that we don't get rtx sharing between the
838 SET_SOURCE and REG_NOTES of insn p. */
839 REG_NOTES (reg_single_usage[regno])
840 = replace_rtx (REG_NOTES (reg_single_usage[regno]),
841 SET_DEST (set), copy_rtx (SET_SRC (set)));
842
843 PUT_CODE (p, NOTE);
844 NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
845 NOTE_SOURCE_FILE (p) = 0;
846 n_times_set[regno] = 0;
847 continue;
848 }
849
850 m = (struct movable *) alloca (sizeof (struct movable));
851 m->next = 0;
852 m->insn = p;
853 m->set_src = src;
854 m->dependencies = dependencies;
855 m->set_dest = SET_DEST (set);
856 m->force = 0;
857 m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
858 m->done = 0;
859 m->forces = 0;
860 m->partial = 0;
861 m->move_insn = move_insn;
862 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
863 m->savemode = VOIDmode;
864 m->regno = regno;
865 /* Set M->cond if either invariant_p or consec_sets_invariant_p
866 returned 2 (only conditionally invariant). */
867 m->cond = ((tem | tem1 | tem2) > 1);
868 m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
869 || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
870 m->match = 0;
871 m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
872 - uid_luid[REGNO_FIRST_UID (regno)]);
873 m->savings = n_times_used[regno];
874 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
875 m->savings += libcall_benefit (p);
876 n_times_set[regno] = move_insn ? -2 : -1;
877 /* Add M to the end of the chain MOVABLES. */
878 if (movables == 0)
879 movables = m;
880 else
881 last_movable->next = m;
882 last_movable = m;
883
884 if (m->consec > 0)
885 {
886 /* Skip this insn, not checking REG_LIBCALL notes. */
887 p = next_nonnote_insn (p);
888 /* Skip the consecutive insns, if there are any. */
889 p = skip_consec_insns (p, m->consec);
890 /* Back up to the last insn of the consecutive group. */
891 p = prev_nonnote_insn (p);
892
893 /* We must now reset m->move_insn, m->is_equiv, and possibly
894 m->set_src to correspond to the effects of all the
895 insns. */
896 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
897 if (temp)
898 m->set_src = XEXP (temp, 0), m->move_insn = 1;
899 else
900 {
901 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
902 if (temp && CONSTANT_P (XEXP (temp, 0)))
903 m->set_src = XEXP (temp, 0), m->move_insn = 1;
904 else
905 m->move_insn = 0;
906
907 }
908 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
909 }
910 }
911 /* If this register is always set within a STRICT_LOW_PART
912 or set to zero, then its high bytes are constant.
913 So clear them outside the loop and within the loop
914 just load the low bytes.
915 We must check that the machine has an instruction to do so.
916 Also, if the value loaded into the register
917 depends on the same register, this cannot be done. */
918 else if (SET_SRC (set) == const0_rtx
919 && GET_CODE (NEXT_INSN (p)) == INSN
920 && (set1 = single_set (NEXT_INSN (p)))
921 && GET_CODE (set1) == SET
922 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
923 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
924 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
925 == SET_DEST (set))
926 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
927 {
928 register int regno = REGNO (SET_DEST (set));
929 if (n_times_set[regno] == 2)
930 {
931 register struct movable *m;
932 m = (struct movable *) alloca (sizeof (struct movable));
933 m->next = 0;
934 m->insn = p;
935 m->set_dest = SET_DEST (set);
936 m->dependencies = 0;
937 m->force = 0;
938 m->consec = 0;
939 m->done = 0;
940 m->forces = 0;
941 m->move_insn = 0;
942 m->partial = 1;
943 /* If the insn may not be executed on some cycles,
944 we can't clear the whole reg; clear just high part.
945 Not even if the reg is used only within this loop.
946 Consider this:
947 while (1)
948 while (s != t) {
949 if (foo ()) x = *s;
950 use (x);
951 }
952 Clearing x before the inner loop could clobber a value
953 being saved from the last time around the outer loop.
954 However, if the reg is not used outside this loop
955 and all uses of the register are in the same
956 basic block as the store, there is no problem.
957
958 If this insn was made by loop, we don't know its
959 INSN_LUID and hence must make a conservative
960 assumption. */
961 m->global = (INSN_UID (p) >= max_uid_for_loop
962 || (uid_luid[REGNO_LAST_UID (regno)]
963 > INSN_LUID (end))
964 || (uid_luid[REGNO_FIRST_UID (regno)]
965 < INSN_LUID (p))
966 || (labels_in_range_p
967 (p, uid_luid[REGNO_FIRST_UID (regno)])));
968 if (maybe_never && m->global)
969 m->savemode = GET_MODE (SET_SRC (set1));
970 else
971 m->savemode = VOIDmode;
972 m->regno = regno;
973 m->cond = 0;
974 m->match = 0;
975 m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
976 - uid_luid[REGNO_FIRST_UID (regno)]);
977 m->savings = 1;
978 n_times_set[regno] = -1;
979 /* Add M to the end of the chain MOVABLES. */
980 if (movables == 0)
981 movables = m;
982 else
983 last_movable->next = m;
984 last_movable = m;
985 }
986 }
987 }
988 /* Past a call insn, we get to insns which might not be executed
989 because the call might exit. This matters for insns that trap.
990 Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
991 so they don't count. */
992 else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
993 call_passed = 1;
994 /* Past a label or a jump, we get to insns for which we
995 can't count on whether or how many times they will be
996 executed during each iteration. Therefore, we can
997 only move out sets of trivial variables
998 (those not used after the loop). */
999 /* Similar code appears twice in strength_reduce. */
1000 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
1001 /* If we enter the loop in the middle, and scan around to the
1002 beginning, don't set maybe_never for that. This must be an
1003 unconditional jump, otherwise the code at the top of the
1004 loop might never be executed. Unconditional jumps are
1005 followed a by barrier then loop end. */
1006 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
1007 && NEXT_INSN (NEXT_INSN (p)) == end
1008 && simplejump_p (p)))
1009 maybe_never = 1;
1010 else if (GET_CODE (p) == NOTE)
1011 {
1012 /* At the virtual top of a converted loop, insns are again known to
1013 be executed: logically, the loop begins here even though the exit
1014 code has been duplicated. */
1015 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
1016 maybe_never = call_passed = 0;
1017 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
1018 loop_depth++;
1019 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
1020 loop_depth--;
1021 }
1022 }
1023
1024 /* If one movable subsumes another, ignore that other. */
1025
1026 ignore_some_movables (movables);
1027
1028 /* For each movable insn, see if the reg that it loads
1029 leads when it dies right into another conditionally movable insn.
1030 If so, record that the second insn "forces" the first one,
1031 since the second can be moved only if the first is. */
1032
1033 force_movables (movables);
1034
1035 /* See if there are multiple movable insns that load the same value.
1036 If there are, make all but the first point at the first one
1037 through the `match' field, and add the priorities of them
1038 all together as the priority of the first. */
1039
1040 combine_movables (movables, nregs);
1041
1042 /* Now consider each movable insn to decide whether it is worth moving.
1043 Store 0 in n_times_set for each reg that is moved. */
1044
1045 move_movables (movables, threshold,
1046 insn_count, loop_start, end, nregs);
1047
1048 /* Now candidates that still are negative are those not moved.
1049 Change n_times_set to indicate that those are not actually invariant. */
1050 for (i = 0; i < nregs; i++)
1051 if (n_times_set[i] < 0)
1052 n_times_set[i] = n_times_used[i];
1053
1054 if (flag_strength_reduce)
1055 strength_reduce (scan_start, end, loop_top,
1056 insn_count, loop_start, end, unroll_p);
1057 }
1058 \f
1059 /* Add elements to *OUTPUT to record all the pseudo-regs
1060 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1061
1062 void
1063 record_excess_regs (in_this, not_in_this, output)
1064 rtx in_this, not_in_this;
1065 rtx *output;
1066 {
1067 enum rtx_code code;
1068 char *fmt;
1069 int i;
1070
1071 code = GET_CODE (in_this);
1072
1073 switch (code)
1074 {
1075 case PC:
1076 case CC0:
1077 case CONST_INT:
1078 case CONST_DOUBLE:
1079 case CONST:
1080 case SYMBOL_REF:
1081 case LABEL_REF:
1082 return;
1083
1084 case REG:
1085 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1086 && ! reg_mentioned_p (in_this, not_in_this))
1087 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1088 return;
1089
1090 default:
1091 break;
1092 }
1093
1094 fmt = GET_RTX_FORMAT (code);
1095 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1096 {
1097 int j;
1098
1099 switch (fmt[i])
1100 {
1101 case 'E':
1102 for (j = 0; j < XVECLEN (in_this, i); j++)
1103 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1104 break;
1105
1106 case 'e':
1107 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1108 break;
1109 }
1110 }
1111 }
1112 \f
1113 /* Check what regs are referred to in the libcall block ending with INSN,
1114 aside from those mentioned in the equivalent value.
1115 If there are none, return 0.
1116 If there are one or more, return an EXPR_LIST containing all of them. */
1117
1118 static rtx
1119 libcall_other_reg (insn, equiv)
1120 rtx insn, equiv;
1121 {
1122 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1123 rtx p = XEXP (note, 0);
1124 rtx output = 0;
1125
1126 /* First, find all the regs used in the libcall block
1127 that are not mentioned as inputs to the result. */
1128
1129 while (p != insn)
1130 {
1131 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1132 || GET_CODE (p) == CALL_INSN)
1133 record_excess_regs (PATTERN (p), equiv, &output);
1134 p = NEXT_INSN (p);
1135 }
1136
1137 return output;
1138 }
1139 \f
1140 /* Return 1 if all uses of REG
1141 are between INSN and the end of the basic block. */
1142
1143 static int
1144 reg_in_basic_block_p (insn, reg)
1145 rtx insn, reg;
1146 {
1147 int regno = REGNO (reg);
1148 rtx p;
1149
1150 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1151 return 0;
1152
1153 /* Search this basic block for the already recorded last use of the reg. */
1154 for (p = insn; p; p = NEXT_INSN (p))
1155 {
1156 switch (GET_CODE (p))
1157 {
1158 case NOTE:
1159 break;
1160
1161 case INSN:
1162 case CALL_INSN:
1163 /* Ordinary insn: if this is the last use, we win. */
1164 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1165 return 1;
1166 break;
1167
1168 case JUMP_INSN:
1169 /* Jump insn: if this is the last use, we win. */
1170 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1171 return 1;
1172 /* Otherwise, it's the end of the basic block, so we lose. */
1173 return 0;
1174
1175 case CODE_LABEL:
1176 case BARRIER:
1177 /* It's the end of the basic block, so we lose. */
1178 return 0;
1179
1180 default:
1181 break;
1182 }
1183 }
1184
1185 /* The "last use" doesn't follow the "first use"?? */
1186 abort ();
1187 }
1188 \f
1189 /* Compute the benefit of eliminating the insns in the block whose
1190 last insn is LAST. This may be a group of insns used to compute a
1191 value directly or can contain a library call. */
1192
1193 static int
1194 libcall_benefit (last)
1195 rtx last;
1196 {
1197 rtx insn;
1198 int benefit = 0;
1199
1200 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1201 insn != last; insn = NEXT_INSN (insn))
1202 {
1203 if (GET_CODE (insn) == CALL_INSN)
1204 benefit += 10; /* Assume at least this many insns in a library
1205 routine. */
1206 else if (GET_CODE (insn) == INSN
1207 && GET_CODE (PATTERN (insn)) != USE
1208 && GET_CODE (PATTERN (insn)) != CLOBBER)
1209 benefit++;
1210 }
1211
1212 return benefit;
1213 }
1214 \f
1215 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1216
1217 static rtx
1218 skip_consec_insns (insn, count)
1219 rtx insn;
1220 int count;
1221 {
1222 for (; count > 0; count--)
1223 {
1224 rtx temp;
1225
1226 /* If first insn of libcall sequence, skip to end. */
1227 /* Do this at start of loop, since INSN is guaranteed to
1228 be an insn here. */
1229 if (GET_CODE (insn) != NOTE
1230 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1231 insn = XEXP (temp, 0);
1232
1233 do insn = NEXT_INSN (insn);
1234 while (GET_CODE (insn) == NOTE);
1235 }
1236
1237 return insn;
1238 }
1239
1240 /* Ignore any movable whose insn falls within a libcall
1241 which is part of another movable.
1242 We make use of the fact that the movable for the libcall value
1243 was made later and so appears later on the chain. */
1244
1245 static void
1246 ignore_some_movables (movables)
1247 struct movable *movables;
1248 {
1249 register struct movable *m, *m1;
1250
1251 for (m = movables; m; m = m->next)
1252 {
1253 /* Is this a movable for the value of a libcall? */
1254 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1255 if (note)
1256 {
1257 rtx insn;
1258 /* Check for earlier movables inside that range,
1259 and mark them invalid. We cannot use LUIDs here because
1260 insns created by loop.c for prior loops don't have LUIDs.
1261 Rather than reject all such insns from movables, we just
1262 explicitly check each insn in the libcall (since invariant
1263 libcalls aren't that common). */
1264 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1265 for (m1 = movables; m1 != m; m1 = m1->next)
1266 if (m1->insn == insn)
1267 m1->done = 1;
1268 }
1269 }
1270 }
1271
1272 /* For each movable insn, see if the reg that it loads
1273 leads when it dies right into another conditionally movable insn.
1274 If so, record that the second insn "forces" the first one,
1275 since the second can be moved only if the first is. */
1276
1277 static void
1278 force_movables (movables)
1279 struct movable *movables;
1280 {
1281 register struct movable *m, *m1;
1282 for (m1 = movables; m1; m1 = m1->next)
1283 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1284 if (!m1->partial && !m1->done)
1285 {
1286 int regno = m1->regno;
1287 for (m = m1->next; m; m = m->next)
1288 /* ??? Could this be a bug? What if CSE caused the
1289 register of M1 to be used after this insn?
1290 Since CSE does not update regno_last_uid,
1291 this insn M->insn might not be where it dies.
1292 But very likely this doesn't matter; what matters is
1293 that M's reg is computed from M1's reg. */
1294 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1295 && !m->done)
1296 break;
1297 if (m != 0 && m->set_src == m1->set_dest
1298 /* If m->consec, m->set_src isn't valid. */
1299 && m->consec == 0)
1300 m = 0;
1301
1302 /* Increase the priority of the moving the first insn
1303 since it permits the second to be moved as well. */
1304 if (m != 0)
1305 {
1306 m->forces = m1;
1307 m1->lifetime += m->lifetime;
1308 m1->savings += m1->savings;
1309 }
1310 }
1311 }
1312 \f
1313 /* Find invariant expressions that are equal and can be combined into
1314 one register. */
1315
1316 static void
1317 combine_movables (movables, nregs)
1318 struct movable *movables;
1319 int nregs;
1320 {
1321 register struct movable *m;
1322 char *matched_regs = (char *) alloca (nregs);
1323 enum machine_mode mode;
1324
1325 /* Regs that are set more than once are not allowed to match
1326 or be matched. I'm no longer sure why not. */
1327 /* Perhaps testing m->consec_sets would be more appropriate here? */
1328
1329 for (m = movables; m; m = m->next)
1330 if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
1331 {
1332 register struct movable *m1;
1333 int regno = m->regno;
1334
1335 bzero (matched_regs, nregs);
1336 matched_regs[regno] = 1;
1337
1338 /* We want later insns to match the first one. Don't make the first
1339 one match any later ones. So start this loop at m->next. */
1340 for (m1 = m->next; m1; m1 = m1->next)
1341 if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
1342 /* A reg used outside the loop mustn't be eliminated. */
1343 && !m1->global
1344 /* A reg used for zero-extending mustn't be eliminated. */
1345 && !m1->partial
1346 && (matched_regs[m1->regno]
1347 ||
1348 (
1349 /* Can combine regs with different modes loaded from the
1350 same constant only if the modes are the same or
1351 if both are integer modes with M wider or the same
1352 width as M1. The check for integer is redundant, but
1353 safe, since the only case of differing destination
1354 modes with equal sources is when both sources are
1355 VOIDmode, i.e., CONST_INT. */
1356 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1357 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1358 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1359 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1360 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1361 /* See if the source of M1 says it matches M. */
1362 && ((GET_CODE (m1->set_src) == REG
1363 && matched_regs[REGNO (m1->set_src)])
1364 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1365 movables))))
1366 && ((m->dependencies == m1->dependencies)
1367 || rtx_equal_p (m->dependencies, m1->dependencies)))
1368 {
1369 m->lifetime += m1->lifetime;
1370 m->savings += m1->savings;
1371 m1->done = 1;
1372 m1->match = m;
1373 matched_regs[m1->regno] = 1;
1374 }
1375 }
1376
1377 /* Now combine the regs used for zero-extension.
1378 This can be done for those not marked `global'
1379 provided their lives don't overlap. */
1380
1381 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1382 mode = GET_MODE_WIDER_MODE (mode))
1383 {
1384 register struct movable *m0 = 0;
1385
1386 /* Combine all the registers for extension from mode MODE.
1387 Don't combine any that are used outside this loop. */
1388 for (m = movables; m; m = m->next)
1389 if (m->partial && ! m->global
1390 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1391 {
1392 register struct movable *m1;
1393 int first = uid_luid[REGNO_FIRST_UID (m->regno)];
1394 int last = uid_luid[REGNO_LAST_UID (m->regno)];
1395
1396 if (m0 == 0)
1397 {
1398 /* First one: don't check for overlap, just record it. */
1399 m0 = m;
1400 continue;
1401 }
1402
1403 /* Make sure they extend to the same mode.
1404 (Almost always true.) */
1405 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1406 continue;
1407
1408 /* We already have one: check for overlap with those
1409 already combined together. */
1410 for (m1 = movables; m1 != m; m1 = m1->next)
1411 if (m1 == m0 || (m1->partial && m1->match == m0))
1412 if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
1413 || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
1414 goto overlap;
1415
1416 /* No overlap: we can combine this with the others. */
1417 m0->lifetime += m->lifetime;
1418 m0->savings += m->savings;
1419 m->done = 1;
1420 m->match = m0;
1421
1422 overlap: ;
1423 }
1424 }
1425 }
1426 \f
1427 /* Return 1 if regs X and Y will become the same if moved. */
1428
1429 static int
1430 regs_match_p (x, y, movables)
1431 rtx x, y;
1432 struct movable *movables;
1433 {
1434 int xn = REGNO (x);
1435 int yn = REGNO (y);
1436 struct movable *mx, *my;
1437
1438 for (mx = movables; mx; mx = mx->next)
1439 if (mx->regno == xn)
1440 break;
1441
1442 for (my = movables; my; my = my->next)
1443 if (my->regno == yn)
1444 break;
1445
1446 return (mx && my
1447 && ((mx->match == my->match && mx->match != 0)
1448 || mx->match == my
1449 || mx == my->match));
1450 }
1451
1452 /* Return 1 if X and Y are identical-looking rtx's.
1453 This is the Lisp function EQUAL for rtx arguments.
1454
1455 If two registers are matching movables or a movable register and an
1456 equivalent constant, consider them equal. */
1457
1458 static int
1459 rtx_equal_for_loop_p (x, y, movables)
1460 rtx x, y;
1461 struct movable *movables;
1462 {
1463 register int i;
1464 register int j;
1465 register struct movable *m;
1466 register enum rtx_code code;
1467 register char *fmt;
1468
1469 if (x == y)
1470 return 1;
1471 if (x == 0 || y == 0)
1472 return 0;
1473
1474 code = GET_CODE (x);
1475
1476 /* If we have a register and a constant, they may sometimes be
1477 equal. */
1478 if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
1479 && CONSTANT_P (y))
1480 for (m = movables; m; m = m->next)
1481 if (m->move_insn && m->regno == REGNO (x)
1482 && rtx_equal_p (m->set_src, y))
1483 return 1;
1484
1485 else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
1486 && CONSTANT_P (x))
1487 for (m = movables; m; m = m->next)
1488 if (m->move_insn && m->regno == REGNO (y)
1489 && rtx_equal_p (m->set_src, x))
1490 return 1;
1491
1492 /* Otherwise, rtx's of different codes cannot be equal. */
1493 if (code != GET_CODE (y))
1494 return 0;
1495
1496 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1497 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1498
1499 if (GET_MODE (x) != GET_MODE (y))
1500 return 0;
1501
1502 /* These three types of rtx's can be compared nonrecursively. */
1503 if (code == REG)
1504 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1505
1506 if (code == LABEL_REF)
1507 return XEXP (x, 0) == XEXP (y, 0);
1508 if (code == SYMBOL_REF)
1509 return XSTR (x, 0) == XSTR (y, 0);
1510
1511 /* Compare the elements. If any pair of corresponding elements
1512 fail to match, return 0 for the whole things. */
1513
1514 fmt = GET_RTX_FORMAT (code);
1515 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1516 {
1517 switch (fmt[i])
1518 {
1519 case 'w':
1520 if (XWINT (x, i) != XWINT (y, i))
1521 return 0;
1522 break;
1523
1524 case 'i':
1525 if (XINT (x, i) != XINT (y, i))
1526 return 0;
1527 break;
1528
1529 case 'E':
1530 /* Two vectors must have the same length. */
1531 if (XVECLEN (x, i) != XVECLEN (y, i))
1532 return 0;
1533
1534 /* And the corresponding elements must match. */
1535 for (j = 0; j < XVECLEN (x, i); j++)
1536 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables) == 0)
1537 return 0;
1538 break;
1539
1540 case 'e':
1541 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
1542 return 0;
1543 break;
1544
1545 case 's':
1546 if (strcmp (XSTR (x, i), XSTR (y, i)))
1547 return 0;
1548 break;
1549
1550 case 'u':
1551 /* These are just backpointers, so they don't matter. */
1552 break;
1553
1554 case '0':
1555 break;
1556
1557 /* It is believed that rtx's at this level will never
1558 contain anything but integers and other rtx's,
1559 except for within LABEL_REFs and SYMBOL_REFs. */
1560 default:
1561 abort ();
1562 }
1563 }
1564 return 1;
1565 }
1566 \f
1567 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1568 insns in INSNS which use thet reference. */
1569
1570 static void
1571 add_label_notes (x, insns)
1572 rtx x;
1573 rtx insns;
1574 {
1575 enum rtx_code code = GET_CODE (x);
1576 int i, j;
1577 char *fmt;
1578 rtx insn;
1579
1580 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1581 {
1582 rtx next = next_real_insn (XEXP (x, 0));
1583
1584 /* Don't record labels that refer to dispatch tables.
1585 This is not necessary, since the tablejump references the same label.
1586 And if we did record them, flow.c would make worse code. */
1587 if (next == 0
1588 || ! (GET_CODE (next) == JUMP_INSN
1589 && (GET_CODE (PATTERN (next)) == ADDR_VEC
1590 || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)))
1591 {
1592 for (insn = insns; insn; insn = NEXT_INSN (insn))
1593 if (reg_mentioned_p (XEXP (x, 0), insn))
1594 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
1595 REG_NOTES (insn));
1596 }
1597 return;
1598 }
1599
1600 fmt = GET_RTX_FORMAT (code);
1601 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1602 {
1603 if (fmt[i] == 'e')
1604 add_label_notes (XEXP (x, i), insns);
1605 else if (fmt[i] == 'E')
1606 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1607 add_label_notes (XVECEXP (x, i, j), insns);
1608 }
1609 }
1610 \f
1611 /* Scan MOVABLES, and move the insns that deserve to be moved.
1612 If two matching movables are combined, replace one reg with the
1613 other throughout. */
1614
1615 static void
1616 move_movables (movables, threshold, insn_count, loop_start, end, nregs)
1617 struct movable *movables;
1618 int threshold;
1619 int insn_count;
1620 rtx loop_start;
1621 rtx end;
1622 int nregs;
1623 {
1624 rtx new_start = 0;
1625 register struct movable *m;
1626 register rtx p;
1627 /* Map of pseudo-register replacements to handle combining
1628 when we move several insns that load the same value
1629 into different pseudo-registers. */
1630 rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
1631 char *already_moved = (char *) alloca (nregs);
1632
1633 bzero (already_moved, nregs);
1634 bzero ((char *) reg_map, nregs * sizeof (rtx));
1635
1636 num_movables = 0;
1637
1638 for (m = movables; m; m = m->next)
1639 {
1640 /* Describe this movable insn. */
1641
1642 if (loop_dump_stream)
1643 {
1644 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1645 INSN_UID (m->insn), m->regno, m->lifetime);
1646 if (m->consec > 0)
1647 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1648 if (m->cond)
1649 fprintf (loop_dump_stream, "cond ");
1650 if (m->force)
1651 fprintf (loop_dump_stream, "force ");
1652 if (m->global)
1653 fprintf (loop_dump_stream, "global ");
1654 if (m->done)
1655 fprintf (loop_dump_stream, "done ");
1656 if (m->move_insn)
1657 fprintf (loop_dump_stream, "move-insn ");
1658 if (m->match)
1659 fprintf (loop_dump_stream, "matches %d ",
1660 INSN_UID (m->match->insn));
1661 if (m->forces)
1662 fprintf (loop_dump_stream, "forces %d ",
1663 INSN_UID (m->forces->insn));
1664 }
1665
1666 /* Count movables. Value used in heuristics in strength_reduce. */
1667 num_movables++;
1668
1669 /* Ignore the insn if it's already done (it matched something else).
1670 Otherwise, see if it is now safe to move. */
1671
1672 if (!m->done
1673 && (! m->cond
1674 || (1 == invariant_p (m->set_src)
1675 && (m->dependencies == 0
1676 || 1 == invariant_p (m->dependencies))
1677 && (m->consec == 0
1678 || 1 == consec_sets_invariant_p (m->set_dest,
1679 m->consec + 1,
1680 m->insn))))
1681 && (! m->forces || m->forces->done))
1682 {
1683 register int regno;
1684 register rtx p;
1685 int savings = m->savings;
1686
1687 /* We have an insn that is safe to move.
1688 Compute its desirability. */
1689
1690 p = m->insn;
1691 regno = m->regno;
1692
1693 if (loop_dump_stream)
1694 fprintf (loop_dump_stream, "savings %d ", savings);
1695
1696 if (moved_once[regno])
1697 {
1698 insn_count *= 2;
1699
1700 if (loop_dump_stream)
1701 fprintf (loop_dump_stream, "halved since already moved ");
1702 }
1703
1704 /* An insn MUST be moved if we already moved something else
1705 which is safe only if this one is moved too: that is,
1706 if already_moved[REGNO] is nonzero. */
1707
1708 /* An insn is desirable to move if the new lifetime of the
1709 register is no more than THRESHOLD times the old lifetime.
1710 If it's not desirable, it means the loop is so big
1711 that moving won't speed things up much,
1712 and it is liable to make register usage worse. */
1713
1714 /* It is also desirable to move if it can be moved at no
1715 extra cost because something else was already moved. */
1716
1717 if (already_moved[regno]
1718 || flag_move_all_movables
1719 || (threshold * savings * m->lifetime) >= insn_count
1720 || (m->forces && m->forces->done
1721 && n_times_used[m->forces->regno] == 1))
1722 {
1723 int count;
1724 register struct movable *m1;
1725 rtx first;
1726
1727 /* Now move the insns that set the reg. */
1728
1729 if (m->partial && m->match)
1730 {
1731 rtx newpat, i1;
1732 rtx r1, r2;
1733 /* Find the end of this chain of matching regs.
1734 Thus, we load each reg in the chain from that one reg.
1735 And that reg is loaded with 0 directly,
1736 since it has ->match == 0. */
1737 for (m1 = m; m1->match; m1 = m1->match);
1738 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1739 SET_DEST (PATTERN (m1->insn)));
1740 i1 = emit_insn_before (newpat, loop_start);
1741
1742 /* Mark the moved, invariant reg as being allowed to
1743 share a hard reg with the other matching invariant. */
1744 REG_NOTES (i1) = REG_NOTES (m->insn);
1745 r1 = SET_DEST (PATTERN (m->insn));
1746 r2 = SET_DEST (PATTERN (m1->insn));
1747 regs_may_share
1748 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1749 gen_rtx_EXPR_LIST (VOIDmode, r2,
1750 regs_may_share));
1751 delete_insn (m->insn);
1752
1753 if (new_start == 0)
1754 new_start = i1;
1755
1756 if (loop_dump_stream)
1757 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1758 }
1759 /* If we are to re-generate the item being moved with a
1760 new move insn, first delete what we have and then emit
1761 the move insn before the loop. */
1762 else if (m->move_insn)
1763 {
1764 rtx i1, temp;
1765
1766 for (count = m->consec; count >= 0; count--)
1767 {
1768 /* If this is the first insn of a library call sequence,
1769 skip to the end. */
1770 if (GET_CODE (p) != NOTE
1771 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1772 p = XEXP (temp, 0);
1773
1774 /* If this is the last insn of a libcall sequence, then
1775 delete every insn in the sequence except the last.
1776 The last insn is handled in the normal manner. */
1777 if (GET_CODE (p) != NOTE
1778 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1779 {
1780 temp = XEXP (temp, 0);
1781 while (temp != p)
1782 temp = delete_insn (temp);
1783 }
1784
1785 p = delete_insn (p);
1786 while (p && GET_CODE (p) == NOTE)
1787 p = NEXT_INSN (p);
1788 }
1789
1790 start_sequence ();
1791 emit_move_insn (m->set_dest, m->set_src);
1792 temp = get_insns ();
1793 end_sequence ();
1794
1795 add_label_notes (m->set_src, temp);
1796
1797 i1 = emit_insns_before (temp, loop_start);
1798 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1799 REG_NOTES (i1)
1800 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1801 m->set_src, REG_NOTES (i1));
1802
1803 if (loop_dump_stream)
1804 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1805
1806 /* The more regs we move, the less we like moving them. */
1807 threshold -= 3;
1808 }
1809 else
1810 {
1811 for (count = m->consec; count >= 0; count--)
1812 {
1813 rtx i1, temp;
1814
1815 /* If first insn of libcall sequence, skip to end. */
1816 /* Do this at start of loop, since p is guaranteed to
1817 be an insn here. */
1818 if (GET_CODE (p) != NOTE
1819 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1820 p = XEXP (temp, 0);
1821
1822 /* If last insn of libcall sequence, move all
1823 insns except the last before the loop. The last
1824 insn is handled in the normal manner. */
1825 if (GET_CODE (p) != NOTE
1826 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1827 {
1828 rtx fn_address = 0;
1829 rtx fn_reg = 0;
1830 rtx fn_address_insn = 0;
1831
1832 first = 0;
1833 for (temp = XEXP (temp, 0); temp != p;
1834 temp = NEXT_INSN (temp))
1835 {
1836 rtx body;
1837 rtx n;
1838 rtx next;
1839
1840 if (GET_CODE (temp) == NOTE)
1841 continue;
1842
1843 body = PATTERN (temp);
1844
1845 /* Find the next insn after TEMP,
1846 not counting USE or NOTE insns. */
1847 for (next = NEXT_INSN (temp); next != p;
1848 next = NEXT_INSN (next))
1849 if (! (GET_CODE (next) == INSN
1850 && GET_CODE (PATTERN (next)) == USE)
1851 && GET_CODE (next) != NOTE)
1852 break;
1853
1854 /* If that is the call, this may be the insn
1855 that loads the function address.
1856
1857 Extract the function address from the insn
1858 that loads it into a register.
1859 If this insn was cse'd, we get incorrect code.
1860
1861 So emit a new move insn that copies the
1862 function address into the register that the
1863 call insn will use. flow.c will delete any
1864 redundant stores that we have created. */
1865 if (GET_CODE (next) == CALL_INSN
1866 && GET_CODE (body) == SET
1867 && GET_CODE (SET_DEST (body)) == REG
1868 && (n = find_reg_note (temp, REG_EQUAL,
1869 NULL_RTX)))
1870 {
1871 fn_reg = SET_SRC (body);
1872 if (GET_CODE (fn_reg) != REG)
1873 fn_reg = SET_DEST (body);
1874 fn_address = XEXP (n, 0);
1875 fn_address_insn = temp;
1876 }
1877 /* We have the call insn.
1878 If it uses the register we suspect it might,
1879 load it with the correct address directly. */
1880 if (GET_CODE (temp) == CALL_INSN
1881 && fn_address != 0
1882 && reg_referenced_p (fn_reg, body))
1883 emit_insn_after (gen_move_insn (fn_reg,
1884 fn_address),
1885 fn_address_insn);
1886
1887 if (GET_CODE (temp) == CALL_INSN)
1888 {
1889 i1 = emit_call_insn_before (body, loop_start);
1890 /* Because the USAGE information potentially
1891 contains objects other than hard registers
1892 we need to copy it. */
1893 if (CALL_INSN_FUNCTION_USAGE (temp))
1894 CALL_INSN_FUNCTION_USAGE (i1)
1895 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
1896 }
1897 else
1898 i1 = emit_insn_before (body, loop_start);
1899 if (first == 0)
1900 first = i1;
1901 if (temp == fn_address_insn)
1902 fn_address_insn = i1;
1903 REG_NOTES (i1) = REG_NOTES (temp);
1904 delete_insn (temp);
1905 }
1906 }
1907 if (m->savemode != VOIDmode)
1908 {
1909 /* P sets REG to zero; but we should clear only
1910 the bits that are not covered by the mode
1911 m->savemode. */
1912 rtx reg = m->set_dest;
1913 rtx sequence;
1914 rtx tem;
1915
1916 start_sequence ();
1917 tem = expand_binop
1918 (GET_MODE (reg), and_optab, reg,
1919 GEN_INT ((((HOST_WIDE_INT) 1
1920 << GET_MODE_BITSIZE (m->savemode)))
1921 - 1),
1922 reg, 1, OPTAB_LIB_WIDEN);
1923 if (tem == 0)
1924 abort ();
1925 if (tem != reg)
1926 emit_move_insn (reg, tem);
1927 sequence = gen_sequence ();
1928 end_sequence ();
1929 i1 = emit_insn_before (sequence, loop_start);
1930 }
1931 else if (GET_CODE (p) == CALL_INSN)
1932 {
1933 i1 = emit_call_insn_before (PATTERN (p), loop_start);
1934 /* Because the USAGE information potentially
1935 contains objects other than hard registers
1936 we need to copy it. */
1937 if (CALL_INSN_FUNCTION_USAGE (p))
1938 CALL_INSN_FUNCTION_USAGE (i1)
1939 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
1940 }
1941 else
1942 i1 = emit_insn_before (PATTERN (p), loop_start);
1943
1944 REG_NOTES (i1) = REG_NOTES (p);
1945
1946 /* If there is a REG_EQUAL note present whose value is
1947 not loop invariant, then delete it, since it may
1948 cause problems with later optimization passes.
1949 It is possible for cse to create such notes
1950 like this as a result of record_jump_cond. */
1951
1952 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
1953 && ! invariant_p (XEXP (temp, 0)))
1954 remove_note (i1, temp);
1955
1956 if (new_start == 0)
1957 new_start = i1;
1958
1959 if (loop_dump_stream)
1960 fprintf (loop_dump_stream, " moved to %d",
1961 INSN_UID (i1));
1962
1963 #if 0
1964 /* This isn't needed because REG_NOTES is copied
1965 below and is wrong since P might be a PARALLEL. */
1966 if (REG_NOTES (i1) == 0
1967 && ! m->partial /* But not if it's a zero-extend clr. */
1968 && ! m->global /* and not if used outside the loop
1969 (since it might get set outside). */
1970 && CONSTANT_P (SET_SRC (PATTERN (p))))
1971 REG_NOTES (i1)
1972 = gen_rtx_EXPR_LIST (REG_EQUAL,
1973 SET_SRC (PATTERN (p)),
1974 REG_NOTES (i1));
1975 #endif
1976
1977 /* If library call, now fix the REG_NOTES that contain
1978 insn pointers, namely REG_LIBCALL on FIRST
1979 and REG_RETVAL on I1. */
1980 if (temp = find_reg_note (i1, REG_RETVAL, NULL_RTX))
1981 {
1982 XEXP (temp, 0) = first;
1983 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
1984 XEXP (temp, 0) = i1;
1985 }
1986
1987 delete_insn (p);
1988 do p = NEXT_INSN (p);
1989 while (p && GET_CODE (p) == NOTE);
1990 }
1991
1992 /* The more regs we move, the less we like moving them. */
1993 threshold -= 3;
1994 }
1995
1996 /* Any other movable that loads the same register
1997 MUST be moved. */
1998 already_moved[regno] = 1;
1999
2000 /* This reg has been moved out of one loop. */
2001 moved_once[regno] = 1;
2002
2003 /* The reg set here is now invariant. */
2004 if (! m->partial)
2005 n_times_set[regno] = 0;
2006
2007 m->done = 1;
2008
2009 /* Change the length-of-life info for the register
2010 to say it lives at least the full length of this loop.
2011 This will help guide optimizations in outer loops. */
2012
2013 if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
2014 /* This is the old insn before all the moved insns.
2015 We can't use the moved insn because it is out of range
2016 in uid_luid. Only the old insns have luids. */
2017 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2018 if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
2019 REGNO_LAST_UID (regno) = INSN_UID (end);
2020
2021 /* Combine with this moved insn any other matching movables. */
2022
2023 if (! m->partial)
2024 for (m1 = movables; m1; m1 = m1->next)
2025 if (m1->match == m)
2026 {
2027 rtx temp;
2028
2029 /* Schedule the reg loaded by M1
2030 for replacement so that shares the reg of M.
2031 If the modes differ (only possible in restricted
2032 circumstances, make a SUBREG. */
2033 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2034 reg_map[m1->regno] = m->set_dest;
2035 else
2036 reg_map[m1->regno]
2037 = gen_lowpart_common (GET_MODE (m1->set_dest),
2038 m->set_dest);
2039
2040 /* Get rid of the matching insn
2041 and prevent further processing of it. */
2042 m1->done = 1;
2043
2044 /* if library call, delete all insn except last, which
2045 is deleted below */
2046 if (temp = find_reg_note (m1->insn, REG_RETVAL,
2047 NULL_RTX))
2048 {
2049 for (temp = XEXP (temp, 0); temp != m1->insn;
2050 temp = NEXT_INSN (temp))
2051 delete_insn (temp);
2052 }
2053 delete_insn (m1->insn);
2054
2055 /* Any other movable that loads the same register
2056 MUST be moved. */
2057 already_moved[m1->regno] = 1;
2058
2059 /* The reg merged here is now invariant,
2060 if the reg it matches is invariant. */
2061 if (! m->partial)
2062 n_times_set[m1->regno] = 0;
2063 }
2064 }
2065 else if (loop_dump_stream)
2066 fprintf (loop_dump_stream, "not desirable");
2067 }
2068 else if (loop_dump_stream && !m->match)
2069 fprintf (loop_dump_stream, "not safe");
2070
2071 if (loop_dump_stream)
2072 fprintf (loop_dump_stream, "\n");
2073 }
2074
2075 if (new_start == 0)
2076 new_start = loop_start;
2077
2078 /* Go through all the instructions in the loop, making
2079 all the register substitutions scheduled in REG_MAP. */
2080 for (p = new_start; p != end; p = NEXT_INSN (p))
2081 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2082 || GET_CODE (p) == CALL_INSN)
2083 {
2084 replace_regs (PATTERN (p), reg_map, nregs, 0);
2085 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2086 INSN_CODE (p) = -1;
2087 }
2088 }
2089 \f
2090 #if 0
2091 /* Scan X and replace the address of any MEM in it with ADDR.
2092 REG is the address that MEM should have before the replacement. */
2093
2094 static void
2095 replace_call_address (x, reg, addr)
2096 rtx x, reg, addr;
2097 {
2098 register enum rtx_code code;
2099 register int i;
2100 register char *fmt;
2101
2102 if (x == 0)
2103 return;
2104 code = GET_CODE (x);
2105 switch (code)
2106 {
2107 case PC:
2108 case CC0:
2109 case CONST_INT:
2110 case CONST_DOUBLE:
2111 case CONST:
2112 case SYMBOL_REF:
2113 case LABEL_REF:
2114 case REG:
2115 return;
2116
2117 case SET:
2118 /* Short cut for very common case. */
2119 replace_call_address (XEXP (x, 1), reg, addr);
2120 return;
2121
2122 case CALL:
2123 /* Short cut for very common case. */
2124 replace_call_address (XEXP (x, 0), reg, addr);
2125 return;
2126
2127 case MEM:
2128 /* If this MEM uses a reg other than the one we expected,
2129 something is wrong. */
2130 if (XEXP (x, 0) != reg)
2131 abort ();
2132 XEXP (x, 0) = addr;
2133 return;
2134
2135 default:
2136 break;
2137 }
2138
2139 fmt = GET_RTX_FORMAT (code);
2140 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2141 {
2142 if (fmt[i] == 'e')
2143 replace_call_address (XEXP (x, i), reg, addr);
2144 if (fmt[i] == 'E')
2145 {
2146 register int j;
2147 for (j = 0; j < XVECLEN (x, i); j++)
2148 replace_call_address (XVECEXP (x, i, j), reg, addr);
2149 }
2150 }
2151 }
2152 #endif
2153 \f
2154 /* Return the number of memory refs to addresses that vary
2155 in the rtx X. */
2156
2157 static int
2158 count_nonfixed_reads (x)
2159 rtx x;
2160 {
2161 register enum rtx_code code;
2162 register int i;
2163 register char *fmt;
2164 int value;
2165
2166 if (x == 0)
2167 return 0;
2168
2169 code = GET_CODE (x);
2170 switch (code)
2171 {
2172 case PC:
2173 case CC0:
2174 case CONST_INT:
2175 case CONST_DOUBLE:
2176 case CONST:
2177 case SYMBOL_REF:
2178 case LABEL_REF:
2179 case REG:
2180 return 0;
2181
2182 case MEM:
2183 return ((invariant_p (XEXP (x, 0)) != 1)
2184 + count_nonfixed_reads (XEXP (x, 0)));
2185
2186 default:
2187 break;
2188 }
2189
2190 value = 0;
2191 fmt = GET_RTX_FORMAT (code);
2192 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2193 {
2194 if (fmt[i] == 'e')
2195 value += count_nonfixed_reads (XEXP (x, i));
2196 if (fmt[i] == 'E')
2197 {
2198 register int j;
2199 for (j = 0; j < XVECLEN (x, i); j++)
2200 value += count_nonfixed_reads (XVECEXP (x, i, j));
2201 }
2202 }
2203 return value;
2204 }
2205
2206 \f
2207 #if 0
2208 /* P is an instruction that sets a register to the result of a ZERO_EXTEND.
2209 Replace it with an instruction to load just the low bytes
2210 if the machine supports such an instruction,
2211 and insert above LOOP_START an instruction to clear the register. */
2212
2213 static void
2214 constant_high_bytes (p, loop_start)
2215 rtx p, loop_start;
2216 {
2217 register rtx new;
2218 register int insn_code_number;
2219
2220 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2221 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2222
2223 new = gen_rtx_SET (VOIDmode,
2224 gen_rtx_STRICT_LOW_PART (VOIDmode,
2225 gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2226 SET_DEST (PATTERN (p)),
2227 0)),
2228 XEXP (SET_SRC (PATTERN (p)), 0));
2229 insn_code_number = recog (new, p);
2230
2231 if (insn_code_number)
2232 {
2233 register int i;
2234
2235 /* Clear destination register before the loop. */
2236 emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
2237 const0_rtx),
2238 loop_start);
2239
2240 /* Inside the loop, just load the low part. */
2241 PATTERN (p) = new;
2242 }
2243 }
2244 #endif
2245 \f
2246 /* Scan a loop setting the variables `unknown_address_altered',
2247 `num_mem_sets', `loop_continue', loops_enclosed', `loop_has_call',
2248 and `loop_has_volatile'.
2249 Also, fill in the array `loop_store_mems'. */
2250
2251 static void
2252 prescan_loop (start, end)
2253 rtx start, end;
2254 {
2255 register int level = 1;
2256 register rtx insn;
2257
2258 unknown_address_altered = 0;
2259 loop_has_call = 0;
2260 loop_has_volatile = 0;
2261 loop_store_mems_idx = 0;
2262
2263 num_mem_sets = 0;
2264 loops_enclosed = 1;
2265 loop_continue = 0;
2266
2267 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2268 insn = NEXT_INSN (insn))
2269 {
2270 if (GET_CODE (insn) == NOTE)
2271 {
2272 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2273 {
2274 ++level;
2275 /* Count number of loops contained in this one. */
2276 loops_enclosed++;
2277 }
2278 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2279 {
2280 --level;
2281 if (level == 0)
2282 {
2283 end = insn;
2284 break;
2285 }
2286 }
2287 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2288 {
2289 if (level == 1)
2290 loop_continue = insn;
2291 }
2292 }
2293 else if (GET_CODE (insn) == CALL_INSN)
2294 {
2295 if (! CONST_CALL_P (insn))
2296 unknown_address_altered = 1;
2297 loop_has_call = 1;
2298 }
2299 else
2300 {
2301 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2302 {
2303 if (volatile_refs_p (PATTERN (insn)))
2304 loop_has_volatile = 1;
2305
2306 note_stores (PATTERN (insn), note_addr_stored);
2307 }
2308 }
2309 }
2310 }
2311 \f
2312 /* Scan the function looking for loops. Record the start and end of each loop.
2313 Also mark as invalid loops any loops that contain a setjmp or are branched
2314 to from outside the loop. */
2315
2316 static void
2317 find_and_verify_loops (f)
2318 rtx f;
2319 {
2320 rtx insn, label;
2321 int current_loop = -1;
2322 int next_loop = -1;
2323 int loop;
2324
2325 /* If there are jumps to undefined labels,
2326 treat them as jumps out of any/all loops.
2327 This also avoids writing past end of tables when there are no loops. */
2328 uid_loop_num[0] = -1;
2329
2330 /* Find boundaries of loops, mark which loops are contained within
2331 loops, and invalidate loops that have setjmp. */
2332
2333 for (insn = f; insn; insn = NEXT_INSN (insn))
2334 {
2335 if (GET_CODE (insn) == NOTE)
2336 switch (NOTE_LINE_NUMBER (insn))
2337 {
2338 case NOTE_INSN_LOOP_BEG:
2339 loop_number_loop_starts[++next_loop] = insn;
2340 loop_number_loop_ends[next_loop] = 0;
2341 loop_outer_loop[next_loop] = current_loop;
2342 loop_invalid[next_loop] = 0;
2343 loop_number_exit_labels[next_loop] = 0;
2344 loop_number_exit_count[next_loop] = 0;
2345 current_loop = next_loop;
2346 break;
2347
2348 case NOTE_INSN_SETJMP:
2349 /* In this case, we must invalidate our current loop and any
2350 enclosing loop. */
2351 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2352 {
2353 loop_invalid[loop] = 1;
2354 if (loop_dump_stream)
2355 fprintf (loop_dump_stream,
2356 "\nLoop at %d ignored due to setjmp.\n",
2357 INSN_UID (loop_number_loop_starts[loop]));
2358 }
2359 break;
2360
2361 case NOTE_INSN_LOOP_END:
2362 if (current_loop == -1)
2363 abort ();
2364
2365 loop_number_loop_ends[current_loop] = insn;
2366 current_loop = loop_outer_loop[current_loop];
2367 break;
2368
2369 default:
2370 break;
2371 }
2372
2373 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2374 enclosing loop, but this doesn't matter. */
2375 uid_loop_num[INSN_UID (insn)] = current_loop;
2376 }
2377
2378 /* Any loop containing a label used in an initializer must be invalidated,
2379 because it can be jumped into from anywhere. */
2380
2381 for (label = forced_labels; label; label = XEXP (label, 1))
2382 {
2383 int loop_num;
2384
2385 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2386 loop_num != -1;
2387 loop_num = loop_outer_loop[loop_num])
2388 loop_invalid[loop_num] = 1;
2389 }
2390
2391 /* Any loop containing a label used for an exception handler must be
2392 invalidated, because it can be jumped into from anywhere. */
2393
2394 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2395 {
2396 int loop_num;
2397
2398 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2399 loop_num != -1;
2400 loop_num = loop_outer_loop[loop_num])
2401 loop_invalid[loop_num] = 1;
2402 }
2403
2404 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2405 loop that it is not contained within, that loop is marked invalid.
2406 If any INSN or CALL_INSN uses a label's address, then the loop containing
2407 that label is marked invalid, because it could be jumped into from
2408 anywhere.
2409
2410 Also look for blocks of code ending in an unconditional branch that
2411 exits the loop. If such a block is surrounded by a conditional
2412 branch around the block, move the block elsewhere (see below) and
2413 invert the jump to point to the code block. This may eliminate a
2414 label in our loop and will simplify processing by both us and a
2415 possible second cse pass. */
2416
2417 for (insn = f; insn; insn = NEXT_INSN (insn))
2418 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2419 {
2420 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2421
2422 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2423 {
2424 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2425 if (note)
2426 {
2427 int loop_num;
2428
2429 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2430 loop_num != -1;
2431 loop_num = loop_outer_loop[loop_num])
2432 loop_invalid[loop_num] = 1;
2433 }
2434 }
2435
2436 if (GET_CODE (insn) != JUMP_INSN)
2437 continue;
2438
2439 mark_loop_jump (PATTERN (insn), this_loop_num);
2440
2441 /* See if this is an unconditional branch outside the loop. */
2442 if (this_loop_num != -1
2443 && (GET_CODE (PATTERN (insn)) == RETURN
2444 || (simplejump_p (insn)
2445 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2446 != this_loop_num)))
2447 && get_max_uid () < max_uid_for_loop)
2448 {
2449 rtx p;
2450 rtx our_next = next_real_insn (insn);
2451 int dest_loop;
2452 int outer_loop = -1;
2453
2454 /* Go backwards until we reach the start of the loop, a label,
2455 or a JUMP_INSN. */
2456 for (p = PREV_INSN (insn);
2457 GET_CODE (p) != CODE_LABEL
2458 && ! (GET_CODE (p) == NOTE
2459 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2460 && GET_CODE (p) != JUMP_INSN;
2461 p = PREV_INSN (p))
2462 ;
2463
2464 /* Check for the case where we have a jump to an inner nested
2465 loop, and do not perform the optimization in that case. */
2466
2467 if (JUMP_LABEL (insn))
2468 {
2469 dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
2470 if (dest_loop != -1)
2471 {
2472 for (outer_loop = dest_loop; outer_loop != -1;
2473 outer_loop = loop_outer_loop[outer_loop])
2474 if (outer_loop == this_loop_num)
2475 break;
2476 }
2477 }
2478
2479 /* Make sure that the target of P is within the current loop. */
2480
2481 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2482 && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
2483 outer_loop = this_loop_num;
2484
2485 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2486 we have a block of code to try to move.
2487
2488 We look backward and then forward from the target of INSN
2489 to find a BARRIER at the same loop depth as the target.
2490 If we find such a BARRIER, we make a new label for the start
2491 of the block, invert the jump in P and point it to that label,
2492 and move the block of code to the spot we found. */
2493
2494 if (outer_loop == -1
2495 && GET_CODE (p) == JUMP_INSN
2496 && JUMP_LABEL (p) != 0
2497 /* Just ignore jumps to labels that were never emitted.
2498 These always indicate compilation errors. */
2499 && INSN_UID (JUMP_LABEL (p)) != 0
2500 && condjump_p (p)
2501 && ! simplejump_p (p)
2502 && next_real_insn (JUMP_LABEL (p)) == our_next)
2503 {
2504 rtx target
2505 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2506 int target_loop_num = uid_loop_num[INSN_UID (target)];
2507 rtx loc;
2508
2509 for (loc = target; loc; loc = PREV_INSN (loc))
2510 if (GET_CODE (loc) == BARRIER
2511 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2512 break;
2513
2514 if (loc == 0)
2515 for (loc = target; loc; loc = NEXT_INSN (loc))
2516 if (GET_CODE (loc) == BARRIER
2517 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2518 break;
2519
2520 if (loc)
2521 {
2522 rtx cond_label = JUMP_LABEL (p);
2523 rtx new_label = get_label_after (p);
2524
2525 /* Ensure our label doesn't go away. */
2526 LABEL_NUSES (cond_label)++;
2527
2528 /* Verify that uid_loop_num is large enough and that
2529 we can invert P. */
2530 if (invert_jump (p, new_label))
2531 {
2532 rtx q, r;
2533
2534 /* If no suitable BARRIER was found, create a suitable
2535 one before TARGET. Since TARGET is a fall through
2536 path, we'll need to insert an jump around our block
2537 and a add a BARRIER before TARGET.
2538
2539 This creates an extra unconditional jump outside
2540 the loop. However, the benefits of removing rarely
2541 executed instructions from inside the loop usually
2542 outweighs the cost of the extra unconditional jump
2543 outside the loop. */
2544 if (loc == 0)
2545 {
2546 rtx temp;
2547
2548 temp = gen_jump (JUMP_LABEL (insn));
2549 temp = emit_jump_insn_before (temp, target);
2550 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2551 LABEL_NUSES (JUMP_LABEL (insn))++;
2552 loc = emit_barrier_before (target);
2553 }
2554
2555 /* Include the BARRIER after INSN and copy the
2556 block after LOC. */
2557 new_label = squeeze_notes (new_label, NEXT_INSN (insn));
2558 reorder_insns (new_label, NEXT_INSN (insn), loc);
2559
2560 /* All those insns are now in TARGET_LOOP_NUM. */
2561 for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
2562 q = NEXT_INSN (q))
2563 uid_loop_num[INSN_UID (q)] = target_loop_num;
2564
2565 /* The label jumped to by INSN is no longer a loop exit.
2566 Unless INSN does not have a label (e.g., it is a
2567 RETURN insn), search loop_number_exit_labels to find
2568 its label_ref, and remove it. Also turn off
2569 LABEL_OUTSIDE_LOOP_P bit. */
2570 if (JUMP_LABEL (insn))
2571 {
2572 int loop_num;
2573
2574 for (q = 0,
2575 r = loop_number_exit_labels[this_loop_num];
2576 r; q = r, r = LABEL_NEXTREF (r))
2577 if (XEXP (r, 0) == JUMP_LABEL (insn))
2578 {
2579 LABEL_OUTSIDE_LOOP_P (r) = 0;
2580 if (q)
2581 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2582 else
2583 loop_number_exit_labels[this_loop_num]
2584 = LABEL_NEXTREF (r);
2585 break;
2586 }
2587
2588 for (loop_num = this_loop_num;
2589 loop_num != -1 && loop_num != target_loop_num;
2590 loop_num = loop_outer_loop[loop_num])
2591 loop_number_exit_count[loop_num]--;
2592
2593 /* If we didn't find it, then something is wrong. */
2594 if (! r)
2595 abort ();
2596 }
2597
2598 /* P is now a jump outside the loop, so it must be put
2599 in loop_number_exit_labels, and marked as such.
2600 The easiest way to do this is to just call
2601 mark_loop_jump again for P. */
2602 mark_loop_jump (PATTERN (p), this_loop_num);
2603
2604 /* If INSN now jumps to the insn after it,
2605 delete INSN. */
2606 if (JUMP_LABEL (insn) != 0
2607 && (next_real_insn (JUMP_LABEL (insn))
2608 == next_real_insn (insn)))
2609 delete_insn (insn);
2610 }
2611
2612 /* Continue the loop after where the conditional
2613 branch used to jump, since the only branch insn
2614 in the block (if it still remains) is an inter-loop
2615 branch and hence needs no processing. */
2616 insn = NEXT_INSN (cond_label);
2617
2618 if (--LABEL_NUSES (cond_label) == 0)
2619 delete_insn (cond_label);
2620
2621 /* This loop will be continued with NEXT_INSN (insn). */
2622 insn = PREV_INSN (insn);
2623 }
2624 }
2625 }
2626 }
2627 }
2628
2629 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2630 loops it is contained in, mark the target loop invalid.
2631
2632 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2633
2634 static void
2635 mark_loop_jump (x, loop_num)
2636 rtx x;
2637 int loop_num;
2638 {
2639 int dest_loop;
2640 int outer_loop;
2641 int i;
2642
2643 switch (GET_CODE (x))
2644 {
2645 case PC:
2646 case USE:
2647 case CLOBBER:
2648 case REG:
2649 case MEM:
2650 case CONST_INT:
2651 case CONST_DOUBLE:
2652 case RETURN:
2653 return;
2654
2655 case CONST:
2656 /* There could be a label reference in here. */
2657 mark_loop_jump (XEXP (x, 0), loop_num);
2658 return;
2659
2660 case PLUS:
2661 case MINUS:
2662 case MULT:
2663 mark_loop_jump (XEXP (x, 0), loop_num);
2664 mark_loop_jump (XEXP (x, 1), loop_num);
2665 return;
2666
2667 case SIGN_EXTEND:
2668 case ZERO_EXTEND:
2669 mark_loop_jump (XEXP (x, 0), loop_num);
2670 return;
2671
2672 case LABEL_REF:
2673 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
2674
2675 /* Link together all labels that branch outside the loop. This
2676 is used by final_[bg]iv_value and the loop unrolling code. Also
2677 mark this LABEL_REF so we know that this branch should predict
2678 false. */
2679
2680 /* A check to make sure the label is not in an inner nested loop,
2681 since this does not count as a loop exit. */
2682 if (dest_loop != -1)
2683 {
2684 for (outer_loop = dest_loop; outer_loop != -1;
2685 outer_loop = loop_outer_loop[outer_loop])
2686 if (outer_loop == loop_num)
2687 break;
2688 }
2689 else
2690 outer_loop = -1;
2691
2692 if (loop_num != -1 && outer_loop == -1)
2693 {
2694 LABEL_OUTSIDE_LOOP_P (x) = 1;
2695 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2696 loop_number_exit_labels[loop_num] = x;
2697
2698 for (outer_loop = loop_num;
2699 outer_loop != -1 && outer_loop != dest_loop;
2700 outer_loop = loop_outer_loop[outer_loop])
2701 loop_number_exit_count[outer_loop]++;
2702 }
2703
2704 /* If this is inside a loop, but not in the current loop or one enclosed
2705 by it, it invalidates at least one loop. */
2706
2707 if (dest_loop == -1)
2708 return;
2709
2710 /* We must invalidate every nested loop containing the target of this
2711 label, except those that also contain the jump insn. */
2712
2713 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
2714 {
2715 /* Stop when we reach a loop that also contains the jump insn. */
2716 for (outer_loop = loop_num; outer_loop != -1;
2717 outer_loop = loop_outer_loop[outer_loop])
2718 if (dest_loop == outer_loop)
2719 return;
2720
2721 /* If we get here, we know we need to invalidate a loop. */
2722 if (loop_dump_stream && ! loop_invalid[dest_loop])
2723 fprintf (loop_dump_stream,
2724 "\nLoop at %d ignored due to multiple entry points.\n",
2725 INSN_UID (loop_number_loop_starts[dest_loop]));
2726
2727 loop_invalid[dest_loop] = 1;
2728 }
2729 return;
2730
2731 case SET:
2732 /* If this is not setting pc, ignore. */
2733 if (SET_DEST (x) == pc_rtx)
2734 mark_loop_jump (SET_SRC (x), loop_num);
2735 return;
2736
2737 case IF_THEN_ELSE:
2738 mark_loop_jump (XEXP (x, 1), loop_num);
2739 mark_loop_jump (XEXP (x, 2), loop_num);
2740 return;
2741
2742 case PARALLEL:
2743 case ADDR_VEC:
2744 for (i = 0; i < XVECLEN (x, 0); i++)
2745 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
2746 return;
2747
2748 case ADDR_DIFF_VEC:
2749 for (i = 0; i < XVECLEN (x, 1); i++)
2750 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
2751 return;
2752
2753 default:
2754 /* Treat anything else (such as a symbol_ref)
2755 as a branch out of this loop, but not into any loop. */
2756
2757 if (loop_num != -1)
2758 {
2759 #ifdef HAIFA
2760 LABEL_OUTSIDE_LOOP_P (x) = 1;
2761 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2762 #endif /* HAIFA */
2763
2764 loop_number_exit_labels[loop_num] = x;
2765
2766 for (outer_loop = loop_num; outer_loop != -1;
2767 outer_loop = loop_outer_loop[outer_loop])
2768 loop_number_exit_count[outer_loop]++;
2769 }
2770 return;
2771 }
2772 }
2773 \f
2774 /* Return nonzero if there is a label in the range from
2775 insn INSN to and including the insn whose luid is END
2776 INSN must have an assigned luid (i.e., it must not have
2777 been previously created by loop.c). */
2778
2779 static int
2780 labels_in_range_p (insn, end)
2781 rtx insn;
2782 int end;
2783 {
2784 while (insn && INSN_LUID (insn) <= end)
2785 {
2786 if (GET_CODE (insn) == CODE_LABEL)
2787 return 1;
2788 insn = NEXT_INSN (insn);
2789 }
2790
2791 return 0;
2792 }
2793
2794 /* Record that a memory reference X is being set. */
2795
2796 static void
2797 note_addr_stored (x)
2798 rtx x;
2799 {
2800 register int i;
2801
2802 if (x == 0 || GET_CODE (x) != MEM)
2803 return;
2804
2805 /* Count number of memory writes.
2806 This affects heuristics in strength_reduce. */
2807 num_mem_sets++;
2808
2809 /* BLKmode MEM means all memory is clobbered. */
2810 if (GET_MODE (x) == BLKmode)
2811 unknown_address_altered = 1;
2812
2813 if (unknown_address_altered)
2814 return;
2815
2816 for (i = 0; i < loop_store_mems_idx; i++)
2817 if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
2818 && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
2819 {
2820 /* We are storing at the same address as previously noted. Save the
2821 wider reference. */
2822 if (GET_MODE_SIZE (GET_MODE (x))
2823 > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
2824 loop_store_mems[i] = x;
2825 break;
2826 }
2827
2828 if (i == NUM_STORES)
2829 unknown_address_altered = 1;
2830
2831 else if (i == loop_store_mems_idx)
2832 loop_store_mems[loop_store_mems_idx++] = x;
2833 }
2834 \f
2835 /* Return nonzero if the rtx X is invariant over the current loop.
2836
2837 The value is 2 if we refer to something only conditionally invariant.
2838
2839 If `unknown_address_altered' is nonzero, no memory ref is invariant.
2840 Otherwise, a memory ref is invariant if it does not conflict with
2841 anything stored in `loop_store_mems'. */
2842
2843 int
2844 invariant_p (x)
2845 register rtx x;
2846 {
2847 register int i;
2848 register enum rtx_code code;
2849 register char *fmt;
2850 int conditional = 0;
2851
2852 if (x == 0)
2853 return 1;
2854 code = GET_CODE (x);
2855 switch (code)
2856 {
2857 case CONST_INT:
2858 case CONST_DOUBLE:
2859 case SYMBOL_REF:
2860 case CONST:
2861 return 1;
2862
2863 case LABEL_REF:
2864 /* A LABEL_REF is normally invariant, however, if we are unrolling
2865 loops, and this label is inside the loop, then it isn't invariant.
2866 This is because each unrolled copy of the loop body will have
2867 a copy of this label. If this was invariant, then an insn loading
2868 the address of this label into a register might get moved outside
2869 the loop, and then each loop body would end up using the same label.
2870
2871 We don't know the loop bounds here though, so just fail for all
2872 labels. */
2873 if (flag_unroll_loops)
2874 return 0;
2875 else
2876 return 1;
2877
2878 case PC:
2879 case CC0:
2880 case UNSPEC_VOLATILE:
2881 return 0;
2882
2883 case REG:
2884 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
2885 since the reg might be set by initialization within the loop. */
2886
2887 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
2888 || x == arg_pointer_rtx)
2889 && ! current_function_has_nonlocal_goto)
2890 return 1;
2891
2892 if (loop_has_call
2893 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
2894 return 0;
2895
2896 if (n_times_set[REGNO (x)] < 0)
2897 return 2;
2898
2899 return n_times_set[REGNO (x)] == 0;
2900
2901 case MEM:
2902 /* Volatile memory references must be rejected. Do this before
2903 checking for read-only items, so that volatile read-only items
2904 will be rejected also. */
2905 if (MEM_VOLATILE_P (x))
2906 return 0;
2907
2908 /* Read-only items (such as constants in a constant pool) are
2909 invariant if their address is. */
2910 if (RTX_UNCHANGING_P (x))
2911 break;
2912
2913 /* If we filled the table (or had a subroutine call), any location
2914 in memory could have been clobbered. */
2915 if (unknown_address_altered)
2916 return 0;
2917
2918 /* See if there is any dependence between a store and this load. */
2919 for (i = loop_store_mems_idx - 1; i >= 0; i--)
2920 if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
2921 return 0;
2922
2923 /* It's not invalidated by a store in memory
2924 but we must still verify the address is invariant. */
2925 break;
2926
2927 case ASM_OPERANDS:
2928 /* Don't mess with insns declared volatile. */
2929 if (MEM_VOLATILE_P (x))
2930 return 0;
2931 break;
2932
2933 default:
2934 break;
2935 }
2936
2937 fmt = GET_RTX_FORMAT (code);
2938 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2939 {
2940 if (fmt[i] == 'e')
2941 {
2942 int tem = invariant_p (XEXP (x, i));
2943 if (tem == 0)
2944 return 0;
2945 if (tem == 2)
2946 conditional = 1;
2947 }
2948 else if (fmt[i] == 'E')
2949 {
2950 register int j;
2951 for (j = 0; j < XVECLEN (x, i); j++)
2952 {
2953 int tem = invariant_p (XVECEXP (x, i, j));
2954 if (tem == 0)
2955 return 0;
2956 if (tem == 2)
2957 conditional = 1;
2958 }
2959
2960 }
2961 }
2962
2963 return 1 + conditional;
2964 }
2965
2966 \f
2967 /* Return nonzero if all the insns in the loop that set REG
2968 are INSN and the immediately following insns,
2969 and if each of those insns sets REG in an invariant way
2970 (not counting uses of REG in them).
2971
2972 The value is 2 if some of these insns are only conditionally invariant.
2973
2974 We assume that INSN itself is the first set of REG
2975 and that its source is invariant. */
2976
2977 static int
2978 consec_sets_invariant_p (reg, n_sets, insn)
2979 int n_sets;
2980 rtx reg, insn;
2981 {
2982 register rtx p = insn;
2983 register int regno = REGNO (reg);
2984 rtx temp;
2985 /* Number of sets we have to insist on finding after INSN. */
2986 int count = n_sets - 1;
2987 int old = n_times_set[regno];
2988 int value = 0;
2989 int this;
2990
2991 /* If N_SETS hit the limit, we can't rely on its value. */
2992 if (n_sets == 127)
2993 return 0;
2994
2995 n_times_set[regno] = 0;
2996
2997 while (count > 0)
2998 {
2999 register enum rtx_code code;
3000 rtx set;
3001
3002 p = NEXT_INSN (p);
3003 code = GET_CODE (p);
3004
3005 /* If library call, skip to end of of it. */
3006 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3007 p = XEXP (temp, 0);
3008
3009 this = 0;
3010 if (code == INSN
3011 && (set = single_set (p))
3012 && GET_CODE (SET_DEST (set)) == REG
3013 && REGNO (SET_DEST (set)) == regno)
3014 {
3015 this = invariant_p (SET_SRC (set));
3016 if (this != 0)
3017 value |= this;
3018 else if (temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
3019 {
3020 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3021 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3022 notes are OK. */
3023 this = (CONSTANT_P (XEXP (temp, 0))
3024 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3025 && invariant_p (XEXP (temp, 0))));
3026 if (this != 0)
3027 value |= this;
3028 }
3029 }
3030 if (this != 0)
3031 count--;
3032 else if (code != NOTE)
3033 {
3034 n_times_set[regno] = old;
3035 return 0;
3036 }
3037 }
3038
3039 n_times_set[regno] = old;
3040 /* If invariant_p ever returned 2, we return 2. */
3041 return 1 + (value & 2);
3042 }
3043
3044 #if 0
3045 /* I don't think this condition is sufficient to allow INSN
3046 to be moved, so we no longer test it. */
3047
3048 /* Return 1 if all insns in the basic block of INSN and following INSN
3049 that set REG are invariant according to TABLE. */
3050
3051 static int
3052 all_sets_invariant_p (reg, insn, table)
3053 rtx reg, insn;
3054 short *table;
3055 {
3056 register rtx p = insn;
3057 register int regno = REGNO (reg);
3058
3059 while (1)
3060 {
3061 register enum rtx_code code;
3062 p = NEXT_INSN (p);
3063 code = GET_CODE (p);
3064 if (code == CODE_LABEL || code == JUMP_INSN)
3065 return 1;
3066 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3067 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3068 && REGNO (SET_DEST (PATTERN (p))) == regno)
3069 {
3070 if (!invariant_p (SET_SRC (PATTERN (p)), table))
3071 return 0;
3072 }
3073 }
3074 }
3075 #endif /* 0 */
3076 \f
3077 /* Look at all uses (not sets) of registers in X. For each, if it is
3078 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3079 a different insn, set USAGE[REGNO] to const0_rtx. */
3080
3081 static void
3082 find_single_use_in_loop (insn, x, usage)
3083 rtx insn;
3084 rtx x;
3085 rtx *usage;
3086 {
3087 enum rtx_code code = GET_CODE (x);
3088 char *fmt = GET_RTX_FORMAT (code);
3089 int i, j;
3090
3091 if (code == REG)
3092 usage[REGNO (x)]
3093 = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
3094 ? const0_rtx : insn;
3095
3096 else if (code == SET)
3097 {
3098 /* Don't count SET_DEST if it is a REG; otherwise count things
3099 in SET_DEST because if a register is partially modified, it won't
3100 show up as a potential movable so we don't care how USAGE is set
3101 for it. */
3102 if (GET_CODE (SET_DEST (x)) != REG)
3103 find_single_use_in_loop (insn, SET_DEST (x), usage);
3104 find_single_use_in_loop (insn, SET_SRC (x), usage);
3105 }
3106 else
3107 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3108 {
3109 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3110 find_single_use_in_loop (insn, XEXP (x, i), usage);
3111 else if (fmt[i] == 'E')
3112 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3113 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
3114 }
3115 }
3116 \f
3117 /* Increment N_TIMES_SET at the index of each register
3118 that is modified by an insn between FROM and TO.
3119 If the value of an element of N_TIMES_SET becomes 127 or more,
3120 stop incrementing it, to avoid overflow.
3121
3122 Store in SINGLE_USAGE[I] the single insn in which register I is
3123 used, if it is only used once. Otherwise, it is set to 0 (for no
3124 uses) or const0_rtx for more than one use. This parameter may be zero,
3125 in which case this processing is not done.
3126
3127 Store in *COUNT_PTR the number of actual instruction
3128 in the loop. We use this to decide what is worth moving out. */
3129
3130 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
3131 In that case, it is the insn that last set reg n. */
3132
3133 static void
3134 count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
3135 register rtx from, to;
3136 char *may_not_move;
3137 rtx *single_usage;
3138 int *count_ptr;
3139 int nregs;
3140 {
3141 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
3142 register rtx insn;
3143 register int count = 0;
3144 register rtx dest;
3145
3146 bzero ((char *) last_set, nregs * sizeof (rtx));
3147 for (insn = from; insn != to; insn = NEXT_INSN (insn))
3148 {
3149 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3150 {
3151 ++count;
3152
3153 /* If requested, record registers that have exactly one use. */
3154 if (single_usage)
3155 {
3156 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
3157
3158 /* Include uses in REG_EQUAL notes. */
3159 if (REG_NOTES (insn))
3160 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
3161 }
3162
3163 if (GET_CODE (PATTERN (insn)) == CLOBBER
3164 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
3165 /* Don't move a reg that has an explicit clobber.
3166 We might do so sometimes, but it's not worth the pain. */
3167 may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;
3168
3169 if (GET_CODE (PATTERN (insn)) == SET
3170 || GET_CODE (PATTERN (insn)) == CLOBBER)
3171 {
3172 dest = SET_DEST (PATTERN (insn));
3173 while (GET_CODE (dest) == SUBREG
3174 || GET_CODE (dest) == ZERO_EXTRACT
3175 || GET_CODE (dest) == SIGN_EXTRACT
3176 || GET_CODE (dest) == STRICT_LOW_PART)
3177 dest = XEXP (dest, 0);
3178 if (GET_CODE (dest) == REG)
3179 {
3180 register int regno = REGNO (dest);
3181 /* If this is the first setting of this reg
3182 in current basic block, and it was set before,
3183 it must be set in two basic blocks, so it cannot
3184 be moved out of the loop. */
3185 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3186 may_not_move[regno] = 1;
3187 /* If this is not first setting in current basic block,
3188 see if reg was used in between previous one and this.
3189 If so, neither one can be moved. */
3190 if (last_set[regno] != 0
3191 && reg_used_between_p (dest, last_set[regno], insn))
3192 may_not_move[regno] = 1;
3193 if (n_times_set[regno] < 127)
3194 ++n_times_set[regno];
3195 last_set[regno] = insn;
3196 }
3197 }
3198 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3199 {
3200 register int i;
3201 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3202 {
3203 register rtx x = XVECEXP (PATTERN (insn), 0, i);
3204 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3205 /* Don't move a reg that has an explicit clobber.
3206 It's not worth the pain to try to do it correctly. */
3207 may_not_move[REGNO (XEXP (x, 0))] = 1;
3208
3209 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3210 {
3211 dest = SET_DEST (x);
3212 while (GET_CODE (dest) == SUBREG
3213 || GET_CODE (dest) == ZERO_EXTRACT
3214 || GET_CODE (dest) == SIGN_EXTRACT
3215 || GET_CODE (dest) == STRICT_LOW_PART)
3216 dest = XEXP (dest, 0);
3217 if (GET_CODE (dest) == REG)
3218 {
3219 register int regno = REGNO (dest);
3220 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3221 may_not_move[regno] = 1;
3222 if (last_set[regno] != 0
3223 && reg_used_between_p (dest, last_set[regno], insn))
3224 may_not_move[regno] = 1;
3225 if (n_times_set[regno] < 127)
3226 ++n_times_set[regno];
3227 last_set[regno] = insn;
3228 }
3229 }
3230 }
3231 }
3232 }
3233
3234 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3235 bzero ((char *) last_set, nregs * sizeof (rtx));
3236 }
3237 *count_ptr = count;
3238 }
3239 \f
3240 /* Given a loop that is bounded by LOOP_START and LOOP_END
3241 and that is entered at SCAN_START,
3242 return 1 if the register set in SET contained in insn INSN is used by
3243 any insn that precedes INSN in cyclic order starting
3244 from the loop entry point.
3245
3246 We don't want to use INSN_LUID here because if we restrict INSN to those
3247 that have a valid INSN_LUID, it means we cannot move an invariant out
3248 from an inner loop past two loops. */
3249
3250 static int
3251 loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3252 rtx set, insn, loop_start, scan_start, loop_end;
3253 {
3254 rtx reg = SET_DEST (set);
3255 rtx p;
3256
3257 /* Scan forward checking for register usage. If we hit INSN, we
3258 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3259 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3260 {
3261 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3262 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3263 return 1;
3264
3265 if (p == loop_end)
3266 p = loop_start;
3267 }
3268
3269 return 0;
3270 }
3271 \f
3272 /* A "basic induction variable" or biv is a pseudo reg that is set
3273 (within this loop) only by incrementing or decrementing it. */
3274 /* A "general induction variable" or giv is a pseudo reg whose
3275 value is a linear function of a biv. */
3276
3277 /* Bivs are recognized by `basic_induction_var';
3278 Givs by `general_induct_var'. */
3279
3280 /* Indexed by register number, indicates whether or not register is an
3281 induction variable, and if so what type. */
3282
3283 enum iv_mode *reg_iv_type;
3284
3285 /* Indexed by register number, contains pointer to `struct induction'
3286 if register is an induction variable. This holds general info for
3287 all induction variables. */
3288
3289 struct induction **reg_iv_info;
3290
3291 /* Indexed by register number, contains pointer to `struct iv_class'
3292 if register is a basic induction variable. This holds info describing
3293 the class (a related group) of induction variables that the biv belongs
3294 to. */
3295
3296 struct iv_class **reg_biv_class;
3297
3298 /* The head of a list which links together (via the next field)
3299 every iv class for the current loop. */
3300
3301 struct iv_class *loop_iv_list;
3302
3303 /* Communication with routines called via `note_stores'. */
3304
3305 static rtx note_insn;
3306
3307 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3308
3309 static rtx addr_placeholder;
3310
3311 /* ??? Unfinished optimizations, and possible future optimizations,
3312 for the strength reduction code. */
3313
3314 /* ??? There is one more optimization you might be interested in doing: to
3315 allocate pseudo registers for frequently-accessed memory locations.
3316 If the same memory location is referenced each time around, it might
3317 be possible to copy it into a register before and out after.
3318 This is especially useful when the memory location is a variable which
3319 is in a stack slot because somewhere its address is taken. If the
3320 loop doesn't contain a function call and the variable isn't volatile,
3321 it is safe to keep the value in a register for the duration of the
3322 loop. One tricky thing is that the copying of the value back from the
3323 register has to be done on all exits from the loop. You need to check that
3324 all the exits from the loop go to the same place. */
3325
3326 /* ??? The interaction of biv elimination, and recognition of 'constant'
3327 bivs, may cause problems. */
3328
3329 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3330 performance problems.
3331
3332 Perhaps don't eliminate things that can be combined with an addressing
3333 mode. Find all givs that have the same biv, mult_val, and add_val;
3334 then for each giv, check to see if its only use dies in a following
3335 memory address. If so, generate a new memory address and check to see
3336 if it is valid. If it is valid, then store the modified memory address,
3337 otherwise, mark the giv as not done so that it will get its own iv. */
3338
3339 /* ??? Could try to optimize branches when it is known that a biv is always
3340 positive. */
3341
3342 /* ??? When replace a biv in a compare insn, we should replace with closest
3343 giv so that an optimized branch can still be recognized by the combiner,
3344 e.g. the VAX acb insn. */
3345
3346 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3347 was rerun in loop_optimize whenever a register was added or moved.
3348 Also, some of the optimizations could be a little less conservative. */
3349 \f
3350 /* Perform strength reduction and induction variable elimination. */
3351
3352 /* Pseudo registers created during this function will be beyond the last
3353 valid index in several tables including n_times_set and regno_last_uid.
3354 This does not cause a problem here, because the added registers cannot be
3355 givs outside of their loop, and hence will never be reconsidered.
3356 But scan_loop must check regnos to make sure they are in bounds. */
3357
3358 static void
3359 strength_reduce (scan_start, end, loop_top, insn_count,
3360 loop_start, loop_end, unroll_p)
3361 rtx scan_start;
3362 rtx end;
3363 rtx loop_top;
3364 int insn_count;
3365 rtx loop_start;
3366 rtx loop_end;
3367 int unroll_p;
3368 {
3369 rtx p;
3370 rtx set;
3371 rtx inc_val;
3372 rtx mult_val;
3373 rtx dest_reg;
3374 /* This is 1 if current insn is not executed at least once for every loop
3375 iteration. */
3376 int not_every_iteration = 0;
3377 /* This is 1 if current insn may be executed more than once for every
3378 loop iteration. */
3379 int maybe_multiple = 0;
3380 /* Temporary list pointers for traversing loop_iv_list. */
3381 struct iv_class *bl, **backbl;
3382 /* Ratio of extra register life span we can justify
3383 for saving an instruction. More if loop doesn't call subroutines
3384 since in that case saving an insn makes more difference
3385 and more registers are available. */
3386 /* ??? could set this to last value of threshold in move_movables */
3387 int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
3388 /* Map of pseudo-register replacements. */
3389 rtx *reg_map;
3390 int call_seen;
3391 rtx test;
3392 rtx end_insert_before;
3393 int loop_depth = 0;
3394
3395 reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
3396 * sizeof (enum iv_mode *));
3397 bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode *));
3398 reg_iv_info = (struct induction **)
3399 alloca (max_reg_before_loop * sizeof (struct induction *));
3400 bzero ((char *) reg_iv_info, (max_reg_before_loop
3401 * sizeof (struct induction *)));
3402 reg_biv_class = (struct iv_class **)
3403 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3404 bzero ((char *) reg_biv_class, (max_reg_before_loop
3405 * sizeof (struct iv_class *)));
3406
3407 loop_iv_list = 0;
3408 addr_placeholder = gen_reg_rtx (Pmode);
3409
3410 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3411 must be put before this insn, so that they will appear in the right
3412 order (i.e. loop order).
3413
3414 If loop_end is the end of the current function, then emit a
3415 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3416 dummy note insn. */
3417 if (NEXT_INSN (loop_end) != 0)
3418 end_insert_before = NEXT_INSN (loop_end);
3419 else
3420 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3421
3422 /* Scan through loop to find all possible bivs. */
3423
3424 p = scan_start;
3425 while (1)
3426 {
3427 p = NEXT_INSN (p);
3428 /* At end of a straight-in loop, we are done.
3429 At end of a loop entered at the bottom, scan the top. */
3430 if (p == scan_start)
3431 break;
3432 if (p == end)
3433 {
3434 if (loop_top != 0)
3435 p = loop_top;
3436 else
3437 break;
3438 if (p == scan_start)
3439 break;
3440 }
3441
3442 if (GET_CODE (p) == INSN
3443 && (set = single_set (p))
3444 && GET_CODE (SET_DEST (set)) == REG)
3445 {
3446 dest_reg = SET_DEST (set);
3447 if (REGNO (dest_reg) < max_reg_before_loop
3448 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3449 && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
3450 {
3451 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3452 dest_reg, p, &inc_val, &mult_val))
3453 {
3454 /* It is a possible basic induction variable.
3455 Create and initialize an induction structure for it. */
3456
3457 struct induction *v
3458 = (struct induction *) alloca (sizeof (struct induction));
3459
3460 record_biv (v, p, dest_reg, inc_val, mult_val,
3461 not_every_iteration, maybe_multiple);
3462 reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
3463 }
3464 else if (REGNO (dest_reg) < max_reg_before_loop)
3465 reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
3466 }
3467 }
3468
3469 /* Past CODE_LABEL, we get to insns that may be executed multiple
3470 times. The only way we can be sure that they can't is if every
3471 every jump insn between here and the end of the loop either
3472 returns, exits the loop, is a forward jump, or is a jump
3473 to the loop start. */
3474
3475 if (GET_CODE (p) == CODE_LABEL)
3476 {
3477 rtx insn = p;
3478
3479 maybe_multiple = 0;
3480
3481 while (1)
3482 {
3483 insn = NEXT_INSN (insn);
3484 if (insn == scan_start)
3485 break;
3486 if (insn == end)
3487 {
3488 if (loop_top != 0)
3489 insn = loop_top;
3490 else
3491 break;
3492 if (insn == scan_start)
3493 break;
3494 }
3495
3496 if (GET_CODE (insn) == JUMP_INSN
3497 && GET_CODE (PATTERN (insn)) != RETURN
3498 && (! condjump_p (insn)
3499 || (JUMP_LABEL (insn) != 0
3500 && JUMP_LABEL (insn) != scan_start
3501 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
3502 || INSN_UID (insn) >= max_uid_for_loop
3503 || (INSN_LUID (JUMP_LABEL (insn))
3504 < INSN_LUID (insn))))))
3505 {
3506 maybe_multiple = 1;
3507 break;
3508 }
3509 }
3510 }
3511
3512 /* Past a jump, we get to insns for which we can't count
3513 on whether they will be executed during each iteration. */
3514 /* This code appears twice in strength_reduce. There is also similar
3515 code in scan_loop. */
3516 if (GET_CODE (p) == JUMP_INSN
3517 /* If we enter the loop in the middle, and scan around to the
3518 beginning, don't set not_every_iteration for that.
3519 This can be any kind of jump, since we want to know if insns
3520 will be executed if the loop is executed. */
3521 && ! (JUMP_LABEL (p) == loop_top
3522 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3523 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3524 {
3525 rtx label = 0;
3526
3527 /* If this is a jump outside the loop, then it also doesn't
3528 matter. Check to see if the target of this branch is on the
3529 loop_number_exits_labels list. */
3530
3531 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3532 label;
3533 label = LABEL_NEXTREF (label))
3534 if (XEXP (label, 0) == JUMP_LABEL (p))
3535 break;
3536
3537 if (! label)
3538 not_every_iteration = 1;
3539 }
3540
3541 else if (GET_CODE (p) == NOTE)
3542 {
3543 /* At the virtual top of a converted loop, insns are again known to
3544 be executed each iteration: logically, the loop begins here
3545 even though the exit code has been duplicated. */
3546 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3547 not_every_iteration = 0;
3548 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3549 loop_depth++;
3550 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3551 loop_depth--;
3552 }
3553
3554 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3555 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3556 or not an insn is known to be executed each iteration of the
3557 loop, whether or not any iterations are known to occur.
3558
3559 Therefore, if we have just passed a label and have no more labels
3560 between here and the test insn of the loop, we know these insns
3561 will be executed each iteration. */
3562
3563 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3564 && no_labels_between_p (p, loop_end))
3565 not_every_iteration = 0;
3566 }
3567
3568 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3569 Make a sanity check against n_times_set. */
3570 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3571 {
3572 if (reg_iv_type[bl->regno] != BASIC_INDUCT
3573 /* Above happens if register modified by subreg, etc. */
3574 /* Make sure it is not recognized as a basic induction var: */
3575 || n_times_set[bl->regno] != bl->biv_count
3576 /* If never incremented, it is invariant that we decided not to
3577 move. So leave it alone. */
3578 || ! bl->incremented)
3579 {
3580 if (loop_dump_stream)
3581 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3582 bl->regno,
3583 (reg_iv_type[bl->regno] != BASIC_INDUCT
3584 ? "not induction variable"
3585 : (! bl->incremented ? "never incremented"
3586 : "count error")));
3587
3588 reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
3589 *backbl = bl->next;
3590 }
3591 else
3592 {
3593 backbl = &bl->next;
3594
3595 if (loop_dump_stream)
3596 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3597 }
3598 }
3599
3600 /* Exit if there are no bivs. */
3601 if (! loop_iv_list)
3602 {
3603 /* Can still unroll the loop anyways, but indicate that there is no
3604 strength reduction info available. */
3605 if (unroll_p)
3606 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);
3607
3608 return;
3609 }
3610
3611 /* Find initial value for each biv by searching backwards from loop_start,
3612 halting at first label. Also record any test condition. */
3613
3614 call_seen = 0;
3615 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3616 {
3617 note_insn = p;
3618
3619 if (GET_CODE (p) == CALL_INSN)
3620 call_seen = 1;
3621
3622 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3623 || GET_CODE (p) == CALL_INSN)
3624 note_stores (PATTERN (p), record_initial);
3625
3626 /* Record any test of a biv that branches around the loop if no store
3627 between it and the start of loop. We only care about tests with
3628 constants and registers and only certain of those. */
3629 if (GET_CODE (p) == JUMP_INSN
3630 && JUMP_LABEL (p) != 0
3631 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
3632 && (test = get_condition_for_loop (p)) != 0
3633 && GET_CODE (XEXP (test, 0)) == REG
3634 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3635 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
3636 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
3637 && bl->init_insn == 0)
3638 {
3639 /* If an NE test, we have an initial value! */
3640 if (GET_CODE (test) == NE)
3641 {
3642 bl->init_insn = p;
3643 bl->init_set = gen_rtx_SET (VOIDmode,
3644 XEXP (test, 0), XEXP (test, 1));
3645 }
3646 else
3647 bl->initial_test = test;
3648 }
3649 }
3650
3651 /* Look at the each biv and see if we can say anything better about its
3652 initial value from any initializing insns set up above. (This is done
3653 in two passes to avoid missing SETs in a PARALLEL.) */
3654 for (bl = loop_iv_list; bl; bl = bl->next)
3655 {
3656 rtx src;
3657 rtx note;
3658
3659 if (! bl->init_insn)
3660 continue;
3661
3662 /* IF INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3663 is a constant, use the value of that. */
3664 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3665 && CONSTANT_P (XEXP (note, 0)))
3666 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3667 && CONSTANT_P (XEXP (note, 0))))
3668 src = XEXP (note, 0);
3669 else
3670 src = SET_SRC (bl->init_set);
3671
3672 if (loop_dump_stream)
3673 fprintf (loop_dump_stream,
3674 "Biv %d initialized at insn %d: initial value ",
3675 bl->regno, INSN_UID (bl->init_insn));
3676
3677 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3678 || GET_MODE (src) == VOIDmode)
3679 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
3680 {
3681 bl->initial_value = src;
3682
3683 if (loop_dump_stream)
3684 {
3685 if (GET_CODE (src) == CONST_INT)
3686 fprintf (loop_dump_stream, "%d\n", INTVAL (src));
3687 else
3688 {
3689 print_rtl (loop_dump_stream, src);
3690 fprintf (loop_dump_stream, "\n");
3691 }
3692 }
3693 }
3694 else
3695 {
3696 /* Biv initial value is not simple move,
3697 so let it keep initial value of "itself". */
3698
3699 if (loop_dump_stream)
3700 fprintf (loop_dump_stream, "is complex\n");
3701 }
3702 }
3703
3704 /* Search the loop for general induction variables. */
3705
3706 /* A register is a giv if: it is only set once, it is a function of a
3707 biv and a constant (or invariant), and it is not a biv. */
3708
3709 not_every_iteration = 0;
3710 loop_depth = 0;
3711 p = scan_start;
3712 while (1)
3713 {
3714 p = NEXT_INSN (p);
3715 /* At end of a straight-in loop, we are done.
3716 At end of a loop entered at the bottom, scan the top. */
3717 if (p == scan_start)
3718 break;
3719 if (p == end)
3720 {
3721 if (loop_top != 0)
3722 p = loop_top;
3723 else
3724 break;
3725 if (p == scan_start)
3726 break;
3727 }
3728
3729 /* Look for a general induction variable in a register. */
3730 if (GET_CODE (p) == INSN
3731 && (set = single_set (p))
3732 && GET_CODE (SET_DEST (set)) == REG
3733 && ! may_not_optimize[REGNO (SET_DEST (set))])
3734 {
3735 rtx src_reg;
3736 rtx add_val;
3737 rtx mult_val;
3738 int benefit;
3739 rtx regnote = 0;
3740
3741 dest_reg = SET_DEST (set);
3742 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
3743 continue;
3744
3745 if (/* SET_SRC is a giv. */
3746 ((benefit = general_induction_var (SET_SRC (set),
3747 &src_reg, &add_val,
3748 &mult_val))
3749 /* Equivalent expression is a giv. */
3750 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
3751 && (benefit = general_induction_var (XEXP (regnote, 0),
3752 &src_reg,
3753 &add_val, &mult_val))))
3754 /* Don't try to handle any regs made by loop optimization.
3755 We have nothing on them in regno_first_uid, etc. */
3756 && REGNO (dest_reg) < max_reg_before_loop
3757 /* Don't recognize a BASIC_INDUCT_VAR here. */
3758 && dest_reg != src_reg
3759 /* This must be the only place where the register is set. */
3760 && (n_times_set[REGNO (dest_reg)] == 1
3761 /* or all sets must be consecutive and make a giv. */
3762 || (benefit = consec_sets_giv (benefit, p,
3763 src_reg, dest_reg,
3764 &add_val, &mult_val))))
3765 {
3766 int count;
3767 struct induction *v
3768 = (struct induction *) alloca (sizeof (struct induction));
3769 rtx temp;
3770
3771 /* If this is a library call, increase benefit. */
3772 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
3773 benefit += libcall_benefit (p);
3774
3775 /* Skip the consecutive insns, if there are any. */
3776 for (count = n_times_set[REGNO (dest_reg)] - 1;
3777 count > 0; count--)
3778 {
3779 /* If first insn of libcall sequence, skip to end.
3780 Do this at start of loop, since INSN is guaranteed to
3781 be an insn here. */
3782 if (GET_CODE (p) != NOTE
3783 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3784 p = XEXP (temp, 0);
3785
3786 do p = NEXT_INSN (p);
3787 while (GET_CODE (p) == NOTE);
3788 }
3789
3790 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
3791 DEST_REG, not_every_iteration, NULL_PTR, loop_start,
3792 loop_end);
3793
3794 }
3795 }
3796
3797 #ifndef DONT_REDUCE_ADDR
3798 /* Look for givs which are memory addresses. */
3799 /* This resulted in worse code on a VAX 8600. I wonder if it
3800 still does. */
3801 if (GET_CODE (p) == INSN)
3802 find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
3803 loop_end);
3804 #endif
3805
3806 /* Update the status of whether giv can derive other givs. This can
3807 change when we pass a label or an insn that updates a biv. */
3808 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3809 || GET_CODE (p) == CODE_LABEL)
3810 update_giv_derive (p);
3811
3812 /* Past a jump, we get to insns for which we can't count
3813 on whether they will be executed during each iteration. */
3814 /* This code appears twice in strength_reduce. There is also similar
3815 code in scan_loop. */
3816 if (GET_CODE (p) == JUMP_INSN
3817 /* If we enter the loop in the middle, and scan around to the
3818 beginning, don't set not_every_iteration for that.
3819 This can be any kind of jump, since we want to know if insns
3820 will be executed if the loop is executed. */
3821 && ! (JUMP_LABEL (p) == loop_top
3822 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3823 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3824 {
3825 rtx label = 0;
3826
3827 /* If this is a jump outside the loop, then it also doesn't
3828 matter. Check to see if the target of this branch is on the
3829 loop_number_exits_labels list. */
3830
3831 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3832 label;
3833 label = LABEL_NEXTREF (label))
3834 if (XEXP (label, 0) == JUMP_LABEL (p))
3835 break;
3836
3837 if (! label)
3838 not_every_iteration = 1;
3839 }
3840
3841 else if (GET_CODE (p) == NOTE)
3842 {
3843 /* At the virtual top of a converted loop, insns are again known to
3844 be executed each iteration: logically, the loop begins here
3845 even though the exit code has been duplicated. */
3846 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3847 not_every_iteration = 0;
3848 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3849 loop_depth++;
3850 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3851 loop_depth--;
3852 }
3853
3854 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3855 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3856 or not an insn is known to be executed each iteration of the
3857 loop, whether or not any iterations are known to occur.
3858
3859 Therefore, if we have just passed a label and have no more labels
3860 between here and the test insn of the loop, we know these insns
3861 will be executed each iteration. */
3862
3863 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3864 && no_labels_between_p (p, loop_end))
3865 not_every_iteration = 0;
3866 }
3867
3868 /* Try to calculate and save the number of loop iterations. This is
3869 set to zero if the actual number can not be calculated. This must
3870 be called after all giv's have been identified, since otherwise it may
3871 fail if the iteration variable is a giv. */
3872
3873 loop_n_iterations = loop_iterations (loop_start, loop_end);
3874
3875 /* Now for each giv for which we still don't know whether or not it is
3876 replaceable, check to see if it is replaceable because its final value
3877 can be calculated. This must be done after loop_iterations is called,
3878 so that final_giv_value will work correctly. */
3879
3880 for (bl = loop_iv_list; bl; bl = bl->next)
3881 {
3882 struct induction *v;
3883
3884 for (v = bl->giv; v; v = v->next_iv)
3885 if (! v->replaceable && ! v->not_replaceable)
3886 check_final_value (v, loop_start, loop_end);
3887 }
3888
3889 /* Try to prove that the loop counter variable (if any) is always
3890 nonnegative; if so, record that fact with a REG_NONNEG note
3891 so that "decrement and branch until zero" insn can be used. */
3892 check_dbra_loop (loop_end, insn_count, loop_start);
3893
3894 #ifdef HAIFA
3895 /* record loop-variables relevant for BCT optimization before unrolling
3896 the loop. Unrolling may update part of this information, and the
3897 correct data will be used for generating the BCT. */
3898 #ifdef HAVE_decrement_and_branch_on_count
3899 if (HAVE_decrement_and_branch_on_count)
3900 analyze_loop_iterations (loop_start, loop_end);
3901 #endif
3902 #endif /* HAIFA */
3903
3904 /* Create reg_map to hold substitutions for replaceable giv regs. */
3905 reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
3906 bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
3907
3908 /* Examine each iv class for feasibility of strength reduction/induction
3909 variable elimination. */
3910
3911 for (bl = loop_iv_list; bl; bl = bl->next)
3912 {
3913 struct induction *v;
3914 int benefit;
3915 int all_reduced;
3916 rtx final_value = 0;
3917
3918 /* Test whether it will be possible to eliminate this biv
3919 provided all givs are reduced. This is possible if either
3920 the reg is not used outside the loop, or we can compute
3921 what its final value will be.
3922
3923 For architectures with a decrement_and_branch_until_zero insn,
3924 don't do this if we put a REG_NONNEG note on the endtest for
3925 this biv. */
3926
3927 /* Compare against bl->init_insn rather than loop_start.
3928 We aren't concerned with any uses of the biv between
3929 init_insn and loop_start since these won't be affected
3930 by the value of the biv elsewhere in the function, so
3931 long as init_insn doesn't use the biv itself.
3932 March 14, 1989 -- self@bayes.arc.nasa.gov */
3933
3934 if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
3935 && bl->init_insn
3936 && INSN_UID (bl->init_insn) < max_uid_for_loop
3937 && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
3938 #ifdef HAVE_decrement_and_branch_until_zero
3939 && ! bl->nonneg
3940 #endif
3941 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3942 || ((final_value = final_biv_value (bl, loop_start, loop_end))
3943 #ifdef HAVE_decrement_and_branch_until_zero
3944 && ! bl->nonneg
3945 #endif
3946 ))
3947 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
3948 threshold, insn_count);
3949 else
3950 {
3951 if (loop_dump_stream)
3952 {
3953 fprintf (loop_dump_stream,
3954 "Cannot eliminate biv %d.\n",
3955 bl->regno);
3956 fprintf (loop_dump_stream,
3957 "First use: insn %d, last use: insn %d.\n",
3958 REGNO_FIRST_UID (bl->regno),
3959 REGNO_LAST_UID (bl->regno));
3960 }
3961 }
3962
3963 /* Combine all giv's for this iv_class. */
3964 combine_givs (bl);
3965
3966 /* This will be true at the end, if all givs which depend on this
3967 biv have been strength reduced.
3968 We can't (currently) eliminate the biv unless this is so. */
3969 all_reduced = 1;
3970
3971 /* Check each giv in this class to see if we will benefit by reducing
3972 it. Skip giv's combined with others. */
3973 for (v = bl->giv; v; v = v->next_iv)
3974 {
3975 struct induction *tv;
3976
3977 if (v->ignore || v->same)
3978 continue;
3979
3980 benefit = v->benefit;
3981
3982 /* Reduce benefit if not replaceable, since we will insert
3983 a move-insn to replace the insn that calculates this giv.
3984 Don't do this unless the giv is a user variable, since it
3985 will often be marked non-replaceable because of the duplication
3986 of the exit code outside the loop. In such a case, the copies
3987 we insert are dead and will be deleted. So they don't have
3988 a cost. Similar situations exist. */
3989 /* ??? The new final_[bg]iv_value code does a much better job
3990 of finding replaceable giv's, and hence this code may no longer
3991 be necessary. */
3992 if (! v->replaceable && ! bl->eliminable
3993 && REG_USERVAR_P (v->dest_reg))
3994 benefit -= copy_cost;
3995
3996 /* Decrease the benefit to count the add-insns that we will
3997 insert to increment the reduced reg for the giv. */
3998 benefit -= add_cost * bl->biv_count;
3999
4000 /* Decide whether to strength-reduce this giv or to leave the code
4001 unchanged (recompute it from the biv each time it is used).
4002 This decision can be made independently for each giv. */
4003
4004 #ifdef AUTO_INC_DEC
4005 /* Attempt to guess whether autoincrement will handle some of the
4006 new add insns; if so, increase BENEFIT (undo the subtraction of
4007 add_cost that was done above). */
4008 if (v->giv_type == DEST_ADDR
4009 && GET_CODE (v->mult_val) == CONST_INT)
4010 {
4011 #if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
4012 if (INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4013 benefit += add_cost * bl->biv_count;
4014 #endif
4015 #if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
4016 if (-INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4017 benefit += add_cost * bl->biv_count;
4018 #endif
4019 }
4020 #endif
4021
4022 /* If an insn is not to be strength reduced, then set its ignore
4023 flag, and clear all_reduced. */
4024
4025 /* A giv that depends on a reversed biv must be reduced if it is
4026 used after the loop exit, otherwise, it would have the wrong
4027 value after the loop exit. To make it simple, just reduce all
4028 of such giv's whether or not we know they are used after the loop
4029 exit. */
4030
4031 if ( ! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
4032 && ! bl->reversed )
4033 {
4034 if (loop_dump_stream)
4035 fprintf (loop_dump_stream,
4036 "giv of insn %d not worth while, %d vs %d.\n",
4037 INSN_UID (v->insn),
4038 v->lifetime * threshold * benefit, insn_count);
4039 v->ignore = 1;
4040 all_reduced = 0;
4041 }
4042 else
4043 {
4044 /* Check that we can increment the reduced giv without a
4045 multiply insn. If not, reject it. */
4046
4047 for (tv = bl->biv; tv; tv = tv->next_iv)
4048 if (tv->mult_val == const1_rtx
4049 && ! product_cheap_p (tv->add_val, v->mult_val))
4050 {
4051 if (loop_dump_stream)
4052 fprintf (loop_dump_stream,
4053 "giv of insn %d: would need a multiply.\n",
4054 INSN_UID (v->insn));
4055 v->ignore = 1;
4056 all_reduced = 0;
4057 break;
4058 }
4059 }
4060 }
4061
4062 /* Reduce each giv that we decided to reduce. */
4063
4064 for (v = bl->giv; v; v = v->next_iv)
4065 {
4066 struct induction *tv;
4067 if (! v->ignore && v->same == 0)
4068 {
4069 int auto_inc_opt = 0;
4070
4071 v->new_reg = gen_reg_rtx (v->mode);
4072
4073 #ifdef AUTO_INC_DEC
4074 /* If the target has auto-increment addressing modes, and
4075 this is an address giv, then try to put the increment
4076 immediately after its use, so that flow can create an
4077 auto-increment addressing mode. */
4078 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4079 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4080 /* We don't handle reversed biv's because bl->biv->insn
4081 does not have a valid INSN_LUID. */
4082 && ! bl->reversed
4083 && v->always_executed && ! v->maybe_multiple)
4084 {
4085 /* If other giv's have been combined with this one, then
4086 this will work only if all uses of the other giv's occur
4087 before this giv's insn. This is difficult to check.
4088
4089 We simplify this by looking for the common case where
4090 there is one DEST_REG giv, and this giv's insn is the
4091 last use of the dest_reg of that DEST_REG giv. If the
4092 the increment occurs after the address giv, then we can
4093 perform the optimization. (Otherwise, the increment
4094 would have to go before other_giv, and we would not be
4095 able to combine it with the address giv to get an
4096 auto-inc address.) */
4097 if (v->combined_with)
4098 {
4099 struct induction *other_giv = 0;
4100
4101 for (tv = bl->giv; tv; tv = tv->next_iv)
4102 if (tv->same == v)
4103 {
4104 if (other_giv)
4105 break;
4106 else
4107 other_giv = tv;
4108 }
4109 if (! tv && other_giv
4110 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4111 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4112 == INSN_UID (v->insn))
4113 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4114 auto_inc_opt = 1;
4115 }
4116 /* Check for case where increment is before the the address
4117 giv. Do this test in "loop order". */
4118 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4119 && (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4120 || (INSN_LUID (bl->biv->insn)
4121 > INSN_LUID (scan_start))))
4122 || (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4123 && (INSN_LUID (scan_start)
4124 < INSN_LUID (bl->biv->insn))))
4125 auto_inc_opt = -1;
4126 else
4127 auto_inc_opt = 1;
4128
4129 #ifdef HAVE_cc0
4130 {
4131 rtx prev;
4132
4133 /* We can't put an insn immediately after one setting
4134 cc0, or immediately before one using cc0. */
4135 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4136 || (auto_inc_opt == -1
4137 && (prev = prev_nonnote_insn (v->insn)) != 0
4138 && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
4139 && sets_cc0_p (PATTERN (prev))))
4140 auto_inc_opt = 0;
4141 }
4142 #endif
4143
4144 if (auto_inc_opt)
4145 v->auto_inc_opt = 1;
4146 }
4147 #endif
4148
4149 /* For each place where the biv is incremented, add an insn
4150 to increment the new, reduced reg for the giv. */
4151 for (tv = bl->biv; tv; tv = tv->next_iv)
4152 {
4153 rtx insert_before;
4154
4155 if (! auto_inc_opt)
4156 insert_before = tv->insn;
4157 else if (auto_inc_opt == 1)
4158 insert_before = NEXT_INSN (v->insn);
4159 else
4160 insert_before = v->insn;
4161
4162 if (tv->mult_val == const1_rtx)
4163 emit_iv_add_mult (tv->add_val, v->mult_val,
4164 v->new_reg, v->new_reg, insert_before);
4165 else /* tv->mult_val == const0_rtx */
4166 /* A multiply is acceptable here
4167 since this is presumed to be seldom executed. */
4168 emit_iv_add_mult (tv->add_val, v->mult_val,
4169 v->add_val, v->new_reg, insert_before);
4170 }
4171
4172 /* Add code at loop start to initialize giv's reduced reg. */
4173
4174 emit_iv_add_mult (bl->initial_value, v->mult_val,
4175 v->add_val, v->new_reg, loop_start);
4176 }
4177 }
4178
4179 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4180 as not reduced.
4181
4182 For each giv register that can be reduced now: if replaceable,
4183 substitute reduced reg wherever the old giv occurs;
4184 else add new move insn "giv_reg = reduced_reg".
4185
4186 Also check for givs whose first use is their definition and whose
4187 last use is the definition of another giv. If so, it is likely
4188 dead and should not be used to eliminate a biv. */
4189 for (v = bl->giv; v; v = v->next_iv)
4190 {
4191 if (v->same && v->same->ignore)
4192 v->ignore = 1;
4193
4194 if (v->ignore)
4195 continue;
4196
4197 if (v->giv_type == DEST_REG
4198 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4199 {
4200 struct induction *v1;
4201
4202 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4203 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4204 v->maybe_dead = 1;
4205 }
4206
4207 /* Update expression if this was combined, in case other giv was
4208 replaced. */
4209 if (v->same)
4210 v->new_reg = replace_rtx (v->new_reg,
4211 v->same->dest_reg, v->same->new_reg);
4212
4213 if (v->giv_type == DEST_ADDR)
4214 /* Store reduced reg as the address in the memref where we found
4215 this giv. */
4216 validate_change (v->insn, v->location, v->new_reg, 0);
4217 else if (v->replaceable)
4218 {
4219 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4220
4221 #if 0
4222 /* I can no longer duplicate the original problem. Perhaps
4223 this is unnecessary now? */
4224
4225 /* Replaceable; it isn't strictly necessary to delete the old
4226 insn and emit a new one, because v->dest_reg is now dead.
4227
4228 However, especially when unrolling loops, the special
4229 handling for (set REG0 REG1) in the second cse pass may
4230 make v->dest_reg live again. To avoid this problem, emit
4231 an insn to set the original giv reg from the reduced giv.
4232 We can not delete the original insn, since it may be part
4233 of a LIBCALL, and the code in flow that eliminates dead
4234 libcalls will fail if it is deleted. */
4235 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4236 v->insn);
4237 #endif
4238 }
4239 else
4240 {
4241 /* Not replaceable; emit an insn to set the original giv reg from
4242 the reduced giv, same as above. */
4243 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4244 v->insn);
4245 }
4246
4247 /* When a loop is reversed, givs which depend on the reversed
4248 biv, and which are live outside the loop, must be set to their
4249 correct final value. This insn is only needed if the giv is
4250 not replaceable. The correct final value is the same as the
4251 value that the giv starts the reversed loop with. */
4252 if (bl->reversed && ! v->replaceable)
4253 emit_iv_add_mult (bl->initial_value, v->mult_val,
4254 v->add_val, v->dest_reg, end_insert_before);
4255 else if (v->final_value)
4256 {
4257 rtx insert_before;
4258
4259 /* If the loop has multiple exits, emit the insn before the
4260 loop to ensure that it will always be executed no matter
4261 how the loop exits. Otherwise, emit the insn after the loop,
4262 since this is slightly more efficient. */
4263 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4264 insert_before = loop_start;
4265 else
4266 insert_before = end_insert_before;
4267 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
4268 insert_before);
4269
4270 #if 0
4271 /* If the insn to set the final value of the giv was emitted
4272 before the loop, then we must delete the insn inside the loop
4273 that sets it. If this is a LIBCALL, then we must delete
4274 every insn in the libcall. Note, however, that
4275 final_giv_value will only succeed when there are multiple
4276 exits if the giv is dead at each exit, hence it does not
4277 matter that the original insn remains because it is dead
4278 anyways. */
4279 /* Delete the insn inside the loop that sets the giv since
4280 the giv is now set before (or after) the loop. */
4281 delete_insn (v->insn);
4282 #endif
4283 }
4284
4285 if (loop_dump_stream)
4286 {
4287 fprintf (loop_dump_stream, "giv at %d reduced to ",
4288 INSN_UID (v->insn));
4289 print_rtl (loop_dump_stream, v->new_reg);
4290 fprintf (loop_dump_stream, "\n");
4291 }
4292 }
4293
4294 /* All the givs based on the biv bl have been reduced if they
4295 merit it. */
4296
4297 /* For each giv not marked as maybe dead that has been combined with a
4298 second giv, clear any "maybe dead" mark on that second giv.
4299 v->new_reg will either be or refer to the register of the giv it
4300 combined with.
4301
4302 Doing this clearing avoids problems in biv elimination where a
4303 giv's new_reg is a complex value that can't be put in the insn but
4304 the giv combined with (with a reg as new_reg) is marked maybe_dead.
4305 Since the register will be used in either case, we'd prefer it be
4306 used from the simpler giv. */
4307
4308 for (v = bl->giv; v; v = v->next_iv)
4309 if (! v->maybe_dead && v->same)
4310 v->same->maybe_dead = 0;
4311
4312 /* Try to eliminate the biv, if it is a candidate.
4313 This won't work if ! all_reduced,
4314 since the givs we planned to use might not have been reduced.
4315
4316 We have to be careful that we didn't initially think we could eliminate
4317 this biv because of a giv that we now think may be dead and shouldn't
4318 be used as a biv replacement.
4319
4320 Also, there is the possibility that we may have a giv that looks
4321 like it can be used to eliminate a biv, but the resulting insn
4322 isn't valid. This can happen, for example, on the 88k, where a
4323 JUMP_INSN can compare a register only with zero. Attempts to
4324 replace it with a compare with a constant will fail.
4325
4326 Note that in cases where this call fails, we may have replaced some
4327 of the occurrences of the biv with a giv, but no harm was done in
4328 doing so in the rare cases where it can occur. */
4329
4330 if (all_reduced == 1 && bl->eliminable
4331 && maybe_eliminate_biv (bl, loop_start, end, 1,
4332 threshold, insn_count))
4333
4334 {
4335 /* ?? If we created a new test to bypass the loop entirely,
4336 or otherwise drop straight in, based on this test, then
4337 we might want to rewrite it also. This way some later
4338 pass has more hope of removing the initialization of this
4339 biv entirely. */
4340
4341 /* If final_value != 0, then the biv may be used after loop end
4342 and we must emit an insn to set it just in case.
4343
4344 Reversed bivs already have an insn after the loop setting their
4345 value, so we don't need another one. We can't calculate the
4346 proper final value for such a biv here anyways. */
4347 if (final_value != 0 && ! bl->reversed)
4348 {
4349 rtx insert_before;
4350
4351 /* If the loop has multiple exits, emit the insn before the
4352 loop to ensure that it will always be executed no matter
4353 how the loop exits. Otherwise, emit the insn after the
4354 loop, since this is slightly more efficient. */
4355 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4356 insert_before = loop_start;
4357 else
4358 insert_before = end_insert_before;
4359
4360 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
4361 end_insert_before);
4362 }
4363
4364 #if 0
4365 /* Delete all of the instructions inside the loop which set
4366 the biv, as they are all dead. If is safe to delete them,
4367 because an insn setting a biv will never be part of a libcall. */
4368 /* However, deleting them will invalidate the regno_last_uid info,
4369 so keeping them around is more convenient. Final_biv_value
4370 will only succeed when there are multiple exits if the biv
4371 is dead at each exit, hence it does not matter that the original
4372 insn remains, because it is dead anyways. */
4373 for (v = bl->biv; v; v = v->next_iv)
4374 delete_insn (v->insn);
4375 #endif
4376
4377 if (loop_dump_stream)
4378 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4379 bl->regno);
4380 }
4381 }
4382
4383 /* Go through all the instructions in the loop, making all the
4384 register substitutions scheduled in REG_MAP. */
4385
4386 for (p = loop_start; p != end; p = NEXT_INSN (p))
4387 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4388 || GET_CODE (p) == CALL_INSN)
4389 {
4390 replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
4391 replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
4392 INSN_CODE (p) = -1;
4393 }
4394
4395 /* Unroll loops from within strength reduction so that we can use the
4396 induction variable information that strength_reduce has already
4397 collected. */
4398
4399 if (unroll_p)
4400 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
4401
4402 #ifdef HAIFA
4403 /* instrument the loop with bct insn */
4404 #ifdef HAVE_decrement_and_branch_on_count
4405 if (HAVE_decrement_and_branch_on_count)
4406 insert_bct (loop_start, loop_end);
4407 #endif
4408 #endif /* HAIFA */
4409
4410 if (loop_dump_stream)
4411 fprintf (loop_dump_stream, "\n");
4412 }
4413 \f
4414 /* Return 1 if X is a valid source for an initial value (or as value being
4415 compared against in an initial test).
4416
4417 X must be either a register or constant and must not be clobbered between
4418 the current insn and the start of the loop.
4419
4420 INSN is the insn containing X. */
4421
4422 static int
4423 valid_initial_value_p (x, insn, call_seen, loop_start)
4424 rtx x;
4425 rtx insn;
4426 int call_seen;
4427 rtx loop_start;
4428 {
4429 if (CONSTANT_P (x))
4430 return 1;
4431
4432 /* Only consider pseudos we know about initialized in insns whose luids
4433 we know. */
4434 if (GET_CODE (x) != REG
4435 || REGNO (x) >= max_reg_before_loop)
4436 return 0;
4437
4438 /* Don't use call-clobbered registers across a call which clobbers it. On
4439 some machines, don't use any hard registers at all. */
4440 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4441 && (SMALL_REGISTER_CLASSES
4442 || (call_used_regs[REGNO (x)] && call_seen)))
4443 return 0;
4444
4445 /* Don't use registers that have been clobbered before the start of the
4446 loop. */
4447 if (reg_set_between_p (x, insn, loop_start))
4448 return 0;
4449
4450 return 1;
4451 }
4452 \f
4453 /* Scan X for memory refs and check each memory address
4454 as a possible giv. INSN is the insn whose pattern X comes from.
4455 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4456 every loop iteration. */
4457
4458 static void
4459 find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
4460 rtx x;
4461 rtx insn;
4462 int not_every_iteration;
4463 rtx loop_start, loop_end;
4464 {
4465 register int i, j;
4466 register enum rtx_code code;
4467 register char *fmt;
4468
4469 if (x == 0)
4470 return;
4471
4472 code = GET_CODE (x);
4473 switch (code)
4474 {
4475 case REG:
4476 case CONST_INT:
4477 case CONST:
4478 case CONST_DOUBLE:
4479 case SYMBOL_REF:
4480 case LABEL_REF:
4481 case PC:
4482 case CC0:
4483 case ADDR_VEC:
4484 case ADDR_DIFF_VEC:
4485 case USE:
4486 case CLOBBER:
4487 return;
4488
4489 case MEM:
4490 {
4491 rtx src_reg;
4492 rtx add_val;
4493 rtx mult_val;
4494 int benefit;
4495
4496 benefit = general_induction_var (XEXP (x, 0),
4497 &src_reg, &add_val, &mult_val);
4498
4499 /* Don't make a DEST_ADDR giv with mult_val == 1 && add_val == 0.
4500 Such a giv isn't useful. */
4501 if (benefit > 0 && (mult_val != const1_rtx || add_val != const0_rtx))
4502 {
4503 /* Found one; record it. */
4504 struct induction *v
4505 = (struct induction *) oballoc (sizeof (struct induction));
4506
4507 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
4508 add_val, benefit, DEST_ADDR, not_every_iteration,
4509 &XEXP (x, 0), loop_start, loop_end);
4510
4511 v->mem_mode = GET_MODE (x);
4512 }
4513 }
4514 return;
4515
4516 default:
4517 break;
4518 }
4519
4520 /* Recursively scan the subexpressions for other mem refs. */
4521
4522 fmt = GET_RTX_FORMAT (code);
4523 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4524 if (fmt[i] == 'e')
4525 find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
4526 loop_end);
4527 else if (fmt[i] == 'E')
4528 for (j = 0; j < XVECLEN (x, i); j++)
4529 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
4530 loop_start, loop_end);
4531 }
4532 \f
4533 /* Fill in the data about one biv update.
4534 V is the `struct induction' in which we record the biv. (It is
4535 allocated by the caller, with alloca.)
4536 INSN is the insn that sets it.
4537 DEST_REG is the biv's reg.
4538
4539 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4540 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4541 being set to INC_VAL.
4542
4543 NOT_EVERY_ITERATION is nonzero if this biv update is not know to be
4544 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4545 can be executed more than once per iteration. If MAYBE_MULTIPLE
4546 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4547 executed exactly once per iteration. */
4548
4549 static void
4550 record_biv (v, insn, dest_reg, inc_val, mult_val,
4551 not_every_iteration, maybe_multiple)
4552 struct induction *v;
4553 rtx insn;
4554 rtx dest_reg;
4555 rtx inc_val;
4556 rtx mult_val;
4557 int not_every_iteration;
4558 int maybe_multiple;
4559 {
4560 struct iv_class *bl;
4561
4562 v->insn = insn;
4563 v->src_reg = dest_reg;
4564 v->dest_reg = dest_reg;
4565 v->mult_val = mult_val;
4566 v->add_val = inc_val;
4567 v->mode = GET_MODE (dest_reg);
4568 v->always_computable = ! not_every_iteration;
4569 v->always_executed = ! not_every_iteration;
4570 v->maybe_multiple = maybe_multiple;
4571
4572 /* Add this to the reg's iv_class, creating a class
4573 if this is the first incrementation of the reg. */
4574
4575 bl = reg_biv_class[REGNO (dest_reg)];
4576 if (bl == 0)
4577 {
4578 /* Create and initialize new iv_class. */
4579
4580 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
4581
4582 bl->regno = REGNO (dest_reg);
4583 bl->biv = 0;
4584 bl->giv = 0;
4585 bl->biv_count = 0;
4586 bl->giv_count = 0;
4587
4588 /* Set initial value to the reg itself. */
4589 bl->initial_value = dest_reg;
4590 /* We haven't seen the initializing insn yet */
4591 bl->init_insn = 0;
4592 bl->init_set = 0;
4593 bl->initial_test = 0;
4594 bl->incremented = 0;
4595 bl->eliminable = 0;
4596 bl->nonneg = 0;
4597 bl->reversed = 0;
4598 bl->total_benefit = 0;
4599
4600 /* Add this class to loop_iv_list. */
4601 bl->next = loop_iv_list;
4602 loop_iv_list = bl;
4603
4604 /* Put it in the array of biv register classes. */
4605 reg_biv_class[REGNO (dest_reg)] = bl;
4606 }
4607
4608 /* Update IV_CLASS entry for this biv. */
4609 v->next_iv = bl->biv;
4610 bl->biv = v;
4611 bl->biv_count++;
4612 if (mult_val == const1_rtx)
4613 bl->incremented = 1;
4614
4615 if (loop_dump_stream)
4616 {
4617 fprintf (loop_dump_stream,
4618 "Insn %d: possible biv, reg %d,",
4619 INSN_UID (insn), REGNO (dest_reg));
4620 if (GET_CODE (inc_val) == CONST_INT)
4621 fprintf (loop_dump_stream, " const = %d\n",
4622 INTVAL (inc_val));
4623 else
4624 {
4625 fprintf (loop_dump_stream, " const = ");
4626 print_rtl (loop_dump_stream, inc_val);
4627 fprintf (loop_dump_stream, "\n");
4628 }
4629 }
4630 }
4631 \f
4632 /* Fill in the data about one giv.
4633 V is the `struct induction' in which we record the giv. (It is
4634 allocated by the caller, with alloca.)
4635 INSN is the insn that sets it.
4636 BENEFIT estimates the savings from deleting this insn.
4637 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4638 into a register or is used as a memory address.
4639
4640 SRC_REG is the biv reg which the giv is computed from.
4641 DEST_REG is the giv's reg (if the giv is stored in a reg).
4642 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4643 LOCATION points to the place where this giv's value appears in INSN. */
4644
4645 static void
4646 record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
4647 type, not_every_iteration, location, loop_start, loop_end)
4648 struct induction *v;
4649 rtx insn;
4650 rtx src_reg;
4651 rtx dest_reg;
4652 rtx mult_val, add_val;
4653 int benefit;
4654 enum g_types type;
4655 int not_every_iteration;
4656 rtx *location;
4657 rtx loop_start, loop_end;
4658 {
4659 struct induction *b;
4660 struct iv_class *bl;
4661 rtx set = single_set (insn);
4662 rtx p;
4663
4664 v->insn = insn;
4665 v->src_reg = src_reg;
4666 v->giv_type = type;
4667 v->dest_reg = dest_reg;
4668 v->mult_val = mult_val;
4669 v->add_val = add_val;
4670 v->benefit = benefit;
4671 v->location = location;
4672 v->cant_derive = 0;
4673 v->combined_with = 0;
4674 v->maybe_multiple = 0;
4675 v->maybe_dead = 0;
4676 v->derive_adjustment = 0;
4677 v->same = 0;
4678 v->ignore = 0;
4679 v->new_reg = 0;
4680 v->final_value = 0;
4681 v->same_insn = 0;
4682 v->auto_inc_opt = 0;
4683 v->unrolled = 0;
4684 v->shared = 0;
4685
4686 /* The v->always_computable field is used in update_giv_derive, to
4687 determine whether a giv can be used to derive another giv. For a
4688 DEST_REG giv, INSN computes a new value for the giv, so its value
4689 isn't computable if INSN insn't executed every iteration.
4690 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4691 it does not compute a new value. Hence the value is always computable
4692 regardless of whether INSN is executed each iteration. */
4693
4694 if (type == DEST_ADDR)
4695 v->always_computable = 1;
4696 else
4697 v->always_computable = ! not_every_iteration;
4698
4699 v->always_executed = ! not_every_iteration;
4700
4701 if (type == DEST_ADDR)
4702 {
4703 v->mode = GET_MODE (*location);
4704 v->lifetime = 1;
4705 v->times_used = 1;
4706 }
4707 else /* type == DEST_REG */
4708 {
4709 v->mode = GET_MODE (SET_DEST (set));
4710
4711 v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
4712 - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
4713
4714 v->times_used = n_times_used[REGNO (dest_reg)];
4715
4716 /* If the lifetime is zero, it means that this register is
4717 really a dead store. So mark this as a giv that can be
4718 ignored. This will not prevent the biv from being eliminated. */
4719 if (v->lifetime == 0)
4720 v->ignore = 1;
4721
4722 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
4723 reg_iv_info[REGNO (dest_reg)] = v;
4724 }
4725
4726 /* Add the giv to the class of givs computed from one biv. */
4727
4728 bl = reg_biv_class[REGNO (src_reg)];
4729 if (bl)
4730 {
4731 v->next_iv = bl->giv;
4732 bl->giv = v;
4733 /* Don't count DEST_ADDR. This is supposed to count the number of
4734 insns that calculate givs. */
4735 if (type == DEST_REG)
4736 bl->giv_count++;
4737 bl->total_benefit += benefit;
4738 }
4739 else
4740 /* Fatal error, biv missing for this giv? */
4741 abort ();
4742
4743 if (type == DEST_ADDR)
4744 v->replaceable = 1;
4745 else
4746 {
4747 /* The giv can be replaced outright by the reduced register only if all
4748 of the following conditions are true:
4749 - the insn that sets the giv is always executed on any iteration
4750 on which the giv is used at all
4751 (there are two ways to deduce this:
4752 either the insn is executed on every iteration,
4753 or all uses follow that insn in the same basic block),
4754 - the giv is not used outside the loop
4755 - no assignments to the biv occur during the giv's lifetime. */
4756
4757 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
4758 /* Previous line always fails if INSN was moved by loop opt. */
4759 && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
4760 && (! not_every_iteration
4761 || last_use_this_basic_block (dest_reg, insn)))
4762 {
4763 /* Now check that there are no assignments to the biv within the
4764 giv's lifetime. This requires two separate checks. */
4765
4766 /* Check each biv update, and fail if any are between the first
4767 and last use of the giv.
4768
4769 If this loop contains an inner loop that was unrolled, then
4770 the insn modifying the biv may have been emitted by the loop
4771 unrolling code, and hence does not have a valid luid. Just
4772 mark the biv as not replaceable in this case. It is not very
4773 useful as a biv, because it is used in two different loops.
4774 It is very unlikely that we would be able to optimize the giv
4775 using this biv anyways. */
4776
4777 v->replaceable = 1;
4778 for (b = bl->biv; b; b = b->next_iv)
4779 {
4780 if (INSN_UID (b->insn) >= max_uid_for_loop
4781 || ((uid_luid[INSN_UID (b->insn)]
4782 >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
4783 && (uid_luid[INSN_UID (b->insn)]
4784 <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
4785 {
4786 v->replaceable = 0;
4787 v->not_replaceable = 1;
4788 break;
4789 }
4790 }
4791
4792 /* If there are any backwards branches that go from after the
4793 biv update to before it, then this giv is not replaceable. */
4794 if (v->replaceable)
4795 for (b = bl->biv; b; b = b->next_iv)
4796 if (back_branch_in_range_p (b->insn, loop_start, loop_end))
4797 {
4798 v->replaceable = 0;
4799 v->not_replaceable = 1;
4800 break;
4801 }
4802 }
4803 else
4804 {
4805 /* May still be replaceable, we don't have enough info here to
4806 decide. */
4807 v->replaceable = 0;
4808 v->not_replaceable = 0;
4809 }
4810 }
4811
4812 if (loop_dump_stream)
4813 {
4814 if (type == DEST_REG)
4815 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
4816 INSN_UID (insn), REGNO (dest_reg));
4817 else
4818 fprintf (loop_dump_stream, "Insn %d: dest address",
4819 INSN_UID (insn));
4820
4821 fprintf (loop_dump_stream, " src reg %d benefit %d",
4822 REGNO (src_reg), v->benefit);
4823 fprintf (loop_dump_stream, " used %d lifetime %d",
4824 v->times_used, v->lifetime);
4825
4826 if (v->replaceable)
4827 fprintf (loop_dump_stream, " replaceable");
4828
4829 if (GET_CODE (mult_val) == CONST_INT)
4830 fprintf (loop_dump_stream, " mult %d",
4831 INTVAL (mult_val));
4832 else
4833 {
4834 fprintf (loop_dump_stream, " mult ");
4835 print_rtl (loop_dump_stream, mult_val);
4836 }
4837
4838 if (GET_CODE (add_val) == CONST_INT)
4839 fprintf (loop_dump_stream, " add %d",
4840 INTVAL (add_val));
4841 else
4842 {
4843 fprintf (loop_dump_stream, " add ");
4844 print_rtl (loop_dump_stream, add_val);
4845 }
4846 }
4847
4848 if (loop_dump_stream)
4849 fprintf (loop_dump_stream, "\n");
4850
4851 }
4852
4853
4854 /* All this does is determine whether a giv can be made replaceable because
4855 its final value can be calculated. This code can not be part of record_giv
4856 above, because final_giv_value requires that the number of loop iterations
4857 be known, and that can not be accurately calculated until after all givs
4858 have been identified. */
4859
4860 static void
4861 check_final_value (v, loop_start, loop_end)
4862 struct induction *v;
4863 rtx loop_start, loop_end;
4864 {
4865 struct iv_class *bl;
4866 rtx final_value = 0;
4867
4868 bl = reg_biv_class[REGNO (v->src_reg)];
4869
4870 /* DEST_ADDR givs will never reach here, because they are always marked
4871 replaceable above in record_giv. */
4872
4873 /* The giv can be replaced outright by the reduced register only if all
4874 of the following conditions are true:
4875 - the insn that sets the giv is always executed on any iteration
4876 on which the giv is used at all
4877 (there are two ways to deduce this:
4878 either the insn is executed on every iteration,
4879 or all uses follow that insn in the same basic block),
4880 - its final value can be calculated (this condition is different
4881 than the one above in record_giv)
4882 - no assignments to the biv occur during the giv's lifetime. */
4883
4884 #if 0
4885 /* This is only called now when replaceable is known to be false. */
4886 /* Clear replaceable, so that it won't confuse final_giv_value. */
4887 v->replaceable = 0;
4888 #endif
4889
4890 if ((final_value = final_giv_value (v, loop_start, loop_end))
4891 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
4892 {
4893 int biv_increment_seen = 0;
4894 rtx p = v->insn;
4895 rtx last_giv_use;
4896
4897 v->replaceable = 1;
4898
4899 /* When trying to determine whether or not a biv increment occurs
4900 during the lifetime of the giv, we can ignore uses of the variable
4901 outside the loop because final_value is true. Hence we can not
4902 use regno_last_uid and regno_first_uid as above in record_giv. */
4903
4904 /* Search the loop to determine whether any assignments to the
4905 biv occur during the giv's lifetime. Start with the insn
4906 that sets the giv, and search around the loop until we come
4907 back to that insn again.
4908
4909 Also fail if there is a jump within the giv's lifetime that jumps
4910 to somewhere outside the lifetime but still within the loop. This
4911 catches spaghetti code where the execution order is not linear, and
4912 hence the above test fails. Here we assume that the giv lifetime
4913 does not extend from one iteration of the loop to the next, so as
4914 to make the test easier. Since the lifetime isn't known yet,
4915 this requires two loops. See also record_giv above. */
4916
4917 last_giv_use = v->insn;
4918
4919 while (1)
4920 {
4921 p = NEXT_INSN (p);
4922 if (p == loop_end)
4923 p = NEXT_INSN (loop_start);
4924 if (p == v->insn)
4925 break;
4926
4927 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4928 || GET_CODE (p) == CALL_INSN)
4929 {
4930 if (biv_increment_seen)
4931 {
4932 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4933 {
4934 v->replaceable = 0;
4935 v->not_replaceable = 1;
4936 break;
4937 }
4938 }
4939 else if (reg_set_p (v->src_reg, PATTERN (p)))
4940 biv_increment_seen = 1;
4941 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4942 last_giv_use = p;
4943 }
4944 }
4945
4946 /* Now that the lifetime of the giv is known, check for branches
4947 from within the lifetime to outside the lifetime if it is still
4948 replaceable. */
4949
4950 if (v->replaceable)
4951 {
4952 p = v->insn;
4953 while (1)
4954 {
4955 p = NEXT_INSN (p);
4956 if (p == loop_end)
4957 p = NEXT_INSN (loop_start);
4958 if (p == last_giv_use)
4959 break;
4960
4961 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
4962 && LABEL_NAME (JUMP_LABEL (p))
4963 && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
4964 || (INSN_UID (v->insn) >= max_uid_for_loop)
4965 || (INSN_UID (last_giv_use) >= max_uid_for_loop)
4966 || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
4967 && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
4968 || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
4969 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
4970 {
4971 v->replaceable = 0;
4972 v->not_replaceable = 1;
4973
4974 if (loop_dump_stream)
4975 fprintf (loop_dump_stream,
4976 "Found branch outside giv lifetime.\n");
4977
4978 break;
4979 }
4980 }
4981 }
4982
4983 /* If it is replaceable, then save the final value. */
4984 if (v->replaceable)
4985 v->final_value = final_value;
4986 }
4987
4988 if (loop_dump_stream && v->replaceable)
4989 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
4990 INSN_UID (v->insn), REGNO (v->dest_reg));
4991 }
4992 \f
4993 /* Update the status of whether a giv can derive other givs.
4994
4995 We need to do something special if there is or may be an update to the biv
4996 between the time the giv is defined and the time it is used to derive
4997 another giv.
4998
4999 In addition, a giv that is only conditionally set is not allowed to
5000 derive another giv once a label has been passed.
5001
5002 The cases we look at are when a label or an update to a biv is passed. */
5003
5004 static void
5005 update_giv_derive (p)
5006 rtx p;
5007 {
5008 struct iv_class *bl;
5009 struct induction *biv, *giv;
5010 rtx tem;
5011 int dummy;
5012
5013 /* Search all IV classes, then all bivs, and finally all givs.
5014
5015 There are three cases we are concerned with. First we have the situation
5016 of a giv that is only updated conditionally. In that case, it may not
5017 derive any givs after a label is passed.
5018
5019 The second case is when a biv update occurs, or may occur, after the
5020 definition of a giv. For certain biv updates (see below) that are
5021 known to occur between the giv definition and use, we can adjust the
5022 giv definition. For others, or when the biv update is conditional,
5023 we must prevent the giv from deriving any other givs. There are two
5024 sub-cases within this case.
5025
5026 If this is a label, we are concerned with any biv update that is done
5027 conditionally, since it may be done after the giv is defined followed by
5028 a branch here (actually, we need to pass both a jump and a label, but
5029 this extra tracking doesn't seem worth it).
5030
5031 If this is a jump, we are concerned about any biv update that may be
5032 executed multiple times. We are actually only concerned about
5033 backward jumps, but it is probably not worth performing the test
5034 on the jump again here.
5035
5036 If this is a biv update, we must adjust the giv status to show that a
5037 subsequent biv update was performed. If this adjustment cannot be done,
5038 the giv cannot derive further givs. */
5039
5040 for (bl = loop_iv_list; bl; bl = bl->next)
5041 for (biv = bl->biv; biv; biv = biv->next_iv)
5042 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5043 || biv->insn == p)
5044 {
5045 for (giv = bl->giv; giv; giv = giv->next_iv)
5046 {
5047 /* If cant_derive is already true, there is no point in
5048 checking all of these conditions again. */
5049 if (giv->cant_derive)
5050 continue;
5051
5052 /* If this giv is conditionally set and we have passed a label,
5053 it cannot derive anything. */
5054 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5055 giv->cant_derive = 1;
5056
5057 /* Skip givs that have mult_val == 0, since
5058 they are really invariants. Also skip those that are
5059 replaceable, since we know their lifetime doesn't contain
5060 any biv update. */
5061 else if (giv->mult_val == const0_rtx || giv->replaceable)
5062 continue;
5063
5064 /* The only way we can allow this giv to derive another
5065 is if this is a biv increment and we can form the product
5066 of biv->add_val and giv->mult_val. In this case, we will
5067 be able to compute a compensation. */
5068 else if (biv->insn == p)
5069 {
5070 tem = 0;
5071
5072 if (biv->mult_val == const1_rtx)
5073 tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
5074 biv->add_val,
5075 giv->mult_val),
5076 &dummy);
5077
5078 if (tem && giv->derive_adjustment)
5079 tem = simplify_giv_expr (gen_rtx_PLUS (giv->mode, tem,
5080 giv->derive_adjustment),
5081 &dummy);
5082 if (tem)
5083 giv->derive_adjustment = tem;
5084 else
5085 giv->cant_derive = 1;
5086 }
5087 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5088 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5089 giv->cant_derive = 1;
5090 }
5091 }
5092 }
5093 \f
5094 /* Check whether an insn is an increment legitimate for a basic induction var.
5095 X is the source of insn P, or a part of it.
5096 MODE is the mode in which X should be interpreted.
5097
5098 DEST_REG is the putative biv, also the destination of the insn.
5099 We accept patterns of these forms:
5100 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5101 REG = INVARIANT + REG
5102
5103 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5104 and store the additive term into *INC_VAL.
5105
5106 If X is an assignment of an invariant into DEST_REG, we set
5107 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5108
5109 We also want to detect a BIV when it corresponds to a variable
5110 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5111 of the variable may be a PLUS that adds a SUBREG of that variable to
5112 an invariant and then sign- or zero-extends the result of the PLUS
5113 into the variable.
5114
5115 Most GIVs in such cases will be in the promoted mode, since that is the
5116 probably the natural computation mode (and almost certainly the mode
5117 used for addresses) on the machine. So we view the pseudo-reg containing
5118 the variable as the BIV, as if it were simply incremented.
5119
5120 Note that treating the entire pseudo as a BIV will result in making
5121 simple increments to any GIVs based on it. However, if the variable
5122 overflows in its declared mode but not its promoted mode, the result will
5123 be incorrect. This is acceptable if the variable is signed, since
5124 overflows in such cases are undefined, but not if it is unsigned, since
5125 those overflows are defined. So we only check for SIGN_EXTEND and
5126 not ZERO_EXTEND.
5127
5128 If we cannot find a biv, we return 0. */
5129
5130 static int
5131 basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
5132 register rtx x;
5133 enum machine_mode mode;
5134 rtx p;
5135 rtx dest_reg;
5136 rtx *inc_val;
5137 rtx *mult_val;
5138 {
5139 register enum rtx_code code;
5140 rtx arg;
5141 rtx insn, set = 0;
5142
5143 code = GET_CODE (x);
5144 switch (code)
5145 {
5146 case PLUS:
5147 if (XEXP (x, 0) == dest_reg
5148 || (GET_CODE (XEXP (x, 0)) == SUBREG
5149 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5150 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5151 arg = XEXP (x, 1);
5152 else if (XEXP (x, 1) == dest_reg
5153 || (GET_CODE (XEXP (x, 1)) == SUBREG
5154 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5155 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5156 arg = XEXP (x, 0);
5157 else
5158 return 0;
5159
5160 if (invariant_p (arg) != 1)
5161 return 0;
5162
5163 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5164 *mult_val = const1_rtx;
5165 return 1;
5166
5167 case SUBREG:
5168 /* If this is a SUBREG for a promoted variable, check the inner
5169 value. */
5170 if (SUBREG_PROMOTED_VAR_P (x))
5171 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
5172 dest_reg, p, inc_val, mult_val);
5173 return 0;
5174
5175 case REG:
5176 /* If this register is assigned in the previous insn, look at its
5177 source, but don't go outside the loop or past a label. */
5178
5179 for (insn = PREV_INSN (p);
5180 (insn && GET_CODE (insn) == NOTE
5181 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5182 insn = PREV_INSN (insn))
5183 ;
5184
5185 if (insn)
5186 set = single_set (insn);
5187
5188 if (set != 0
5189 && (SET_DEST (set) == x
5190 || (GET_CODE (SET_DEST (set)) == SUBREG
5191 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
5192 <= UNITS_PER_WORD)
5193 && SUBREG_REG (SET_DEST (set)) == x)))
5194 return basic_induction_var (SET_SRC (set),
5195 (GET_MODE (SET_SRC (set)) == VOIDmode
5196 ? GET_MODE (x)
5197 : GET_MODE (SET_SRC (set))),
5198 dest_reg, insn,
5199 inc_val, mult_val);
5200 /* ... fall through ... */
5201
5202 /* Can accept constant setting of biv only when inside inner most loop.
5203 Otherwise, a biv of an inner loop may be incorrectly recognized
5204 as a biv of the outer loop,
5205 causing code to be moved INTO the inner loop. */
5206 case MEM:
5207 if (invariant_p (x) != 1)
5208 return 0;
5209 case CONST_INT:
5210 case SYMBOL_REF:
5211 case CONST:
5212 if (loops_enclosed == 1)
5213 {
5214 /* Possible bug here? Perhaps we don't know the mode of X. */
5215 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
5216 *mult_val = const0_rtx;
5217 return 1;
5218 }
5219 else
5220 return 0;
5221
5222 case SIGN_EXTEND:
5223 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5224 dest_reg, p, inc_val, mult_val);
5225 case ASHIFTRT:
5226 /* Similar, since this can be a sign extension. */
5227 for (insn = PREV_INSN (p);
5228 (insn && GET_CODE (insn) == NOTE
5229 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5230 insn = PREV_INSN (insn))
5231 ;
5232
5233 if (insn)
5234 set = single_set (insn);
5235
5236 if (set && SET_DEST (set) == XEXP (x, 0)
5237 && GET_CODE (XEXP (x, 1)) == CONST_INT
5238 && INTVAL (XEXP (x, 1)) >= 0
5239 && GET_CODE (SET_SRC (set)) == ASHIFT
5240 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
5241 return basic_induction_var (XEXP (SET_SRC (set), 0),
5242 GET_MODE (XEXP (x, 0)),
5243 dest_reg, insn, inc_val, mult_val);
5244 return 0;
5245
5246 default:
5247 return 0;
5248 }
5249 }
5250 \f
5251 /* A general induction variable (giv) is any quantity that is a linear
5252 function of a basic induction variable,
5253 i.e. giv = biv * mult_val + add_val.
5254 The coefficients can be any loop invariant quantity.
5255 A giv need not be computed directly from the biv;
5256 it can be computed by way of other givs. */
5257
5258 /* Determine whether X computes a giv.
5259 If it does, return a nonzero value
5260 which is the benefit from eliminating the computation of X;
5261 set *SRC_REG to the register of the biv that it is computed from;
5262 set *ADD_VAL and *MULT_VAL to the coefficients,
5263 such that the value of X is biv * mult + add; */
5264
5265 static int
5266 general_induction_var (x, src_reg, add_val, mult_val)
5267 rtx x;
5268 rtx *src_reg;
5269 rtx *add_val;
5270 rtx *mult_val;
5271 {
5272 rtx orig_x = x;
5273 int benefit = 0;
5274 char *storage;
5275
5276 /* If this is an invariant, forget it, it isn't a giv. */
5277 if (invariant_p (x) == 1)
5278 return 0;
5279
5280 /* See if the expression could be a giv and get its form.
5281 Mark our place on the obstack in case we don't find a giv. */
5282 storage = (char *) oballoc (0);
5283 x = simplify_giv_expr (x, &benefit);
5284 if (x == 0)
5285 {
5286 obfree (storage);
5287 return 0;
5288 }
5289
5290 switch (GET_CODE (x))
5291 {
5292 case USE:
5293 case CONST_INT:
5294 /* Since this is now an invariant and wasn't before, it must be a giv
5295 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5296 with. */
5297 *src_reg = loop_iv_list->biv->dest_reg;
5298 *mult_val = const0_rtx;
5299 *add_val = x;
5300 break;
5301
5302 case REG:
5303 /* This is equivalent to a BIV. */
5304 *src_reg = x;
5305 *mult_val = const1_rtx;
5306 *add_val = const0_rtx;
5307 break;
5308
5309 case PLUS:
5310 /* Either (plus (biv) (invar)) or
5311 (plus (mult (biv) (invar_1)) (invar_2)). */
5312 if (GET_CODE (XEXP (x, 0)) == MULT)
5313 {
5314 *src_reg = XEXP (XEXP (x, 0), 0);
5315 *mult_val = XEXP (XEXP (x, 0), 1);
5316 }
5317 else
5318 {
5319 *src_reg = XEXP (x, 0);
5320 *mult_val = const1_rtx;
5321 }
5322 *add_val = XEXP (x, 1);
5323 break;
5324
5325 case MULT:
5326 /* ADD_VAL is zero. */
5327 *src_reg = XEXP (x, 0);
5328 *mult_val = XEXP (x, 1);
5329 *add_val = const0_rtx;
5330 break;
5331
5332 default:
5333 abort ();
5334 }
5335
5336 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
5337 unless they are CONST_INT). */
5338 if (GET_CODE (*add_val) == USE)
5339 *add_val = XEXP (*add_val, 0);
5340 if (GET_CODE (*mult_val) == USE)
5341 *mult_val = XEXP (*mult_val, 0);
5342
5343 benefit += rtx_cost (orig_x, SET);
5344
5345 /* Always return some benefit if this is a giv so it will be detected
5346 as such. This allows elimination of bivs that might otherwise
5347 not be eliminated. */
5348 return benefit == 0 ? 1 : benefit;
5349 }
5350 \f
5351 /* Given an expression, X, try to form it as a linear function of a biv.
5352 We will canonicalize it to be of the form
5353 (plus (mult (BIV) (invar_1))
5354 (invar_2))
5355 with possible degeneracies.
5356
5357 The invariant expressions must each be of a form that can be used as a
5358 machine operand. We surround then with a USE rtx (a hack, but localized
5359 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5360 routine; it is the caller's responsibility to strip them.
5361
5362 If no such canonicalization is possible (i.e., two biv's are used or an
5363 expression that is neither invariant nor a biv or giv), this routine
5364 returns 0.
5365
5366 For a non-zero return, the result will have a code of CONST_INT, USE,
5367 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5368
5369 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
5370
5371 static rtx
5372 simplify_giv_expr (x, benefit)
5373 rtx x;
5374 int *benefit;
5375 {
5376 enum machine_mode mode = GET_MODE (x);
5377 rtx arg0, arg1;
5378 rtx tem;
5379
5380 /* If this is not an integer mode, or if we cannot do arithmetic in this
5381 mode, this can't be a giv. */
5382 if (mode != VOIDmode
5383 && (GET_MODE_CLASS (mode) != MODE_INT
5384 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5385 return 0;
5386
5387 switch (GET_CODE (x))
5388 {
5389 case PLUS:
5390 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5391 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5392 if (arg0 == 0 || arg1 == 0)
5393 return 0;
5394
5395 /* Put constant last, CONST_INT last if both constant. */
5396 if ((GET_CODE (arg0) == USE
5397 || GET_CODE (arg0) == CONST_INT)
5398 && GET_CODE (arg1) != CONST_INT)
5399 tem = arg0, arg0 = arg1, arg1 = tem;
5400
5401 /* Handle addition of zero, then addition of an invariant. */
5402 if (arg1 == const0_rtx)
5403 return arg0;
5404 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5405 switch (GET_CODE (arg0))
5406 {
5407 case CONST_INT:
5408 case USE:
5409 /* Both invariant. Only valid if sum is machine operand.
5410 First strip off possible USE on the operands. */
5411 if (GET_CODE (arg0) == USE)
5412 arg0 = XEXP (arg0, 0);
5413
5414 if (GET_CODE (arg1) == USE)
5415 arg1 = XEXP (arg1, 0);
5416
5417 tem = 0;
5418 if (CONSTANT_P (arg0) && GET_CODE (arg1) == CONST_INT)
5419 {
5420 tem = plus_constant (arg0, INTVAL (arg1));
5421 if (GET_CODE (tem) != CONST_INT)
5422 tem = gen_rtx_USE (mode, tem);
5423 }
5424 else
5425 {
5426 /* Adding two invariants must result in an invariant,
5427 so enclose addition operation inside a USE and
5428 return it. */
5429 tem = gen_rtx_USE (mode, gen_rtx_PLUS (mode, arg0, arg1));
5430 }
5431
5432 return tem;
5433
5434 case REG:
5435 case MULT:
5436 /* biv + invar or mult + invar. Return sum. */
5437 return gen_rtx_PLUS (mode, arg0, arg1);
5438
5439 case PLUS:
5440 /* (a + invar_1) + invar_2. Associate. */
5441 return simplify_giv_expr (gen_rtx_PLUS (mode,
5442 XEXP (arg0, 0),
5443 gen_rtx_PLUS (mode,
5444 XEXP (arg0, 1), arg1)),
5445 benefit);
5446
5447 default:
5448 abort ();
5449 }
5450
5451 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5452 MULT to reduce cases. */
5453 if (GET_CODE (arg0) == REG)
5454 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
5455 if (GET_CODE (arg1) == REG)
5456 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
5457
5458 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5459 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5460 Recurse to associate the second PLUS. */
5461 if (GET_CODE (arg1) == MULT)
5462 tem = arg0, arg0 = arg1, arg1 = tem;
5463
5464 if (GET_CODE (arg1) == PLUS)
5465 return simplify_giv_expr (gen_rtx_PLUS (mode,
5466 gen_rtx_PLUS (mode, arg0,
5467 XEXP (arg1, 0)),
5468 XEXP (arg1, 1)),
5469 benefit);
5470
5471 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5472 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5473 abort ();
5474
5475 if (XEXP (arg0, 0) != XEXP (arg1, 0))
5476 return 0;
5477
5478 return simplify_giv_expr (gen_rtx_MULT (mode,
5479 XEXP (arg0, 0),
5480 gen_rtx_PLUS (mode,
5481 XEXP (arg0, 1),
5482 XEXP (arg1, 1))),
5483 benefit);
5484
5485 case MINUS:
5486 /* Handle "a - b" as "a + b * (-1)". */
5487 return simplify_giv_expr (gen_rtx_PLUS (mode,
5488 XEXP (x, 0),
5489 gen_rtx_MULT (mode, XEXP (x, 1),
5490 constm1_rtx)),
5491 benefit);
5492
5493 case MULT:
5494 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5495 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5496 if (arg0 == 0 || arg1 == 0)
5497 return 0;
5498
5499 /* Put constant last, CONST_INT last if both constant. */
5500 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5501 && GET_CODE (arg1) != CONST_INT)
5502 tem = arg0, arg0 = arg1, arg1 = tem;
5503
5504 /* If second argument is not now constant, not giv. */
5505 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5506 return 0;
5507
5508 /* Handle multiply by 0 or 1. */
5509 if (arg1 == const0_rtx)
5510 return const0_rtx;
5511
5512 else if (arg1 == const1_rtx)
5513 return arg0;
5514
5515 switch (GET_CODE (arg0))
5516 {
5517 case REG:
5518 /* biv * invar. Done. */
5519 return gen_rtx_MULT (mode, arg0, arg1);
5520
5521 case CONST_INT:
5522 /* Product of two constants. */
5523 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5524
5525 case USE:
5526 /* invar * invar. Not giv. */
5527 return 0;
5528
5529 case MULT:
5530 /* (a * invar_1) * invar_2. Associate. */
5531 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (arg0, 0),
5532 gen_rtx_MULT (mode,
5533 XEXP (arg0, 1),
5534 arg1)),
5535 benefit);
5536
5537 case PLUS:
5538 /* (a + invar_1) * invar_2. Distribute. */
5539 return simplify_giv_expr (gen_rtx_PLUS (mode,
5540 gen_rtx_MULT (mode,
5541 XEXP (arg0, 0),
5542 arg1),
5543 gen_rtx_MULT (mode,
5544 XEXP (arg0, 1),
5545 arg1)),
5546 benefit);
5547
5548 default:
5549 abort ();
5550 }
5551
5552 case ASHIFT:
5553 /* Shift by constant is multiply by power of two. */
5554 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5555 return 0;
5556
5557 return simplify_giv_expr (gen_rtx_MULT (mode,
5558 XEXP (x, 0),
5559 GEN_INT ((HOST_WIDE_INT) 1
5560 << INTVAL (XEXP (x, 1)))),
5561 benefit);
5562
5563 case NEG:
5564 /* "-a" is "a * (-1)" */
5565 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
5566 benefit);
5567
5568 case NOT:
5569 /* "~a" is "-a - 1". Silly, but easy. */
5570 return simplify_giv_expr (gen_rtx_MINUS (mode,
5571 gen_rtx_NEG (mode, XEXP (x, 0)),
5572 const1_rtx),
5573 benefit);
5574
5575 case USE:
5576 /* Already in proper form for invariant. */
5577 return x;
5578
5579 case REG:
5580 /* If this is a new register, we can't deal with it. */
5581 if (REGNO (x) >= max_reg_before_loop)
5582 return 0;
5583
5584 /* Check for biv or giv. */
5585 switch (reg_iv_type[REGNO (x)])
5586 {
5587 case BASIC_INDUCT:
5588 return x;
5589 case GENERAL_INDUCT:
5590 {
5591 struct induction *v = reg_iv_info[REGNO (x)];
5592
5593 /* Form expression from giv and add benefit. Ensure this giv
5594 can derive another and subtract any needed adjustment if so. */
5595 *benefit += v->benefit;
5596 if (v->cant_derive)
5597 return 0;
5598
5599 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, v->src_reg,
5600 v->mult_val),
5601 v->add_val);
5602 if (v->derive_adjustment)
5603 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
5604 return simplify_giv_expr (tem, benefit);
5605 }
5606
5607 default:
5608 break;
5609 }
5610
5611 /* Fall through to general case. */
5612 default:
5613 /* If invariant, return as USE (unless CONST_INT).
5614 Otherwise, not giv. */
5615 if (GET_CODE (x) == USE)
5616 x = XEXP (x, 0);
5617
5618 if (invariant_p (x) == 1)
5619 {
5620 if (GET_CODE (x) == CONST_INT)
5621 return x;
5622 else
5623 return gen_rtx_USE (mode, x);
5624 }
5625 else
5626 return 0;
5627 }
5628 }
5629 \f
5630 /* Help detect a giv that is calculated by several consecutive insns;
5631 for example,
5632 giv = biv * M
5633 giv = giv + A
5634 The caller has already identified the first insn P as having a giv as dest;
5635 we check that all other insns that set the same register follow
5636 immediately after P, that they alter nothing else,
5637 and that the result of the last is still a giv.
5638
5639 The value is 0 if the reg set in P is not really a giv.
5640 Otherwise, the value is the amount gained by eliminating
5641 all the consecutive insns that compute the value.
5642
5643 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
5644 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
5645
5646 The coefficients of the ultimate giv value are stored in
5647 *MULT_VAL and *ADD_VAL. */
5648
5649 static int
5650 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
5651 add_val, mult_val)
5652 int first_benefit;
5653 rtx p;
5654 rtx src_reg;
5655 rtx dest_reg;
5656 rtx *add_val;
5657 rtx *mult_val;
5658 {
5659 int count;
5660 enum rtx_code code;
5661 int benefit;
5662 rtx temp;
5663 rtx set;
5664
5665 /* Indicate that this is a giv so that we can update the value produced in
5666 each insn of the multi-insn sequence.
5667
5668 This induction structure will be used only by the call to
5669 general_induction_var below, so we can allocate it on our stack.
5670 If this is a giv, our caller will replace the induct var entry with
5671 a new induction structure. */
5672 struct induction *v
5673 = (struct induction *) alloca (sizeof (struct induction));
5674 v->src_reg = src_reg;
5675 v->mult_val = *mult_val;
5676 v->add_val = *add_val;
5677 v->benefit = first_benefit;
5678 v->cant_derive = 0;
5679 v->derive_adjustment = 0;
5680
5681 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
5682 reg_iv_info[REGNO (dest_reg)] = v;
5683
5684 count = n_times_set[REGNO (dest_reg)] - 1;
5685
5686 while (count > 0)
5687 {
5688 p = NEXT_INSN (p);
5689 code = GET_CODE (p);
5690
5691 /* If libcall, skip to end of call sequence. */
5692 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
5693 p = XEXP (temp, 0);
5694
5695 if (code == INSN
5696 && (set = single_set (p))
5697 && GET_CODE (SET_DEST (set)) == REG
5698 && SET_DEST (set) == dest_reg
5699 && ((benefit = general_induction_var (SET_SRC (set), &src_reg,
5700 add_val, mult_val))
5701 /* Giv created by equivalent expression. */
5702 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
5703 && (benefit = general_induction_var (XEXP (temp, 0), &src_reg,
5704 add_val, mult_val))))
5705 && src_reg == v->src_reg)
5706 {
5707 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5708 benefit += libcall_benefit (p);
5709
5710 count--;
5711 v->mult_val = *mult_val;
5712 v->add_val = *add_val;
5713 v->benefit = benefit;
5714 }
5715 else if (code != NOTE)
5716 {
5717 /* Allow insns that set something other than this giv to a
5718 constant. Such insns are needed on machines which cannot
5719 include long constants and should not disqualify a giv. */
5720 if (code == INSN
5721 && (set = single_set (p))
5722 && SET_DEST (set) != dest_reg
5723 && CONSTANT_P (SET_SRC (set)))
5724 continue;
5725
5726 reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
5727 return 0;
5728 }
5729 }
5730
5731 return v->benefit;
5732 }
5733 \f
5734 /* Return an rtx, if any, that expresses giv G2 as a function of the register
5735 represented by G1. If no such expression can be found, or it is clear that
5736 it cannot possibly be a valid address, 0 is returned.
5737
5738 To perform the computation, we note that
5739 G1 = a * v + b and
5740 G2 = c * v + d
5741 where `v' is the biv.
5742
5743 So G2 = (c/a) * G1 + (d - b*c/a) */
5744
5745 #ifdef ADDRESS_COST
5746 static rtx
5747 express_from (g1, g2)
5748 struct induction *g1, *g2;
5749 {
5750 rtx mult, add;
5751
5752 /* The value that G1 will be multiplied by must be a constant integer. Also,
5753 the only chance we have of getting a valid address is if b*c/a (see above
5754 for notation) is also an integer. */
5755 if (GET_CODE (g1->mult_val) != CONST_INT
5756 || GET_CODE (g2->mult_val) != CONST_INT
5757 || GET_CODE (g1->add_val) != CONST_INT
5758 || g1->mult_val == const0_rtx
5759 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
5760 return 0;
5761
5762 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
5763 add = plus_constant (g2->add_val, - INTVAL (g1->add_val) * INTVAL (mult));
5764
5765 /* Form simplified final result. */
5766 if (mult == const0_rtx)
5767 return add;
5768 else if (mult == const1_rtx)
5769 mult = g1->dest_reg;
5770 else
5771 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
5772
5773 if (add == const0_rtx)
5774 return mult;
5775 else
5776 return gen_rtx_PLUS (g2->mode, mult, add);
5777 }
5778 #endif
5779 \f
5780 /* Return 1 if giv G2 can be combined with G1. This means that G2 can use
5781 (either directly or via an address expression) a register used to represent
5782 G1. Set g2->new_reg to a represtation of G1 (normally just
5783 g1->dest_reg). */
5784
5785 static int
5786 combine_givs_p (g1, g2)
5787 struct induction *g1, *g2;
5788 {
5789 rtx tem;
5790
5791 /* If these givs are identical, they can be combined. */
5792 if (rtx_equal_p (g1->mult_val, g2->mult_val)
5793 && rtx_equal_p (g1->add_val, g2->add_val))
5794 {
5795 g2->new_reg = g1->dest_reg;
5796 return 1;
5797 }
5798
5799 #ifdef ADDRESS_COST
5800 /* If G2 can be expressed as a function of G1 and that function is valid
5801 as an address and no more expensive than using a register for G2,
5802 the expression of G2 in terms of G1 can be used. */
5803 if (g2->giv_type == DEST_ADDR
5804 && (tem = express_from (g1, g2)) != 0
5805 && memory_address_p (g2->mem_mode, tem)
5806 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location))
5807 {
5808 g2->new_reg = tem;
5809 return 1;
5810 }
5811 #endif
5812
5813 return 0;
5814 }
5815 \f
5816 #ifdef GIV_SORT_CRITERION
5817 /* Compare two givs and sort the most desirable one for combinations first.
5818 This is used only in one qsort call below. */
5819
5820 static int
5821 giv_sort (x, y)
5822 struct induction **x, **y;
5823 {
5824 GIV_SORT_CRITERION (*x, *y);
5825
5826 return 0;
5827 }
5828 #endif
5829
5830 /* Check all pairs of givs for iv_class BL and see if any can be combined with
5831 any other. If so, point SAME to the giv combined with and set NEW_REG to
5832 be an expression (in terms of the other giv's DEST_REG) equivalent to the
5833 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
5834
5835 static void
5836 combine_givs (bl)
5837 struct iv_class *bl;
5838 {
5839 struct induction *g1, *g2, **giv_array, *temp_iv;
5840 int i, j, giv_count, pass;
5841
5842 /* Count givs, because bl->giv_count is incorrect here. */
5843 giv_count = 0;
5844 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5845 giv_count++;
5846
5847 giv_array
5848 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
5849 i = 0;
5850 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5851 giv_array[i++] = g1;
5852
5853 #ifdef GIV_SORT_CRITERION
5854 /* Sort the givs if GIV_SORT_CRITERION is defined.
5855 This is usually defined for processors which lack
5856 negative register offsets so more givs may be combined. */
5857
5858 if (loop_dump_stream)
5859 fprintf (loop_dump_stream, "%d givs counted, sorting...\n", giv_count);
5860
5861 qsort (giv_array, giv_count, sizeof (struct induction *), giv_sort);
5862 #endif
5863
5864 for (i = 0; i < giv_count; i++)
5865 {
5866 g1 = giv_array[i];
5867 for (pass = 0; pass <= 1; pass++)
5868 for (j = 0; j < giv_count; j++)
5869 {
5870 g2 = giv_array[j];
5871 if (g1 != g2
5872 /* First try to combine with replaceable givs, then all givs. */
5873 && (g1->replaceable || pass == 1)
5874 /* If either has already been combined or is to be ignored, can't
5875 combine. */
5876 && ! g1->ignore && ! g2->ignore && ! g1->same && ! g2->same
5877 /* If something has been based on G2, G2 cannot itself be based
5878 on something else. */
5879 && ! g2->combined_with
5880 && combine_givs_p (g1, g2))
5881 {
5882 /* g2->new_reg set by `combine_givs_p' */
5883 g2->same = g1;
5884 g1->combined_with = 1;
5885
5886 /* If one of these givs is a DEST_REG that was only used
5887 once, by the other giv, this is actually a single use.
5888 The DEST_REG has the correct cost, while the other giv
5889 counts the REG use too often. */
5890 if (g2->giv_type == DEST_REG
5891 && n_times_used[REGNO (g2->dest_reg)] == 1
5892 && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
5893 g1->benefit = g2->benefit;
5894 else if (g1->giv_type != DEST_REG
5895 || n_times_used[REGNO (g1->dest_reg)] != 1
5896 || ! reg_mentioned_p (g1->dest_reg,
5897 PATTERN (g2->insn)))
5898 {
5899 g1->benefit += g2->benefit;
5900 g1->times_used += g2->times_used;
5901 }
5902 /* ??? The new final_[bg]iv_value code does a much better job
5903 of finding replaceable giv's, and hence this code may no
5904 longer be necessary. */
5905 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
5906 g1->benefit -= copy_cost;
5907 g1->lifetime += g2->lifetime;
5908
5909 if (loop_dump_stream)
5910 fprintf (loop_dump_stream, "giv at %d combined with giv at %d\n",
5911 INSN_UID (g2->insn), INSN_UID (g1->insn));
5912 }
5913 }
5914 }
5915 }
5916 \f
5917 /* EMIT code before INSERT_BEFORE to set REG = B * M + A. */
5918
5919 void
5920 emit_iv_add_mult (b, m, a, reg, insert_before)
5921 rtx b; /* initial value of basic induction variable */
5922 rtx m; /* multiplicative constant */
5923 rtx a; /* additive constant */
5924 rtx reg; /* destination register */
5925 rtx insert_before;
5926 {
5927 rtx seq;
5928 rtx result;
5929
5930 /* Prevent unexpected sharing of these rtx. */
5931 a = copy_rtx (a);
5932 b = copy_rtx (b);
5933
5934 /* Increase the lifetime of any invariants moved further in code. */
5935 update_reg_last_use (a, insert_before);
5936 update_reg_last_use (b, insert_before);
5937 update_reg_last_use (m, insert_before);
5938
5939 start_sequence ();
5940 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
5941 if (reg != result)
5942 emit_move_insn (reg, result);
5943 seq = gen_sequence ();
5944 end_sequence ();
5945
5946 emit_insn_before (seq, insert_before);
5947
5948 record_base_value (REGNO (reg), b);
5949 }
5950 \f
5951 /* Test whether A * B can be computed without
5952 an actual multiply insn. Value is 1 if so. */
5953
5954 static int
5955 product_cheap_p (a, b)
5956 rtx a;
5957 rtx b;
5958 {
5959 int i;
5960 rtx tmp;
5961 struct obstack *old_rtl_obstack = rtl_obstack;
5962 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
5963 int win = 1;
5964
5965 /* If only one is constant, make it B. */
5966 if (GET_CODE (a) == CONST_INT)
5967 tmp = a, a = b, b = tmp;
5968
5969 /* If first constant, both constant, so don't need multiply. */
5970 if (GET_CODE (a) == CONST_INT)
5971 return 1;
5972
5973 /* If second not constant, neither is constant, so would need multiply. */
5974 if (GET_CODE (b) != CONST_INT)
5975 return 0;
5976
5977 /* One operand is constant, so might not need multiply insn. Generate the
5978 code for the multiply and see if a call or multiply, or long sequence
5979 of insns is generated. */
5980
5981 rtl_obstack = &temp_obstack;
5982 start_sequence ();
5983 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
5984 tmp = gen_sequence ();
5985 end_sequence ();
5986
5987 if (GET_CODE (tmp) == SEQUENCE)
5988 {
5989 if (XVEC (tmp, 0) == 0)
5990 win = 1;
5991 else if (XVECLEN (tmp, 0) > 3)
5992 win = 0;
5993 else
5994 for (i = 0; i < XVECLEN (tmp, 0); i++)
5995 {
5996 rtx insn = XVECEXP (tmp, 0, i);
5997
5998 if (GET_CODE (insn) != INSN
5999 || (GET_CODE (PATTERN (insn)) == SET
6000 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
6001 || (GET_CODE (PATTERN (insn)) == PARALLEL
6002 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
6003 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
6004 {
6005 win = 0;
6006 break;
6007 }
6008 }
6009 }
6010 else if (GET_CODE (tmp) == SET
6011 && GET_CODE (SET_SRC (tmp)) == MULT)
6012 win = 0;
6013 else if (GET_CODE (tmp) == PARALLEL
6014 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
6015 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
6016 win = 0;
6017
6018 /* Free any storage we obtained in generating this multiply and restore rtl
6019 allocation to its normal obstack. */
6020 obstack_free (&temp_obstack, storage);
6021 rtl_obstack = old_rtl_obstack;
6022
6023 return win;
6024 }
6025 \f
6026 /* Check to see if loop can be terminated by a "decrement and branch until
6027 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
6028 Also try reversing an increment loop to a decrement loop
6029 to see if the optimization can be performed.
6030 Value is nonzero if optimization was performed. */
6031
6032 /* This is useful even if the architecture doesn't have such an insn,
6033 because it might change a loops which increments from 0 to n to a loop
6034 which decrements from n to 0. A loop that decrements to zero is usually
6035 faster than one that increments from zero. */
6036
6037 /* ??? This could be rewritten to use some of the loop unrolling procedures,
6038 such as approx_final_value, biv_total_increment, loop_iterations, and
6039 final_[bg]iv_value. */
6040
6041 static int
6042 check_dbra_loop (loop_end, insn_count, loop_start)
6043 rtx loop_end;
6044 int insn_count;
6045 rtx loop_start;
6046 {
6047 struct iv_class *bl;
6048 rtx reg;
6049 rtx jump_label;
6050 rtx final_value;
6051 rtx start_value;
6052 rtx new_add_val;
6053 rtx comparison;
6054 rtx before_comparison;
6055 rtx p;
6056
6057 /* If last insn is a conditional branch, and the insn before tests a
6058 register value, try to optimize it. Otherwise, we can't do anything. */
6059
6060 comparison = get_condition_for_loop (PREV_INSN (loop_end));
6061 if (comparison == 0)
6062 return 0;
6063
6064 /* Check all of the bivs to see if the compare uses one of them.
6065 Skip biv's set more than once because we can't guarantee that
6066 it will be zero on the last iteration. Also skip if the biv is
6067 used between its update and the test insn. */
6068
6069 for (bl = loop_iv_list; bl; bl = bl->next)
6070 {
6071 if (bl->biv_count == 1
6072 && bl->biv->dest_reg == XEXP (comparison, 0)
6073 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
6074 PREV_INSN (PREV_INSN (loop_end))))
6075 break;
6076 }
6077
6078 if (! bl)
6079 return 0;
6080
6081 /* Look for the case where the basic induction variable is always
6082 nonnegative, and equals zero on the last iteration.
6083 In this case, add a reg_note REG_NONNEG, which allows the
6084 m68k DBRA instruction to be used. */
6085
6086 if (((GET_CODE (comparison) == GT
6087 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6088 && INTVAL (XEXP (comparison, 1)) == -1)
6089 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
6090 && GET_CODE (bl->biv->add_val) == CONST_INT
6091 && INTVAL (bl->biv->add_val) < 0)
6092 {
6093 /* Initial value must be greater than 0,
6094 init_val % -dec_value == 0 to ensure that it equals zero on
6095 the last iteration */
6096
6097 if (GET_CODE (bl->initial_value) == CONST_INT
6098 && INTVAL (bl->initial_value) > 0
6099 && (INTVAL (bl->initial_value)
6100 % (-INTVAL (bl->biv->add_val))) == 0)
6101 {
6102 /* register always nonnegative, add REG_NOTE to branch */
6103 REG_NOTES (PREV_INSN (loop_end))
6104 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6105 REG_NOTES (PREV_INSN (loop_end)));
6106 bl->nonneg = 1;
6107
6108 return 1;
6109 }
6110
6111 /* If the decrement is 1 and the value was tested as >= 0 before
6112 the loop, then we can safely optimize. */
6113 for (p = loop_start; p; p = PREV_INSN (p))
6114 {
6115 if (GET_CODE (p) == CODE_LABEL)
6116 break;
6117 if (GET_CODE (p) != JUMP_INSN)
6118 continue;
6119
6120 before_comparison = get_condition_for_loop (p);
6121 if (before_comparison
6122 && XEXP (before_comparison, 0) == bl->biv->dest_reg
6123 && GET_CODE (before_comparison) == LT
6124 && XEXP (before_comparison, 1) == const0_rtx
6125 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
6126 && INTVAL (bl->biv->add_val) == -1)
6127 {
6128 REG_NOTES (PREV_INSN (loop_end))
6129 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6130 REG_NOTES (PREV_INSN (loop_end)));
6131 bl->nonneg = 1;
6132
6133 return 1;
6134 }
6135 }
6136 }
6137 else if (num_mem_sets <= 1)
6138 {
6139 /* Try to change inc to dec, so can apply above optimization. */
6140 /* Can do this if:
6141 all registers modified are induction variables or invariant,
6142 all memory references have non-overlapping addresses
6143 (obviously true if only one write)
6144 allow 2 insns for the compare/jump at the end of the loop. */
6145 /* Also, we must avoid any instructions which use both the reversed
6146 biv and another biv. Such instructions will fail if the loop is
6147 reversed. We meet this condition by requiring that either
6148 no_use_except_counting is true, or else that there is only
6149 one biv. */
6150 int num_nonfixed_reads = 0;
6151 /* 1 if the iteration var is used only to count iterations. */
6152 int no_use_except_counting = 0;
6153 /* 1 if the loop has no memory store, or it has a single memory store
6154 which is reversible. */
6155 int reversible_mem_store = 1;
6156
6157 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6158 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6159 num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
6160
6161 if (bl->giv_count == 0
6162 && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
6163 {
6164 rtx bivreg = regno_reg_rtx[bl->regno];
6165
6166 /* If there are no givs for this biv, and the only exit is the
6167 fall through at the end of the the loop, then
6168 see if perhaps there are no uses except to count. */
6169 no_use_except_counting = 1;
6170 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6171 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6172 {
6173 rtx set = single_set (p);
6174
6175 if (set && GET_CODE (SET_DEST (set)) == REG
6176 && REGNO (SET_DEST (set)) == bl->regno)
6177 /* An insn that sets the biv is okay. */
6178 ;
6179 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
6180 || p == prev_nonnote_insn (loop_end))
6181 /* Don't bother about the end test. */
6182 ;
6183 else if (reg_mentioned_p (bivreg, PATTERN (p)))
6184 /* Any other use of the biv is no good. */
6185 {
6186 no_use_except_counting = 0;
6187 break;
6188 }
6189 }
6190 }
6191
6192 /* If the loop has a single store, and the destination address is
6193 invariant, then we can't reverse the loop, because this address
6194 might then have the wrong value at loop exit.
6195 This would work if the source was invariant also, however, in that
6196 case, the insn should have been moved out of the loop. */
6197
6198 if (num_mem_sets == 1)
6199 reversible_mem_store
6200 = (! unknown_address_altered
6201 && ! invariant_p (XEXP (loop_store_mems[0], 0)));
6202
6203 /* This code only acts for innermost loops. Also it simplifies
6204 the memory address check by only reversing loops with
6205 zero or one memory access.
6206 Two memory accesses could involve parts of the same array,
6207 and that can't be reversed. */
6208
6209 if (num_nonfixed_reads <= 1
6210 && !loop_has_call
6211 && !loop_has_volatile
6212 && reversible_mem_store
6213 && (no_use_except_counting
6214 || ((bl->giv_count + bl->biv_count + num_mem_sets
6215 + num_movables + 2 == insn_count)
6216 && (bl == loop_iv_list && bl->next == 0))))
6217 {
6218 rtx tem;
6219
6220 /* Loop can be reversed. */
6221 if (loop_dump_stream)
6222 fprintf (loop_dump_stream, "Can reverse loop\n");
6223
6224 /* Now check other conditions:
6225
6226 The increment must be a constant, as must the initial value,
6227 and the comparison code must be LT.
6228
6229 This test can probably be improved since +/- 1 in the constant
6230 can be obtained by changing LT to LE and vice versa; this is
6231 confusing. */
6232
6233 if (comparison
6234 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6235 /* LE gets turned into LT */
6236 && GET_CODE (comparison) == LT
6237 && GET_CODE (bl->initial_value) == CONST_INT)
6238 {
6239 HOST_WIDE_INT add_val, comparison_val;
6240 rtx initial_value;
6241
6242 add_val = INTVAL (bl->biv->add_val);
6243 comparison_val = INTVAL (XEXP (comparison, 1));
6244 initial_value = bl->initial_value;
6245
6246 /* Normalize the initial value if it is an integer and
6247 has no other use except as a counter. This will allow
6248 a few more loops to be reversed. */
6249 if (no_use_except_counting
6250 && GET_CODE (initial_value) == CONST_INT)
6251 {
6252 comparison_val = comparison_val - INTVAL (bl->initial_value);
6253 initial_value = const0_rtx;
6254 }
6255
6256 /* If the initial value is not zero, or if the comparison
6257 value is not an exact multiple of the increment, then we
6258 can not reverse this loop. */
6259 if (initial_value != const0_rtx
6260 || (comparison_val % add_val) != 0)
6261 return 0;
6262
6263 /* Reset these in case we normalized the initial value
6264 and comparison value above. */
6265 bl->initial_value = initial_value;
6266 XEXP (comparison, 1) = GEN_INT (comparison_val);
6267
6268 /* Register will always be nonnegative, with value
6269 0 on last iteration if loop reversed */
6270
6271 /* Save some info needed to produce the new insns. */
6272 reg = bl->biv->dest_reg;
6273 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
6274 if (jump_label == pc_rtx)
6275 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
6276 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
6277
6278 final_value = XEXP (comparison, 1);
6279 start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
6280 - INTVAL (bl->biv->add_val));
6281
6282 /* Initialize biv to start_value before loop start.
6283 The old initializing insn will be deleted as a
6284 dead store by flow.c. */
6285 emit_insn_before (gen_move_insn (reg, start_value), loop_start);
6286
6287 /* Add insn to decrement register, and delete insn
6288 that incremented the register. */
6289 p = emit_insn_before (gen_add2_insn (reg, new_add_val),
6290 bl->biv->insn);
6291 delete_insn (bl->biv->insn);
6292
6293 /* Update biv info to reflect its new status. */
6294 bl->biv->insn = p;
6295 bl->initial_value = start_value;
6296 bl->biv->add_val = new_add_val;
6297
6298 /* Inc LABEL_NUSES so that delete_insn will
6299 not delete the label. */
6300 LABEL_NUSES (XEXP (jump_label, 0)) ++;
6301
6302 /* Emit an insn after the end of the loop to set the biv's
6303 proper exit value if it is used anywhere outside the loop. */
6304 if ((REGNO_LAST_UID (bl->regno)
6305 != INSN_UID (PREV_INSN (PREV_INSN (loop_end))))
6306 || ! bl->init_insn
6307 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
6308 emit_insn_after (gen_move_insn (reg, final_value),
6309 loop_end);
6310
6311 /* Delete compare/branch at end of loop. */
6312 delete_insn (PREV_INSN (loop_end));
6313 delete_insn (PREV_INSN (loop_end));
6314
6315 /* Add new compare/branch insn at end of loop. */
6316 start_sequence ();
6317 emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
6318 GET_MODE (reg), 0, 0);
6319 emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
6320 tem = gen_sequence ();
6321 end_sequence ();
6322 emit_jump_insn_before (tem, loop_end);
6323
6324 for (tem = PREV_INSN (loop_end);
6325 tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
6326 ;
6327 if (tem)
6328 {
6329 JUMP_LABEL (tem) = XEXP (jump_label, 0);
6330
6331 /* Increment of LABEL_NUSES done above. */
6332 /* Register is now always nonnegative,
6333 so add REG_NONNEG note to the branch. */
6334 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6335 REG_NOTES (tem));
6336 }
6337
6338 bl->nonneg = 1;
6339
6340 /* Mark that this biv has been reversed. Each giv which depends
6341 on this biv, and which is also live past the end of the loop
6342 will have to be fixed up. */
6343
6344 bl->reversed = 1;
6345
6346 if (loop_dump_stream)
6347 fprintf (loop_dump_stream,
6348 "Reversed loop and added reg_nonneg\n");
6349
6350 return 1;
6351 }
6352 }
6353 }
6354
6355 return 0;
6356 }
6357 \f
6358 /* Verify whether the biv BL appears to be eliminable,
6359 based on the insns in the loop that refer to it.
6360 LOOP_START is the first insn of the loop, and END is the end insn.
6361
6362 If ELIMINATE_P is non-zero, actually do the elimination.
6363
6364 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
6365 determine whether invariant insns should be placed inside or at the
6366 start of the loop. */
6367
6368 static int
6369 maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
6370 struct iv_class *bl;
6371 rtx loop_start;
6372 rtx end;
6373 int eliminate_p;
6374 int threshold, insn_count;
6375 {
6376 rtx reg = bl->biv->dest_reg;
6377 rtx p;
6378
6379 /* Scan all insns in the loop, stopping if we find one that uses the
6380 biv in a way that we cannot eliminate. */
6381
6382 for (p = loop_start; p != end; p = NEXT_INSN (p))
6383 {
6384 enum rtx_code code = GET_CODE (p);
6385 rtx where = threshold >= insn_count ? loop_start : p;
6386
6387 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
6388 && reg_mentioned_p (reg, PATTERN (p))
6389 && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
6390 {
6391 if (loop_dump_stream)
6392 fprintf (loop_dump_stream,
6393 "Cannot eliminate biv %d: biv used in insn %d.\n",
6394 bl->regno, INSN_UID (p));
6395 break;
6396 }
6397 }
6398
6399 if (p == end)
6400 {
6401 if (loop_dump_stream)
6402 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
6403 bl->regno, eliminate_p ? "was" : "can be");
6404 return 1;
6405 }
6406
6407 return 0;
6408 }
6409 \f
6410 /* If BL appears in X (part of the pattern of INSN), see if we can
6411 eliminate its use. If so, return 1. If not, return 0.
6412
6413 If BIV does not appear in X, return 1.
6414
6415 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
6416 where extra insns should be added. Depending on how many items have been
6417 moved out of the loop, it will either be before INSN or at the start of
6418 the loop. */
6419
6420 static int
6421 maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
6422 rtx x, insn;
6423 struct iv_class *bl;
6424 int eliminate_p;
6425 rtx where;
6426 {
6427 enum rtx_code code = GET_CODE (x);
6428 rtx reg = bl->biv->dest_reg;
6429 enum machine_mode mode = GET_MODE (reg);
6430 struct induction *v;
6431 rtx arg, new, tem;
6432 int arg_operand;
6433 char *fmt;
6434 int i, j;
6435
6436 switch (code)
6437 {
6438 case REG:
6439 /* If we haven't already been able to do something with this BIV,
6440 we can't eliminate it. */
6441 if (x == reg)
6442 return 0;
6443 return 1;
6444
6445 case SET:
6446 /* If this sets the BIV, it is not a problem. */
6447 if (SET_DEST (x) == reg)
6448 return 1;
6449
6450 /* If this is an insn that defines a giv, it is also ok because
6451 it will go away when the giv is reduced. */
6452 for (v = bl->giv; v; v = v->next_iv)
6453 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
6454 return 1;
6455
6456 #ifdef HAVE_cc0
6457 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
6458 {
6459 /* Can replace with any giv that was reduced and
6460 that has (MULT_VAL != 0) and (ADD_VAL == 0).
6461 Require a constant for MULT_VAL, so we know it's nonzero.
6462 ??? We disable this optimization to avoid potential
6463 overflows. */
6464
6465 for (v = bl->giv; v; v = v->next_iv)
6466 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6467 && v->add_val == const0_rtx
6468 && ! v->ignore && ! v->maybe_dead && v->always_computable
6469 && v->mode == mode
6470 && 0)
6471 {
6472 /* If the giv V had the auto-inc address optimization applied
6473 to it, and INSN occurs between the giv insn and the biv
6474 insn, then we must adjust the value used here.
6475 This is rare, so we don't bother to do so. */
6476 if (v->auto_inc_opt
6477 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6478 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6479 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6480 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6481 continue;
6482
6483 if (! eliminate_p)
6484 return 1;
6485
6486 /* If the giv has the opposite direction of change,
6487 then reverse the comparison. */
6488 if (INTVAL (v->mult_val) < 0)
6489 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
6490 const0_rtx, v->new_reg);
6491 else
6492 new = v->new_reg;
6493
6494 /* We can probably test that giv's reduced reg. */
6495 if (validate_change (insn, &SET_SRC (x), new, 0))
6496 return 1;
6497 }
6498
6499 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
6500 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
6501 Require a constant for MULT_VAL, so we know it's nonzero.
6502 ??? Do this only if ADD_VAL is a pointer to avoid a potential
6503 overflow problem. */
6504
6505 for (v = bl->giv; v; v = v->next_iv)
6506 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6507 && ! v->ignore && ! v->maybe_dead && v->always_computable
6508 && v->mode == mode
6509 && (GET_CODE (v->add_val) == SYMBOL_REF
6510 || GET_CODE (v->add_val) == LABEL_REF
6511 || GET_CODE (v->add_val) == CONST
6512 || (GET_CODE (v->add_val) == REG
6513 && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
6514 {
6515 /* If the giv V had the auto-inc address optimization applied
6516 to it, and INSN occurs between the giv insn and the biv
6517 insn, then we must adjust the value used here.
6518 This is rare, so we don't bother to do so. */
6519 if (v->auto_inc_opt
6520 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6521 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6522 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6523 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6524 continue;
6525
6526 if (! eliminate_p)
6527 return 1;
6528
6529 /* If the giv has the opposite direction of change,
6530 then reverse the comparison. */
6531 if (INTVAL (v->mult_val) < 0)
6532 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
6533 v->new_reg);
6534 else
6535 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
6536 copy_rtx (v->add_val));
6537
6538 /* Replace biv with the giv's reduced register. */
6539 update_reg_last_use (v->add_val, insn);
6540 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6541 return 1;
6542
6543 /* Insn doesn't support that constant or invariant. Copy it
6544 into a register (it will be a loop invariant.) */
6545 tem = gen_reg_rtx (GET_MODE (v->new_reg));
6546
6547 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
6548 where);
6549
6550 /* Substitute the new register for its invariant value in
6551 the compare expression. */
6552 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
6553 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6554 return 1;
6555 }
6556 }
6557 #endif
6558 break;
6559
6560 case COMPARE:
6561 case EQ: case NE:
6562 case GT: case GE: case GTU: case GEU:
6563 case LT: case LE: case LTU: case LEU:
6564 /* See if either argument is the biv. */
6565 if (XEXP (x, 0) == reg)
6566 arg = XEXP (x, 1), arg_operand = 1;
6567 else if (XEXP (x, 1) == reg)
6568 arg = XEXP (x, 0), arg_operand = 0;
6569 else
6570 break;
6571
6572 if (CONSTANT_P (arg))
6573 {
6574 /* First try to replace with any giv that has constant positive
6575 mult_val and constant add_val. We might be able to support
6576 negative mult_val, but it seems complex to do it in general. */
6577
6578 for (v = bl->giv; v; v = v->next_iv)
6579 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6580 && (GET_CODE (v->add_val) == SYMBOL_REF
6581 || GET_CODE (v->add_val) == LABEL_REF
6582 || GET_CODE (v->add_val) == CONST
6583 || (GET_CODE (v->add_val) == REG
6584 && REGNO_POINTER_FLAG (REGNO (v->add_val))))
6585 && ! v->ignore && ! v->maybe_dead && v->always_computable
6586 && v->mode == mode)
6587 {
6588 /* If the giv V had the auto-inc address optimization applied
6589 to it, and INSN occurs between the giv insn and the biv
6590 insn, then we must adjust the value used here.
6591 This is rare, so we don't bother to do so. */
6592 if (v->auto_inc_opt
6593 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6594 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6595 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6596 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6597 continue;
6598
6599 if (! eliminate_p)
6600 return 1;
6601
6602 /* Replace biv with the giv's reduced reg. */
6603 XEXP (x, 1-arg_operand) = v->new_reg;
6604
6605 /* If all constants are actually constant integers and
6606 the derived constant can be directly placed in the COMPARE,
6607 do so. */
6608 if (GET_CODE (arg) == CONST_INT
6609 && GET_CODE (v->mult_val) == CONST_INT
6610 && GET_CODE (v->add_val) == CONST_INT
6611 && validate_change (insn, &XEXP (x, arg_operand),
6612 GEN_INT (INTVAL (arg)
6613 * INTVAL (v->mult_val)
6614 + INTVAL (v->add_val)), 0))
6615 return 1;
6616
6617 /* Otherwise, load it into a register. */
6618 tem = gen_reg_rtx (mode);
6619 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6620 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
6621 return 1;
6622
6623 /* If that failed, put back the change we made above. */
6624 XEXP (x, 1-arg_operand) = reg;
6625 }
6626
6627 /* Look for giv with positive constant mult_val and nonconst add_val.
6628 Insert insns to calculate new compare value.
6629 ??? Turn this off due to possible overflow. */
6630
6631 for (v = bl->giv; v; v = v->next_iv)
6632 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6633 && ! v->ignore && ! v->maybe_dead && v->always_computable
6634 && v->mode == mode
6635 && 0)
6636 {
6637 rtx tem;
6638
6639 /* If the giv V had the auto-inc address optimization applied
6640 to it, and INSN occurs between the giv insn and the biv
6641 insn, then we must adjust the value used here.
6642 This is rare, so we don't bother to do so. */
6643 if (v->auto_inc_opt
6644 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6645 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6646 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6647 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6648 continue;
6649
6650 if (! eliminate_p)
6651 return 1;
6652
6653 tem = gen_reg_rtx (mode);
6654
6655 /* Replace biv with giv's reduced register. */
6656 validate_change (insn, &XEXP (x, 1 - arg_operand),
6657 v->new_reg, 1);
6658
6659 /* Compute value to compare against. */
6660 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6661 /* Use it in this insn. */
6662 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6663 if (apply_change_group ())
6664 return 1;
6665 }
6666 }
6667 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
6668 {
6669 if (invariant_p (arg) == 1)
6670 {
6671 /* Look for giv with constant positive mult_val and nonconst
6672 add_val. Insert insns to compute new compare value.
6673 ??? Turn this off due to possible overflow. */
6674
6675 for (v = bl->giv; v; v = v->next_iv)
6676 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6677 && ! v->ignore && ! v->maybe_dead && v->always_computable
6678 && v->mode == mode
6679 && 0)
6680 {
6681 rtx tem;
6682
6683 /* If the giv V had the auto-inc address optimization applied
6684 to it, and INSN occurs between the giv insn and the biv
6685 insn, then we must adjust the value used here.
6686 This is rare, so we don't bother to do so. */
6687 if (v->auto_inc_opt
6688 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6689 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6690 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6691 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6692 continue;
6693
6694 if (! eliminate_p)
6695 return 1;
6696
6697 tem = gen_reg_rtx (mode);
6698
6699 /* Replace biv with giv's reduced register. */
6700 validate_change (insn, &XEXP (x, 1 - arg_operand),
6701 v->new_reg, 1);
6702
6703 /* Compute value to compare against. */
6704 emit_iv_add_mult (arg, v->mult_val, v->add_val,
6705 tem, where);
6706 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6707 if (apply_change_group ())
6708 return 1;
6709 }
6710 }
6711
6712 /* This code has problems. Basically, you can't know when
6713 seeing if we will eliminate BL, whether a particular giv
6714 of ARG will be reduced. If it isn't going to be reduced,
6715 we can't eliminate BL. We can try forcing it to be reduced,
6716 but that can generate poor code.
6717
6718 The problem is that the benefit of reducing TV, below should
6719 be increased if BL can actually be eliminated, but this means
6720 we might have to do a topological sort of the order in which
6721 we try to process biv. It doesn't seem worthwhile to do
6722 this sort of thing now. */
6723
6724 #if 0
6725 /* Otherwise the reg compared with had better be a biv. */
6726 if (GET_CODE (arg) != REG
6727 || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
6728 return 0;
6729
6730 /* Look for a pair of givs, one for each biv,
6731 with identical coefficients. */
6732 for (v = bl->giv; v; v = v->next_iv)
6733 {
6734 struct induction *tv;
6735
6736 if (v->ignore || v->maybe_dead || v->mode != mode)
6737 continue;
6738
6739 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
6740 if (! tv->ignore && ! tv->maybe_dead
6741 && rtx_equal_p (tv->mult_val, v->mult_val)
6742 && rtx_equal_p (tv->add_val, v->add_val)
6743 && tv->mode == mode)
6744 {
6745 /* If the giv V had the auto-inc address optimization applied
6746 to it, and INSN occurs between the giv insn and the biv
6747 insn, then we must adjust the value used here.
6748 This is rare, so we don't bother to do so. */
6749 if (v->auto_inc_opt
6750 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
6751 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
6752 || (INSN_LUID (v->insn) > INSN_LUID (insn)
6753 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
6754 continue;
6755
6756 if (! eliminate_p)
6757 return 1;
6758
6759 /* Replace biv with its giv's reduced reg. */
6760 XEXP (x, 1-arg_operand) = v->new_reg;
6761 /* Replace other operand with the other giv's
6762 reduced reg. */
6763 XEXP (x, arg_operand) = tv->new_reg;
6764 return 1;
6765 }
6766 }
6767 #endif
6768 }
6769
6770 /* If we get here, the biv can't be eliminated. */
6771 return 0;
6772
6773 case MEM:
6774 /* If this address is a DEST_ADDR giv, it doesn't matter if the
6775 biv is used in it, since it will be replaced. */
6776 for (v = bl->giv; v; v = v->next_iv)
6777 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
6778 return 1;
6779 break;
6780
6781 default:
6782 break;
6783 }
6784
6785 /* See if any subexpression fails elimination. */
6786 fmt = GET_RTX_FORMAT (code);
6787 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6788 {
6789 switch (fmt[i])
6790 {
6791 case 'e':
6792 if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
6793 eliminate_p, where))
6794 return 0;
6795 break;
6796
6797 case 'E':
6798 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6799 if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
6800 eliminate_p, where))
6801 return 0;
6802 break;
6803 }
6804 }
6805
6806 return 1;
6807 }
6808 \f
6809 /* Return nonzero if the last use of REG
6810 is in an insn following INSN in the same basic block. */
6811
6812 static int
6813 last_use_this_basic_block (reg, insn)
6814 rtx reg;
6815 rtx insn;
6816 {
6817 rtx n;
6818 for (n = insn;
6819 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
6820 n = NEXT_INSN (n))
6821 {
6822 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
6823 return 1;
6824 }
6825 return 0;
6826 }
6827 \f
6828 /* Called via `note_stores' to record the initial value of a biv. Here we
6829 just record the location of the set and process it later. */
6830
6831 static void
6832 record_initial (dest, set)
6833 rtx dest;
6834 rtx set;
6835 {
6836 struct iv_class *bl;
6837
6838 if (GET_CODE (dest) != REG
6839 || REGNO (dest) >= max_reg_before_loop
6840 || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
6841 return;
6842
6843 bl = reg_biv_class[REGNO (dest)];
6844
6845 /* If this is the first set found, record it. */
6846 if (bl->init_insn == 0)
6847 {
6848 bl->init_insn = note_insn;
6849 bl->init_set = set;
6850 }
6851 }
6852 \f
6853 /* If any of the registers in X are "old" and currently have a last use earlier
6854 than INSN, update them to have a last use of INSN. Their actual last use
6855 will be the previous insn but it will not have a valid uid_luid so we can't
6856 use it. */
6857
6858 static void
6859 update_reg_last_use (x, insn)
6860 rtx x;
6861 rtx insn;
6862 {
6863 /* Check for the case where INSN does not have a valid luid. In this case,
6864 there is no need to modify the regno_last_uid, as this can only happen
6865 when code is inserted after the loop_end to set a pseudo's final value,
6866 and hence this insn will never be the last use of x. */
6867 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
6868 && INSN_UID (insn) < max_uid_for_loop
6869 && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
6870 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
6871 else
6872 {
6873 register int i, j;
6874 register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
6875 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6876 {
6877 if (fmt[i] == 'e')
6878 update_reg_last_use (XEXP (x, i), insn);
6879 else if (fmt[i] == 'E')
6880 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6881 update_reg_last_use (XVECEXP (x, i, j), insn);
6882 }
6883 }
6884 }
6885 \f
6886 /* Given a jump insn JUMP, return the condition that will cause it to branch
6887 to its JUMP_LABEL. If the condition cannot be understood, or is an
6888 inequality floating-point comparison which needs to be reversed, 0 will
6889 be returned.
6890
6891 If EARLIEST is non-zero, it is a pointer to a place where the earliest
6892 insn used in locating the condition was found. If a replacement test
6893 of the condition is desired, it should be placed in front of that
6894 insn and we will be sure that the inputs are still valid.
6895
6896 The condition will be returned in a canonical form to simplify testing by
6897 callers. Specifically:
6898
6899 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
6900 (2) Both operands will be machine operands; (cc0) will have been replaced.
6901 (3) If an operand is a constant, it will be the second operand.
6902 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
6903 for GE, GEU, and LEU. */
6904
6905 rtx
6906 get_condition (jump, earliest)
6907 rtx jump;
6908 rtx *earliest;
6909 {
6910 enum rtx_code code;
6911 rtx prev = jump;
6912 rtx set;
6913 rtx tem;
6914 rtx op0, op1;
6915 int reverse_code = 0;
6916 int did_reverse_condition = 0;
6917
6918 /* If this is not a standard conditional jump, we can't parse it. */
6919 if (GET_CODE (jump) != JUMP_INSN
6920 || ! condjump_p (jump) || simplejump_p (jump))
6921 return 0;
6922
6923 code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
6924 op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
6925 op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
6926
6927 if (earliest)
6928 *earliest = jump;
6929
6930 /* If this branches to JUMP_LABEL when the condition is false, reverse
6931 the condition. */
6932 if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
6933 && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
6934 code = reverse_condition (code), did_reverse_condition ^= 1;
6935
6936 /* If we are comparing a register with zero, see if the register is set
6937 in the previous insn to a COMPARE or a comparison operation. Perform
6938 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
6939 in cse.c */
6940
6941 while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
6942 {
6943 /* Set non-zero when we find something of interest. */
6944 rtx x = 0;
6945
6946 #ifdef HAVE_cc0
6947 /* If comparison with cc0, import actual comparison from compare
6948 insn. */
6949 if (op0 == cc0_rtx)
6950 {
6951 if ((prev = prev_nonnote_insn (prev)) == 0
6952 || GET_CODE (prev) != INSN
6953 || (set = single_set (prev)) == 0
6954 || SET_DEST (set) != cc0_rtx)
6955 return 0;
6956
6957 op0 = SET_SRC (set);
6958 op1 = CONST0_RTX (GET_MODE (op0));
6959 if (earliest)
6960 *earliest = prev;
6961 }
6962 #endif
6963
6964 /* If this is a COMPARE, pick up the two things being compared. */
6965 if (GET_CODE (op0) == COMPARE)
6966 {
6967 op1 = XEXP (op0, 1);
6968 op0 = XEXP (op0, 0);
6969 continue;
6970 }
6971 else if (GET_CODE (op0) != REG)
6972 break;
6973
6974 /* Go back to the previous insn. Stop if it is not an INSN. We also
6975 stop if it isn't a single set or if it has a REG_INC note because
6976 we don't want to bother dealing with it. */
6977
6978 if ((prev = prev_nonnote_insn (prev)) == 0
6979 || GET_CODE (prev) != INSN
6980 || FIND_REG_INC_NOTE (prev, 0)
6981 || (set = single_set (prev)) == 0)
6982 break;
6983
6984 /* If this is setting OP0, get what it sets it to if it looks
6985 relevant. */
6986 if (rtx_equal_p (SET_DEST (set), op0))
6987 {
6988 enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
6989
6990 if ((GET_CODE (SET_SRC (set)) == COMPARE
6991 || (((code == NE
6992 || (code == LT
6993 && GET_MODE_CLASS (inner_mode) == MODE_INT
6994 && (GET_MODE_BITSIZE (inner_mode)
6995 <= HOST_BITS_PER_WIDE_INT)
6996 && (STORE_FLAG_VALUE
6997 & ((HOST_WIDE_INT) 1
6998 << (GET_MODE_BITSIZE (inner_mode) - 1))))
6999 #ifdef FLOAT_STORE_FLAG_VALUE
7000 || (code == LT
7001 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7002 && FLOAT_STORE_FLAG_VALUE < 0)
7003 #endif
7004 ))
7005 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')))
7006 x = SET_SRC (set);
7007 else if (((code == EQ
7008 || (code == GE
7009 && (GET_MODE_BITSIZE (inner_mode)
7010 <= HOST_BITS_PER_WIDE_INT)
7011 && GET_MODE_CLASS (inner_mode) == MODE_INT
7012 && (STORE_FLAG_VALUE
7013 & ((HOST_WIDE_INT) 1
7014 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7015 #ifdef FLOAT_STORE_FLAG_VALUE
7016 || (code == GE
7017 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7018 && FLOAT_STORE_FLAG_VALUE < 0)
7019 #endif
7020 ))
7021 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')
7022 {
7023 /* We might have reversed a LT to get a GE here. But this wasn't
7024 actually the comparison of data, so we don't flag that we
7025 have had to reverse the condition. */
7026 did_reverse_condition ^= 1;
7027 reverse_code = 1;
7028 x = SET_SRC (set);
7029 }
7030 else
7031 break;
7032 }
7033
7034 else if (reg_set_p (op0, prev))
7035 /* If this sets OP0, but not directly, we have to give up. */
7036 break;
7037
7038 if (x)
7039 {
7040 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
7041 code = GET_CODE (x);
7042 if (reverse_code)
7043 {
7044 code = reverse_condition (code);
7045 did_reverse_condition ^= 1;
7046 reverse_code = 0;
7047 }
7048
7049 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
7050 if (earliest)
7051 *earliest = prev;
7052 }
7053 }
7054
7055 /* If constant is first, put it last. */
7056 if (CONSTANT_P (op0))
7057 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
7058
7059 /* If OP0 is the result of a comparison, we weren't able to find what
7060 was really being compared, so fail. */
7061 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
7062 return 0;
7063
7064 /* Canonicalize any ordered comparison with integers involving equality
7065 if we can do computations in the relevant mode and we do not
7066 overflow. */
7067
7068 if (GET_CODE (op1) == CONST_INT
7069 && GET_MODE (op0) != VOIDmode
7070 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
7071 {
7072 HOST_WIDE_INT const_val = INTVAL (op1);
7073 unsigned HOST_WIDE_INT uconst_val = const_val;
7074 unsigned HOST_WIDE_INT max_val
7075 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
7076
7077 switch (code)
7078 {
7079 case LE:
7080 if (const_val != max_val >> 1)
7081 code = LT, op1 = GEN_INT (const_val + 1);
7082 break;
7083
7084 /* When cross-compiling, const_val might be sign-extended from
7085 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
7086 case GE:
7087 if ((const_val & max_val)
7088 != (((HOST_WIDE_INT) 1
7089 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
7090 code = GT, op1 = GEN_INT (const_val - 1);
7091 break;
7092
7093 case LEU:
7094 if (uconst_val < max_val)
7095 code = LTU, op1 = GEN_INT (uconst_val + 1);
7096 break;
7097
7098 case GEU:
7099 if (uconst_val != 0)
7100 code = GTU, op1 = GEN_INT (uconst_val - 1);
7101 break;
7102
7103 default:
7104 break;
7105 }
7106 }
7107
7108 /* If this was floating-point and we reversed anything other than an
7109 EQ or NE, return zero. */
7110 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
7111 && did_reverse_condition && code != NE && code != EQ
7112 && ! flag_fast_math
7113 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
7114 return 0;
7115
7116 #ifdef HAVE_cc0
7117 /* Never return CC0; return zero instead. */
7118 if (op0 == cc0_rtx)
7119 return 0;
7120 #endif
7121
7122 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
7123 }
7124
7125 /* Similar to above routine, except that we also put an invariant last
7126 unless both operands are invariants. */
7127
7128 rtx
7129 get_condition_for_loop (x)
7130 rtx x;
7131 {
7132 rtx comparison = get_condition (x, NULL_PTR);
7133
7134 if (comparison == 0
7135 || ! invariant_p (XEXP (comparison, 0))
7136 || invariant_p (XEXP (comparison, 1)))
7137 return comparison;
7138
7139 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
7140 XEXP (comparison, 1), XEXP (comparison, 0));
7141 }
7142
7143 #ifdef HAIFA
7144 /* Analyze a loop in order to instrument it with the use of count register.
7145 loop_start and loop_end are the first and last insns of the loop.
7146 This function works in cooperation with insert_bct ().
7147 loop_can_insert_bct[loop_num] is set according to whether the optimization
7148 is applicable to the loop. When it is applicable, the following variables
7149 are also set:
7150 loop_start_value[loop_num]
7151 loop_comparison_value[loop_num]
7152 loop_increment[loop_num]
7153 loop_comparison_code[loop_num] */
7154
7155 static
7156 void analyze_loop_iterations (loop_start, loop_end)
7157 rtx loop_start, loop_end;
7158 {
7159 rtx comparison, comparison_value;
7160 rtx iteration_var, initial_value, increment;
7161 enum rtx_code comparison_code;
7162
7163 rtx last_loop_insn;
7164 rtx insn;
7165 int i;
7166
7167 /* loop_variable mode */
7168 enum machine_mode original_mode;
7169
7170 /* find the number of the loop */
7171 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7172
7173 /* we change our mind only when we are sure that loop will be instrumented */
7174 loop_can_insert_bct[loop_num] = 0;
7175
7176 /* is the optimization suppressed. */
7177 if ( !flag_branch_on_count_reg )
7178 return;
7179
7180 /* make sure that count-reg is not in use */
7181 if (loop_used_count_register[loop_num]){
7182 if (loop_dump_stream)
7183 fprintf (loop_dump_stream,
7184 "analyze_loop_iterations %d: BCT instrumentation failed: count register already in use\n",
7185 loop_num);
7186 return;
7187 }
7188
7189 /* make sure that the function has no indirect jumps. */
7190 if (indirect_jump_in_function){
7191 if (loop_dump_stream)
7192 fprintf (loop_dump_stream,
7193 "analyze_loop_iterations %d: BCT instrumentation failed: indirect jump in function\n",
7194 loop_num);
7195 return;
7196 }
7197
7198 /* make sure that the last loop insn is a conditional jump */
7199 last_loop_insn = PREV_INSN (loop_end);
7200 if (GET_CODE (last_loop_insn) != JUMP_INSN || !condjump_p (last_loop_insn)) {
7201 if (loop_dump_stream)
7202 fprintf (loop_dump_stream,
7203 "analyze_loop_iterations %d: BCT instrumentation failed: invalid jump at loop end\n",
7204 loop_num);
7205 return;
7206 }
7207
7208 /* First find the iteration variable. If the last insn is a conditional
7209 branch, and the insn preceding it tests a register value, make that
7210 register the iteration variable. */
7211
7212 /* We used to use prev_nonnote_insn here, but that fails because it might
7213 accidentally get the branch for a contained loop if the branch for this
7214 loop was deleted. We can only trust branches immediately before the
7215 loop_end. */
7216
7217 comparison = get_condition_for_loop (last_loop_insn);
7218 /* ??? Get_condition may switch position of induction variable and
7219 invariant register when it canonicalizes the comparison. */
7220
7221 if (comparison == 0) {
7222 if (loop_dump_stream)
7223 fprintf (loop_dump_stream,
7224 "analyze_loop_iterations %d: BCT instrumentation failed: comparison not found\n",
7225 loop_num);
7226 return;
7227 }
7228
7229 comparison_code = GET_CODE (comparison);
7230 iteration_var = XEXP (comparison, 0);
7231 comparison_value = XEXP (comparison, 1);
7232
7233 original_mode = GET_MODE (iteration_var);
7234 if (GET_MODE_CLASS (original_mode) != MODE_INT
7235 || GET_MODE_SIZE (original_mode) != UNITS_PER_WORD) {
7236 if (loop_dump_stream)
7237 fprintf (loop_dump_stream,
7238 "analyze_loop_iterations %d: BCT Instrumentation failed: loop variable not integer\n",
7239 loop_num);
7240 return;
7241 }
7242
7243 /* get info about loop bounds and increment */
7244 iteration_info (iteration_var, &initial_value, &increment,
7245 loop_start, loop_end);
7246
7247 /* make sure that all required loop data were found */
7248 if (!(initial_value && increment && comparison_value
7249 && invariant_p (comparison_value) && invariant_p (increment)
7250 && ! indirect_jump_in_function))
7251 {
7252 if (loop_dump_stream) {
7253 fprintf (loop_dump_stream,
7254 "analyze_loop_iterations %d: BCT instrumentation failed because of wrong loop: ", loop_num);
7255 if (!(initial_value && increment && comparison_value)) {
7256 fprintf (loop_dump_stream, "\tbounds not available: ");
7257 if ( ! initial_value )
7258 fprintf (loop_dump_stream, "initial ");
7259 if ( ! increment )
7260 fprintf (loop_dump_stream, "increment ");
7261 if ( ! comparison_value )
7262 fprintf (loop_dump_stream, "comparison ");
7263 fprintf (loop_dump_stream, "\n");
7264 }
7265 if (!invariant_p (comparison_value) || !invariant_p (increment))
7266 fprintf (loop_dump_stream, "\tloop bounds not invariant\n");
7267 }
7268 return;
7269 }
7270
7271 /* make sure that the increment is constant */
7272 if (GET_CODE (increment) != CONST_INT) {
7273 if (loop_dump_stream)
7274 fprintf (loop_dump_stream,
7275 "analyze_loop_iterations %d: instrumentation failed: not arithmetic loop\n",
7276 loop_num);
7277 return;
7278 }
7279
7280 /* make sure that the loop contains neither function call, nor jump on table.
7281 (the count register might be altered by the called function, and might
7282 be used for a branch on table). */
7283 for (insn = loop_start; insn && insn != loop_end; insn = NEXT_INSN (insn)) {
7284 if (GET_CODE (insn) == CALL_INSN){
7285 if (loop_dump_stream)
7286 fprintf (loop_dump_stream,
7287 "analyze_loop_iterations %d: BCT instrumentation failed: function call in the loop\n",
7288 loop_num);
7289 return;
7290 }
7291
7292 if (GET_CODE (insn) == JUMP_INSN
7293 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
7294 || GET_CODE (PATTERN (insn)) == ADDR_VEC)){
7295 if (loop_dump_stream)
7296 fprintf (loop_dump_stream,
7297 "analyze_loop_iterations %d: BCT instrumentation failed: computed branch in the loop\n",
7298 loop_num);
7299 return;
7300 }
7301 }
7302
7303 /* At this point, we are sure that the loop can be instrumented with BCT.
7304 Some of the loops, however, will not be instrumented - the final decision
7305 is taken by insert_bct () */
7306 if (loop_dump_stream)
7307 fprintf (loop_dump_stream,
7308 "analyze_loop_iterations: loop (luid =%d) can be BCT instrumented.\n",
7309 loop_num);
7310
7311 /* mark all enclosing loops that they cannot use count register */
7312 /* ???: In fact, since insert_bct may decide not to instrument this loop,
7313 marking here may prevent instrumenting an enclosing loop that could
7314 actually be instrumented. But since this is rare, it is safer to mark
7315 here in case the order of calling (analyze/insert)_bct would be changed. */
7316 for (i=loop_num; i != -1; i = loop_outer_loop[i])
7317 loop_used_count_register[i] = 1;
7318
7319 /* Set data structures which will be used by the instrumentation phase */
7320 loop_start_value[loop_num] = initial_value;
7321 loop_comparison_value[loop_num] = comparison_value;
7322 loop_increment[loop_num] = increment;
7323 loop_comparison_code[loop_num] = comparison_code;
7324 loop_can_insert_bct[loop_num] = 1;
7325 }
7326
7327
7328 /* instrument loop for insertion of bct instruction. We distinguish between
7329 loops with compile-time bounds, to those with run-time bounds. The loop
7330 behaviour is analized according to the following characteristics/variables:
7331 ; Input variables:
7332 ; comparison-value: the value to which the iteration counter is compared.
7333 ; initial-value: iteration-counter initial value.
7334 ; increment: iteration-counter increment.
7335 ; Computed variables:
7336 ; increment-direction: the sign of the increment.
7337 ; compare-direction: '1' for GT, GTE, '-1' for LT, LTE, '0' for NE.
7338 ; range-direction: sign (comparison-value - initial-value)
7339 We give up on the following cases:
7340 ; loop variable overflow.
7341 ; run-time loop bounds with comparison code NE.
7342 */
7343
7344 static void
7345 insert_bct (loop_start, loop_end)
7346 rtx loop_start, loop_end;
7347 {
7348 rtx initial_value, comparison_value, increment;
7349 enum rtx_code comparison_code;
7350
7351 int increment_direction, compare_direction;
7352 int unsigned_p = 0;
7353
7354 /* if the loop condition is <= or >=, the number of iteration
7355 is 1 more than the range of the bounds of the loop */
7356 int add_iteration = 0;
7357
7358 /* the only machine mode we work with - is the integer of the size that the
7359 machine has */
7360 enum machine_mode loop_var_mode = SImode;
7361
7362 int loop_num = uid_loop_num [INSN_UID (loop_start)];
7363
7364 /* get loop-variables. No need to check that these are valid - already
7365 checked in analyze_loop_iterations (). */
7366 comparison_code = loop_comparison_code[loop_num];
7367 initial_value = loop_start_value[loop_num];
7368 comparison_value = loop_comparison_value[loop_num];
7369 increment = loop_increment[loop_num];
7370
7371 /* check analyze_loop_iterations decision for this loop. */
7372 if (! loop_can_insert_bct[loop_num]){
7373 if (loop_dump_stream)
7374 fprintf (loop_dump_stream,
7375 "insert_bct: [%d] - was decided not to instrument by analyze_loop_iterations ()\n",
7376 loop_num);
7377 return;
7378 }
7379
7380 /* It's impossible to instrument a competely unrolled loop. */
7381 if (loop_unroll_factor [loop_num] == -1)
7382 return;
7383
7384 /* make sure that the last loop insn is a conditional jump .
7385 This check is repeated from analyze_loop_iterations (),
7386 because unrolling might have changed that. */
7387 if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
7388 || !condjump_p (PREV_INSN (loop_end))) {
7389 if (loop_dump_stream)
7390 fprintf (loop_dump_stream,
7391 "insert_bct: not instrumenting BCT because of invalid branch\n");
7392 return;
7393 }
7394
7395 /* fix increment in case loop was unrolled. */
7396 if (loop_unroll_factor [loop_num] > 1)
7397 increment = GEN_INT ( INTVAL (increment) * loop_unroll_factor [loop_num] );
7398
7399 /* determine properties and directions of the loop */
7400 increment_direction = (INTVAL (increment) > 0) ? 1:-1;
7401 switch ( comparison_code ) {
7402 case LEU:
7403 unsigned_p = 1;
7404 /* fallthrough */
7405 case LE:
7406 compare_direction = 1;
7407 add_iteration = 1;
7408 break;
7409 case GEU:
7410 unsigned_p = 1;
7411 /* fallthrough */
7412 case GE:
7413 compare_direction = -1;
7414 add_iteration = 1;
7415 break;
7416 case EQ:
7417 /* in this case we cannot know the number of iterations */
7418 if (loop_dump_stream)
7419 fprintf (loop_dump_stream,
7420 "insert_bct: %d: loop cannot be instrumented: == in condition\n",
7421 loop_num);
7422 return;
7423 case LTU:
7424 unsigned_p = 1;
7425 /* fallthrough */
7426 case LT:
7427 compare_direction = 1;
7428 break;
7429 case GTU:
7430 unsigned_p = 1;
7431 /* fallthrough */
7432 case GT:
7433 compare_direction = -1;
7434 break;
7435 case NE:
7436 compare_direction = 0;
7437 break;
7438 default:
7439 abort ();
7440 }
7441
7442
7443 /* make sure that the loop does not end by an overflow */
7444 if (compare_direction != increment_direction) {
7445 if (loop_dump_stream)
7446 fprintf (loop_dump_stream,
7447 "insert_bct: %d: loop cannot be instrumented: terminated by overflow\n",
7448 loop_num);
7449 return;
7450 }
7451
7452 /* try to instrument the loop. */
7453
7454 /* Handle the simpler case, where the bounds are known at compile time. */
7455 if (GET_CODE (initial_value) == CONST_INT && GET_CODE (comparison_value) == CONST_INT)
7456 {
7457 int n_iterations;
7458 int increment_value_abs = INTVAL (increment) * increment_direction;
7459
7460 /* check the relation between compare-val and initial-val */
7461 int difference = INTVAL (comparison_value) - INTVAL (initial_value);
7462 int range_direction = (difference > 0) ? 1 : -1;
7463
7464 /* make sure the loop executes enough iterations to gain from BCT */
7465 if (difference > -3 && difference < 3) {
7466 if (loop_dump_stream)
7467 fprintf (loop_dump_stream,
7468 "insert_bct: loop %d not BCT instrumented: too small iteration count.\n",
7469 loop_num);
7470 return;
7471 }
7472
7473 /* make sure that the loop executes at least once */
7474 if ((range_direction == 1 && compare_direction == -1)
7475 || (range_direction == -1 && compare_direction == 1))
7476 {
7477 if (loop_dump_stream)
7478 fprintf (loop_dump_stream,
7479 "insert_bct: loop %d: does not iterate even once. Not instrumenting.\n",
7480 loop_num);
7481 return;
7482 }
7483
7484 /* make sure that the loop does not end by an overflow (in compile time
7485 bounds we must have an additional check for overflow, because here
7486 we also support the compare code of 'NE'. */
7487 if (comparison_code == NE
7488 && increment_direction != range_direction) {
7489 if (loop_dump_stream)
7490 fprintf (loop_dump_stream,
7491 "insert_bct (compile time bounds): %d: loop not instrumented: terminated by overflow\n",
7492 loop_num);
7493 return;
7494 }
7495
7496 /* Determine the number of iterations by:
7497 ;
7498 ; compare-val - initial-val + (increment -1) + additional-iteration
7499 ; num_iterations = -----------------------------------------------------------------
7500 ; increment
7501 */
7502 difference = (range_direction > 0) ? difference : -difference;
7503 #if 0
7504 fprintf (stderr, "difference is: %d\n", difference); /* @*/
7505 fprintf (stderr, "increment_value_abs is: %d\n", increment_value_abs); /* @*/
7506 fprintf (stderr, "add_iteration is: %d\n", add_iteration); /* @*/
7507 fprintf (stderr, "INTVAL (comparison_value) is: %d\n", INTVAL (comparison_value)); /* @*/
7508 fprintf (stderr, "INTVAL (initial_value) is: %d\n", INTVAL (initial_value)); /* @*/
7509 #endif
7510
7511 if (increment_value_abs == 0) {
7512 fprintf (stderr, "insert_bct: error: increment == 0 !!!\n");
7513 abort ();
7514 }
7515 n_iterations = (difference + increment_value_abs - 1 + add_iteration)
7516 / increment_value_abs;
7517
7518 #if 0
7519 fprintf (stderr, "number of iterations is: %d\n", n_iterations); /* @*/
7520 #endif
7521 instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));
7522
7523 /* Done with this loop. */
7524 return;
7525 }
7526
7527 /* Handle the more complex case, that the bounds are NOT known at compile time. */
7528 /* In this case we generate run_time calculation of the number of iterations */
7529
7530 /* With runtime bounds, if the compare is of the form '!=' we give up */
7531 if (comparison_code == NE) {
7532 if (loop_dump_stream)
7533 fprintf (loop_dump_stream,
7534 "insert_bct: fail for loop %d: runtime bounds with != comparison\n",
7535 loop_num);
7536 return;
7537 }
7538
7539 else {
7540 /* We rely on the existence of run-time guard to ensure that the
7541 loop executes at least once. */
7542 rtx sequence;
7543 rtx iterations_num_reg;
7544
7545 int increment_value_abs = INTVAL (increment) * increment_direction;
7546
7547 /* make sure that the increment is a power of two, otherwise (an
7548 expensive) divide is needed. */
7549 if (exact_log2 (increment_value_abs) == -1)
7550 {
7551 if (loop_dump_stream)
7552 fprintf (loop_dump_stream,
7553 "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
7554 return;
7555 }
7556
7557 /* compute the number of iterations */
7558 start_sequence ();
7559 {
7560 /* CYGNUS LOCAL: HAIFA bug fix */
7561 rtx temp_reg;
7562
7563 /* Again, the number of iterations is calculated by:
7564 ;
7565 ; compare-val - initial-val + (increment -1) + additional-iteration
7566 ; num_iterations = -----------------------------------------------------------------
7567 ; increment
7568 */
7569 /* ??? Do we have to call copy_rtx here before passing rtx to
7570 expand_binop? */
7571 if (compare_direction > 0) {
7572 /* <, <= :the loop variable is increasing */
7573 temp_reg = expand_binop (loop_var_mode, sub_optab, comparison_value,
7574 initial_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
7575 }
7576 else {
7577 temp_reg = expand_binop (loop_var_mode, sub_optab, initial_value,
7578 comparison_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
7579 }
7580
7581 if (increment_value_abs - 1 + add_iteration != 0)
7582 temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
7583 GEN_INT (increment_value_abs - 1 + add_iteration),
7584 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7585
7586 if (increment_value_abs != 1)
7587 {
7588 /* ??? This will generate an expensive divide instruction for
7589 most targets. The original authors apparently expected this
7590 to be a shift, since they test for power-of-2 divisors above,
7591 but just naively generating a divide instruction will not give
7592 a shift. It happens to work for the PowerPC target because
7593 the rs6000.md file has a divide pattern that emits shifts.
7594 It will probably not work for any other target. */
7595 iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
7596 temp_reg,
7597 GEN_INT (increment_value_abs),
7598 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7599 }
7600 else
7601 iterations_num_reg = temp_reg;
7602 /* END CYGNUS LOCAL: HAIFA bug fix */
7603 }
7604 sequence = gen_sequence ();
7605 end_sequence ();
7606 emit_insn_before (sequence, loop_start);
7607 instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
7608 }
7609 }
7610
7611 /* instrument loop by inserting a bct in it. This is done in the following way:
7612 1. A new register is created and assigned the hard register number of the count
7613 register.
7614 2. In the head of the loop the new variable is initialized by the value passed in the
7615 loop_num_iterations parameter.
7616 3. At the end of the loop, comparison of the register with 0 is generated.
7617 The created comparison follows the pattern defined for the
7618 decrement_and_branch_on_count insn, so this insn will be generated in assembly
7619 generation phase.
7620 4. The compare&branch on the old variable is deleted. So, if the loop-variable was
7621 not used elsewhere, it will be eliminated by data-flow analisys. */
7622
7623 static void
7624 instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
7625 rtx loop_start, loop_end;
7626 rtx loop_num_iterations;
7627 {
7628 rtx temp_reg1, temp_reg2;
7629 rtx start_label;
7630
7631 rtx sequence;
7632 enum machine_mode loop_var_mode = SImode;
7633
7634 #ifdef HAVE_decrement_and_branch_on_count
7635 if (HAVE_decrement_and_branch_on_count)
7636 {
7637 if (loop_dump_stream)
7638 fprintf (loop_dump_stream, "Loop: Inserting BCT\n");
7639
7640 /* eliminate the check on the old variable */
7641 delete_insn (PREV_INSN (loop_end));
7642 delete_insn (PREV_INSN (loop_end));
7643
7644 /* insert the label which will delimit the start of the loop */
7645 start_label = gen_label_rtx ();
7646 emit_label_after (start_label, loop_start);
7647
7648 /* insert initialization of the count register into the loop header */
7649 start_sequence ();
7650 temp_reg1 = gen_reg_rtx (loop_var_mode);
7651 emit_insn (gen_move_insn (temp_reg1, loop_num_iterations));
7652
7653 /* this will be count register */
7654 temp_reg2 = gen_rtx_REG (loop_var_mode, COUNT_REGISTER_REGNUM);
7655 /* we have to move the value to the count register from an GPR
7656 because rtx pointed to by loop_num_iterations could contain
7657 expression which cannot be moved into count register */
7658 emit_insn (gen_move_insn (temp_reg2, temp_reg1));
7659
7660 sequence = gen_sequence ();
7661 end_sequence ();
7662 emit_insn_after (sequence, loop_start);
7663
7664 /* insert new comparison on the count register instead of the
7665 old one, generating the needed BCT pattern (that will be
7666 later recognized by assembly generation phase). */
7667 emit_jump_insn_before (gen_decrement_and_branch_on_count (temp_reg2, start_label),
7668 loop_end);
7669 LABEL_NUSES (start_label)++;
7670 }
7671
7672 #endif /* HAVE_decrement_and_branch_on_count */
7673 }
7674 #endif /* HAIFA */
7675
7676 /* Scan the function and determine whether it has indirect (computed) jumps.
7677
7678 This is taken mostly from flow.c; similar code exists elsewhere
7679 in the compiler. It may be useful to put this into rtlanal.c. */
7680 static int
7681 indirect_jump_in_function_p (start)
7682 rtx start;
7683 {
7684 rtx insn;
7685 int is_indirect_jump = 0;
7686
7687 for (insn = start; insn; insn = NEXT_INSN (insn))
7688 if (computed_jump_p (insn))
7689 return 1;
7690
7691 return 0;
7692 }